From 117123b705f38808266bcbd6ed43e011317bb41c Mon Sep 17 00:00:00 2001
From: Pradeep Chandrasekaran <160489301+chandrasekaranpradeep@users.noreply.github.com>
Date: Mon, 2 Sep 2024 15:38:12 +0530
Subject: [PATCH] Rename from pybuda to forge (#208)

---
 .gitignore | 4 +- .gitlab-ci.perf.yml | 14 +- CMakeLists.txt | 4 +- compile_flags.txt | 2 +- forge/CMakeLists.txt | 2 + {pybuda => forge}/csrc/CMakeLists.txt | 12 +- .../csrc/autograd/CMakeLists.txt | 0 {pybuda => forge}/csrc/autograd/autograd.cpp | 2 +- {pybuda => forge}/csrc/autograd/autograd.hpp | 0 {pybuda => forge}/csrc/autograd/binding.cpp | 14 +- {pybuda => forge}/csrc/autograd/binding.hpp | 0 .../csrc/autograd/python_bindings.cpp | 2 +- .../csrc/autograd/python_bindings.hpp | 0 .../csrc/backend_api/CMakeLists.txt | 0 .../csrc/backend_api/arch_type.cpp | 0 .../csrc/backend_api/arch_type.hpp | 0 .../csrc/backend_api/backend_api.cpp | 0 .../csrc/backend_api/device_config.hpp | 0 {pybuda => forge}/csrc/buda_passes.cpp | 16 +- {pybuda => forge}/csrc/buda_passes.hpp | 0 .../csrc/forge_bindings.cpp | 8 +- .../csrc/graph_lib/CMakeLists.txt | 0 {pybuda => forge}/csrc/graph_lib/defines.cpp | 0 {pybuda => forge}/csrc/graph_lib/defines.hpp | 0 {pybuda => forge}/csrc/graph_lib/edge.cpp | 0 {pybuda => forge}/csrc/graph_lib/edge.hpp | 0 {pybuda => forge}/csrc/graph_lib/graph.cpp | 0 {pybuda => forge}/csrc/graph_lib/graph.hpp | 0 {pybuda => forge}/csrc/graph_lib/node.cpp | 2 +- {pybuda => forge}/csrc/graph_lib/node.hpp | 0 .../csrc/graph_lib/node_types.cpp | 6 +- .../csrc/graph_lib/node_types.hpp | 0 .../csrc/graph_lib/python_bindings.cpp | 36 +- .../csrc/graph_lib/python_bindings.hpp | 0 {pybuda => forge}/csrc/graph_lib/query.hpp | 0 {pybuda => forge}/csrc/graph_lib/shape.cpp | 0 {pybuda => forge}/csrc/graph_lib/shape.hpp | 0 .../csrc/graph_lib/tests/test_graphlib.cpp | 0 .../graph_lib/tests/test_graphlib_utils.cpp | 0 {pybuda => forge}/csrc/graph_lib/utils.cpp | 8 +- {pybuda => forge}/csrc/graph_lib/utils.hpp | 0 .../csrc/lower_to_buda/common.cpp | 0 .../csrc/lower_to_buda/common.hpp | 0 {pybuda => forge}/csrc/passes/amp.cpp | 4 +- {pybuda => forge}/csrc/passes/amp.hpp | 0 .../csrc/passes/bind_reshape_to_io.cpp | 0 .../csrc/passes/bind_reshape_to_io.hpp | 0 .../csrc/passes/commutable_pattern.cpp | 0 .../csrc/passes/commute_utils.cpp | 4 +- .../csrc/passes/commute_utils.hpp | 0 .../csrc/passes/constant_folding.cpp | 0 .../csrc/passes/constant_folding.hpp | 0 {pybuda => forge}/csrc/passes/consteval.cpp | 0 {pybuda => forge}/csrc/passes/consteval.hpp | 0 {pybuda => forge}/csrc/passes/dataformat.cpp | 4 +- {pybuda => forge}/csrc/passes/dataformat.hpp | 0 .../csrc/passes/decomposing_context.cpp | 12 +- .../csrc/passes/decomposing_context.hpp | 0 .../csrc/passes/erase_consecutive_reshape.cpp | 0 .../csrc/passes/erase_consecutive_reshape.hpp | 0 .../csrc/passes/erase_inverse_ops.cpp | 0 .../csrc/passes/erase_inverse_ops.hpp | 0 .../erase_unnecessary_4d_tm_sequence.cpp | 0 .../erase_unnecessary_4d_tm_sequence.hpp | 0 .../csrc/passes/explicate_unsqueeze.cpp | 0 .../csrc/passes/explicate_unsqueeze.hpp | 0 {pybuda => forge}/csrc/passes/fracture.cpp | 0 {pybuda => forge}/csrc/passes/fracture.hpp | 0 .../csrc/passes/fuse_conv2d_bias.cpp | 0 .../csrc/passes/fuse_conv2d_bias.hpp | 0 .../csrc/passes/fuse_pad_conv2d.cpp | 0 .../csrc/passes/fuse_pad_conv2d.hpp | 0 .../csrc/passes/fuse_per_channel_ops.cpp | 4 +- .../csrc/passes/fuse_per_channel_ops.hpp | 0 .../passes/fuse_redundant_tm_sequence.cpp | 10 +- .../passes/fuse_redundant_tm_sequence.hpp | 0 
.../fuse_reshape_transpose_into_slice.cpp | 10 +- .../fuse_reshape_transpose_into_slice.hpp | 0 .../generate_initial_flops_estimate.cpp | 10 +- .../generate_initial_flops_estimate.hpp | 0 .../passes/hoist_transforms_to_inputs.cpp | 0 .../passes/hoist_transforms_to_inputs.hpp | 0 .../csrc/passes/insert_inverse_on_io.cpp | 0 .../csrc/passes/insert_inverse_on_io.hpp | 0 .../csrc/passes/limit_to_4d_reshape.cpp | 0 .../csrc/passes/limit_to_4d_reshape.hpp | 0 .../csrc/passes/link_past_cache_ios.cpp | 2 +- .../csrc/passes/link_past_cache_ios.hpp | 0 .../lower_concat_to_runtime_transform.cpp | 2 +- .../lower_concat_to_runtime_transform.hpp | 0 .../csrc/passes/lower_reinterpret_shape.cpp | 0 .../csrc/passes/lower_reinterpret_shape.hpp | 0 .../csrc/passes/lower_to_mlir.cpp | 0 .../csrc/passes/lower_to_mlir.hpp | 0 .../csrc/passes/lowering_context.cpp | 12 +- .../csrc/passes/lowering_context.hpp | 2 +- .../csrc/passes/mlir_compiler.cpp | 4 +- .../csrc/passes/mlir_compiler.hpp | 0 {pybuda => forge}/csrc/passes/mlir_passes.cpp | 0 {pybuda => forge}/csrc/passes/mlir_passes.hpp | 0 .../csrc/passes/move_index_to_mm_weights.cpp | 0 .../csrc/passes/move_index_to_mm_weights.hpp | 0 .../csrc/passes/move_requantize.cpp | 0 .../csrc/passes/move_requantize.hpp | 0 .../move_select_after_matmul_optional.cpp | 4 +- .../move_select_after_matmul_optional.hpp | 0 {pybuda => forge}/csrc/passes/nd_slice.hpp | 0 .../csrc/passes/pad_output_buffer.cpp | 4 +- .../csrc/passes/pad_output_buffer.hpp | 0 .../csrc/passes/passes_utils.cpp | 2 +- .../csrc/passes/passes_utils.hpp | 0 .../passes/post_autograd_graph_passes.cpp | 0 .../passes/post_autograd_graph_passes.hpp | 0 .../csrc/passes/pre_lowering_passes.cpp | 6 +- .../csrc/passes/pre_lowering_passes.hpp | 0 .../csrc/passes/pre_placer_buda_passes.cpp | 12 +- .../csrc/passes/pre_placer_buda_passes.hpp | 0 {pybuda => forge}/csrc/passes/print_graph.cpp | 12 +- {pybuda => forge}/csrc/passes/print_graph.hpp | 0 .../csrc/passes/python_bindings.cpp | 4 +- .../csrc/passes/python_bindings.hpp | 0 .../passes/replace_incommutable_patterns.cpp | 0 .../passes/replace_incommutable_patterns.hpp | 0 .../csrc/passes/set_tile_dim.cpp | 0 .../csrc/passes/set_tile_dim.hpp | 0 .../csrc/passes/squeeze_to_reshape.cpp | 0 .../csrc/passes/squeeze_to_reshape.hpp | 0 .../csrc/passes/tests/gtest_main.cpp | 0 .../passes/tests/test_constant_folding.cpp | 6 +- .../csrc/passes/tests/test_data_formats.cpp | 0 .../passes/tests/test_erase_inverse_ops.cpp | 0 .../test_erase_unnecessary_4d_tm_sequence.cpp | 0 .../csrc/passes/tests/test_fracturing.cpp | 10 +- .../passes/tests/test_fuse_pad_conv2d.cpp | 0 ...transpose_pairs_into_slice_or_stack_tm.cpp | 0 .../passes/tests/test_link_past_cache_ios.cpp | 4 +- .../csrc/passes/tests/test_mm_fuse_bias.cpp | 2 +- .../tests/test_move_index_to_mm_weights.cpp | 0 ...test_move_select_after_matmul_optional.cpp | 12 +- .../tests/test_past_cache_ublock_order.cpp | 0 .../passes/tests/test_split_unsupp_ops.cpp | 0 .../csrc/passes/tests/test_tilize.cpp | 0 .../csrc/passes/tests/test_transpose_srca.cpp | 0 .../csrc/python_bindings_common.hpp | 0 .../csrc/reportify/CMakeLists.txt | 0 {pybuda => forge}/csrc/reportify/paths.cpp | 2 +- {pybuda => forge}/csrc/reportify/paths.hpp | 0 .../csrc/reportify/reportify.cpp | 8 +- .../csrc/reportify/reportify.hpp | 0 {pybuda => forge}/csrc/reportify/to_json.cpp | 0 {pybuda => forge}/csrc/reportify/to_json.hpp | 0 {pybuda => forge}/csrc/runtime/CMakeLists.txt | 0 .../csrc/runtime/python_bindings.cpp | 0 .../csrc/runtime/python_bindings.hpp | 0 
{pybuda => forge}/csrc/runtime/runtime.cpp | 0 {pybuda => forge}/csrc/runtime/runtime.hpp | 0 {pybuda => forge}/csrc/runtime/tt_device.cpp | 0 {pybuda => forge}/csrc/runtime/tt_device.hpp | 2 +- .../csrc/shared_utils/CMakeLists.txt | 0 .../csrc/shared_utils/json_extension.hpp | 0 .../csrc/shared_utils/placement_printer.cpp | 0 .../csrc/shared_utils/placement_printer.hpp | 0 .../csrc/shared_utils/pretty_table.cpp | 0 .../csrc/shared_utils/pretty_table.hpp | 0 .../csrc/shared_utils/sparse_matmul_utils.cpp | 20 +- .../csrc/shared_utils/sparse_matmul_utils.hpp | 0 .../csrc/shared_utils/string_extension.cpp | 0 .../csrc/shared_utils/string_extension.hpp | 0 {pybuda => forge}/csrc/test/common.hpp | 2 +- {pybuda => forge}/csrc/test/graph_api.hpp | 0 .../csrc/tt_torch_device/CMakeLists.txt | 0 .../csrc/tt_torch_device/python_bindings.cpp | 4 +- .../csrc/tt_torch_device/python_bindings.hpp | 0 .../tt_torch_device/torch_device_impl.cpp | 6 +- .../csrc/tt_torch_device/tt_device.cpp | 8 +- .../csrc/tt_torch_device/tt_device.hpp | 2 +- {pybuda/pybuda => forge/forge}/CMakeLists.txt | 2 +- {pybuda/pybuda => forge/forge}/_C.pyi | 0 .../pybuda => forge/forge}/_C/__init__.pyi | 0 forge/forge/_C/autograd.pyi | 24 + {pybuda/pybuda => forge/forge}/_C/graph.pyi | 22 +- {pybuda/pybuda => forge/forge}/_C/runtime.pyi | 0 .../forge}/_C/torch_device.pyi | 4 +- {pybuda/pybuda => forge/forge}/__init__.py | 22 +- {pybuda/pybuda => forge/forge}/ci.py | 14 +- {pybuda/pybuda => forge/forge}/compile.py | 98 +- .../forge}/compiled_graph_state.py | 12 +- {pybuda/pybuda => forge/forge}/config.py | 100 +- .../forge/forgeglobal.py | 20 +- {pybuda/pybuda => forge/forge}/fx/__init__.py | 0 {pybuda/pybuda => forge/forge}/fx/capture.py | 38 +- .../pybuda => forge/forge}/fx/graph_utils.py | 2 +- .../pybuda => forge/forge}/fx/mixed_graph.py | 16 +- {pybuda/pybuda => forge/forge}/fx/nodes.py | 184 ++-- {pybuda/pybuda => forge/forge}/fx/schedule.py | 2 +- .../forge}/fx/torch_decomp_reconstruct.py | 6 +- {pybuda/pybuda => forge/forge}/fx/trace.py | 0 {pybuda/pybuda => forge/forge}/module.py | 62 +- {pybuda/pybuda => forge/forge}/op/__init__.py | 2 +- {pybuda/pybuda => forge/forge}/op/common.py | 18 +- {pybuda/pybuda => forge/forge}/op/constant.py | 2 +- .../pybuda => forge/forge}/op/convolution.py | 4 +- .../pybuda => forge/forge}/op/dram_queue.py | 2 +- .../forge}/op/eltwise_binary.py | 2 +- .../pybuda => forge/forge}/op/eltwise_nary.py | 2 +- .../forge}/op/eltwise_unary.py | 2 +- .../pybuda => forge/forge}/op/embedding.py | 2 +- .../forge}/op/eval/__init__.py | 0 .../forge}/op/eval/buda/__init__.py | 12 +- .../forge}/op/eval/buda/abs.py | 10 +- .../forge}/op/eval/buda/buffer.py | 8 +- .../forge}/op/eval/buda/clip.py | 8 +- .../forge}/op/eval/buda/constant.py | 0 .../forge}/op/eval/buda/cosine.py | 12 +- .../forge}/op/eval/buda/cyclenet.py | 0 .../forge}/op/eval/buda/depthwise.py | 12 +- .../forge}/op/eval/buda/dram_queue.py | 2 +- .../forge}/op/eval/buda/eltwise_binary.py | 12 +- .../forge}/op/eval/buda/eltwise_nary.py | 12 +- .../forge}/op/eval/buda/eltwise_unary.py | 20 +- .../forge}/op/eval/buda/embedding.py | 4 +- .../forge}/op/eval/buda/ethernet_datacopy.py | 10 +- .../forge}/op/eval/buda/exp.py | 14 +- .../forge}/op/eval/buda/fused_ops.py | 4 +- .../forge}/op/eval/buda/log.py | 8 +- .../forge}/op/eval/buda/matmul.pth | Bin .../forge}/op/eval/buda/matmul.py | 18 +- .../forge}/op/eval/buda/nop.py | 8 +- .../forge}/op/eval/buda/quantize.py | 14 +- .../forge}/op/eval/buda/reciprocal.py | 10 +- .../forge}/op/eval/buda/splice.py | 8 
+- .../forge}/op/eval/buda/sqrt.py | 8 +- .../forge}/op/eval/buda/tanh.py | 8 +- .../forge}/op/eval/buda/tilizer.py | 8 +- .../pybuda => forge/forge}/op/eval/buda/tm.py | 10 +- .../forge}/op/eval/buda/transpose.py | 0 .../forge}/op/eval/buda/void.py | 2 +- .../pybuda => forge/forge}/op/eval/common.py | 20 +- .../forge/op/eval/forge}/__init__.py | 20 +- .../forge/op/eval/forge}/abs.py | 6 +- .../forge/op/eval/forge}/argmax.py | 4 +- .../forge/op/eval/forge}/buffer.py | 6 +- .../forge/op/eval/forge}/clip.py | 4 +- .../forge/op/eval/forge}/constant.py | 0 .../forge/op/eval/forge}/convolution.py | 24 +- .../forge/op/eval/forge}/cosine.py | 6 +- .../forge/op/eval/forge}/cumulativesum.py | 4 +- .../forge/op/eval/forge}/depthwise.py | 2 +- .../forge/op/eval/forge}/dram_queue.py | 0 .../forge/op/eval/forge}/eltwise_binary.py | 16 +- .../forge/op/eval/forge}/eltwise_nary.py | 18 +- .../forge/op/eval/forge}/eltwise_unary.py | 22 +- .../forge/op/eval/forge}/embedding.py | 6 +- .../forge/op/eval/forge}/ethernet_datacopy.py | 6 +- .../forge/op/eval/forge}/exp.py | 6 +- .../forge/op/eval/forge}/log.py | 6 +- .../forge/op/eval/forge}/mask.py | 6 +- .../forge/op/eval/forge}/matmul.py | 14 +- .../forge/op/eval/forge}/nn.py | 14 +- .../forge/op/eval/forge}/nop.py | 6 +- .../forge/op/eval/forge}/pooling.py | 10 +- .../forge/op/eval/forge}/quantize.py | 6 +- .../forge/op/eval/forge}/reciprocal.py | 6 +- .../forge/op/eval/forge}/reduce.py | 2 +- .../forge/op/eval/forge}/resize.py | 14 +- .../forge/op/eval/forge}/sqrt.py | 6 +- .../forge/op/eval/forge}/tanh.py | 6 +- .../forge/op/eval/forge}/tilizer.py | 4 +- .../forge/op/eval/forge}/tm.py | 18 +- .../forge/op/eval/forge}/transpose.py | 2 +- .../forge}/op/eval/interface.py | 14 +- .../forge}/op/eval/sparse_utils.py | 24 +- {pybuda/pybuda => forge/forge}/op/loss.py | 6 +- {pybuda/pybuda => forge/forge}/op/matmul.py | 6 +- {pybuda/pybuda => forge/forge}/op/nn.py | 22 +- {pybuda/pybuda => forge/forge}/op/pooling.py | 2 +- {pybuda/pybuda => forge/forge}/op/quantize.py | 2 +- {pybuda/pybuda => forge/forge}/op/reduce.py | 2 +- {pybuda/pybuda => forge/forge}/op/resize.py | 2 +- {pybuda/pybuda => forge/forge}/op/tm.py | 4 +- {pybuda/pybuda => forge/forge}/optimizers.py | 36 +- {pybuda/pybuda => forge/forge}/parameter.py | 26 +- .../pybuda => forge/forge}/python_codegen.py | 60 +- {pybuda/pybuda => forge/forge}/query.py | 2 +- {pybuda/pybuda => forge/forge}/schedulers.py | 2 +- {pybuda/pybuda => forge/forge}/tensor.py | 52 +- .../pybuda => forge/forge}/tools/__init__.py | 0 .../pybuda => forge/forge}/tools/autotune.py | 6 +- .../pybuda => forge/forge}/tools/autotune.sh | 6 +- .../forge}/tools/net2reportify.py | 8 +- .../forge}/tools/perf_analysis.py | 8 +- .../forge}/tools/run_net2pipe.py | 0 .../pybuda => forge/forge}/tools/tti_merge.py | 4 +- .../pybuda => forge/forge}/torch_compile.py | 38 +- .../forge}/torch_optimizers.py | 0 .../forge}/torch_schedulers.py | 0 .../forge}/transformers/__init__.py | 0 .../forge}/transformers/pipeline.py | 26 +- {pybuda/pybuda => forge/forge}/tvm.py | 84 +- .../pybuda => forge/forge}/tvm_to_python.py | 274 +++--- {pybuda/pybuda => forge/forge}/tvm_utils.py | 4 +- {pybuda/pybuda => forge/forge}/typing.py | 4 +- {pybuda/pybuda => forge/forge}/utils.py | 26 +- .../pybuda => forge/forge}/verify/__init__.py | 0 .../pybuda => forge/forge}/verify/config.py | 20 +- .../pybuda => forge/forge}/verify/cpueval.py | 18 +- .../pybuda => forge/forge}/verify/utils.py | 0 .../pybuda => forge/forge}/verify/verify.py | 12 +- {pybuda => forge}/setup.py | 
6 +- {pybuda => forge}/test/README.debug.md | 2 +- {pybuda => forge}/test/__init__.py | 0 {pybuda => forge}/test/backend/__init__.py | 0 .../test/backend/benchmark/test_simple.py | 22 +- .../test/backend/models/__init__.py | 0 .../test/backend/models/gpt2_forge.py | 70 +- .../test/backend/models/test_bert.py | 82 +- .../test/backend/models/test_gpt2.py | 22 +- .../backend/models/test_mixed_precision.py | 8 +- .../test/backend/test_backend.py | 44 +- {pybuda => forge}/test/backend/test_device.py | 16 +- {pybuda => forge}/test/backend/test_e2e.py | 66 +- .../test/backend/test_gpu_device.py | 28 +- .../test/backend/test_large_matmul.py | 10 +- {pybuda => forge}/test/backend/test_loss.py | 22 +- .../test/backend/test_pipeline.py | 26 +- forge/test/backend/test_random_grids.py | 69 ++ .../test/backend/test_silicon.py | 48 +- {pybuda => forge}/test/benchmark/README.md | 10 +- {pybuda => forge}/test/benchmark/benchmark.py | 100 +- .../test/benchmark/benchmark/__init__.py | 0 .../benchmark/benchmark/common/__init__.py | 0 .../test/benchmark/benchmark/common/common.py | 40 +- .../benchmark/benchmark/models/__init__.py | 0 .../test/benchmark/benchmark/models/bert.py | 44 +- .../models/custom/custom_resnet_highres.py | 26 +- .../models/custom/custom_vit_highres.py | 12 +- .../test/benchmark/benchmark/models/deit.py | 18 +- .../test/benchmark/benchmark/models/hrnet.py | 22 +- .../yolo_v3/holli_src/utils.py | 0 .../yolo_v3/holli_src/yolo_layer.py | 0 .../yolo_v3/holli_src/yolov3.py | 0 .../yolo_v3/holli_src/yolov3_base.py | 0 .../yolo_v3/holli_src/yolov3_tiny.py | 0 .../models/implementations/yolo_v3/license | 0 .../benchmark/models/inception_v4.py | 10 +- .../benchmark/models/mobilenet_v1.py | 28 +- .../benchmark/models/mobilenet_v2.py | 32 +- .../benchmark/models/mobilenet_v3_timm.py | 16 +- .../benchmark/models/openpose_body.py | 16 +- .../benchmark/models/openpose_hand.py | 20 +- .../test/benchmark/benchmark/models/other.py | 28 +- .../test/benchmark/benchmark/models/resnet.py | 58 +- .../benchmark/models/resnet_bringup.py | 126 +-- .../test/benchmark/benchmark/models/t5.py | 32 +- .../test/benchmark/benchmark/models/unet.py | 18 +- .../test/benchmark/benchmark/models/vit.py | 20 +- .../benchmark/benchmark/models/vovnet_v2.py | 22 +- .../benchmark/benchmark/models/whisper.py | 14 +- .../benchmark/benchmark/models/yolo_v3.py | 20 +- .../benchmark/benchmark/models/yolo_v5.py | 28 +- .../test/benchmark/run_benchmark.py | 4 +- forge/test/benchmark/run_benchmark_debug | 8 + .../benchmark/run_benchmark_gs_e150_df_bfp8 | 49 + .../benchmark/run_benchmark_gs_e150_df_fp16 | 8 +- .../benchmark/run_benchmark_gs_e150_release | 61 ++ .../benchmark/run_benchmark_gs_e75_df_bfp8 | 51 ++ .../benchmark/run_benchmark_gs_e75_df_fp16 | 10 +- .../benchmark/run_benchmark_gs_e75_release | 63 ++ forge/test/benchmark/run_benchmark_tti | 11 + forge/test/benchmark/run_benchmark_wh_df_bfp8 | 25 + forge/test/benchmark/run_benchmark_wh_df_fp16 | 41 + forge/test/benchmark/run_benchmark_wh_release | 62 ++ {pybuda => forge}/test/bert/__init__.py | 0 {pybuda => forge}/test/bert/modules.py | 26 +- {pybuda => forge}/test/common.py | 18 +- {pybuda => forge}/test/conftest.py | 68 +- .../test/data_formats/test_df.py | 108 +-- .../test/data_formats/test_int8.py | 26 +- .../emulation/test_emulation_basic_ops.py | 18 +- {pybuda => forge}/test/falcon/__init__.py | 0 .../test/falcon/data/two_cities.json | 0 .../falcon/finetune_configs/ci_basic.json | 4 +- .../finetune_configs/ci_basic_lora.json | 4 +- .../test/falcon/models/falcon7b/README.md 
| 0 .../test/falcon/models/falcon7b/config.json | 0 .../falcon/models/falcon7b/config_padded.json | 0 .../models/falcon7b/configuration_RW.py | 0 .../models/falcon7b/generation_config.json | 0 .../falcon/models/falcon7b/modelling_RW.py | 0 .../models/falcon7b/modelling_RW_original.py | 0 .../falcon7b/pytorch_model.bin.index.json | 0 .../models/falcon7b/special_tokens_map.json | 0 .../models/falcon7b/tokenizer_config.json | 0 .../falcon/models/falcon7b/tt_modeling_RW.py | 0 .../models/falcon7b/tt_modeling_RW_pad.py | 0 .../tt_modeling_RW_pad_masked_odkv.py | 0 .../falcon7b/tt_modeling_RW_pad_odkv.py | 0 .../falcon7b/tt_modeling_RW_pad_odkv_conc.py | 0 .../falcon7b/tt_modeling_RW_pad_split.py | 0 .../tt_modeling_RW_pad_split_cache.py | 0 {pybuda => forge}/test/falcon/pybudify.py | 204 ++--- .../test/falcon/requirements.txt | 0 .../test/falcon/tests/__init__.py | 0 .../falcon/tests/falcon_modules/falcon.py | 286 +++--- .../test/falcon/tests/test_falcon7b_decode.py | 8 +- .../falcon/tests/test_falcon7b_finetune.py | 2 +- {pybuda => forge}/test/falcon/tests/utils.py | 6 +- {pybuda => forge}/test/fx/__init__.py | 0 {pybuda => forge}/test/fx/conftest.py | 4 +- {pybuda => forge}/test/fx/test_basics.py | 2 +- {pybuda => forge}/test/fx/test_features.py | 46 +- {pybuda => forge}/test/fx/test_models.py | 94 +- {pybuda => forge}/test/fx/test_ops.py | 46 +- .../test/galaxy/bert/run_squad_wh.py | 574 ++++++------ .../bert/squad_preprocessing/evaluate-v1.1.py | 0 .../squad_preprocessing/helpers/__init__.py | 0 .../helpers/data_processing.py | 0 .../helpers/tokenization.py | 0 {pybuda => forge}/test/galaxy/conftest.py | 0 .../galaxy/one_shelf_eth_connections.yaml | 0 .../test/galaxy/one_shelf_runtime_params.yaml | 2 +- .../test/galaxy/test_galaxy_bert_demo.py | 72 +- .../test/galaxy/test_galaxy_inputs.py | 34 +- .../test/galaxy/test_galaxy_multichip.py | 290 +++--- .../test/galaxy/test_galaxy_shelf_setup.py | 46 +- .../test/galaxy/test_galaxy_unit_tests.py | 220 ++--- .../test/galaxy/test_multichip_golden.py | 38 +- .../galaxy/two_shelf_eth_connections.yaml | 0 .../test/galaxy/two_shelf_runtime_params.yaml | 2 +- .../galaxy/utils/generate_system_params.py | 10 +- .../galaxy/utils/verify_push_bandwidth.py | 0 {pybuda => forge}/test/gpt2/gpt2.py | 18 +- {pybuda => forge}/test/gpt2/test_gpt2.py | 74 +- .../test/llama/amp_configs/amp_config.py | 98 +- .../test/llama/amp_configs/w6.json | 0 {pybuda => forge}/test/llama/decode.py | 34 +- .../test/llama/eval_data/episode_iv.txt | 0 {pybuda => forge}/test/llama/generate_eval.py | 0 {pybuda => forge}/test/llama/hang.py | 2 +- {pybuda => forge}/test/llama/llama_test.py | 6 +- .../test/llama/modeling_alpaca_caching.py | 0 {pybuda => forge}/test/llama/placement.py | 2 +- .../test/llama/pybudify_caching.py | 72 +- {pybuda => forge}/test/llama/tt_eval.py | 2 +- .../test/mlir/llama/test_llama_inference.py | 4 +- .../mlir/llama/tests/test_llama_embedding.py | 6 +- .../mlir/llama/tests/test_llama_lm_head.py | 6 +- .../test/mlir/llama/tests/test_llama_mlp.py | 6 +- .../mlir/llama/tests/test_llama_rms_norm.py | 6 +- .../mlir/llama/tests/test_llama_rotary_emb.py | 6 +- .../mlir/llama/tests/test_llama_self_attn.py | 6 +- .../test/mlir/llama/utils/utils.py | 4 +- {pybuda => forge}/test/mlir/mnist/__init__.py | 0 .../test/mlir/mnist/test_inference.py | 6 +- .../mlir/mnist/training/mnist_linear_forge.py | 30 +- .../mnist/training/mnist_linear_pytorch.py | 0 .../test/mlir/mnist/training/test_training.py | 6 +- {pybuda => forge}/test/mlir/mnist/utils.py | 0 
.../test/mlir/resnet/test_resnet_inference.py | 6 +- {pybuda => forge}/test/mlir/test_ops.py | 28 +- {pybuda => forge}/test/mlir/test_training.py | 8 +- .../test/model_demos/__init__.py | 0 .../model_demos/high_prio/cnn/__init__.py | 0 .../high_prio/cnn/onnx/__init__.py | 0 .../high_prio/cnn/onnx/test_ddrnet.py | 40 +- .../high_prio/cnn/onnx/test_dla.py | 20 +- .../high_prio/cnn/onnx/test_fpn.py | 16 +- .../high_prio/cnn/onnx/test_hardnet.py | 26 +- .../high_prio/cnn/onnx/test_lstm_genom.py | 12 +- .../high_prio/cnn/onnx/test_lstm_valence.py | 24 +- .../high_prio/cnn/onnx/test_perceiverio.py | 28 +- .../high_prio/cnn/onnx/test_retinanet.py | 58 +- .../cnn/onnx/test_segformer_imgcls.py | 28 +- .../cnn/onnx/test_segformer_semseg.py | 32 +- .../high_prio/cnn/onnx/test_yolo_v3.py | 26 +- .../high_prio/cnn/onnx/test_yolo_v5.py | 70 +- .../high_prio/cnn/onnx/test_yolo_x.py | 50 +- .../high_prio/cnn/pytorch/__init__.py | 0 .../high_prio/cnn/pytorch/test_alexnet.py | 26 +- .../high_prio/cnn/pytorch/test_autoencoder.py | 30 +- .../high_prio/cnn/pytorch/test_blazepose.py | 74 +- .../high_prio/cnn/pytorch/test_clip.py | 28 +- .../high_prio/cnn/pytorch/test_ddrnet.py | 32 +- .../high_prio/cnn/pytorch/test_deit.py | 8 +- .../high_prio/cnn/pytorch/test_densenet.py | 68 +- .../high_prio/cnn/pytorch/test_dla.py | 24 +- .../cnn/pytorch/test_efficientnet.py | 42 +- .../cnn/pytorch/test_efficientnet_lite.py | 104 +-- .../high_prio/cnn/pytorch/test_fpn.py | 16 +- .../high_prio/cnn/pytorch/test_ghostnet.py | 18 +- .../cnn/pytorch/test_ghostnet_100.py | 8 +- .../high_prio/cnn/pytorch/test_googlenet.py | 22 +- .../high_prio/cnn/pytorch/test_hardnet.py | 24 +- .../high_prio/cnn/pytorch/test_hrnet.py | 36 +- .../cnn/pytorch/test_inception_v4.py | 68 +- .../high_prio/cnn/pytorch/test_mlp_mixer.py | 20 +- .../cnn/pytorch/test_mobilenet_v1.py | 48 +- .../cnn/pytorch/test_mobilenet_v1_ssd.py | 22 +- .../cnn/pytorch/test_mobilenet_v2.py | 86 +- .../cnn/pytorch/test_mobilenet_v3.py | 40 +- .../high_prio/cnn/pytorch/test_openpose.py | 38 +- .../high_prio/cnn/pytorch/test_perceiverio.py | 38 +- .../high_prio/cnn/pytorch/test_rcnn.py | 22 +- .../high_prio/cnn/pytorch/test_resnet.py | 34 +- .../high_prio/cnn/pytorch/test_resnext.py | 112 +-- .../high_prio/cnn/pytorch/test_retinanet.py | 28 +- .../cnn/pytorch/test_segformer_imgcls.py | 34 +- .../cnn/pytorch/test_segformer_semseg.py | 34 +- .../cnn/pytorch/test_ssd300_resnet50.py | 20 +- .../high_prio/cnn/pytorch/test_swin.py | 14 +- .../high_prio/cnn/pytorch/test_unet.py | 76 +- .../high_prio/cnn/pytorch/test_vgg.py | 68 +- .../high_prio/cnn/pytorch/test_vilt.py | 30 +- .../high_prio/cnn/pytorch/test_vit.py | 38 +- .../high_prio/cnn/pytorch/test_vovnet.py | 58 +- .../high_prio/cnn/pytorch/test_wideresnet.py | 12 +- .../high_prio/cnn/pytorch/test_xception.py | 16 +- .../high_prio/cnn/pytorch/test_yolo_v3.py | 46 +- .../high_prio/cnn/pytorch/test_yolo_v5.py | 156 ++-- .../high_prio/cnn/pytorch/test_yolo_v6.py | 36 +- .../high_prio/cnn/tflite/__init__.py | 0 .../cnn/tflite/test_efficientnet_lite.py | 52 +- .../cnn/tflite/test_hand_landmarker.py | 22 +- .../cnn/tflite/test_mobilenet_ssd.py | 16 +- .../cnn/tflite/test_pose_landmark.py | 32 +- .../model_demos/high_prio/nlp/__init__.py | 0 .../high_prio/nlp/pytorch/__init__.py | 0 .../high_prio/nlp/pytorch/test_albert.py | 58 +- .../high_prio/nlp/pytorch/test_bart.py | 18 +- .../high_prio/nlp/pytorch/test_bert.py | 46 +- .../high_prio/nlp/pytorch/test_codegen.py | 20 +- .../high_prio/nlp/pytorch/test_distilbert.py | 42 +- 
.../high_prio/nlp/pytorch/test_dpr.py | 36 +- .../high_prio/nlp/pytorch/test_falcon.py | 2 +- .../high_prio/nlp/pytorch/test_fuyu_8b.py | 56 +- .../high_prio/nlp/pytorch/test_gemma_2b.py | 70 +- .../high_prio/nlp/pytorch/test_gpt2.py | 38 +- .../high_prio/nlp/pytorch/test_gptneo.py | 30 +- .../high_prio/nlp/pytorch/test_mistral.py | 44 +- .../high_prio/nlp/pytorch/test_opt.py | 34 +- .../high_prio/nlp/pytorch/test_roberta.py | 26 +- .../high_prio/nlp/pytorch/test_squeezebert.py | 18 +- .../high_prio/nlp/pytorch/test_t5.py | 154 ++-- .../high_prio/nlp/pytorch/test_whisper_0.py | 74 +- .../high_prio/nlp/pytorch/test_whisper_1.py | 102 +-- .../high_prio/nlp/pytorch/test_xglm.py | 20 +- .../test/model_demos/models/__init__.py | 0 .../test/model_demos/models/deit.py | 12 +- .../test/model_demos/models/dla.py | 0 .../model_demos/models/falcon/__init__.py | 0 .../models/falcon/configuration_RW.py | 0 .../test/model_demos/models/falcon/model.py | 6 +- .../model_demos/models/falcon/pybudify.py | 232 ++--- .../falcon/tt_modeling_RW_pad_masked_odkv.py | 0 .../test/model_demos/models/ghostnet.py | 14 +- .../test/model_demos/models/t5.py | 28 +- .../test/model_demos/models/whisper.py | 48 +- .../test/model_demos/models/wideresnet.py | 26 +- .../test/model_demos/models/xception.py | 18 +- .../test/model_demos/utils/__init__.py | 0 .../utils/cnn/onnx/images/carvana.jpg | Bin .../utils/cnn/pytorch/images/car.jpg | Bin .../utils/cnn/pytorch/images/girl.png | Bin .../utils/cnn/pytorch/images/img.jpeg | Bin .../src_efficientnet_lite.py | 0 .../mobilenetv1_ssd/vision/nn/mobilenet.py | 0 .../pytorch/saved/yolo_v3/holli_src/license | 0 .../utils/nlp/pytorch/1272-128104-0000.pt | Bin .../utils/nlp/pytorch/1272-128104-0001.pt | Bin .../utils/nlp/pytorch/1272-128104-0002.pt | Bin .../utils/nlp/pytorch/1272-128104-0003.pt | Bin {pybuda => forge}/test/module_utils.py | 14 +- .../building_blocks/test_building_blocks.py | 2 +- .../cnn/building_blocks/test_mobilenet.py | 14 +- .../cnn/building_blocks/test_resnet.py | 24 +- .../nightly/cnn/building_blocks/test_unet.py | 16 +- .../nightly/cnn/building_blocks/test_vit.py | 12 +- {pybuda => forge}/test/nn/__init__.py | 0 .../test/nn/architectures/cnn/__init__.py | 0 .../nn/architectures/cnn/resnet/__init__.py | 0 .../cnn/resnet/resnet_blocks/__init__.py | 0 .../resnet/resnet_blocks/basic/__init__.py | 0 .../cnn/resnet/resnet_blocks/basic/basic.py | 16 +- .../resnet/resnet_blocks/basic/conftest.py | 0 .../resnet/resnet_blocks/basic/test_basic.py | 10 +- .../resnet_blocks/basic/test_basic_single.py | 20 +- .../resnet_blocks/basic/test_command.sh | 0 .../resnet_blocks/bottleneck/__init__.py | 0 .../resnet_blocks/bottleneck/bottleneck.py | 18 +- .../resnet_blocks/bottleneck/conftest.py | 0 .../bottleneck/test_bottleneck.py | 10 +- .../bottleneck/test_bottleneck_single.py | 20 +- .../resnet_blocks/bottleneck/test_command.sh | 0 .../test/nn/functional/__init__.py | 0 .../test/nn/functional/softmax/__init__.py | 0 .../nn/functional/softmax/models/__init__.py | 0 .../nn/functional/softmax/models/model_0.py | 18 +- .../nn/functional/softmax/models/model_1.py | 24 +- .../nn/functional/softmax/models/model_2.py | 42 +- .../nn/functional/softmax/models/model_3.py | 34 +- .../nn/functional/softmax/models/model_4.py | 42 +- .../nn/functional/softmax/models/model_5.py | 42 +- .../nn/functional/softmax/test_softmax.py | 10 +- .../test/nn/layers/normalization/__init__.py | 0 .../layers/normalization/models/__init__.py | 0 .../nn/layers/normalization/models/model_1.py | 18 +- 
.../layers/normalization/models/model_10.py | 72 +- .../nn/layers/normalization/models/model_2.py | 24 +- .../nn/layers/normalization/models/model_3.py | 38 +- .../nn/layers/normalization/models/model_4.py | 34 +- .../nn/layers/normalization/models/model_5.py | 62 +- .../nn/layers/normalization/models/model_6.py | 62 +- .../nn/layers/normalization/models/model_7.py | 62 +- .../nn/layers/normalization/models/model_8.py | 62 +- .../nn/layers/normalization/models/model_9.py | 78 +- .../nn/layers/normalization/test_layernorm.py | 10 +- .../test/operators/eltwise_binary/__init__.py | 0 .../test/operators/eltwise_binary/conftest.py | 0 .../eltwise_binary/models/__init__.py | 0 .../eltwise_binary/models/model_1.py | 14 +- .../eltwise_binary/models/model_10.py | 30 +- .../eltwise_binary/models/model_11.py | 24 +- .../eltwise_binary/models/model_2.py | 18 +- .../eltwise_binary/models/model_3.py | 18 +- .../eltwise_binary/models/model_4.py | 18 +- .../eltwise_binary/models/model_5.py | 18 +- .../eltwise_binary/models/model_6.py | 20 +- .../eltwise_binary/models/model_7.py | 20 +- .../eltwise_binary/models/model_8.py | 18 +- .../eltwise_binary/models/model_9.py | 18 +- .../operators/eltwise_binary/test_command.sh | 0 .../eltwise_binary/test_eltwise_binary.py | 14 +- .../test_eltwise_binary_single.py | 12 +- .../eltwise_binary_comparison/__init__.py | 0 .../models/__init__.py | 0 .../models/model_1.py | 18 +- .../models/model_10.py | 78 +- .../models/model_2.py | 28 +- .../models/model_3.py | 46 +- .../models/model_4.py | 56 +- .../models/model_5.py | 50 +- .../models/model_6.py | 46 +- .../models/model_7.py | 56 +- .../models/model_8.py | 74 +- .../models/model_9.py | 80 +- .../test_eltwise_binary_comparison.py | 14 +- .../test/operators/eltwise_unary/__init__.py | 0 .../test/operators/eltwise_unary/conftest.py | 0 .../eltwise_unary/models/__init__.py | 0 .../operators/eltwise_unary/models/model_1.py | 16 +- .../eltwise_unary/models/model_10.py | 54 +- .../operators/eltwise_unary/models/model_2.py | 28 +- .../operators/eltwise_unary/models/model_3.py | 34 +- .../operators/eltwise_unary/models/model_4.py | 30 +- .../operators/eltwise_unary/models/model_5.py | 64 +- .../operators/eltwise_unary/models/model_6.py | 52 +- .../operators/eltwise_unary/models/model_7.py | 48 +- .../operators/eltwise_unary/models/model_8.py | 30 +- .../operators/eltwise_unary/models/model_9.py | 34 +- .../operators/eltwise_unary/test_command.sh | 37 + .../eltwise_unary/test_eltwise_unary.py | 16 +- .../test_eltwise_unary_single.py | 12 +- .../operators/eltwise_unary_attr/__init__.py | 0 .../eltwise_unary_attr/clip/__init__.py | 0 .../clip/models/__init__.py | 0 .../eltwise_unary_attr/clip/models/model_1.py | 20 +- .../eltwise_unary_attr/clip/models/model_2.py | 40 +- .../eltwise_unary_attr/clip/models/model_3.py | 104 +++ .../eltwise_unary_attr/clip/models/model_4.py | 132 +++ .../eltwise_unary_attr/clip/models/model_5.py | 145 +++ .../eltwise_unary_attr/clip/test_clip.py | 12 +- .../eltwise_unary_attr/leaky_relu/__init__.py | 0 .../leaky_relu/models/__init__.py | 0 .../leaky_relu/models/model_1.py | 20 +- .../leaky_relu/models/model_2.py | 40 +- .../leaky_relu/models/model_3.py | 101 ++ .../leaky_relu/models/model_4.py | 140 +++ .../leaky_relu/models/model_5.py | 121 +++ .../leaky_relu/test_leaky_relu.py | 12 +- .../test/operators/grouped_reduce/__init__.py | 0 .../grouped_reduce/models/__init__.py | 0 .../grouped_reduce/models/model_0.py | 16 +- .../grouped_reduce/test_grouped_reduce.py | 14 +- 
.../test/operators/matmul/__init__.py | 0 .../test/operators/matmul/conftest.py | 0 .../test/operators/matmul/models/__init__.py | 0 .../operators/matmul/models/custom/model_4.py | 32 +- .../operators/matmul/models/custom/model_5.py | 38 +- .../operators/matmul/models/custom/model_9.py | 108 +-- .../matmul/models/generic/model_1.py | 16 +- .../matmul/models/generic/model_10.py | 101 ++ .../matmul/models/generic/model_2.py | 32 +- .../matmul/models/generic/model_3.py | 67 ++ .../matmul/models/generic/model_6.py | 75 ++ .../matmul/models/generic/model_7.py | 82 +- .../matmul/models/generic/model_8.py | 84 +- .../test/operators/matmul/test_command.sh | 0 .../test/operators/matmul/test_matmul.py | 16 +- .../operators/matmul/test_matmul_single.py | 12 +- .../test/operators/nary/__init__.py | 0 .../test/operators/nary/test_eltwise_nary.py | 50 +- .../test/operators/nary/test_where.py | 56 +- .../test/operators/reduce/__init__.py | 0 .../test/operators/reduce/conftest.py | 0 .../operators/reduce/models_4d/__init__.py | 0 .../operators/reduce/models_4d/model_0.py | 18 +- .../operators/reduce/models_4d/model_1.py | 18 +- .../operators/reduce/models_4d/model_2.py | 20 +- .../operators/reduce/models_4d/model_3.py | 24 +- .../operators/reduce/models_4d/model_4.py | 34 +- .../operators/reduce/models_4d/model_5.py | 44 +- .../operators/reduce/models_nd/__init__.py | 0 .../operators/reduce/models_nd/model_1.py | 20 +- .../operators/reduce/models_nd/model_2.py | 30 +- .../operators/reduce/models_nd/model_3.py | 40 +- .../operators/reduce/models_nd/model_4.py | 68 +- .../operators/reduce/models_nd/model_5.py | 62 +- .../test/operators/reduce/test_command.sh | 0 .../test/operators/reduce/test_reduce_4d.py | 14 +- .../test/operators/reduce/test_reduce_nd.py | 12 +- .../operators/reduce/test_reduce_nd_single.py | 12 +- .../test/operators/tm/__init__.py | 0 .../test/operators/tm/fuse/__init__.py | 0 .../tm/fuse/test_fuse_tm_sequence.py | 46 +- .../operators/tm/hstack_hslice/__init__.py | 0 .../operators/tm/hstack_hslice/conftest.py | 0 .../tm/hstack_hslice/models/__init__.py | 0 .../tm/hstack_hslice/models/model_1.py | 32 +- .../tm/hstack_hslice/models/model_2.py | 36 +- .../tm/hstack_hslice/models/model_3.py | 60 +- .../tm/hstack_hslice/models/model_4.py | 50 +- .../tm/hstack_hslice/models/model_5.py | 52 +- .../tm/hstack_hslice/test_command.sh | 0 .../tm/hstack_hslice/test_hstack_hslice.py | 10 +- .../test_hstack_hslice_single.py | 10 +- .../test/operators/tm/pad/__init__.py | 0 .../test/operators/tm/pad/models/__init__.py | 0 .../test/operators/tm/pad/models/model_1.py | 20 +- forge/test/operators/tm/pad/models/model_2.py | 72 ++ forge/test/operators/tm/pad/models/model_3.py | 86 ++ forge/test/operators/tm/pad/models/model_4.py | 101 ++ forge/test/operators/tm/pad/models/model_5.py | 123 +++ .../test/operators/tm/pad/test_pad.py | 12 +- .../test/operators/tm/reshape/__init__.py | 0 .../test/operators/tm/reshape/conftest.py | 0 .../operators/tm/reshape/models/__init__.py | 0 .../operators/tm/reshape/models/model_1.py | 24 +- .../operators/tm/reshape/models/model_2.py | 34 +- .../operators/tm/reshape/models/model_3.py | 73 ++ .../operators/tm/reshape/models/model_4.py | 84 ++ .../operators/tm/reshape/models/model_5.py | 72 ++ .../test/operators/tm/reshape/test_command.sh | 0 .../test/operators/tm/reshape/test_reshape.py | 10 +- .../tm/reshape/test_reshape_single.py | 8 +- .../operators/tm/vstack_vslice/__init__.py | 0 .../tm/vstack_vslice/models/__init__.py | 0 .../tm/vstack_vslice/models/model_1.py | 32 +- 
.../tm/vstack_vslice/models/model_2.py | 36 +- .../tm/vstack_vslice/models/model_3.py | 95 ++ .../tm/vstack_vslice/models/model_4.py | 50 +- .../tm/vstack_vslice/models/model_5.py | 52 +- .../tm/vstack_vslice/test_vstack_vslice.py | 12 +- .../test/quantized/test_onnx_quantized.py | 72 +- .../test_onnx_quantized_mobilenet.py | 36 +- .../quantized/test_onnx_quantized_resnet.py | 42 +- .../test/quantized/test_onnx_quantized_vit.py | 18 +- {pybuda => forge}/test/random/__init__.py | 0 {pybuda => forge}/test/random/conftest.py | 12 +- {pybuda => forge}/test/random/test_bert.py | 6 +- {pybuda => forge}/test/random/test_resnet.py | 20 +- .../test/random/test_three_ops.py | 6 +- {pybuda => forge}/test/santacoder/README.md | 8 +- .../test/santacoder/configuration_gpt2_mq.py | 0 {pybuda => forge}/test/santacoder/decode.py | 22 +- {pybuda => forge}/test/santacoder/gpt2_mq.py | 0 {pybuda => forge}/test/santacoder/kv_cache.pt | Bin .../test/santacoder/modeling_gpt2.py | 2 +- {pybuda => forge}/test/santacoder/prefill.py | 0 {pybuda => forge}/test/santacoder/pybudify.py | 64 +- .../test/santacoder/requirements.txt | 0 {pybuda => forge}/test/serve/README.md | 2 +- {pybuda => forge}/test/serve/ask.py | 0 {pybuda => forge}/test/serve/qa_serve.py | 16 +- {pybuda => forge}/test/test_api.py | 18 +- {pybuda => forge}/test/test_bert.py | 28 +- .../test/test_broadcast_splits.py | 20 +- {pybuda => forge}/test/test_consteval.py | 74 +- {pybuda => forge}/test/test_constraints.py | 88 +- {pybuda => forge}/test/test_conv2d.py | 98 +- {pybuda => forge}/test/test_conv2d_perf.py | 44 +- .../test/test_cross_entropy_loss.py | 44 +- {pybuda => forge}/test/test_error.py | 28 +- forge/test/test_fork_join.py | 860 ++++++++++++++++++ {pybuda => forge}/test/test_fracturing.py | 102 +-- {pybuda => forge}/test/test_fusing.py | 310 +++---- {pybuda => forge}/test/test_indexing.py | 22 +- .../test/test_kernel_broadcast.py | 26 +- .../test/test_large_parameters.py | 20 +- .../test/test_long_short_path.py | 82 +- {pybuda => forge}/test/test_multichip.py | 92 +- {pybuda => forge}/test/test_nlp_pipeline.py | 4 +- {pybuda => forge}/test/test_nn.py | 26 +- {pybuda => forge}/test/test_optimizers.py | 158 ++-- .../test/test_padding/__init__.py | 0 .../test/test_padding/other/__init__.py | 0 .../test_padding/other/test_padding_pass_a.py | 98 +- .../test_padding/other/test_padding_pass_b.py | 144 +-- .../test_padding/other/test_padding_pass_c.py | 136 +-- .../test_padding/other/test_padding_pass_d.py | 134 +-- .../test_padding/other/test_padding_pass_e.py | 200 ++-- .../test_padding/other/test_padding_pass_f.py | 148 +-- .../test_padding/other/test_padding_pass_g.py | 114 +-- .../test_padding/other/test_padding_pass_h.py | 130 +-- .../test_padding/other/test_padding_pass_i.py | 122 +-- .../test_padding/other/test_padding_pass_k.py | 100 +- .../test/test_padding/sanity/__init__.py | 0 .../test/test_padding/sanity/test_padding.py | 68 +- .../test/test_padding/tms/__init__.py | 0 .../test/test_padding/tms/test_padding_tms.py | 156 ++-- forge/test/test_perf_simulator.py | 132 +++ {pybuda => forge}/test/test_placer_apis.py | 128 +-- {pybuda => forge}/test/test_recompile.py | 28 +- {pybuda => forge}/test/test_sanity.py | 640 ++++++------- {pybuda => forge}/test/test_shapes.py | 82 +- {pybuda => forge}/test/test_splice.py | 10 +- forge/test/test_streaming.py | 137 +++ .../test/test_transpose_ops_placement.py | 142 +-- {pybuda => forge}/test/test_user.py | 400 ++++---- .../clip_guided_diffusion/CLIP/__init__.py | 0 
.../clip_guided_diffusion/CLIP/test_CLIP.py | 20 +- .../CLIP/test_CLIP_units.py | 38 +- .../clip_guided_diffusion/UNet/__init__.py | 0 .../clip_guided_diffusion/UNet/test_UNet.py | 30 +- .../UNet/test_UNet_blocks.py | 94 +- .../UNet/test_resblock.py | 16 +- .../test/tvm/cnn/mxnet/test_alexnet.py | 10 +- .../test/tvm/cnn/mxnet/test_densenet.py | 10 +- .../test/tvm/cnn/mxnet/test_mobilenet.py | 10 +- .../test/tvm/cnn/mxnet/test_resnet.py | 10 +- .../test/tvm/cnn/mxnet/test_squeezenet.py | 10 +- .../test/tvm/cnn/mxnet/test_vgg.py | 10 +- .../test/tvm/cnn/onnx/test_fcn.py | 8 +- .../test/tvm/cnn/onnx/test_lstm_genom.py | 12 +- .../test/tvm/cnn/onnx/test_mnist.py | 12 +- .../test/tvm/cnn/onnx/test_resnet.py | 12 +- .../alphapose/256x192_res50_lr1e-3_1x.yaml | 0 .../cnn/pytorch/alphapose/models/__init__.py | 0 .../cnn/pytorch/alphapose/models/builder.py | 0 .../cnn/pytorch/alphapose/models/fastpose.py | 0 .../pytorch/alphapose/models/fastpose_duc.py | 0 .../alphapose/models/fastpose_duc_dense.py | 0 .../pytorch/alphapose/models/layers/DUC.py | 0 .../alphapose/models/layers/PixelUnshuffle.py | 0 .../pytorch/alphapose/models/layers/Resnet.py | 0 .../alphapose/models/layers/SE_Resnet.py | 0 .../alphapose/models/layers/SE_module.py | 0 .../alphapose/models/layers/ShuffleResnet.py | 0 .../pytorch/alphapose/models/simplepose.py | 0 .../cnn/pytorch/alphapose/utils/__init__.py | 0 .../tvm/cnn/pytorch/alphapose/utils/config.py | 0 .../cnn/pytorch/alphapose/utils/registry.py | 0 .../cnn/pytorch/alphapose/utils/transforms.py | 0 .../tvm/cnn/pytorch/dall_e_vae/__init__.py | 0 .../tvm/cnn/pytorch/dall_e_vae/decoder.py | 0 .../tvm/cnn/pytorch/dall_e_vae/encoder.py | 0 .../test/tvm/cnn/pytorch/dall_e_vae/license | 0 .../test/tvm/cnn/pytorch/dall_e_vae/utils.py | 0 .../pytorch/fastdepth/imagenet/__init__.py | 0 .../pytorch/fastdepth/imagenet/mobilenet.py | 0 .../test/tvm/cnn/pytorch/fastdepth/license | 0 .../test/tvm/cnn/pytorch/fastdepth/metrics.py | 0 .../test/tvm/cnn/pytorch/fastdepth/models.py | 0 .../test/tvm/cnn/pytorch/fastdepth/utils.py | 0 .../test/tvm/cnn/pytorch/gscnn/Resnet.py | 0 .../test/tvm/cnn/pytorch/gscnn/SEresnext.py | 0 .../test/tvm/cnn/pytorch/gscnn/__init__.py | 0 .../test/tvm/cnn/pytorch/gscnn/config.py | 0 .../cnn/pytorch/gscnn/gated_spatial_conv.py | 0 .../test/tvm/cnn/pytorch/gscnn/gscnn.py | 0 .../test/tvm/cnn/pytorch/gscnn/mynn.py | 0 .../tvm/cnn/pytorch/gscnn/wider_resnet.py | 0 .../test/tvm/cnn/pytorch/tests_A/__init__.py | 0 .../tvm/cnn/pytorch/tests_A/test_alexnet.py | 12 +- .../cnn/pytorch/tests_A/test_autoencoder.py | 8 +- .../tvm/cnn/pytorch/tests_A/test_convnext.py | 18 +- .../tvm/cnn/pytorch/tests_A/test_dalle_vae.py | 16 +- .../tvm/cnn/pytorch/tests_A/test_deeplab.py | 8 +- .../cnn/pytorch/tests_A/test_efficientnet.py | 8 +- .../test/tvm/cnn/pytorch/tests_A/test_fcn.py | 20 +- .../tvm/cnn/pytorch/tests_A/test_googlenet.py | 16 +- .../tvm/cnn/pytorch/tests_A/test_gscnn.py | 12 +- .../tvm/cnn/pytorch/tests_A/test_hrnet.py | 8 +- .../tvm/cnn/pytorch/tests_A/test_inception.py | 10 +- .../tvm/cnn/pytorch/tests_A/test_midas.py | 8 +- .../tvm/cnn/pytorch/tests_A/test_mnasnet.py | 8 +- .../tvm/cnn/pytorch/tests_B/SSD/__init__.py | 0 .../test/tvm/cnn/pytorch/tests_B/SSD/ssd.py | 0 .../test/tvm/cnn/pytorch/tests_B/__init__.py | 0 .../tvm/cnn/pytorch/tests_B/test_alphapose.py | 8 +- .../tvm/cnn/pytorch/tests_B/test_fastdepth.py | 8 +- .../tvm/cnn/pytorch/tests_B/test_ghostnet.py | 16 +- .../tvm/cnn/pytorch/tests_B/test_graph_cnn.py | 10 +- .../tvm/cnn/pytorch/tests_B/test_hf_clip.py | 
14 +- .../tvm/cnn/pytorch/tests_B/test_mnist.py | 8 +- .../cnn/pytorch/tests_B/test_mobilenet_v2.py | 16 +- .../cnn/pytorch/tests_B/test_mobilenet_v3.py | 14 +- .../tvm/cnn/pytorch/tests_B/test_regnety.py | 8 +- .../tvm/cnn/pytorch/tests_B/test_resnet.py | 14 +- .../tvm/cnn/pytorch/tests_B/test_resnext.py | 14 +- .../cnn/pytorch/tests_B/test_shufflenet.py | 8 +- .../test/tvm/cnn/pytorch/tests_B/test_ssd.py | 8 +- .../test/tvm/cnn/pytorch/tests_B/test_vgg.py | 8 +- .../tvm/cnn/pytorch/tests_B/test_videopose.py | 8 +- .../test/tvm/cnn/pytorch/tests_B/test_vilt.py | 8 +- .../test/tvm/cnn/pytorch/tests_B/test_vit.py | 16 +- .../tvm/cnn/pytorch/tests_C/test_densenet.py | 30 +- .../tvm/cnn/pytorch/tests_C/test_yolov5.py | 30 +- .../cnn/tensorflow/tests_A/test_convnext.py | 12 +- .../tvm/cnn/tensorflow/tests_A/test_nasnet.py | 12 +- .../tvm/cnn/tensorflow/tests_A/test_resnet.py | 10 +- .../cnn/tensorflow/tests_A/test_xception.py | 10 +- .../cnn/tensorflow/tests_B/test_alexnet.py | 8 +- .../tensorflow/tests_B/test_autoencoder.py | 8 +- .../cnn/tensorflow/tests_B/test_densenet.py | 12 +- .../tensorflow/tests_B/test_efficientnet.py | 18 +- .../cnn/tensorflow/tests_B/test_inception.py | 10 +- .../tvm/cnn/tensorflow/tests_B/test_mnist.py | 10 +- .../cnn/tensorflow/tests_B/test_mobilenet.py | 12 +- .../cnn/tensorflow/tests_B/test_regnety.py | 10 +- .../tvm/cnn/tensorflow/tests_B/test_vgg.py | 12 +- .../tvm/cnn/tflite/test_efficientnet_lite.py | 20 +- .../test/tvm/cnn/tflite/test_pose_landmark.py | 8 +- .../get_pytorch_model_with_activations.py | 14 +- .../get_tensorflow_model_with_activations.py | 8 +- .../test/tvm/nightly/test_pytorch_models.py | 20 +- .../nightly/test_supported_pytorch_models.py | 16 +- .../test_supported_tensorflow_models.py | 16 +- .../tvm/nightly/test_tensorflow_models.py | 20 +- {pybuda => forge}/test/tvm/nlp/__init__.py | 0 .../test/tvm/nlp/jax/test_bert.py | 36 +- .../test/tvm/nlp/onnx/__init__.py | 0 .../test/tvm/nlp/onnx/tests_A/__init__.py | 0 .../test/tvm/nlp/onnx/tests_A/test_roberta.py | 14 +- .../tvm/nlp/onnx/tests_A/test_unispeech.py | 14 +- .../test/tvm/nlp/onnx/tests_A/test_wav2vec.py | 14 +- .../test/tvm/nlp/onnx/tests_B/__init__.py | 0 .../test/tvm/nlp/onnx/tests_B/test_albert.py | 14 +- .../test/tvm/nlp/onnx/tests_B/test_bart.py | 6 +- .../test/tvm/nlp/onnx/tests_B/test_bert.py | 18 +- .../test/tvm/nlp/onnx/tests_B/test_detr.py | 14 +- .../tvm/nlp/onnx/tests_B/test_distilbert.py | 8 +- .../test/tvm/nlp/onnx/tests_B/test_gpt2.py | 12 +- .../test/tvm/nlp/onnx/tests_B/test_gptj.py | 14 +- .../test/tvm/nlp/onnx/tests_C/__init__.py | 0 .../test/tvm/nlp/onnx/tests_C/test_gptneo.py | 10 +- .../test/tvm/nlp/onnx/tests_C/test_nbeats.py | 10 +- .../test/tvm/nlp/onnx/tests_C/test_opt.py | 6 +- .../tvm/nlp/onnx/tests_C/test_squeeze_bert.py | 14 +- .../test/tvm/nlp/onnx/tests_C/test_t5.py | 12 +- .../test/tvm/nlp/onnx/tests_C/test_xglm.py | 14 +- .../test/tvm/nlp/onnx/tests_C/test_xlm.py | 14 +- .../test/tvm/nlp/pytorch/__init__.py | 0 .../test/tvm/nlp/pytorch/bloom/__init__.py | 0 .../test/tvm/nlp/pytorch/bloom/model.py | 0 .../test/tvm/nlp/pytorch/bloom/ttmodel.py | 2 +- .../test/tvm/nlp/pytorch/gnmt/__init__.py | 0 .../test/tvm/nlp/pytorch/gnmt/attention.py | 0 .../test/tvm/nlp/pytorch/gnmt/config.py | 0 .../test/tvm/nlp/pytorch/gnmt/decoder.py | 0 .../test/tvm/nlp/pytorch/gnmt/encoder.py | 0 .../test/tvm/nlp/pytorch/gnmt/gnmt.py | 0 .../test/tvm/nlp/pytorch/gnmt/seq2seq_base.py | 0 .../test/tvm/nlp/pytorch/gnmt/utils.py | 0 .../test/tvm/nlp/pytorch/tests_A/__init__.py | 0 
.../tvm/nlp/pytorch/tests_A/test_albert.py | 22 +- .../test/tvm/nlp/pytorch/tests_A/test_bert.py | 24 +- .../test/tvm/nlp/pytorch/tests_A/test_detr.py | 22 +- .../tvm/nlp/pytorch/tests_A/test_t5_small.py | 116 +-- .../test/tvm/nlp/pytorch/tests_A/test_xlm.py | 14 +- .../tvm/nlp/pytorch/tests_A/test_xlnet.py | 12 +- .../test/tvm/nlp/pytorch/tests_B/__init__.py | 0 .../nlp/pytorch/tests_B/test_distilbert.py | 28 +- .../test/tvm/nlp/pytorch/tests_B/test_wmt.py | 8 +- .../test/tvm/nlp/pytorch/tests_C/__init__.py | 0 .../test/tvm/nlp/pytorch/tests_C/test_opt.py | 6 +- .../tvm/nlp/pytorch/tests_C/test_roberta.py | 24 +- .../tvm/nlp/pytorch/tests_C/test_trocr.py | 10 +- .../tvm/nlp/pytorch/tests_C/test_unispeech.py | 20 +- .../tvm/nlp/pytorch/tests_C/test_wav2vec2.py | 18 +- .../test/tvm/nlp/pytorch/tests_D/__init__.py | 0 .../test/tvm/nlp/pytorch/tests_D/test_bart.py | 20 +- .../tvm/nlp/pytorch/tests_D/test_bloom.py | 16 +- .../test/tvm/nlp/pytorch/tests_D/test_gnmt.py | 16 +- .../test/tvm/nlp/pytorch/tests_D/test_gpt2.py | 64 +- .../test/tvm/nlp/pytorch/tests_D/test_gptj.py | 22 +- .../tvm/nlp/pytorch/tests_D/test_gptneo.py | 20 +- .../nlp/pytorch/tests_D/test_longformer.py | 16 +- .../tvm/nlp/pytorch/tests_D/test_nbeats.py | 14 +- .../nlp/pytorch/tests_D/test_squeeze_bert.py | 14 +- .../test/tvm/nlp/pytorch/tests_D/test_xglm.py | 16 +- .../nlp/pytorch/tests_E/1272-128104-0000.pt | Bin .../test/tvm/nlp/pytorch/tests_E/__init__.py | 0 .../tvm/nlp/pytorch/tests_E/test_codegen.py | 10 +- .../tvm/nlp/pytorch/tests_E/test_whisper.py | 66 +- .../test/tvm/nlp/tensorflow/__init__.py | 0 .../tvm/nlp/tensorflow/tests_A/test_albert.py | 14 +- .../tensorflow/tests_A/test_t5_small_tf.py | 14 +- .../tvm/nlp/tensorflow/tests_B/__init__.py | 0 .../tvm/nlp/tensorflow/tests_B/test_bart.py | 20 +- .../tvm/nlp/tensorflow/tests_B/test_bert.py | 18 +- .../tvm/nlp/tensorflow/tests_B/test_gpt2.py | 14 +- .../nlp/tensorflow/tests_B/test_gptj_tf.py | 22 +- .../nlp/tensorflow/tests_B/test_wav2vec2.py | 10 +- .../tensorflow/tests_C/test_distillbert.py | 12 +- .../tvm/nlp/tensorflow/tests_C/test_opt.py | 16 +- .../nlp/tensorflow/tests_C/test_roberta.py | 14 +- .../tvm/nlp/tensorflow/tests_C/test_xlm.py | 14 +- .../tvm/nlp/tensorflow/tests_C/test_xlnet.py | 14 +- .../test/tvm/python/test_fracturing.py | 46 +- .../test/tvm/python/test_sanity.py | 84 +- .../pytorch/deepctr_torch/__init__.py | 0 .../pytorch/deepctr_torch/callbacks.py | 0 .../pytorch/deepctr_torch/inputs.py | 0 .../pytorch/deepctr_torch/layers/__init__.py | 0 .../deepctr_torch/layers/activation.py | 0 .../deepctr_torch/layers/core_modules.py | 0 .../deepctr_torch/layers/interaction.py | 0 .../pytorch/deepctr_torch/layers/sequence.py | 0 .../pytorch/deepctr_torch/layers/utils.py | 0 .../pytorch/deepctr_torch/license | 0 .../pytorch/deepctr_torch/models/__init__.py | 0 .../pytorch/deepctr_torch/models/afm.py | 0 .../pytorch/deepctr_torch/models/afn.py | 0 .../pytorch/deepctr_torch/models/autoint.py | 0 .../pytorch/deepctr_torch/models/basemodel.py | 0 .../pytorch/deepctr_torch/models/ccpm.py | 0 .../pytorch/deepctr_torch/models/dcn.py | 0 .../pytorch/deepctr_torch/models/dcnmix.py | 0 .../pytorch/deepctr_torch/models/deepfm.py | 0 .../pytorch/deepctr_torch/models/dien.py | 0 .../pytorch/deepctr_torch/models/difm.py | 0 .../pytorch/deepctr_torch/models/din.py | 0 .../pytorch/deepctr_torch/models/fibinet.py | 0 .../pytorch/deepctr_torch/models/ifm.py | 0 .../pytorch/deepctr_torch/models/mlr.py | 0 .../pytorch/deepctr_torch/models/nfm.py | 0 
.../pytorch/deepctr_torch/models/onn.py | 0 .../pytorch/deepctr_torch/models/pnn.py | 0 .../pytorch/deepctr_torch/models/wdl.py | 0 .../pytorch/deepctr_torch/models/xdeepfm.py | 0 .../deepctr_torch/movielens_sample.txt | 0 .../pytorch/deepctr_torch/utils.py | 0 .../tvm/recommendation/pytorch/test_afn.py | 10 +- .../pytorch/test_core_modules.py | 20 +- .../tvm/recommendation/pytorch/test_deepfm.py | 22 +- .../tvm/recommendation/pytorch/test_dlrm.py | 16 +- .../recommendation/pytorch/test_fibinet.py | 16 +- .../pytorch/test_interaction.py | 32 +- .../recommendation/pytorch/test_xdeepfm.py | 12 +- {pybuda => forge}/test/tvm/sanity/__init__.py | 0 .../test/tvm/sanity/tests_A/__init__.py | 0 .../sanity/tests_A/test_sanity_passthrough.py | 18 +- .../tvm/sanity/tests_A/test_sanity_pytorch.py | 238 ++--- .../test/tvm/sanity/tests_A/test_tvm.py | 138 +-- .../test/tvm/sanity/tests_B/__init__.py | 0 .../test/tvm/sanity/tests_B/test_df.py | 14 +- .../tvm/sanity/tests_B/test_fallback_only.py | 18 +- .../sanity/tests_B/test_pattern_matcher.py | 46 +- .../tests_B/test_propped_params_tensorflow.py | 18 +- .../tvm/sanity/tests_B/test_sanity_onnx.py | 12 +- .../test/tvm/sanity/tests_C/__init__.py | 0 .../test/tvm/sanity/tests_C/test_decomps.py | 388 ++++---- .../tvm/sanity/tests_C/test_sanity_jax.py | 50 +- .../test/tvm/sanity/tests_C/test_sanity_tf.py | 40 +- .../stable_diffusion/run_stable_diffusion.py | 32 +- .../stable_diffusion/test_stable_diffusion.py | 112 +-- {pybuda => forge}/test/tvm/utils.py | 14 +- {pybuda => forge}/test/utils.py | 0 .../test/versim/test_versim_basic_ops.py | 12 +- pybuda/CMakeLists.txt | 2 - pybuda/pybuda/_C/autograd.pyi | 24 - pybuda/test/backend/test_random_grids.py | 69 -- pybuda/test/benchmark/run_benchmark_debug | 8 - .../benchmark/run_benchmark_gs_e150_df_bfp8 | 49 - .../benchmark/run_benchmark_gs_e150_release | 61 -- .../benchmark/run_benchmark_gs_e75_df_bfp8 | 51 -- .../benchmark/run_benchmark_gs_e75_release | 63 -- pybuda/test/benchmark/run_benchmark_tti | 11 - .../test/benchmark/run_benchmark_wh_df_bfp8 | 25 - .../test/benchmark/run_benchmark_wh_df_fp16 | 41 - .../test/benchmark/run_benchmark_wh_release | 62 -- .../operators/eltwise_unary/test_command.sh | 37 - .../eltwise_unary_attr/clip/models/model_3.py | 104 --- .../eltwise_unary_attr/clip/models/model_4.py | 132 --- .../eltwise_unary_attr/clip/models/model_5.py | 145 --- .../leaky_relu/models/model_3.py | 101 -- .../leaky_relu/models/model_4.py | 140 --- .../leaky_relu/models/model_5.py | 121 --- .../matmul/models/generic/model_10.py | 101 -- .../matmul/models/generic/model_3.py | 67 -- .../matmul/models/generic/model_6.py | 75 -- .../test/operators/tm/pad/models/model_2.py | 72 -- .../test/operators/tm/pad/models/model_3.py | 86 -- .../test/operators/tm/pad/models/model_4.py | 101 -- .../test/operators/tm/pad/models/model_5.py | 123 --- .../operators/tm/reshape/models/model_3.py | 73 -- .../operators/tm/reshape/models/model_4.py | 84 -- .../operators/tm/reshape/models/model_5.py | 72 -- .../tm/vstack_vslice/models/model_3.py | 95 -- pybuda/test/test_fork_join.py | 860 ------------------ pybuda/test/test_perf_simulator.py | 132 --- pybuda/test/test_streaming.py | 137 --- pytest.ini | 8 +- scripts/bisect.sh | 26 +- scripts/compare_perf.py | 2 +- setup.py | 30 +- third_party/tvm | 2 +- 1117 files changed, 14192 insertions(+), 14192 deletions(-) create mode 100644 forge/CMakeLists.txt rename {pybuda => forge}/csrc/CMakeLists.txt (93%) rename {pybuda => forge}/csrc/autograd/CMakeLists.txt (100%) rename {pybuda => 
forge}/csrc/autograd/autograd.cpp (99%) rename {pybuda => forge}/csrc/autograd/autograd.hpp (100%) rename {pybuda => forge}/csrc/autograd/binding.cpp (66%) rename {pybuda => forge}/csrc/autograd/binding.hpp (100%) rename {pybuda => forge}/csrc/autograd/python_bindings.cpp (98%) rename {pybuda => forge}/csrc/autograd/python_bindings.hpp (100%) rename {pybuda => forge}/csrc/backend_api/CMakeLists.txt (100%) rename {pybuda => forge}/csrc/backend_api/arch_type.cpp (100%) rename {pybuda => forge}/csrc/backend_api/arch_type.hpp (100%) rename {pybuda => forge}/csrc/backend_api/backend_api.cpp (100%) rename {pybuda => forge}/csrc/backend_api/device_config.hpp (100%) rename {pybuda => forge}/csrc/buda_passes.cpp (95%) rename {pybuda => forge}/csrc/buda_passes.hpp (100%) rename pybuda/csrc/pybuda_bindings.cpp => forge/csrc/forge_bindings.cpp (98%) rename {pybuda => forge}/csrc/graph_lib/CMakeLists.txt (100%) rename {pybuda => forge}/csrc/graph_lib/defines.cpp (100%) rename {pybuda => forge}/csrc/graph_lib/defines.hpp (100%) rename {pybuda => forge}/csrc/graph_lib/edge.cpp (100%) rename {pybuda => forge}/csrc/graph_lib/edge.hpp (100%) rename {pybuda => forge}/csrc/graph_lib/graph.cpp (100%) rename {pybuda => forge}/csrc/graph_lib/graph.hpp (100%) rename {pybuda => forge}/csrc/graph_lib/node.cpp (99%) rename {pybuda => forge}/csrc/graph_lib/node.hpp (100%) rename {pybuda => forge}/csrc/graph_lib/node_types.cpp (98%) rename {pybuda => forge}/csrc/graph_lib/node_types.hpp (100%) rename {pybuda => forge}/csrc/graph_lib/python_bindings.cpp (98%) rename {pybuda => forge}/csrc/graph_lib/python_bindings.hpp (100%) rename {pybuda => forge}/csrc/graph_lib/query.hpp (100%) rename {pybuda => forge}/csrc/graph_lib/shape.cpp (100%) rename {pybuda => forge}/csrc/graph_lib/shape.hpp (100%) rename {pybuda => forge}/csrc/graph_lib/tests/test_graphlib.cpp (100%) rename {pybuda => forge}/csrc/graph_lib/tests/test_graphlib_utils.cpp (100%) rename {pybuda => forge}/csrc/graph_lib/utils.cpp (99%) rename {pybuda => forge}/csrc/graph_lib/utils.hpp (100%) rename {pybuda => forge}/csrc/lower_to_buda/common.cpp (100%) rename {pybuda => forge}/csrc/lower_to_buda/common.hpp (100%) rename {pybuda => forge}/csrc/passes/amp.cpp (99%) rename {pybuda => forge}/csrc/passes/amp.hpp (100%) rename {pybuda => forge}/csrc/passes/bind_reshape_to_io.cpp (100%) rename {pybuda => forge}/csrc/passes/bind_reshape_to_io.hpp (100%) rename {pybuda => forge}/csrc/passes/commutable_pattern.cpp (100%) rename {pybuda => forge}/csrc/passes/commute_utils.cpp (99%) rename {pybuda => forge}/csrc/passes/commute_utils.hpp (100%) rename {pybuda => forge}/csrc/passes/constant_folding.cpp (100%) rename {pybuda => forge}/csrc/passes/constant_folding.hpp (100%) rename {pybuda => forge}/csrc/passes/consteval.cpp (100%) rename {pybuda => forge}/csrc/passes/consteval.hpp (100%) rename {pybuda => forge}/csrc/passes/dataformat.cpp (99%) rename {pybuda => forge}/csrc/passes/dataformat.hpp (100%) rename {pybuda => forge}/csrc/passes/decomposing_context.cpp (95%) rename {pybuda => forge}/csrc/passes/decomposing_context.hpp (100%) rename {pybuda => forge}/csrc/passes/erase_consecutive_reshape.cpp (100%) rename {pybuda => forge}/csrc/passes/erase_consecutive_reshape.hpp (100%) rename {pybuda => forge}/csrc/passes/erase_inverse_ops.cpp (100%) rename {pybuda => forge}/csrc/passes/erase_inverse_ops.hpp (100%) rename {pybuda => forge}/csrc/passes/erase_unnecessary_4d_tm_sequence.cpp (100%) rename {pybuda => forge}/csrc/passes/erase_unnecessary_4d_tm_sequence.hpp (100%) rename 
{pybuda => forge}/csrc/passes/explicate_unsqueeze.cpp (100%) rename {pybuda => forge}/csrc/passes/explicate_unsqueeze.hpp (100%) rename {pybuda => forge}/csrc/passes/fracture.cpp (100%) rename {pybuda => forge}/csrc/passes/fracture.hpp (100%) rename {pybuda => forge}/csrc/passes/fuse_conv2d_bias.cpp (100%) rename {pybuda => forge}/csrc/passes/fuse_conv2d_bias.hpp (100%) rename {pybuda => forge}/csrc/passes/fuse_pad_conv2d.cpp (100%) rename {pybuda => forge}/csrc/passes/fuse_pad_conv2d.hpp (100%) rename {pybuda => forge}/csrc/passes/fuse_per_channel_ops.cpp (99%) rename {pybuda => forge}/csrc/passes/fuse_per_channel_ops.hpp (100%) rename {pybuda => forge}/csrc/passes/fuse_redundant_tm_sequence.cpp (96%) rename {pybuda => forge}/csrc/passes/fuse_redundant_tm_sequence.hpp (100%) rename {pybuda => forge}/csrc/passes/fuse_reshape_transpose_into_slice.cpp (98%) rename {pybuda => forge}/csrc/passes/fuse_reshape_transpose_into_slice.hpp (100%) rename {pybuda => forge}/csrc/passes/generate_initial_flops_estimate.cpp (86%) rename {pybuda => forge}/csrc/passes/generate_initial_flops_estimate.hpp (100%) rename {pybuda => forge}/csrc/passes/hoist_transforms_to_inputs.cpp (100%) rename {pybuda => forge}/csrc/passes/hoist_transforms_to_inputs.hpp (100%) rename {pybuda => forge}/csrc/passes/insert_inverse_on_io.cpp (100%) rename {pybuda => forge}/csrc/passes/insert_inverse_on_io.hpp (100%) rename {pybuda => forge}/csrc/passes/limit_to_4d_reshape.cpp (100%) rename {pybuda => forge}/csrc/passes/limit_to_4d_reshape.hpp (100%) rename {pybuda => forge}/csrc/passes/link_past_cache_ios.cpp (99%) rename {pybuda => forge}/csrc/passes/link_past_cache_ios.hpp (100%) rename {pybuda => forge}/csrc/passes/lower_concat_to_runtime_transform.cpp (98%) rename {pybuda => forge}/csrc/passes/lower_concat_to_runtime_transform.hpp (100%) rename {pybuda => forge}/csrc/passes/lower_reinterpret_shape.cpp (100%) rename {pybuda => forge}/csrc/passes/lower_reinterpret_shape.hpp (100%) rename {pybuda => forge}/csrc/passes/lower_to_mlir.cpp (100%) rename {pybuda => forge}/csrc/passes/lower_to_mlir.hpp (100%) rename {pybuda => forge}/csrc/passes/lowering_context.cpp (97%) rename {pybuda => forge}/csrc/passes/lowering_context.hpp (98%) rename {pybuda => forge}/csrc/passes/mlir_compiler.cpp (97%) rename {pybuda => forge}/csrc/passes/mlir_compiler.hpp (100%) rename {pybuda => forge}/csrc/passes/mlir_passes.cpp (100%) rename {pybuda => forge}/csrc/passes/mlir_passes.hpp (100%) rename {pybuda => forge}/csrc/passes/move_index_to_mm_weights.cpp (100%) rename {pybuda => forge}/csrc/passes/move_index_to_mm_weights.hpp (100%) rename {pybuda => forge}/csrc/passes/move_requantize.cpp (100%) rename {pybuda => forge}/csrc/passes/move_requantize.hpp (100%) rename {pybuda => forge}/csrc/passes/move_select_after_matmul_optional.cpp (99%) rename {pybuda => forge}/csrc/passes/move_select_after_matmul_optional.hpp (100%) rename {pybuda => forge}/csrc/passes/nd_slice.hpp (100%) rename {pybuda => forge}/csrc/passes/pad_output_buffer.cpp (97%) rename {pybuda => forge}/csrc/passes/pad_output_buffer.hpp (100%) rename {pybuda => forge}/csrc/passes/passes_utils.cpp (99%) rename {pybuda => forge}/csrc/passes/passes_utils.hpp (100%) rename {pybuda => forge}/csrc/passes/post_autograd_graph_passes.cpp (100%) rename {pybuda => forge}/csrc/passes/post_autograd_graph_passes.hpp (100%) rename {pybuda => forge}/csrc/passes/pre_lowering_passes.cpp (99%) rename {pybuda => forge}/csrc/passes/pre_lowering_passes.hpp (100%) rename {pybuda => 
forge}/csrc/passes/pre_placer_buda_passes.cpp (99%) rename {pybuda => forge}/csrc/passes/pre_placer_buda_passes.hpp (100%) rename {pybuda => forge}/csrc/passes/print_graph.cpp (92%) rename {pybuda => forge}/csrc/passes/print_graph.hpp (100%) rename {pybuda => forge}/csrc/passes/python_bindings.cpp (98%) rename {pybuda => forge}/csrc/passes/python_bindings.hpp (100%) rename {pybuda => forge}/csrc/passes/replace_incommutable_patterns.cpp (100%) rename {pybuda => forge}/csrc/passes/replace_incommutable_patterns.hpp (100%) rename {pybuda => forge}/csrc/passes/set_tile_dim.cpp (100%) rename {pybuda => forge}/csrc/passes/set_tile_dim.hpp (100%) rename {pybuda => forge}/csrc/passes/squeeze_to_reshape.cpp (100%) rename {pybuda => forge}/csrc/passes/squeeze_to_reshape.hpp (100%) rename {pybuda => forge}/csrc/passes/tests/gtest_main.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_constant_folding.cpp (94%) rename {pybuda => forge}/csrc/passes/tests/test_data_formats.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_erase_inverse_ops.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_erase_unnecessary_4d_tm_sequence.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_fracturing.cpp (97%) rename {pybuda => forge}/csrc/passes/tests/test_fuse_pad_conv2d.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_fuse_reshape_transpose_pairs_into_slice_or_stack_tm.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_link_past_cache_ios.cpp (99%) rename {pybuda => forge}/csrc/passes/tests/test_mm_fuse_bias.cpp (98%) rename {pybuda => forge}/csrc/passes/tests/test_move_index_to_mm_weights.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_move_select_after_matmul_optional.cpp (92%) rename {pybuda => forge}/csrc/passes/tests/test_past_cache_ublock_order.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_split_unsupp_ops.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_tilize.cpp (100%) rename {pybuda => forge}/csrc/passes/tests/test_transpose_srca.cpp (100%) rename {pybuda => forge}/csrc/python_bindings_common.hpp (100%) rename {pybuda => forge}/csrc/reportify/CMakeLists.txt (100%) rename {pybuda => forge}/csrc/reportify/paths.cpp (96%) rename {pybuda => forge}/csrc/reportify/paths.hpp (100%) rename {pybuda => forge}/csrc/reportify/reportify.cpp (98%) rename {pybuda => forge}/csrc/reportify/reportify.hpp (100%) rename {pybuda => forge}/csrc/reportify/to_json.cpp (100%) rename {pybuda => forge}/csrc/reportify/to_json.hpp (100%) rename {pybuda => forge}/csrc/runtime/CMakeLists.txt (100%) rename {pybuda => forge}/csrc/runtime/python_bindings.cpp (100%) rename {pybuda => forge}/csrc/runtime/python_bindings.hpp (100%) rename {pybuda => forge}/csrc/runtime/runtime.cpp (100%) rename {pybuda => forge}/csrc/runtime/runtime.hpp (100%) rename {pybuda => forge}/csrc/runtime/tt_device.cpp (100%) rename {pybuda => forge}/csrc/runtime/tt_device.hpp (97%) rename {pybuda => forge}/csrc/shared_utils/CMakeLists.txt (100%) rename {pybuda => forge}/csrc/shared_utils/json_extension.hpp (100%) rename {pybuda => forge}/csrc/shared_utils/placement_printer.cpp (100%) rename {pybuda => forge}/csrc/shared_utils/placement_printer.hpp (100%) rename {pybuda => forge}/csrc/shared_utils/pretty_table.cpp (100%) rename {pybuda => forge}/csrc/shared_utils/pretty_table.hpp (100%) rename {pybuda => forge}/csrc/shared_utils/sparse_matmul_utils.cpp (98%) rename {pybuda => forge}/csrc/shared_utils/sparse_matmul_utils.hpp (100%) rename {pybuda => 
forge}/csrc/shared_utils/string_extension.cpp (100%) rename {pybuda => forge}/csrc/shared_utils/string_extension.hpp (100%) rename {pybuda => forge}/csrc/test/common.hpp (99%) rename {pybuda => forge}/csrc/test/graph_api.hpp (100%) rename {pybuda => forge}/csrc/tt_torch_device/CMakeLists.txt (100%) rename {pybuda => forge}/csrc/tt_torch_device/python_bindings.cpp (95%) rename {pybuda => forge}/csrc/tt_torch_device/python_bindings.hpp (100%) rename {pybuda => forge}/csrc/tt_torch_device/torch_device_impl.cpp (99%) rename {pybuda => forge}/csrc/tt_torch_device/tt_device.cpp (98%) rename {pybuda => forge}/csrc/tt_torch_device/tt_device.hpp (98%) rename {pybuda/pybuda => forge/forge}/CMakeLists.txt (68%) rename {pybuda/pybuda => forge/forge}/_C.pyi (100%) rename {pybuda/pybuda => forge/forge}/_C/__init__.pyi (100%) create mode 100644 forge/forge/_C/autograd.pyi rename {pybuda/pybuda => forge/forge}/_C/graph.pyi (94%) rename {pybuda/pybuda => forge/forge}/_C/runtime.pyi (100%) rename {pybuda/pybuda => forge/forge}/_C/torch_device.pyi (96%) rename {pybuda/pybuda => forge/forge}/__init__.py (75%) rename {pybuda/pybuda => forge/forge}/ci.py (90%) rename {pybuda/pybuda => forge/forge}/compile.py (93%) rename {pybuda/pybuda => forge/forge}/compiled_graph_state.py (98%) rename {pybuda/pybuda => forge/forge}/config.py (92%) rename pybuda/pybuda/pybudaglobal.py => forge/forge/forgeglobal.py (86%) rename {pybuda/pybuda => forge/forge}/fx/__init__.py (100%) rename {pybuda/pybuda => forge/forge}/fx/capture.py (93%) rename {pybuda/pybuda => forge/forge}/fx/graph_utils.py (98%) rename {pybuda/pybuda => forge/forge}/fx/mixed_graph.py (97%) rename {pybuda/pybuda => forge/forge}/fx/nodes.py (83%) rename {pybuda/pybuda => forge/forge}/fx/schedule.py (99%) rename {pybuda/pybuda => forge/forge}/fx/torch_decomp_reconstruct.py (97%) rename {pybuda/pybuda => forge/forge}/fx/trace.py (100%) rename {pybuda/pybuda => forge/forge}/module.py (93%) rename {pybuda/pybuda => forge/forge}/op/__init__.py (98%) rename {pybuda/pybuda => forge/forge}/op/common.py (84%) rename {pybuda/pybuda => forge/forge}/op/constant.py (93%) rename {pybuda/pybuda => forge/forge}/op/convolution.py (97%) rename {pybuda/pybuda => forge/forge}/op/dram_queue.py (96%) rename {pybuda/pybuda => forge/forge}/op/eltwise_binary.py (99%) rename {pybuda/pybuda => forge/forge}/op/eltwise_nary.py (98%) rename {pybuda/pybuda => forge/forge}/op/eltwise_unary.py (99%) rename {pybuda/pybuda => forge/forge}/op/embedding.py (95%) rename {pybuda/pybuda => forge/forge}/op/eval/__init__.py (100%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/__init__.py (95%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/abs.py (90%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/buffer.py (90%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/clip.py (91%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/constant.py (100%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/cosine.py (88%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/cyclenet.py (100%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/depthwise.py (91%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/dram_queue.py (94%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/eltwise_binary.py (93%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/eltwise_nary.py (94%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/eltwise_unary.py (93%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/embedding.py (90%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/ethernet_datacopy.py (90%) rename 
{pybuda/pybuda => forge/forge}/op/eval/buda/exp.py (87%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/fused_ops.py (96%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/log.py (91%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/matmul.pth (100%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/matmul.py (97%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/nop.py (94%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/quantize.py (89%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/reciprocal.py (91%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/splice.py (98%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/sqrt.py (89%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/tanh.py (91%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/tilizer.py (90%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/tm.py (98%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/transpose.py (100%) rename {pybuda/pybuda => forge/forge}/op/eval/buda/void.py (98%) rename {pybuda/pybuda => forge/forge}/op/eval/common.py (96%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/__init__.py (95%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/abs.py (93%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/argmax.py (97%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/buffer.py (91%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/clip.py (97%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/constant.py (100%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/convolution.py (97%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/cosine.py (92%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/cumulativesum.py (95%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/depthwise.py (98%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/dram_queue.py (100%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/eltwise_binary.py (97%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/eltwise_nary.py (96%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/eltwise_unary.py (96%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/embedding.py (89%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/ethernet_datacopy.py (91%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/exp.py (89%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/log.py (92%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/mask.py (86%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/matmul.py (96%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/nn.py (98%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/nop.py (90%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/pooling.py (98%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/quantize.py (98%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/reciprocal.py (90%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/reduce.py (99%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/resize.py (97%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/sqrt.py (92%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/tanh.py (92%) rename {pybuda/pybuda/op/eval/pybuda => 
forge/forge/op/eval/forge}/tilizer.py (92%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/tm.py (99%) rename {pybuda/pybuda/op/eval/pybuda => forge/forge/op/eval/forge}/transpose.py (99%) rename {pybuda/pybuda => forge/forge}/op/eval/interface.py (95%) rename {pybuda/pybuda => forge/forge}/op/eval/sparse_utils.py (98%) rename {pybuda/pybuda => forge/forge}/op/loss.py (95%) rename {pybuda/pybuda => forge/forge}/op/matmul.py (93%) rename {pybuda/pybuda => forge/forge}/op/nn.py (96%) rename {pybuda/pybuda => forge/forge}/op/pooling.py (99%) rename {pybuda/pybuda => forge/forge}/op/quantize.py (99%) rename {pybuda/pybuda => forge/forge}/op/reduce.py (99%) rename {pybuda/pybuda => forge/forge}/op/resize.py (98%) rename {pybuda/pybuda => forge/forge}/op/tm.py (99%) rename {pybuda/pybuda => forge/forge}/optimizers.py (98%) rename {pybuda/pybuda => forge/forge}/parameter.py (91%) rename {pybuda/pybuda => forge/forge}/python_codegen.py (97%) rename {pybuda/pybuda => forge/forge}/query.py (99%) rename {pybuda/pybuda => forge/forge}/schedulers.py (96%) rename {pybuda/pybuda => forge/forge}/tensor.py (96%) rename {pybuda/pybuda => forge/forge}/tools/__init__.py (100%) rename {pybuda/pybuda => forge/forge}/tools/autotune.py (94%) rename {pybuda/pybuda => forge/forge}/tools/autotune.sh (71%) rename {pybuda/pybuda => forge/forge}/tools/net2reportify.py (96%) rename {pybuda/pybuda => forge/forge}/tools/perf_analysis.py (98%) rename {pybuda/pybuda => forge/forge}/tools/run_net2pipe.py (100%) rename {pybuda/pybuda => forge/forge}/tools/tti_merge.py (99%) rename {pybuda/pybuda => forge/forge}/torch_compile.py (92%) rename {pybuda/pybuda => forge/forge}/torch_optimizers.py (100%) rename {pybuda/pybuda => forge/forge}/torch_schedulers.py (100%) rename {pybuda/pybuda => forge/forge}/transformers/__init__.py (100%) rename {pybuda/pybuda => forge/forge}/transformers/pipeline.py (93%) rename {pybuda/pybuda => forge/forge}/tvm.py (92%) rename {pybuda/pybuda => forge/forge}/tvm_to_python.py (90%) rename {pybuda/pybuda => forge/forge}/tvm_utils.py (98%) rename {pybuda/pybuda => forge/forge}/typing.py (79%) rename {pybuda/pybuda => forge/forge}/utils.py (94%) rename {pybuda/pybuda => forge/forge}/verify/__init__.py (100%) rename {pybuda/pybuda => forge/forge}/verify/config.py (85%) rename {pybuda/pybuda => forge/forge}/verify/cpueval.py (92%) rename {pybuda/pybuda => forge/forge}/verify/utils.py (100%) rename {pybuda/pybuda => forge/forge}/verify/verify.py (96%) rename {pybuda => forge}/setup.py (72%) rename {pybuda => forge}/test/README.debug.md (91%) rename {pybuda => forge}/test/__init__.py (100%) rename {pybuda => forge}/test/backend/__init__.py (100%) rename {pybuda => forge}/test/backend/benchmark/test_simple.py (70%) rename {pybuda => forge}/test/backend/models/__init__.py (100%) rename pybuda/test/backend/models/gpt2_pybuda.py => forge/test/backend/models/gpt2_forge.py (87%) rename {pybuda => forge}/test/backend/models/test_bert.py (93%) rename {pybuda => forge}/test/backend/models/test_gpt2.py (90%) rename {pybuda => forge}/test/backend/models/test_mixed_precision.py (93%) rename {pybuda => forge}/test/backend/test_backend.py (82%) rename {pybuda => forge}/test/backend/test_device.py (83%) rename {pybuda => forge}/test/backend/test_e2e.py (69%) rename {pybuda => forge}/test/backend/test_gpu_device.py (77%) rename {pybuda => forge}/test/backend/test_large_matmul.py (89%) rename {pybuda => forge}/test/backend/test_loss.py (61%) rename {pybuda => forge}/test/backend/test_pipeline.py (91%) 
create mode 100644 forge/test/backend/test_random_grids.py rename {pybuda => forge}/test/backend/test_silicon.py (82%) rename {pybuda => forge}/test/benchmark/README.md (92%) rename {pybuda => forge}/test/benchmark/benchmark.py (86%) rename {pybuda => forge}/test/benchmark/benchmark/__init__.py (100%) rename {pybuda => forge}/test/benchmark/benchmark/common/__init__.py (100%) rename {pybuda => forge}/test/benchmark/benchmark/common/common.py (74%) rename {pybuda => forge}/test/benchmark/benchmark/models/__init__.py (100%) rename {pybuda => forge}/test/benchmark/benchmark/models/bert.py (73%) rename {pybuda => forge}/test/benchmark/benchmark/models/custom/custom_resnet_highres.py (68%) rename {pybuda => forge}/test/benchmark/benchmark/models/custom/custom_vit_highres.py (84%) rename {pybuda => forge}/test/benchmark/benchmark/models/deit.py (73%) rename {pybuda => forge}/test/benchmark/benchmark/models/hrnet.py (78%) rename {pybuda => forge}/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/utils.py (100%) rename {pybuda => forge}/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolo_layer.py (100%) rename {pybuda => forge}/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3.py (100%) rename {pybuda => forge}/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_base.py (100%) rename {pybuda => forge}/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_tiny.py (100%) rename {pybuda => forge}/test/benchmark/benchmark/models/implementations/yolo_v3/license (100%) rename {pybuda => forge}/test/benchmark/benchmark/models/inception_v4.py (82%) rename {pybuda => forge}/test/benchmark/benchmark/models/mobilenet_v1.py (59%) rename {pybuda => forge}/test/benchmark/benchmark/models/mobilenet_v2.py (59%) rename {pybuda => forge}/test/benchmark/benchmark/models/mobilenet_v3_timm.py (74%) rename {pybuda => forge}/test/benchmark/benchmark/models/openpose_body.py (74%) rename {pybuda => forge}/test/benchmark/benchmark/models/openpose_hand.py (74%) rename {pybuda => forge}/test/benchmark/benchmark/models/other.py (78%) rename {pybuda => forge}/test/benchmark/benchmark/models/resnet.py (84%) rename {pybuda => forge}/test/benchmark/benchmark/models/resnet_bringup.py (74%) rename {pybuda => forge}/test/benchmark/benchmark/models/t5.py (62%) rename {pybuda => forge}/test/benchmark/benchmark/models/unet.py (74%) rename {pybuda => forge}/test/benchmark/benchmark/models/vit.py (68%) rename {pybuda => forge}/test/benchmark/benchmark/models/vovnet_v2.py (72%) rename {pybuda => forge}/test/benchmark/benchmark/models/whisper.py (80%) rename {pybuda => forge}/test/benchmark/benchmark/models/yolo_v3.py (76%) rename {pybuda => forge}/test/benchmark/benchmark/models/yolo_v5.py (72%) rename {pybuda => forge}/test/benchmark/run_benchmark.py (98%) create mode 100644 forge/test/benchmark/run_benchmark_debug create mode 100644 forge/test/benchmark/run_benchmark_gs_e150_df_bfp8 rename {pybuda => forge}/test/benchmark/run_benchmark_gs_e150_df_fp16 (53%) create mode 100644 forge/test/benchmark/run_benchmark_gs_e150_release create mode 100644 forge/test/benchmark/run_benchmark_gs_e75_df_bfp8 rename {pybuda => forge}/test/benchmark/run_benchmark_gs_e75_df_fp16 (52%) create mode 100644 forge/test/benchmark/run_benchmark_gs_e75_release create mode 100644 forge/test/benchmark/run_benchmark_tti create mode 100644 forge/test/benchmark/run_benchmark_wh_df_bfp8 create mode 100644 forge/test/benchmark/run_benchmark_wh_df_fp16 create mode 100644 
forge/test/benchmark/run_benchmark_wh_release rename {pybuda => forge}/test/bert/__init__.py (100%) rename {pybuda => forge}/test/bert/modules.py (94%) rename {pybuda => forge}/test/common.py (90%) rename {pybuda => forge}/test/conftest.py (87%) rename {pybuda => forge}/test/data_formats/test_df.py (76%) rename {pybuda => forge}/test/data_formats/test_int8.py (82%) rename {pybuda => forge}/test/emulation/test_emulation_basic_ops.py (80%) rename {pybuda => forge}/test/falcon/__init__.py (100%) rename {pybuda => forge}/test/falcon/data/two_cities.json (100%) rename {pybuda => forge}/test/falcon/finetune_configs/ci_basic.json (93%) rename {pybuda => forge}/test/falcon/finetune_configs/ci_basic_lora.json (93%) rename {pybuda => forge}/test/falcon/models/falcon7b/README.md (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/config.json (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/config_padded.json (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/configuration_RW.py (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/generation_config.json (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/modelling_RW.py (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/modelling_RW_original.py (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/pytorch_model.bin.index.json (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/special_tokens_map.json (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/tokenizer_config.json (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/tt_modeling_RW.py (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/tt_modeling_RW_pad.py (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/tt_modeling_RW_pad_masked_odkv.py (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv.py (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv_conc.py (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/tt_modeling_RW_pad_split.py (100%) rename {pybuda => forge}/test/falcon/models/falcon7b/tt_modeling_RW_pad_split_cache.py (100%) rename {pybuda => forge}/test/falcon/pybudify.py (63%) rename {pybuda => forge}/test/falcon/requirements.txt (100%) rename {pybuda => forge}/test/falcon/tests/__init__.py (100%) rename {pybuda => forge}/test/falcon/tests/falcon_modules/falcon.py (88%) rename {pybuda => forge}/test/falcon/tests/test_falcon7b_decode.py (96%) rename {pybuda => forge}/test/falcon/tests/test_falcon7b_finetune.py (80%) rename {pybuda => forge}/test/falcon/tests/utils.py (99%) rename {pybuda => forge}/test/fx/__init__.py (100%) rename {pybuda => forge}/test/fx/conftest.py (92%) rename {pybuda => forge}/test/fx/test_basics.py (97%) rename {pybuda => forge}/test/fx/test_features.py (89%) rename {pybuda => forge}/test/fx/test_models.py (72%) rename {pybuda => forge}/test/fx/test_ops.py (60%) rename {pybuda => forge}/test/galaxy/bert/run_squad_wh.py (82%) rename {pybuda => forge}/test/galaxy/bert/squad_preprocessing/evaluate-v1.1.py (100%) rename {pybuda => forge}/test/galaxy/bert/squad_preprocessing/helpers/__init__.py (100%) rename {pybuda => forge}/test/galaxy/bert/squad_preprocessing/helpers/data_processing.py (100%) rename {pybuda => forge}/test/galaxy/bert/squad_preprocessing/helpers/tokenization.py (100%) rename {pybuda => forge}/test/galaxy/conftest.py (100%) rename {pybuda => forge}/test/galaxy/one_shelf_eth_connections.yaml (100%) rename {pybuda => forge}/test/galaxy/one_shelf_runtime_params.yaml (99%) rename {pybuda 
=> forge}/test/galaxy/test_galaxy_bert_demo.py (77%) rename {pybuda => forge}/test/galaxy/test_galaxy_inputs.py (84%) rename {pybuda => forge}/test/galaxy/test_galaxy_multichip.py (68%) rename {pybuda => forge}/test/galaxy/test_galaxy_shelf_setup.py (86%) rename {pybuda => forge}/test/galaxy/test_galaxy_unit_tests.py (77%) rename {pybuda => forge}/test/galaxy/test_multichip_golden.py (83%) rename {pybuda => forge}/test/galaxy/two_shelf_eth_connections.yaml (100%) rename {pybuda => forge}/test/galaxy/two_shelf_runtime_params.yaml (99%) rename {pybuda => forge}/test/galaxy/utils/generate_system_params.py (79%) rename {pybuda => forge}/test/galaxy/utils/verify_push_bandwidth.py (100%) rename {pybuda => forge}/test/gpt2/gpt2.py (96%) rename {pybuda => forge}/test/gpt2/test_gpt2.py (81%) rename {pybuda => forge}/test/llama/amp_configs/amp_config.py (63%) rename {pybuda => forge}/test/llama/amp_configs/w6.json (100%) rename {pybuda => forge}/test/llama/decode.py (96%) rename {pybuda => forge}/test/llama/eval_data/episode_iv.txt (100%) rename {pybuda => forge}/test/llama/generate_eval.py (100%) rename {pybuda => forge}/test/llama/hang.py (98%) rename {pybuda => forge}/test/llama/llama_test.py (90%) rename {pybuda => forge}/test/llama/modeling_alpaca_caching.py (100%) rename {pybuda => forge}/test/llama/placement.py (99%) rename {pybuda => forge}/test/llama/pybudify_caching.py (74%) rename {pybuda => forge}/test/llama/tt_eval.py (99%) rename {pybuda => forge}/test/mlir/llama/test_llama_inference.py (98%) rename {pybuda => forge}/test/mlir/llama/tests/test_llama_embedding.py (85%) rename {pybuda => forge}/test/mlir/llama/tests/test_llama_lm_head.py (84%) rename {pybuda => forge}/test/mlir/llama/tests/test_llama_mlp.py (84%) rename {pybuda => forge}/test/mlir/llama/tests/test_llama_rms_norm.py (84%) rename {pybuda => forge}/test/mlir/llama/tests/test_llama_rotary_emb.py (86%) rename {pybuda => forge}/test/mlir/llama/tests/test_llama_self_attn.py (89%) rename {pybuda => forge}/test/mlir/llama/utils/utils.py (90%) rename {pybuda => forge}/test/mlir/mnist/__init__.py (100%) rename {pybuda => forge}/test/mlir/mnist/test_inference.py (76%) rename pybuda/test/mlir/mnist/training/mnist_linear_pybuda.py => forge/test/mlir/mnist/training/mnist_linear_forge.py (76%) rename {pybuda => forge}/test/mlir/mnist/training/mnist_linear_pytorch.py (100%) rename {pybuda => forge}/test/mlir/mnist/training/test_training.py (94%) rename {pybuda => forge}/test/mlir/mnist/utils.py (100%) rename {pybuda => forge}/test/mlir/resnet/test_resnet_inference.py (78%) rename {pybuda => forge}/test/mlir/test_ops.py (90%) rename {pybuda => forge}/test/mlir/test_training.py (89%) rename {pybuda => forge}/test/model_demos/__init__.py (100%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/__init__.py (100%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/__init__.py (100%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_ddrnet.py (83%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_dla.py (82%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_fpn.py (68%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_hardnet.py (73%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_lstm_genom.py (75%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_lstm_valence.py (61%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_perceiverio.py (83%) rename {pybuda => 
forge}/test/model_demos/high_prio/cnn/onnx/test_retinanet.py (78%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_segformer_imgcls.py (72%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_segformer_semseg.py (78%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_yolo_v3.py (80%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_yolo_v5.py (82%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/onnx/test_yolo_x.py (89%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/__init__.py (100%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_alexnet.py (87%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_autoencoder.py (82%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_blazepose.py (76%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_clip.py (89%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_ddrnet.py (84%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_deit.py (76%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_densenet.py (73%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_dla.py (79%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_efficientnet.py (87%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_efficientnet_lite.py (66%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_fpn.py (77%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_ghostnet.py (85%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_ghostnet_100.py (83%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_googlenet.py (74%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_hardnet.py (80%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_hrnet.py (84%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_inception_v4.py (70%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_mlp_mixer.py (84%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1.py (83%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1_ssd.py (74%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v2.py (77%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v3.py (79%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_openpose.py (92%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_perceiverio.py (79%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_rcnn.py (85%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_resnet.py (77%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_resnext.py (69%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_retinanet.py (86%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_segformer_imgcls.py (72%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_segformer_semseg.py (74%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_ssd300_resnet50.py (87%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_swin.py (80%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_unet.py (75%) rename {pybuda => 
forge}/test/model_demos/high_prio/cnn/pytorch/test_vgg.py (75%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_vilt.py (89%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_vit.py (80%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_vovnet.py (78%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_wideresnet.py (84%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_xception.py (69%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_yolo_v3.py (69%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_yolo_v5.py (67%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/pytorch/test_yolo_v6.py (85%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/tflite/__init__.py (100%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/tflite/test_efficientnet_lite.py (84%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/tflite/test_hand_landmarker.py (77%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/tflite/test_mobilenet_ssd.py (73%) rename {pybuda => forge}/test/model_demos/high_prio/cnn/tflite/test_pose_landmark.py (84%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/__init__.py (100%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/__init__.py (100%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_albert.py (74%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_bart.py (87%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_bert.py (75%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_codegen.py (81%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_distilbert.py (76%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_dpr.py (75%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_falcon.py (95%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_fuyu_8b.py (89%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_gemma_2b.py (90%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_gpt2.py (77%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_gptneo.py (84%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_mistral.py (89%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_opt.py (80%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_roberta.py (70%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_squeezebert.py (66%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_t5.py (83%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_whisper_0.py (81%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_whisper_1.py (83%) rename {pybuda => forge}/test/model_demos/high_prio/nlp/pytorch/test_xglm.py (79%) rename {pybuda => forge}/test/model_demos/models/__init__.py (100%) rename {pybuda => forge}/test/model_demos/models/deit.py (75%) rename {pybuda => forge}/test/model_demos/models/dla.py (100%) rename {pybuda => forge}/test/model_demos/models/falcon/__init__.py (100%) rename {pybuda => forge}/test/model_demos/models/falcon/configuration_RW.py (100%) rename {pybuda => forge}/test/model_demos/models/falcon/model.py (98%) rename {pybuda => forge}/test/model_demos/models/falcon/pybudify.py (73%) rename {pybuda => 
forge}/test/model_demos/models/falcon/tt_modeling_RW_pad_masked_odkv.py (100%) rename {pybuda => forge}/test/model_demos/models/ghostnet.py (71%) rename {pybuda => forge}/test/model_demos/models/t5.py (81%) rename {pybuda => forge}/test/model_demos/models/whisper.py (85%) rename {pybuda => forge}/test/model_demos/models/wideresnet.py (71%) rename {pybuda => forge}/test/model_demos/models/xception.py (69%) rename {pybuda => forge}/test/model_demos/utils/__init__.py (100%) rename {pybuda => forge}/test/model_demos/utils/cnn/onnx/images/carvana.jpg (100%) rename {pybuda => forge}/test/model_demos/utils/cnn/pytorch/images/car.jpg (100%) rename {pybuda => forge}/test/model_demos/utils/cnn/pytorch/images/girl.png (100%) rename {pybuda => forge}/test/model_demos/utils/cnn/pytorch/images/img.jpeg (100%) rename {pybuda => forge}/test/model_demos/utils/cnn/pytorch/saved/efficientnet_lite/src_efficientnet_lite.py (100%) rename {pybuda => forge}/test/model_demos/utils/cnn/pytorch/saved/mobilenetv1_ssd/vision/nn/mobilenet.py (100%) rename {pybuda => forge}/test/model_demos/utils/cnn/pytorch/saved/yolo_v3/holli_src/license (100%) rename {pybuda => forge}/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt (100%) rename {pybuda => forge}/test/model_demos/utils/nlp/pytorch/1272-128104-0001.pt (100%) rename {pybuda => forge}/test/model_demos/utils/nlp/pytorch/1272-128104-0002.pt (100%) rename {pybuda => forge}/test/model_demos/utils/nlp/pytorch/1272-128104-0003.pt (100%) rename {pybuda => forge}/test/module_utils.py (81%) rename {pybuda => forge}/test/nightly/cnn/building_blocks/test_building_blocks.py (96%) rename {pybuda => forge}/test/nightly/cnn/building_blocks/test_mobilenet.py (97%) rename {pybuda => forge}/test/nightly/cnn/building_blocks/test_resnet.py (96%) rename {pybuda => forge}/test/nightly/cnn/building_blocks/test_unet.py (97%) rename {pybuda => forge}/test/nightly/cnn/building_blocks/test_vit.py (96%) rename {pybuda => forge}/test/nn/__init__.py (100%) rename {pybuda => forge}/test/nn/architectures/cnn/__init__.py (100%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/__init__.py (100%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/__init__.py (100%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/basic/__init__.py (100%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/basic/basic.py (83%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/basic/conftest.py (100%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic.py (92%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic_single.py (83%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_command.sh (100%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/__init__.py (100%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/bottleneck.py (85%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/conftest.py (100%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck.py (92%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck_single.py (84%) rename {pybuda => forge}/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_command.sh (100%) rename {pybuda => forge}/test/nn/functional/__init__.py (100%) rename {pybuda => 
forge}/test/nn/functional/softmax/__init__.py (100%) rename {pybuda => forge}/test/nn/functional/softmax/models/__init__.py (100%) rename {pybuda => forge}/test/nn/functional/softmax/models/model_0.py (83%) rename {pybuda => forge}/test/nn/functional/softmax/models/model_1.py (78%) rename {pybuda => forge}/test/nn/functional/softmax/models/model_2.py (69%) rename {pybuda => forge}/test/nn/functional/softmax/models/model_3.py (74%) rename {pybuda => forge}/test/nn/functional/softmax/models/model_4.py (68%) rename {pybuda => forge}/test/nn/functional/softmax/models/model_5.py (70%) rename {pybuda => forge}/test/nn/functional/softmax/test_softmax.py (93%) rename {pybuda => forge}/test/nn/layers/normalization/__init__.py (100%) rename {pybuda => forge}/test/nn/layers/normalization/models/__init__.py (100%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_1.py (71%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_10.py (74%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_2.py (74%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_3.py (77%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_4.py (75%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_5.py (72%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_6.py (77%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_7.py (76%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_8.py (77%) rename {pybuda => forge}/test/nn/layers/normalization/models/model_9.py (73%) rename {pybuda => forge}/test/nn/layers/normalization/test_layernorm.py (93%) rename {pybuda => forge}/test/operators/eltwise_binary/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_binary/conftest.py (100%) rename {pybuda => forge}/test/operators/eltwise_binary/models/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_1.py (77%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_10.py (74%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_11.py (78%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_2.py (75%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_3.py (77%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_4.py (75%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_5.py (76%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_6.py (77%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_7.py (83%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_8.py (84%) rename {pybuda => forge}/test/operators/eltwise_binary/models/model_9.py (80%) rename {pybuda => forge}/test/operators/eltwise_binary/test_command.sh (100%) rename {pybuda => forge}/test/operators/eltwise_binary/test_eltwise_binary.py (89%) rename {pybuda => forge}/test/operators/eltwise_binary/test_eltwise_binary_single.py (87%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/model_1.py (76%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/model_10.py (54%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/model_2.py (68%) rename {pybuda => 
forge}/test/operators/eltwise_binary_comparison/models/model_3.py (60%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/model_4.py (59%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/model_5.py (61%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/model_6.py (57%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/model_7.py (57%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/model_8.py (52%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/models/model_9.py (51%) rename {pybuda => forge}/test/operators/eltwise_binary_comparison/test_eltwise_binary_comparison.py (89%) rename {pybuda => forge}/test/operators/eltwise_unary/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_unary/conftest.py (100%) rename {pybuda => forge}/test/operators/eltwise_unary/models/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_1.py (75%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_10.py (65%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_2.py (68%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_3.py (69%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_4.py (72%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_5.py (64%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_6.py (61%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_7.py (52%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_8.py (72%) rename {pybuda => forge}/test/operators/eltwise_unary/models/model_9.py (70%) create mode 100644 forge/test/operators/eltwise_unary/test_command.sh rename {pybuda => forge}/test/operators/eltwise_unary/test_eltwise_unary.py (87%) rename {pybuda => forge}/test/operators/eltwise_unary/test_eltwise_unary_single.py (89%) rename {pybuda => forge}/test/operators/eltwise_unary_attr/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_unary_attr/clip/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_unary_attr/clip/models/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_unary_attr/clip/models/model_1.py (75%) rename {pybuda => forge}/test/operators/eltwise_unary_attr/clip/models/model_2.py (56%) create mode 100644 forge/test/operators/eltwise_unary_attr/clip/models/model_3.py create mode 100644 forge/test/operators/eltwise_unary_attr/clip/models/model_4.py create mode 100644 forge/test/operators/eltwise_unary_attr/clip/models/model_5.py rename {pybuda => forge}/test/operators/eltwise_unary_attr/clip/test_clip.py (90%) rename {pybuda => forge}/test/operators/eltwise_unary_attr/leaky_relu/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_unary_attr/leaky_relu/models/__init__.py (100%) rename {pybuda => forge}/test/operators/eltwise_unary_attr/leaky_relu/models/model_1.py (74%) rename {pybuda => forge}/test/operators/eltwise_unary_attr/leaky_relu/models/model_2.py (56%) create mode 100644 forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_3.py create mode 100644 forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_4.py create mode 100644 forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_5.py rename {pybuda => forge}/test/operators/eltwise_unary_attr/leaky_relu/test_leaky_relu.py (89%) rename {pybuda => forge}/test/operators/grouped_reduce/__init__.py (100%) rename {pybuda => 
forge}/test/operators/grouped_reduce/models/__init__.py (100%) rename {pybuda => forge}/test/operators/grouped_reduce/models/model_0.py (73%) rename {pybuda => forge}/test/operators/grouped_reduce/test_grouped_reduce.py (87%) rename {pybuda => forge}/test/operators/matmul/__init__.py (100%) rename {pybuda => forge}/test/operators/matmul/conftest.py (100%) rename {pybuda => forge}/test/operators/matmul/models/__init__.py (100%) rename {pybuda => forge}/test/operators/matmul/models/custom/model_4.py (72%) rename {pybuda => forge}/test/operators/matmul/models/custom/model_5.py (70%) rename {pybuda => forge}/test/operators/matmul/models/custom/model_9.py (62%) rename {pybuda => forge}/test/operators/matmul/models/generic/model_1.py (69%) create mode 100644 forge/test/operators/matmul/models/generic/model_10.py rename {pybuda => forge}/test/operators/matmul/models/generic/model_2.py (62%) create mode 100644 forge/test/operators/matmul/models/generic/model_3.py create mode 100644 forge/test/operators/matmul/models/generic/model_6.py rename {pybuda => forge}/test/operators/matmul/models/generic/model_7.py (57%) rename {pybuda => forge}/test/operators/matmul/models/generic/model_8.py (55%) rename {pybuda => forge}/test/operators/matmul/test_command.sh (100%) rename {pybuda => forge}/test/operators/matmul/test_matmul.py (91%) rename {pybuda => forge}/test/operators/matmul/test_matmul_single.py (90%) rename {pybuda => forge}/test/operators/nary/__init__.py (100%) rename {pybuda => forge}/test/operators/nary/test_eltwise_nary.py (71%) rename {pybuda => forge}/test/operators/nary/test_where.py (75%) rename {pybuda => forge}/test/operators/reduce/__init__.py (100%) rename {pybuda => forge}/test/operators/reduce/conftest.py (100%) rename {pybuda => forge}/test/operators/reduce/models_4d/__init__.py (100%) rename {pybuda => forge}/test/operators/reduce/models_4d/model_0.py (75%) rename {pybuda => forge}/test/operators/reduce/models_4d/model_1.py (75%) rename {pybuda => forge}/test/operators/reduce/models_4d/model_2.py (76%) rename {pybuda => forge}/test/operators/reduce/models_4d/model_3.py (79%) rename {pybuda => forge}/test/operators/reduce/models_4d/model_4.py (70%) rename {pybuda => forge}/test/operators/reduce/models_4d/model_5.py (71%) rename {pybuda => forge}/test/operators/reduce/models_nd/__init__.py (100%) rename {pybuda => forge}/test/operators/reduce/models_nd/model_1.py (77%) rename {pybuda => forge}/test/operators/reduce/models_nd/model_2.py (71%) rename {pybuda => forge}/test/operators/reduce/models_nd/model_3.py (65%) rename {pybuda => forge}/test/operators/reduce/models_nd/model_4.py (54%) rename {pybuda => forge}/test/operators/reduce/models_nd/model_5.py (60%) rename {pybuda => forge}/test/operators/reduce/test_command.sh (100%) rename {pybuda => forge}/test/operators/reduce/test_reduce_4d.py (86%) rename {pybuda => forge}/test/operators/reduce/test_reduce_nd.py (87%) rename {pybuda => forge}/test/operators/reduce/test_reduce_nd_single.py (86%) rename {pybuda => forge}/test/operators/tm/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/fuse/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/fuse/test_fuse_tm_sequence.py (56%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/conftest.py (100%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/models/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/models/model_1.py (58%) rename {pybuda => 
forge}/test/operators/tm/hstack_hslice/models/model_2.py (56%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/models/model_3.py (50%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/models/model_4.py (51%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/models/model_5.py (68%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/test_command.sh (100%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/test_hstack_hslice.py (91%) rename {pybuda => forge}/test/operators/tm/hstack_hslice/test_hstack_hslice_single.py (90%) rename {pybuda => forge}/test/operators/tm/pad/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/pad/models/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/pad/models/model_1.py (63%) create mode 100644 forge/test/operators/tm/pad/models/model_2.py create mode 100644 forge/test/operators/tm/pad/models/model_3.py create mode 100644 forge/test/operators/tm/pad/models/model_4.py create mode 100644 forge/test/operators/tm/pad/models/model_5.py rename {pybuda => forge}/test/operators/tm/pad/test_pad.py (90%) rename {pybuda => forge}/test/operators/tm/reshape/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/reshape/conftest.py (100%) rename {pybuda => forge}/test/operators/tm/reshape/models/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/reshape/models/model_1.py (63%) rename {pybuda => forge}/test/operators/tm/reshape/models/model_2.py (54%) create mode 100644 forge/test/operators/tm/reshape/models/model_3.py create mode 100644 forge/test/operators/tm/reshape/models/model_4.py create mode 100644 forge/test/operators/tm/reshape/models/model_5.py rename {pybuda => forge}/test/operators/tm/reshape/test_command.sh (100%) rename {pybuda => forge}/test/operators/tm/reshape/test_reshape.py (90%) rename {pybuda => forge}/test/operators/tm/reshape/test_reshape_single.py (92%) rename {pybuda => forge}/test/operators/tm/vstack_vslice/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/vstack_vslice/models/__init__.py (100%) rename {pybuda => forge}/test/operators/tm/vstack_vslice/models/model_1.py (58%) rename {pybuda => forge}/test/operators/tm/vstack_vslice/models/model_2.py (56%) create mode 100644 forge/test/operators/tm/vstack_vslice/models/model_3.py rename {pybuda => forge}/test/operators/tm/vstack_vslice/models/model_4.py (51%) rename {pybuda => forge}/test/operators/tm/vstack_vslice/models/model_5.py (50%) rename {pybuda => forge}/test/operators/tm/vstack_vslice/test_vstack_vslice.py (90%) rename {pybuda => forge}/test/quantized/test_onnx_quantized.py (80%) rename {pybuda => forge}/test/quantized/test_onnx_quantized_mobilenet.py (83%) rename {pybuda => forge}/test/quantized/test_onnx_quantized_resnet.py (67%) rename {pybuda => forge}/test/quantized/test_onnx_quantized_vit.py (83%) rename {pybuda => forge}/test/random/__init__.py (100%) rename {pybuda => forge}/test/random/conftest.py (80%) rename {pybuda => forge}/test/random/test_bert.py (87%) rename {pybuda => forge}/test/random/test_resnet.py (78%) rename {pybuda => forge}/test/random/test_three_ops.py (93%) rename {pybuda => forge}/test/santacoder/README.md (87%) rename {pybuda => forge}/test/santacoder/configuration_gpt2_mq.py (100%) rename {pybuda => forge}/test/santacoder/decode.py (91%) rename {pybuda => forge}/test/santacoder/gpt2_mq.py (100%) rename {pybuda => forge}/test/santacoder/kv_cache.pt (100%) rename {pybuda => forge}/test/santacoder/modeling_gpt2.py (99%) rename {pybuda => forge}/test/santacoder/prefill.py (100%) 
rename {pybuda => forge}/test/santacoder/pybudify.py (62%) rename {pybuda => forge}/test/santacoder/requirements.txt (100%) rename {pybuda => forge}/test/serve/README.md (92%) rename {pybuda => forge}/test/serve/ask.py (100%) rename {pybuda => forge}/test/serve/qa_serve.py (91%) rename {pybuda => forge}/test/test_api.py (79%) rename {pybuda => forge}/test/test_bert.py (93%) rename {pybuda => forge}/test/test_broadcast_splits.py (78%) rename {pybuda => forge}/test/test_consteval.py (69%) rename {pybuda => forge}/test/test_constraints.py (70%) rename {pybuda => forge}/test/test_conv2d.py (89%) rename {pybuda => forge}/test/test_conv2d_perf.py (57%) rename {pybuda => forge}/test/test_cross_entropy_loss.py (80%) rename {pybuda => forge}/test/test_error.py (69%) create mode 100644 forge/test/test_fork_join.py rename {pybuda => forge}/test/test_fracturing.py (73%) rename {pybuda => forge}/test/test_fusing.py (68%) rename {pybuda => forge}/test/test_indexing.py (87%) rename {pybuda => forge}/test/test_kernel_broadcast.py (50%) rename {pybuda => forge}/test/test_large_parameters.py (85%) rename {pybuda => forge}/test/test_long_short_path.py (67%) rename {pybuda => forge}/test/test_multichip.py (75%) rename {pybuda => forge}/test/test_nlp_pipeline.py (98%) rename {pybuda => forge}/test/test_nn.py (87%) rename {pybuda => forge}/test/test_optimizers.py (86%) rename {pybuda => forge}/test/test_padding/__init__.py (100%) rename {pybuda => forge}/test/test_padding/other/__init__.py (100%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_a.py (86%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_b.py (83%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_c.py (80%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_d.py (76%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_e.py (70%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_f.py (71%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_g.py (67%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_h.py (71%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_i.py (68%) rename {pybuda => forge}/test/test_padding/other/test_padding_pass_k.py (87%) rename {pybuda => forge}/test/test_padding/sanity/__init__.py (100%) rename {pybuda => forge}/test/test_padding/sanity/test_padding.py (84%) rename {pybuda => forge}/test/test_padding/tms/__init__.py (100%) rename {pybuda => forge}/test/test_padding/tms/test_padding_tms.py (76%) create mode 100644 forge/test/test_perf_simulator.py rename {pybuda => forge}/test/test_placer_apis.py (69%) rename {pybuda => forge}/test/test_recompile.py (56%) rename {pybuda => forge}/test/test_sanity.py (74%) rename {pybuda => forge}/test/test_shapes.py (76%) rename {pybuda => forge}/test/test_splice.py (95%) create mode 100644 forge/test/test_streaming.py rename {pybuda => forge}/test/test_transpose_ops_placement.py (66%) rename {pybuda => forge}/test/test_user.py (65%) rename {pybuda => forge}/test/tvm/clip_guided_diffusion/CLIP/__init__.py (100%) rename {pybuda => forge}/test/tvm/clip_guided_diffusion/CLIP/test_CLIP.py (87%) rename {pybuda => forge}/test/tvm/clip_guided_diffusion/CLIP/test_CLIP_units.py (88%) rename {pybuda => forge}/test/tvm/clip_guided_diffusion/UNet/__init__.py (100%) rename {pybuda => forge}/test/tvm/clip_guided_diffusion/UNet/test_UNet.py (88%) rename {pybuda => forge}/test/tvm/clip_guided_diffusion/UNet/test_UNet_blocks.py (86%) rename {pybuda => 
forge}/test/tvm/clip_guided_diffusion/UNet/test_resblock.py (91%)
 rename {pybuda => forge}/test/tvm/cnn/mxnet/test_alexnet.py (83%)
 rename {pybuda => forge}/test/tvm/cnn/mxnet/test_densenet.py (83%)
 rename {pybuda => forge}/test/tvm/cnn/mxnet/test_mobilenet.py (89%)
 rename {pybuda => forge}/test/tvm/cnn/mxnet/test_resnet.py (83%)
 rename {pybuda => forge}/test/tvm/cnn/mxnet/test_squeezenet.py (83%)
 rename {pybuda => forge}/test/tvm/cnn/mxnet/test_vgg.py (83%)
 rename {pybuda => forge}/test/tvm/cnn/onnx/test_fcn.py (89%)
 rename {pybuda => forge}/test/tvm/cnn/onnx/test_lstm_genom.py (72%)
 rename {pybuda => forge}/test/tvm/cnn/onnx/test_mnist.py (86%)
 rename {pybuda => forge}/test/tvm/cnn/onnx/test_resnet.py (86%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/256x192_res50_lr1e-3_1x.yaml (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/builder.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/fastpose.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc_dense.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/layers/DUC.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/layers/PixelUnshuffle.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/layers/Resnet.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/layers/SE_Resnet.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/layers/SE_module.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/layers/ShuffleResnet.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/models/simplepose.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/utils/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/utils/config.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/utils/registry.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/alphapose/utils/transforms.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/dall_e_vae/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/dall_e_vae/decoder.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/dall_e_vae/encoder.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/dall_e_vae/license (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/dall_e_vae/utils.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/fastdepth/imagenet/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/fastdepth/imagenet/mobilenet.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/fastdepth/license (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/fastdepth/metrics.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/fastdepth/models.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/fastdepth/utils.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/gscnn/Resnet.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/gscnn/SEresnext.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/gscnn/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/gscnn/config.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/gscnn/gated_spatial_conv.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/gscnn/gscnn.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/gscnn/mynn.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/gscnn/wider_resnet.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_alexnet.py (83%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_autoencoder.py (95%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_convnext.py (87%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_dalle_vae.py (82%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_deeplab.py (85%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_efficientnet.py (94%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_fcn.py (76%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_googlenet.py (84%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_gscnn.py (93%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_hrnet.py (94%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_inception.py (95%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_midas.py (84%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_A/test_mnasnet.py (83%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/SSD/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/SSD/ssd.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_alphapose.py (87%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_fastdepth.py (85%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_ghostnet.py (87%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_graph_cnn.py (89%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_hf_clip.py (90%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_mnist.py (91%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v2.py (87%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v3.py (88%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_regnety.py (85%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_resnet.py (92%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_resnext.py (80%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_shufflenet.py (85%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_ssd.py (85%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_vgg.py (85%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_videopose.py (87%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_vilt.py (91%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_B/test_vit.py (88%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_C/test_densenet.py (76%)
 rename {pybuda => forge}/test/tvm/cnn/pytorch/tests_C/test_yolov5.py (85%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_A/test_convnext.py (82%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_A/test_nasnet.py (84%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_A/test_resnet.py (90%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_A/test_xception.py (83%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_B/test_alexnet.py (95%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_B/test_autoencoder.py (93%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_B/test_densenet.py (86%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_B/test_efficientnet.py (88%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_B/test_inception.py (84%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_B/test_mnist.py (90%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_B/test_mobilenet.py (92%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_B/test_regnety.py (86%)
 rename {pybuda => forge}/test/tvm/cnn/tensorflow/tests_B/test_vgg.py (86%)
 rename {pybuda => forge}/test/tvm/cnn/tflite/test_efficientnet_lite.py (83%)
 rename {pybuda => forge}/test/tvm/cnn/tflite/test_pose_landmark.py (90%)
 rename {pybuda => forge}/test/tvm/nightly/get_pytorch_model_with_activations.py (98%)
 rename {pybuda => forge}/test/tvm/nightly/get_tensorflow_model_with_activations.py (98%)
 rename {pybuda => forge}/test/tvm/nightly/test_pytorch_models.py (85%)
 rename {pybuda => forge}/test/tvm/nightly/test_supported_pytorch_models.py (86%)
 rename {pybuda => forge}/test/tvm/nightly/test_supported_tensorflow_models.py (85%)
 rename {pybuda => forge}/test/tvm/nightly/test_tensorflow_models.py (85%)
 rename {pybuda => forge}/test/tvm/nlp/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/jax/test_bert.py (94%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_A/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_A/test_roberta.py (95%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_A/test_unispeech.py (88%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_A/test_wav2vec.py (86%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_B/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_B/test_albert.py (88%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_B/test_bart.py (97%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_B/test_bert.py (90%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_B/test_detr.py (93%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_B/test_distilbert.py (91%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_B/test_gpt2.py (96%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_B/test_gptj.py (95%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_C/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_C/test_gptneo.py (90%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_C/test_nbeats.py (97%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_C/test_opt.py (94%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_C/test_squeeze_bert.py (93%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_C/test_t5.py (96%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_C/test_xglm.py (92%)
 rename {pybuda => forge}/test/tvm/nlp/onnx/tests_C/test_xlm.py (87%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/bloom/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/bloom/model.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/bloom/ttmodel.py (99%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/gnmt/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/gnmt/attention.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/gnmt/config.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/gnmt/decoder.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/gnmt/encoder.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/gnmt/gnmt.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/gnmt/seq2seq_base.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/gnmt/utils.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_A/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_A/test_albert.py (88%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_A/test_bert.py (90%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_A/test_detr.py (94%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_A/test_t5_small.py (92%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_A/test_xlm.py (96%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_A/test_xlnet.py (90%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_B/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_B/test_distilbert.py (91%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_B/test_wmt.py (94%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_C/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_C/test_opt.py (95%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_C/test_roberta.py (90%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_C/test_trocr.py (90%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_C/test_unispeech.py (90%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_C/test_wav2vec2.py (88%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_bart.py (94%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_bloom.py (91%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_gnmt.py (93%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_gpt2.py (92%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_gptj.py (87%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_gptneo.py (82%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_longformer.py (94%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_nbeats.py (93%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_squeeze_bert.py (88%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_D/test_xglm.py (96%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_E/1272-128104-0000.pt (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_E/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_E/test_codegen.py (89%)
 rename {pybuda => forge}/test/tvm/nlp/pytorch/tests_E/test_whisper.py (86%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_A/test_albert.py (94%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_A/test_t5_small_tf.py (89%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_B/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_B/test_bart.py (94%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_B/test_bert.py (95%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_B/test_gpt2.py (89%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_B/test_gptj_tf.py (90%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_B/test_wav2vec2.py (96%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_C/test_distillbert.py (92%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_C/test_opt.py (93%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_C/test_roberta.py (94%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_C/test_xlm.py (89%)
 rename {pybuda => forge}/test/tvm/nlp/tensorflow/tests_C/test_xlnet.py (84%)
 rename {pybuda => forge}/test/tvm/python/test_fracturing.py (78%)
 rename {pybuda => forge}/test/tvm/python/test_sanity.py (84%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/callbacks.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/inputs.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/layers/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/layers/activation.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/layers/core_modules.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/layers/interaction.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/layers/sequence.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/layers/utils.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/license (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/afm.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/afn.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/autoint.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/basemodel.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/ccpm.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/dcn.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/dcnmix.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/deepfm.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/dien.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/difm.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/din.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/fibinet.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/ifm.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/mlr.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/nfm.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/onn.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/pnn.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/wdl.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/models/xdeepfm.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/movielens_sample.txt (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/deepctr_torch/utils.py (100%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/test_afn.py (93%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/test_core_modules.py (93%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/test_deepfm.py (91%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/test_dlrm.py (98%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/test_fibinet.py (84%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/test_interaction.py (93%)
 rename {pybuda => forge}/test/tvm/recommendation/pytorch/test_xdeepfm.py (95%)
 rename {pybuda => forge}/test/tvm/sanity/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/sanity/tests_A/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/sanity/tests_A/test_sanity_passthrough.py (83%)
 rename {pybuda => forge}/test/tvm/sanity/tests_A/test_sanity_pytorch.py (94%)
 rename {pybuda => forge}/test/tvm/sanity/tests_A/test_tvm.py (92%)
 rename {pybuda => forge}/test/tvm/sanity/tests_B/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/sanity/tests_B/test_df.py (95%)
 rename {pybuda => forge}/test/tvm/sanity/tests_B/test_fallback_only.py (90%)
 rename {pybuda => forge}/test/tvm/sanity/tests_B/test_pattern_matcher.py (82%)
 rename {pybuda => forge}/test/tvm/sanity/tests_B/test_propped_params_tensorflow.py (90%)
 rename {pybuda => forge}/test/tvm/sanity/tests_B/test_sanity_onnx.py (97%)
 rename {pybuda => forge}/test/tvm/sanity/tests_C/__init__.py (100%)
 rename {pybuda => forge}/test/tvm/sanity/tests_C/test_decomps.py (68%)
 rename {pybuda => forge}/test/tvm/sanity/tests_C/test_sanity_jax.py (92%)
 rename {pybuda => forge}/test/tvm/sanity/tests_C/test_sanity_tf.py (97%)
 rename {pybuda => forge}/test/tvm/stable_diffusion/run_stable_diffusion.py (91%)
 rename {pybuda => forge}/test/tvm/stable_diffusion/test_stable_diffusion.py (83%)
 rename {pybuda => forge}/test/tvm/utils.py (70%)
 rename {pybuda => forge}/test/utils.py (100%)
 rename {pybuda => forge}/test/versim/test_versim_basic_ops.py (75%)
 delete mode 100644 pybuda/CMakeLists.txt
 delete mode 100644 pybuda/pybuda/_C/autograd.pyi
 delete mode 100644 pybuda/test/backend/test_random_grids.py
 delete mode 100644 pybuda/test/benchmark/run_benchmark_debug
 delete mode 100644 pybuda/test/benchmark/run_benchmark_gs_e150_df_bfp8
 delete mode 100644 pybuda/test/benchmark/run_benchmark_gs_e150_release
 delete mode 100644 pybuda/test/benchmark/run_benchmark_gs_e75_df_bfp8
 delete mode 100644 pybuda/test/benchmark/run_benchmark_gs_e75_release
 delete mode 100644 pybuda/test/benchmark/run_benchmark_tti
 delete mode 100644 pybuda/test/benchmark/run_benchmark_wh_df_bfp8
 delete mode 100644 pybuda/test/benchmark/run_benchmark_wh_df_fp16
 delete mode 100644 pybuda/test/benchmark/run_benchmark_wh_release
 delete mode 100644 pybuda/test/operators/eltwise_unary/test_command.sh
 delete mode 100644 pybuda/test/operators/eltwise_unary_attr/clip/models/model_3.py
 delete mode 100644 pybuda/test/operators/eltwise_unary_attr/clip/models/model_4.py
 delete mode 100644 pybuda/test/operators/eltwise_unary_attr/clip/models/model_5.py
 delete mode 100644 pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_3.py
 delete mode 100644 pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_4.py
 delete mode 100644 pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_5.py
 delete mode 100644 pybuda/test/operators/matmul/models/generic/model_10.py
 delete mode 100644 pybuda/test/operators/matmul/models/generic/model_3.py
 delete mode 100644 pybuda/test/operators/matmul/models/generic/model_6.py
 delete mode 100644 pybuda/test/operators/tm/pad/models/model_2.py
 delete mode 100644 pybuda/test/operators/tm/pad/models/model_3.py
 delete mode 100644 pybuda/test/operators/tm/pad/models/model_4.py
 delete mode 100644 pybuda/test/operators/tm/pad/models/model_5.py
 delete mode 100644 pybuda/test/operators/tm/reshape/models/model_3.py
 delete mode 100644 pybuda/test/operators/tm/reshape/models/model_4.py
 delete mode 100644 pybuda/test/operators/tm/reshape/models/model_5.py
 delete mode 100644 pybuda/test/operators/tm/vstack_vslice/models/model_3.py
 delete mode 100644 pybuda/test/test_fork_join.py
 delete mode 100644 pybuda/test/test_perf_simulator.py
 delete mode 100644 pybuda/test/test_streaming.py
diff --git
a/.gitignore b/.gitignore index ca6929ebd..36f16f260 100644 --- a/.gitignore +++ b/.gitignore @@ -29,9 +29,9 @@ blobgen_cmd_log python_api/env python_api/*.so python_api/test_run_on_device_out -pybuda/pybuda/*.so +forge/forge/*.so __pycache__ -pybuda/pybuda.egg-info/ +forge/forge.egg-info/ *.vcd diff --git a/.gitlab-ci.perf.yml b/.gitlab-ci.perf.yml index d683dc410..5a0dbc595 100644 --- a/.gitlab-ci.perf.yml +++ b/.gitlab-ci.perf.yml @@ -1,17 +1,17 @@ include: - .gitlab-ci.wheels.yml - # PyBuda repo, Grayskull e150 + # Forge repo, Grayskull e150 - ci/gitlab-test-lists/.gitlab-ci.grayskull_e150_perf_bfp8_b_nightly.yml - ci/gitlab-test-lists/.gitlab-ci.grayskull_e150_perf_fp16_nightly.yml - ci/gitlab-test-lists/.gitlab-ci.grayskull_e150_perf_release_nightly.yml - # PyBuda repo, Grayskull e75 + # Forge repo, Grayskull e75 - ci/gitlab-test-lists/.gitlab-ci.grayskull_e75_perf_bfp8_b_nightly.yml - ci/gitlab-test-lists/.gitlab-ci.grayskull_e75_perf_fp16_nightly.yml - ci/gitlab-test-lists/.gitlab-ci.grayskull_e75_perf_release_nightly.yml - # PyBuda repo, Wormhole B0 + # Forge repo, Wormhole B0 - ci/gitlab-test-lists/.gitlab-ci.wormhole_b0_silicon_perf_bfp8_b_nightly.yml - ci/gitlab-test-lists/.gitlab-ci.wormhole_b0_silicon_perf_fp16_nightly.yml - ci/gitlab-test-lists/.gitlab-ci.wormhole_b0_silicon_perf_release_nightly.yml @@ -27,18 +27,18 @@ include: - ci/gitlab-test-lists/benchmarking/.gitlab-ci.grayskull_e150_perf_release_public.yml # Dissable other jobs from .gitlab-ci.wheels.yml -pybuda-gs-latest-bbe-wheel: +forge-gs-latest-bbe-wheel: rules: - if: ($CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_MESSAGE !~ /\[no_ci_perf/) -pybuda-wh-b0-latest-bbe-wheel: +forge-wh-b0-latest-bbe-wheel: rules: - if: ($CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_MESSAGE !~ /\[no_ci_perf/) -pybuda-gs-unittests: +forge-gs-unittests: rules: - if: ($CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_MESSAGE !~ /\[no_ci_perf/) -pybuda-wh-b0-unittests: +forge-wh-b0-unittests: rules: - if: ($CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_MESSAGE !~ /\[no_ci_perf/) \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 61348a903..1165ff8a2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -50,14 +50,14 @@ set(STATIC_LIB_FLAGS -fPIC) set(SHARED_LIB_FLAGS -fPIC) add_subdirectory(third_party) -add_subdirectory(pybuda) +add_subdirectory(forge) add_subdirectory(docs) ### Generate stubs for ttforge ### Run `cmake --build build -- make_stubs` to generate stubs add_custom_target(make_stubs COMMAND pip install mypy==1.10 - COMMAND stubgen -m pybuda._C -m pybuda._C.autograd -m pybuda._C.graph -m pybuda._C.torch_device -m pybuda._C.runtime -o pybuda -v + COMMAND stubgen -m forge._C -m forge._C.autograd -m forge._C.graph -m forge._C.torch_device -m forge._C.runtime -o forge -v COMMENT "Generating stubs for ttforge" USES_TERMINAL WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/compile_flags.txt b/compile_flags.txt index b2630aca4..f1165786f 100644 --- a/compile_flags.txt +++ b/compile_flags.txt @@ -11,5 +11,5 @@ -I/usr/include/python3.8 -Igui_lib -Ithird_party/json --Ipybuda/csrc +-Iforge/csrc -Ithird_party/fmt diff --git a/forge/CMakeLists.txt b/forge/CMakeLists.txt new file mode 100644 index 000000000..81d4797b6 --- /dev/null +++ b/forge/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(csrc) +add_subdirectory(forge) diff --git a/pybuda/csrc/CMakeLists.txt b/forge/csrc/CMakeLists.txt similarity index 93% rename from pybuda/csrc/CMakeLists.txt rename to forge/csrc/CMakeLists.txt index 3826eab47..b0d51d6fb 100644 
--- a/pybuda/csrc/CMakeLists.txt +++ b/forge/csrc/CMakeLists.txt @@ -45,7 +45,7 @@ add_subdirectory(tt_torch_device) ### ttforge_csrc_objs ### file(GLOB CPP_FILES - "pybuda_bindings.cpp" + "forge_bindings.cpp" "buda_passes.cpp" "passes/*.cpp" "lower_to_buda/common.cpp" @@ -116,11 +116,11 @@ target_link_directories(ttforge_csrc PRIVATE #### Copy python module extension to ttforge directory #### add_custom_target(run_after_ttforge_csrc ALL - COMMAND mkdir -p ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/pybuda - COMMAND cp $ ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/pybuda/_C.so - COMMAND touch -r $ ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/pybuda/_C.so - COMMAND ln -sf ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/pybuda/_C.so ${CMAKE_SOURCE_DIR}/pybuda/pybuda/_C.so - COMMENT "Running run_after_ttforge_csrc to copy the python module extension to pybuda directory" + COMMAND mkdir -p ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/forge + COMMAND cp $ ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/forge/_C.so + COMMAND touch -r $ ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/forge/_C.so + COMMAND ln -sf ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/forge/_C.so ${CMAKE_SOURCE_DIR}/forge/forge/_C.so + COMMENT "Running run_after_ttforge_csrc to copy the python module extension to forge directory" USES_TERMINAL ) diff --git a/pybuda/csrc/autograd/CMakeLists.txt b/forge/csrc/autograd/CMakeLists.txt similarity index 100% rename from pybuda/csrc/autograd/CMakeLists.txt rename to forge/csrc/autograd/CMakeLists.txt diff --git a/pybuda/csrc/autograd/autograd.cpp b/forge/csrc/autograd/autograd.cpp similarity index 99% rename from pybuda/csrc/autograd/autograd.cpp rename to forge/csrc/autograd/autograd.cpp index ca9a7df26..9da507705 100644 --- a/pybuda/csrc/autograd/autograd.cpp +++ b/forge/csrc/autograd/autograd.cpp @@ -159,7 +159,7 @@ void autograd2_engine::create_backward_graph(const grad_map &requires_grad_map) if (output->is_loss_output()) { // Grad of loss is 1. Create constant and use that as "input". - py::object eval_module = py::module_::import("pybuda.op.eval"); + py::object eval_module = py::module_::import("forge.op.eval"); auto const_tensor = make_shared_py_object( eval_module.attr("create_constant_tensor_from_tensor") (std::vector{1.0}, node->shape().as_vector(), false, node->output_df())); diff --git a/pybuda/csrc/autograd/autograd.hpp b/forge/csrc/autograd/autograd.hpp similarity index 100% rename from pybuda/csrc/autograd/autograd.hpp rename to forge/csrc/autograd/autograd.hpp diff --git a/pybuda/csrc/autograd/binding.cpp b/forge/csrc/autograd/binding.cpp similarity index 66% rename from pybuda/csrc/autograd/binding.cpp rename to forge/csrc/autograd/binding.cpp index 793c084e5..7d38d191b 100644 --- a/pybuda/csrc/autograd/binding.cpp +++ b/forge/csrc/autograd/binding.cpp @@ -9,15 +9,15 @@ std::tuple> get_op_shape(OpType type, std::vect { int tile_height = tt::graphlib::get_row_size_from_tile_size(tile_dim); int tile_width = tt::graphlib::get_col_size_from_tile_size(tile_dim); - auto eval_module = is_buda ? py::module_::import("pybuda.op.eval.buda") : py::module_::import("pybuda.op.eval.pybuda"); - py::function pybuda_shape = is_buda ? eval_module.attr("get_f_pybuda_shape")(type, tile_height, tile_width) - : eval_module.attr("get_f_pybuda_shape")(type); + auto eval_module = is_buda ? 
py::module_::import("forge.op.eval.buda") : py::module_::import("forge.op.eval.forge"); + py::function forge_shape = is_buda ? eval_module.attr("get_f_forge_shape")(type, tile_height, tile_width) + : eval_module.attr("get_f_forge_shape")(type); std::vector> operand_tuples; for(Shape &shape : operands) operand_tuples.push_back(shape.as_vector()); - py::tuple ret = pybuda_shape(operand_tuples); + py::tuple ret = forge_shape(operand_tuples); Shape s = is_buda ? Shape::create_buda(ret[0].cast>(), tile_height, tile_width) : Shape::create(ret[0].cast>()); @@ -32,9 +32,9 @@ NodeContext insert_backward( NodeContext output, NodeContext gradient) { - auto eval_module = py::module_::import("pybuda.op.eval.pybuda"); - py::function pybuda_backward = eval_module.attr("get_f_pybuda_backward")(type); + auto eval_module = py::module_::import("forge.op.eval.forge"); + py::function forge_backward = eval_module.attr("get_f_forge_backward")(type); - return pybuda_backward(context, operand, inputs, output, gradient).cast(); + return forge_backward(context, operand, inputs, output, gradient).cast(); } diff --git a/pybuda/csrc/autograd/binding.hpp b/forge/csrc/autograd/binding.hpp similarity index 100% rename from pybuda/csrc/autograd/binding.hpp rename to forge/csrc/autograd/binding.hpp diff --git a/pybuda/csrc/autograd/python_bindings.cpp b/forge/csrc/autograd/python_bindings.cpp similarity index 98% rename from pybuda/csrc/autograd/python_bindings.cpp rename to forge/csrc/autograd/python_bindings.cpp index 837278595..7345c5bb3 100644 --- a/pybuda/csrc/autograd/python_bindings.cpp +++ b/forge/csrc/autograd/python_bindings.cpp @@ -14,7 +14,7 @@ namespace tt { static bool has_newstyle_interface(std::string const &op_name, bool is_buda) { py::object eval_module = - is_buda ? py::module_::import("pybuda.op.eval.buda") : py::module_::import("pybuda.op.eval.pybuda"); + is_buda ? 
py::module_::import("forge.op.eval.buda") : py::module_::import("forge.op.eval.forge"); return eval_module.attr("has_newstyle_interface")(op_name).cast(); } diff --git a/pybuda/csrc/autograd/python_bindings.hpp b/forge/csrc/autograd/python_bindings.hpp similarity index 100% rename from pybuda/csrc/autograd/python_bindings.hpp rename to forge/csrc/autograd/python_bindings.hpp diff --git a/pybuda/csrc/backend_api/CMakeLists.txt b/forge/csrc/backend_api/CMakeLists.txt similarity index 100% rename from pybuda/csrc/backend_api/CMakeLists.txt rename to forge/csrc/backend_api/CMakeLists.txt diff --git a/pybuda/csrc/backend_api/arch_type.cpp b/forge/csrc/backend_api/arch_type.cpp similarity index 100% rename from pybuda/csrc/backend_api/arch_type.cpp rename to forge/csrc/backend_api/arch_type.cpp diff --git a/pybuda/csrc/backend_api/arch_type.hpp b/forge/csrc/backend_api/arch_type.hpp similarity index 100% rename from pybuda/csrc/backend_api/arch_type.hpp rename to forge/csrc/backend_api/arch_type.hpp diff --git a/pybuda/csrc/backend_api/backend_api.cpp b/forge/csrc/backend_api/backend_api.cpp similarity index 100% rename from pybuda/csrc/backend_api/backend_api.cpp rename to forge/csrc/backend_api/backend_api.cpp diff --git a/pybuda/csrc/backend_api/device_config.hpp b/forge/csrc/backend_api/device_config.hpp similarity index 100% rename from pybuda/csrc/backend_api/device_config.hpp rename to forge/csrc/backend_api/device_config.hpp diff --git a/pybuda/csrc/buda_passes.cpp b/forge/csrc/buda_passes.cpp similarity index 95% rename from pybuda/csrc/buda_passes.cpp rename to forge/csrc/buda_passes.cpp index 7ec103f72..bf899428a 100644 --- a/pybuda/csrc/buda_passes.cpp +++ b/forge/csrc/buda_passes.cpp @@ -91,7 +91,7 @@ run_post_initial_graph_passes(graphlib::Graph *graph, py::object compiler_cfg_ob passes::explicate_unsqueeze(graph); passes::fuse_conv2d_bias(graph); - auto inserted_node_id_mapping = decompose_tt_forge_graph(graph, "get_f_pybuda_decompose", compiler_cfg); + auto inserted_node_id_mapping = decompose_tt_forge_graph(graph, "get_f_forge_decompose", compiler_cfg); auto chip_id_assignments = passes::fracture(graph, fracture_groups); return std::make_tuple(inserted_node_id_mapping, chip_id_assignments); } @@ -165,7 +165,7 @@ void run_optimization_graph_passes(graphlib::Graph *graph) passes::bind_reshape_to_io(graph); passes::fuse_per_channel_ops(graph); - if (not env_as("PYBUDA_DISABLE_CONSTANT_FOLDING")) + if (not env_as("FORGE_DISABLE_CONSTANT_FOLDING")) passes::constant_folding(graph); passes::move_select_after_matmul_optional(graph); @@ -182,7 +182,7 @@ std::vector> run_post_optimize_dec std::shared_ptr compiler_cfg = make_shared_py_object(compiler_cfg_object); passes::print_graph(graph, "POST_OPTIMIZE"); - auto inserted_node_id_mapping = decompose_tt_forge_graph(graph, "get_f_pybuda_decompose_post_optimize", compiler_cfg); + auto inserted_node_id_mapping = decompose_tt_forge_graph(graph, "get_f_forge_decompose_post_optimize", compiler_cfg); return inserted_node_id_mapping; } @@ -195,7 +195,7 @@ std::vector> run_post_autograd_gra passes::print_graph(graph, "POST_AUTOGRAD"); lower_bwd_gather_ops(graph); - return decompose_tt_forge_graph(graph, "get_f_pybuda_decompose_post_autograd", compiler_cfg); + return decompose_tt_forge_graph(graph, "get_f_forge_decompose_post_autograd", compiler_cfg); } // ********** Run pre-lowering passes ********** @@ -214,12 +214,12 @@ graphlib::Graph* run_pre_lowering_passes( fuse_requantize(graph); // Fuse gelu into matmuls - if 
(env_as("PYBUDA_FUSE_MATMUL_GELU")) { + if (env_as("FORGE_FUSE_MATMUL_GELU")) { fuse_gelu(graph); } // Manually convert broadcast ops to tms, so insert tile broadcast ops can work generically - // Note this is not lowering, these are still pybuda tms + // Note this is not lowering, these are still forge tms convert_broadcast_ops_to_tms(graph); // Insert tile broadcast ops @@ -286,7 +286,7 @@ std::unique_ptr run_pre_placer_buda_passes( // Add buffer NOP between host input and ops if there are multiple ops reading from same host input. // - if (input_queues_on_host and env_as("PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING")) + if (input_queues_on_host and env_as("FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING")) { fix_host_inputs(lowered_graph.get()); } @@ -331,7 +331,7 @@ std::unique_ptr run_pre_placer_buda_passes( split_broadcasts(lowered_graph.get()); } - if (env_as("PYBUDA_ENABLE_CONSTANT_PRE_BROADCAST")) + if (env_as("FORGE_ENABLE_CONSTANT_PRE_BROADCAST")) { constant_pre_broadcast(lowered_graph.get()); } diff --git a/pybuda/csrc/buda_passes.hpp b/forge/csrc/buda_passes.hpp similarity index 100% rename from pybuda/csrc/buda_passes.hpp rename to forge/csrc/buda_passes.hpp diff --git a/pybuda/csrc/pybuda_bindings.cpp b/forge/csrc/forge_bindings.cpp similarity index 98% rename from pybuda/csrc/pybuda_bindings.cpp rename to forge/csrc/forge_bindings.cpp index 2f5a3d2ab..556662a99 100644 --- a/pybuda/csrc/pybuda_bindings.cpp +++ b/forge/csrc/forge_bindings.cpp @@ -40,11 +40,11 @@ namespace tt { PYBIND11_MODULE(_C, m) { - // Register signal handlers when loading pybuda module. + // Register signal handlers when loading forge module. static SignalHandlers signal_handlers; - m.attr("__name__") = "pybuda._C"; - m.doc() = "python bindings to pybuda framwork"; + m.attr("__name__") = "forge._C"; + m.doc() = "python bindings to forge framwork"; m.attr("VERSION") = py::int_(1); @@ -108,7 +108,7 @@ PYBIND11_MODULE(_C, m) { return decode.at(encoded); }); - py::module_ m_graph = m.def_submodule("graph", "Submodule defining pybuda graph functions"); + py::module_ m_graph = m.def_submodule("graph", "Submodule defining forge graph functions"); GraphModule(m_graph); py::module_ m_autograd = m.def_submodule("autograd", "Submodule defining autograd_engine."); diff --git a/pybuda/csrc/graph_lib/CMakeLists.txt b/forge/csrc/graph_lib/CMakeLists.txt similarity index 100% rename from pybuda/csrc/graph_lib/CMakeLists.txt rename to forge/csrc/graph_lib/CMakeLists.txt diff --git a/pybuda/csrc/graph_lib/defines.cpp b/forge/csrc/graph_lib/defines.cpp similarity index 100% rename from pybuda/csrc/graph_lib/defines.cpp rename to forge/csrc/graph_lib/defines.cpp diff --git a/pybuda/csrc/graph_lib/defines.hpp b/forge/csrc/graph_lib/defines.hpp similarity index 100% rename from pybuda/csrc/graph_lib/defines.hpp rename to forge/csrc/graph_lib/defines.hpp diff --git a/pybuda/csrc/graph_lib/edge.cpp b/forge/csrc/graph_lib/edge.cpp similarity index 100% rename from pybuda/csrc/graph_lib/edge.cpp rename to forge/csrc/graph_lib/edge.cpp diff --git a/pybuda/csrc/graph_lib/edge.hpp b/forge/csrc/graph_lib/edge.hpp similarity index 100% rename from pybuda/csrc/graph_lib/edge.hpp rename to forge/csrc/graph_lib/edge.hpp diff --git a/pybuda/csrc/graph_lib/graph.cpp b/forge/csrc/graph_lib/graph.cpp similarity index 100% rename from pybuda/csrc/graph_lib/graph.cpp rename to forge/csrc/graph_lib/graph.cpp diff --git a/pybuda/csrc/graph_lib/graph.hpp b/forge/csrc/graph_lib/graph.hpp similarity index 100% rename from pybuda/csrc/graph_lib/graph.hpp rename to 
forge/csrc/graph_lib/graph.hpp diff --git a/pybuda/csrc/graph_lib/node.cpp b/forge/csrc/graph_lib/node.cpp similarity index 99% rename from pybuda/csrc/graph_lib/node.cpp rename to forge/csrc/graph_lib/node.cpp index 6ccdf6812..3498876ce 100644 --- a/pybuda/csrc/graph_lib/node.cpp +++ b/forge/csrc/graph_lib/node.cpp @@ -119,7 +119,7 @@ std::string node_type_to_string(const NodeType& node_type) case NodeType::kQueue: return "Queue"; case NodeType::kBudaOp: return "BudaOp"; case NodeType::kBudaNaryTM: return "BudaNaryTM"; - case NodeType::kPyOp: return "PyBudaOp"; + case NodeType::kPyOp: return "ForgeOp"; default: TT_ASSERT(false, "Invalid node type"); } return ""; diff --git a/pybuda/csrc/graph_lib/node.hpp b/forge/csrc/graph_lib/node.hpp similarity index 100% rename from pybuda/csrc/graph_lib/node.hpp rename to forge/csrc/graph_lib/node.hpp diff --git a/pybuda/csrc/graph_lib/node_types.cpp b/forge/csrc/graph_lib/node_types.cpp similarity index 98% rename from pybuda/csrc/graph_lib/node_types.cpp rename to forge/csrc/graph_lib/node_types.cpp index 5f298836c..59235a5ae 100644 --- a/pybuda/csrc/graph_lib/node_types.cpp +++ b/forge/csrc/graph_lib/node_types.cpp @@ -211,7 +211,7 @@ void BudaOpNode::copy_lowered_op_attributes(PyOpNode *node) set_intermediate_df(node->output_df()); // by default, same as output // accumulate df will not be set here, we'll have an overall default - // If there are golden transforms, they operate on pybuda shapes, + // If there are golden transforms, they operate on forge shapes, // so we need to insert narrowing in order make BUDA compatible set_golden_transforms(node->get_golden_transforms()); if (not get_golden_transforms().empty()) @@ -260,7 +260,7 @@ void PyOpNode::copy_parent_op_attributes(PyOpNode *node) bool OpNode::is_tm() const { - std::string path = node_type() == NodeType::kPyOp ? "pybuda.op.eval.pybuda" : "pybuda.op.eval.buda"; + std::string path = node_type() == NodeType::kPyOp ? "forge.op.eval.forge" : "forge.op.eval.buda"; py::object eval_module = py::module_::import(path.c_str()); py::function is_tm = eval_module.attr("is_tm"); return is_tm(op_type()).cast(); @@ -383,7 +383,7 @@ std::unique_ptr OutputNode::clone(std::string const& name) { static py::function get_f_instance(IRLevel ir_level) { auto eval_module = - py::module_::import((ir_level == IRLevel::IR_BUDA) ? "pybuda.op.eval.buda" : "pybuda.op.eval.pybuda"); + py::module_::import((ir_level == IRLevel::IR_BUDA) ? 
"forge.op.eval.buda" : "forge.op.eval.forge"); return eval_module.attr("get_f_instance"); } diff --git a/pybuda/csrc/graph_lib/node_types.hpp b/forge/csrc/graph_lib/node_types.hpp similarity index 100% rename from pybuda/csrc/graph_lib/node_types.hpp rename to forge/csrc/graph_lib/node_types.hpp diff --git a/pybuda/csrc/graph_lib/python_bindings.cpp b/forge/csrc/graph_lib/python_bindings.cpp similarity index 98% rename from pybuda/csrc/graph_lib/python_bindings.cpp rename to forge/csrc/graph_lib/python_bindings.cpp index d64d90649..c88f36250 100644 --- a/pybuda/csrc/graph_lib/python_bindings.cpp +++ b/forge/csrc/graph_lib/python_bindings.cpp @@ -595,7 +595,7 @@ void GraphModule(py::module &m_graph) }); // Query - py::module_ m_graph_query = m_graph.def_submodule("query", "Submodule defining pybuda graph queries"); + py::module_ m_graph_query = m_graph.def_submodule("query", "Submodule defining forge graph queries"); py::class_(m_graph_query, "NodePredicate") .def( @@ -618,12 +618,12 @@ py::object eval_op(graphlib::OpType type, std::vector inputs, graphl py::object eval_module; switch (ir_level) { - case graphlib::IRLevel::IR_TT_FORGE: eval_module = py::module_::import("pybuda.op.eval.pybuda"); break; - case graphlib::IRLevel::IR_BUDA: eval_module = py::module_::import("pybuda.op.eval.buda"); break; - case graphlib::IRLevel::IR_CONSTEVAL: eval_module = py::module_::import("pybuda.op.eval.pybuda"); break; + case graphlib::IRLevel::IR_TT_FORGE: eval_module = py::module_::import("forge.op.eval.forge"); break; + case graphlib::IRLevel::IR_BUDA: eval_module = py::module_::import("forge.op.eval.buda"); break; + case graphlib::IRLevel::IR_CONSTEVAL: eval_module = py::module_::import("forge.op.eval.forge"); break; } - py::function pybuda_eval = eval_module.attr("get_f_pybuda_eval")(type); + py::function forge_eval = eval_module.attr("get_f_forge_eval")(type); log_trace(LogEval, " eval_op: {}", type); bool has_requant = type.buda_attrs.find("requant") != type.buda_attrs.end() and std::get(type.buda_attrs.at("requant")); @@ -636,16 +636,16 @@ py::object eval_op(graphlib::OpType type, std::vector inputs, graphl inputs_ = inputs; } - py::object result = pybuda_eval(inputs_); + py::object result = forge_eval(inputs_); - py::object common_module = py::module_::import("pybuda.op.eval"); + py::object common_module = py::module_::import("forge.op.eval"); common_module.attr("eval_debug_print")(type.op, inputs, result); if (has_requant and ir_level == graphlib::IRLevel::IR_BUDA and type.op == "matmul") { std::vector requant_inps = {result, inputs.back()}; graphlib::OpType requant("requantization", {type.buda_attrs.at("zero_point")}); - auto requant_eval = eval_module.attr("get_f_pybuda_eval")(requant); + auto requant_eval = eval_module.attr("get_f_forge_eval")(requant); result = requant_eval(requant_inps); } @@ -736,7 +736,7 @@ void eval_partial_datacopy_golden_transforms( bool compare_tensor_to_golden(const std::string &name, const py::object &golden, const py::object &calculated, float relative_atol, float pcc, graphlib::IRLevel ir_level, bool warning_only = false) { - py::object eval_module = py::module_::import("pybuda.op.eval"); + py::object eval_module = py::module_::import("forge.op.eval"); bool is_buda = ir_level == graphlib::IRLevel::IR_BUDA; if (pcc == 0.0) @@ -758,24 +758,24 @@ bool compare_tensor_to_golden(const std::string &name, const py::object &golden, } py::object create_constant_tensor(float constant_value, std::pair constant_dims, bool is_buda, DataFormat df) { - py::object eval_module = 
py::module_::import("pybuda.op.eval"); + py::object eval_module = py::module_::import("forge.op.eval"); return eval_module.attr("create_constant_tensor_from_value")(constant_value, constant_dims, is_buda, df); } py::object create_constant_tensor(const std::vector &tile_value, bool is_buda, DataFormat df) { - py::object eval_module = py::module_::import("pybuda.op.eval"); + py::object eval_module = py::module_::import("forge.op.eval"); return eval_module.attr("create_constant_tensor_from_tile")(tile_value, is_buda, df); } py::object create_constant_tensor(const std::vector &tensor_value, const Shape &tensor_shape, bool is_buda, tt::DataFormat df) { - py::object eval_module = py::module_::import("pybuda.op.eval"); + py::object eval_module = py::module_::import("forge.op.eval"); return eval_module.attr("create_constant_tensor_from_tensor")(tensor_value, tensor_shape.as_vector(), is_buda, df); } void dump_tensor(py::object tensor, std::string filename) { - py::object eval_module = py::module_::import("pybuda.op.eval"); + py::object eval_module = py::module_::import("forge.op.eval"); eval_module.attr("dump_tensor")(tensor, filename); } @@ -836,7 +836,7 @@ py::object consteval_input( py::function narrow_buda_tensor_to_pytorch; py::function pad_pytorch_tensor_to_buda; if (is_buda) { - tensor_module = py::module_::import("pybuda.tensor"); + tensor_module = py::module_::import("forge.tensor"); narrow_buda_tensor_to_pytorch = tensor_module.attr("narrow_buda_tensor_to_pytorch"); pad_pytorch_tensor_to_buda = tensor_module.attr("pad_pytorch_tensor_to_buda"); } @@ -918,7 +918,7 @@ py::object eval_input( } else if (is_buda) { - auto tensor_module = py::module_::import("pybuda.tensor"); + auto tensor_module = py::module_::import("forge.tensor"); auto pad_pytorch_tensor_to_buda = tensor_module.attr("pad_pytorch_tensor_to_buda"); return pad_pytorch_tensor_to_buda(inputs.at(node->name()), input->get_tile_broadcast_dims()); } @@ -1160,7 +1160,7 @@ std::vector eval_runtime_tensor_transform(Graph *graph, std::vector< bool compare_tensors(std::shared_ptr tensor0, std::shared_ptr tensor1) { - py::object tensor_module = py::module_::import("pybuda.tensor"); + py::object tensor_module = py::module_::import("forge.tensor"); py::function compare_tensors_func = tensor_module.attr("compare_tensors"); auto tensor0_pt = borrow_shared_py_object(tensor0); auto tensor1_pt = borrow_shared_py_object(tensor1); @@ -1182,7 +1182,7 @@ py::object get_constant_input_value(graphlib::Node *node, bool is_buda) } else if (cnode->is_tensor()) { auto tensor = borrow_shared_py_object(cnode->tensor()); if (is_buda) { - py::object tensor_module = py::module_::import("pybuda.tensor"); + py::object tensor_module = py::module_::import("forge.tensor"); py::function pad_pytorch_tensor_to_buda = tensor_module.attr("pad_pytorch_tensor_to_buda"); tensor = pad_pytorch_tensor_to_buda( tensor, @@ -1368,7 +1368,7 @@ eval_graph( } } - // Populate Pybuda input tensor mapping + // Populate Forge input tensor mapping int input_index = 0; std::vector input_tensors = eval_runtime_tensor_transform(graph, graph->ordered_module_inputs(), inputs); for (Node *node : graph->ordered_module_inputs()) { diff --git a/pybuda/csrc/graph_lib/python_bindings.hpp b/forge/csrc/graph_lib/python_bindings.hpp similarity index 100% rename from pybuda/csrc/graph_lib/python_bindings.hpp rename to forge/csrc/graph_lib/python_bindings.hpp diff --git a/pybuda/csrc/graph_lib/query.hpp b/forge/csrc/graph_lib/query.hpp similarity index 100% rename from pybuda/csrc/graph_lib/query.hpp 
rename to forge/csrc/graph_lib/query.hpp diff --git a/pybuda/csrc/graph_lib/shape.cpp b/forge/csrc/graph_lib/shape.cpp similarity index 100% rename from pybuda/csrc/graph_lib/shape.cpp rename to forge/csrc/graph_lib/shape.cpp diff --git a/pybuda/csrc/graph_lib/shape.hpp b/forge/csrc/graph_lib/shape.hpp similarity index 100% rename from pybuda/csrc/graph_lib/shape.hpp rename to forge/csrc/graph_lib/shape.hpp diff --git a/pybuda/csrc/graph_lib/tests/test_graphlib.cpp b/forge/csrc/graph_lib/tests/test_graphlib.cpp similarity index 100% rename from pybuda/csrc/graph_lib/tests/test_graphlib.cpp rename to forge/csrc/graph_lib/tests/test_graphlib.cpp diff --git a/pybuda/csrc/graph_lib/tests/test_graphlib_utils.cpp b/forge/csrc/graph_lib/tests/test_graphlib_utils.cpp similarity index 100% rename from pybuda/csrc/graph_lib/tests/test_graphlib_utils.cpp rename to forge/csrc/graph_lib/tests/test_graphlib_utils.cpp diff --git a/pybuda/csrc/graph_lib/utils.cpp b/forge/csrc/graph_lib/utils.cpp similarity index 99% rename from pybuda/csrc/graph_lib/utils.cpp rename to forge/csrc/graph_lib/utils.cpp index 37cf34dc5..3ed34b1ae 100644 --- a/pybuda/csrc/graph_lib/utils.cpp +++ b/forge/csrc/graph_lib/utils.cpp @@ -27,7 +27,7 @@ namespace graphlib bool is_eltwise(const OpNode *op) { bool is_buda = dynamic_cast(op) != nullptr; - py::object eval_module = py::module_::import(is_buda ? "pybuda.op.eval.buda" : "pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import(is_buda ? "forge.op.eval.buda" : "forge.op.eval.forge"); py::function is_eltwise = eval_module.attr("is_eltwise"); // TODO: better determination of non elementwise ops bool is_concatenate = op->op_name() == "concatenate"; @@ -37,7 +37,7 @@ bool is_eltwise(const OpNode *op) bool is_eltwise_nary(const OpNode *op) { bool is_buda = dynamic_cast(op) != nullptr; - py::object eval_module = py::module_::import(is_buda ? "pybuda.op.eval.buda" : "pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import(is_buda ? "forge.op.eval.buda" : "forge.op.eval.forge"); py::function is_eltwise_nary = eval_module.attr("is_eltwise_nary"); return is_eltwise_nary(op->op_type()).cast(); } @@ -45,7 +45,7 @@ bool is_eltwise_nary(const OpNode *op) bool is_eltwise_unary(const OpNode *op) { bool is_buda = dynamic_cast(op) != nullptr; - py::object eval_module = py::module_::import(is_buda ? "pybuda.op.eval.buda" : "pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import(is_buda ? "forge.op.eval.buda" : "forge.op.eval.forge"); py::function is_eltwise_unary = eval_module.attr("is_eltwise_unary"); return is_eltwise_unary(op->op_type()).cast(); } @@ -53,7 +53,7 @@ bool is_eltwise_unary(const OpNode *op) bool is_eltwise_binary(const OpNode *op) { bool is_buda = dynamic_cast(op) != nullptr; - py::object eval_module = py::module_::import(is_buda ? "pybuda.op.eval.buda" : "pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import(is_buda ? 
"forge.op.eval.buda" : "forge.op.eval.forge"); py::function is_eltwise_binary = eval_module.attr("is_eltwise_binary"); return is_eltwise_binary(op->op_type()).cast(); } diff --git a/pybuda/csrc/graph_lib/utils.hpp b/forge/csrc/graph_lib/utils.hpp similarity index 100% rename from pybuda/csrc/graph_lib/utils.hpp rename to forge/csrc/graph_lib/utils.hpp diff --git a/pybuda/csrc/lower_to_buda/common.cpp b/forge/csrc/lower_to_buda/common.cpp similarity index 100% rename from pybuda/csrc/lower_to_buda/common.cpp rename to forge/csrc/lower_to_buda/common.cpp diff --git a/pybuda/csrc/lower_to_buda/common.hpp b/forge/csrc/lower_to_buda/common.hpp similarity index 100% rename from pybuda/csrc/lower_to_buda/common.hpp rename to forge/csrc/lower_to_buda/common.hpp diff --git a/pybuda/csrc/passes/amp.cpp b/forge/csrc/passes/amp.cpp similarity index 99% rename from pybuda/csrc/passes/amp.cpp rename to forge/csrc/passes/amp.cpp index e682942d2..e99ec8d06 100644 --- a/pybuda/csrc/passes/amp.cpp +++ b/forge/csrc/passes/amp.cpp @@ -319,9 +319,9 @@ nlohmann::json get_node_to_amp_properties_json(Graph *graph) void dump_mixed_precision_json_to_file(graphlib::Graph *graph, std::optional filepath) { - if (env_as("PYBUDA_DISABLE_REPORTIFY_DUMP")) + if (env_as("FORGE_DISABLE_REPORTIFY_DUMP")) return; - bool enable_dump = env_as("PYBUDA_DUMP_MIXED_PRECISION"); + bool enable_dump = env_as("FORGE_DUMP_MIXED_PRECISION"); if (not enable_dump) { diff --git a/pybuda/csrc/passes/amp.hpp b/forge/csrc/passes/amp.hpp similarity index 100% rename from pybuda/csrc/passes/amp.hpp rename to forge/csrc/passes/amp.hpp diff --git a/pybuda/csrc/passes/bind_reshape_to_io.cpp b/forge/csrc/passes/bind_reshape_to_io.cpp similarity index 100% rename from pybuda/csrc/passes/bind_reshape_to_io.cpp rename to forge/csrc/passes/bind_reshape_to_io.cpp diff --git a/pybuda/csrc/passes/bind_reshape_to_io.hpp b/forge/csrc/passes/bind_reshape_to_io.hpp similarity index 100% rename from pybuda/csrc/passes/bind_reshape_to_io.hpp rename to forge/csrc/passes/bind_reshape_to_io.hpp diff --git a/pybuda/csrc/passes/commutable_pattern.cpp b/forge/csrc/passes/commutable_pattern.cpp similarity index 100% rename from pybuda/csrc/passes/commutable_pattern.cpp rename to forge/csrc/passes/commutable_pattern.cpp diff --git a/pybuda/csrc/passes/commute_utils.cpp b/forge/csrc/passes/commute_utils.cpp similarity index 99% rename from pybuda/csrc/passes/commute_utils.cpp rename to forge/csrc/passes/commute_utils.cpp index db80e8a1e..9128a82e6 100644 --- a/pybuda/csrc/passes/commute_utils.cpp +++ b/forge/csrc/passes/commute_utils.cpp @@ -179,7 +179,7 @@ graphlib::Shape shape_of_only_operand(graphlib::Graph *graph, graphlib::OpNode * bool are_compatible_ops(graphlib::Graph *graph, graphlib::OpNode *a, graphlib::OpNode *b, graphlib::Shape *updated_shape, bool check_inverse) { - py::object eval_module = py::module_::import("pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import("forge.op.eval.forge"); py::function is_tm = eval_module.attr("is_tm"); if (a == b) @@ -810,7 +810,7 @@ bool commute_through_quantization( bool is_elementwise(graphlib::OpNode *op) { - py::object eval_module = py::module_::import("pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import("forge.op.eval.forge"); py::function is_eltwise = eval_module.attr("is_eltwise"); return is_eltwise(op->op_type()).cast(); } diff --git a/pybuda/csrc/passes/commute_utils.hpp b/forge/csrc/passes/commute_utils.hpp similarity index 100% rename from pybuda/csrc/passes/commute_utils.hpp 
rename to forge/csrc/passes/commute_utils.hpp diff --git a/pybuda/csrc/passes/constant_folding.cpp b/forge/csrc/passes/constant_folding.cpp similarity index 100% rename from pybuda/csrc/passes/constant_folding.cpp rename to forge/csrc/passes/constant_folding.cpp diff --git a/pybuda/csrc/passes/constant_folding.hpp b/forge/csrc/passes/constant_folding.hpp similarity index 100% rename from pybuda/csrc/passes/constant_folding.hpp rename to forge/csrc/passes/constant_folding.hpp diff --git a/pybuda/csrc/passes/consteval.cpp b/forge/csrc/passes/consteval.cpp similarity index 100% rename from pybuda/csrc/passes/consteval.cpp rename to forge/csrc/passes/consteval.cpp diff --git a/pybuda/csrc/passes/consteval.hpp b/forge/csrc/passes/consteval.hpp similarity index 100% rename from pybuda/csrc/passes/consteval.hpp rename to forge/csrc/passes/consteval.hpp diff --git a/pybuda/csrc/passes/dataformat.cpp b/forge/csrc/passes/dataformat.cpp similarity index 99% rename from pybuda/csrc/passes/dataformat.cpp rename to forge/csrc/passes/dataformat.cpp index 507891039..49d04931c 100644 --- a/pybuda/csrc/passes/dataformat.cpp +++ b/forge/csrc/passes/dataformat.cpp @@ -705,7 +705,7 @@ void configure_intermediate_data_formats(graphlib::Graph *graph) void fix_math_fidelity(graphlib::Graph *graph) { - bool disable_cap_sparse_mm_fidelity = env_as("PYBUDA_DISABLE_CAP_SPARSE_MM_FIDELITY", false); + bool disable_cap_sparse_mm_fidelity = env_as("FORGE_DISABLE_CAP_SPARSE_MM_FIDELITY", false); for (Node *node : graph->nodes()) { @@ -915,7 +915,7 @@ void validate_post_placer_data_formats(const graphlib::Graph *graph, const Devic void configure_stochastic_rounding(graphlib::Graph *graph, const bool is_stochastic_rounding_supported) { - bool enable_stochastic_rounding = env_as("PYBUDA_ENABLE_STOCHASTIC_ROUNDING", false); + bool enable_stochastic_rounding = env_as("FORGE_ENABLE_STOCHASTIC_ROUNDING", false); // For WH_B0, we support a few different flavours of stochastic rounding // 1. 
No stochastic rounding - fpu default: RN, sfpu default: RNE (supported) diff --git a/pybuda/csrc/passes/dataformat.hpp b/forge/csrc/passes/dataformat.hpp similarity index 100% rename from pybuda/csrc/passes/dataformat.hpp rename to forge/csrc/passes/dataformat.hpp diff --git a/pybuda/csrc/passes/decomposing_context.cpp b/forge/csrc/passes/decomposing_context.cpp similarity index 95% rename from pybuda/csrc/passes/decomposing_context.cpp rename to forge/csrc/passes/decomposing_context.cpp index 672cf3c19..f9b43a09a 100644 --- a/pybuda/csrc/passes/decomposing_context.cpp +++ b/forge/csrc/passes/decomposing_context.cpp @@ -47,11 +47,11 @@ NodeContext DecomposingContext::op( } new_node->set_golden_transforms(this->node_->get_golden_transforms()); - py::module_ eval_module = py::module_::import("pybuda.op.eval.pybuda"); - py::function pybuda_shape = eval_module.attr("get_f_pybuda_shape")(op_type); + py::module_ eval_module = py::module_::import("forge.op.eval.forge"); + py::function forge_shape = eval_module.attr("get_f_forge_shape")(op_type); std::vector> operand_tuples; for (NodeContext const &op_node : operands) operand_tuples.push_back(op_node.shape.as_vector()); - py::tuple ret = pybuda_shape(operand_tuples); + py::tuple ret = forge_shape(operand_tuples); graphlib::Shape shape = graphlib::Shape::create(ret[0].cast>()); std::vector broadcasts = ret[1].cast>(); @@ -162,7 +162,7 @@ std::vector> decompose_tt_forge_gr Graph *graph, const char *dispatcher_name, std::shared_ptr compiler_cfg) { std::vector> inserted_node_id_mapping; - py::module_ eval_module = py::module_::import("pybuda.op.eval.pybuda"); + py::module_ eval_module = py::module_::import("forge.op.eval.forge"); uint32_t nodes_removed = 1; while(nodes_removed) { @@ -181,7 +181,7 @@ std::vector> decompose_tt_forge_gr continue; } - py::function pybuda_decompose = eval_module.attr(dispatcher_name)(type); + py::function forge_decompose = eval_module.attr(dispatcher_name)(type); std::vector inputs; for(graphlib::Edge op_edge : graph->operand_data_edges(node)) { @@ -193,7 +193,7 @@ std::vector> decompose_tt_forge_gr DecomposingContext dc(graph, py_node, compiler_cfg); log_trace(LogGraphCompiler, "Decomposing {}", node->name()); - pybuda_decompose(&dc, inputs); + forge_decompose(&dc, inputs); if (dc.get_op_index() == 0) { // No ops were added diff --git a/pybuda/csrc/passes/decomposing_context.hpp b/forge/csrc/passes/decomposing_context.hpp similarity index 100% rename from pybuda/csrc/passes/decomposing_context.hpp rename to forge/csrc/passes/decomposing_context.hpp diff --git a/pybuda/csrc/passes/erase_consecutive_reshape.cpp b/forge/csrc/passes/erase_consecutive_reshape.cpp similarity index 100% rename from pybuda/csrc/passes/erase_consecutive_reshape.cpp rename to forge/csrc/passes/erase_consecutive_reshape.cpp diff --git a/pybuda/csrc/passes/erase_consecutive_reshape.hpp b/forge/csrc/passes/erase_consecutive_reshape.hpp similarity index 100% rename from pybuda/csrc/passes/erase_consecutive_reshape.hpp rename to forge/csrc/passes/erase_consecutive_reshape.hpp diff --git a/pybuda/csrc/passes/erase_inverse_ops.cpp b/forge/csrc/passes/erase_inverse_ops.cpp similarity index 100% rename from pybuda/csrc/passes/erase_inverse_ops.cpp rename to forge/csrc/passes/erase_inverse_ops.cpp diff --git a/pybuda/csrc/passes/erase_inverse_ops.hpp b/forge/csrc/passes/erase_inverse_ops.hpp similarity index 100% rename from pybuda/csrc/passes/erase_inverse_ops.hpp rename to forge/csrc/passes/erase_inverse_ops.hpp diff --git 
a/pybuda/csrc/passes/erase_unnecessary_4d_tm_sequence.cpp b/forge/csrc/passes/erase_unnecessary_4d_tm_sequence.cpp similarity index 100% rename from pybuda/csrc/passes/erase_unnecessary_4d_tm_sequence.cpp rename to forge/csrc/passes/erase_unnecessary_4d_tm_sequence.cpp diff --git a/pybuda/csrc/passes/erase_unnecessary_4d_tm_sequence.hpp b/forge/csrc/passes/erase_unnecessary_4d_tm_sequence.hpp similarity index 100% rename from pybuda/csrc/passes/erase_unnecessary_4d_tm_sequence.hpp rename to forge/csrc/passes/erase_unnecessary_4d_tm_sequence.hpp diff --git a/pybuda/csrc/passes/explicate_unsqueeze.cpp b/forge/csrc/passes/explicate_unsqueeze.cpp similarity index 100% rename from pybuda/csrc/passes/explicate_unsqueeze.cpp rename to forge/csrc/passes/explicate_unsqueeze.cpp diff --git a/pybuda/csrc/passes/explicate_unsqueeze.hpp b/forge/csrc/passes/explicate_unsqueeze.hpp similarity index 100% rename from pybuda/csrc/passes/explicate_unsqueeze.hpp rename to forge/csrc/passes/explicate_unsqueeze.hpp diff --git a/pybuda/csrc/passes/fracture.cpp b/forge/csrc/passes/fracture.cpp similarity index 100% rename from pybuda/csrc/passes/fracture.cpp rename to forge/csrc/passes/fracture.cpp diff --git a/pybuda/csrc/passes/fracture.hpp b/forge/csrc/passes/fracture.hpp similarity index 100% rename from pybuda/csrc/passes/fracture.hpp rename to forge/csrc/passes/fracture.hpp diff --git a/pybuda/csrc/passes/fuse_conv2d_bias.cpp b/forge/csrc/passes/fuse_conv2d_bias.cpp similarity index 100% rename from pybuda/csrc/passes/fuse_conv2d_bias.cpp rename to forge/csrc/passes/fuse_conv2d_bias.cpp diff --git a/pybuda/csrc/passes/fuse_conv2d_bias.hpp b/forge/csrc/passes/fuse_conv2d_bias.hpp similarity index 100% rename from pybuda/csrc/passes/fuse_conv2d_bias.hpp rename to forge/csrc/passes/fuse_conv2d_bias.hpp diff --git a/pybuda/csrc/passes/fuse_pad_conv2d.cpp b/forge/csrc/passes/fuse_pad_conv2d.cpp similarity index 100% rename from pybuda/csrc/passes/fuse_pad_conv2d.cpp rename to forge/csrc/passes/fuse_pad_conv2d.cpp diff --git a/pybuda/csrc/passes/fuse_pad_conv2d.hpp b/forge/csrc/passes/fuse_pad_conv2d.hpp similarity index 100% rename from pybuda/csrc/passes/fuse_pad_conv2d.hpp rename to forge/csrc/passes/fuse_pad_conv2d.hpp diff --git a/pybuda/csrc/passes/fuse_per_channel_ops.cpp b/forge/csrc/passes/fuse_per_channel_ops.cpp similarity index 99% rename from pybuda/csrc/passes/fuse_per_channel_ops.cpp rename to forge/csrc/passes/fuse_per_channel_ops.cpp index fce07c9af..b7dfd95e9 100644 --- a/pybuda/csrc/passes/fuse_per_channel_ops.cpp +++ b/forge/csrc/passes/fuse_per_channel_ops.cpp @@ -18,7 +18,7 @@ namespace tt::passes static bool is_elementwise_binary(graphlib::OpNode *op, graphlib::Graph *graph) { - py::object eval_module = py::module_::import("pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import("forge.op.eval.forge"); py::function is_eltwise = eval_module.attr("is_eltwise"); bool is_eltwise_op = is_eltwise(op->op_type()).cast(); bool is_binary = graph->data_operands(op).size() == 2; @@ -352,7 +352,7 @@ static bool fuse_per_channel_concat(graphlib::Graph *graph, graphlib::OpNode *co const_shape[concat_dim] = operand_input_shapes.at(merge_group_operand_indices[i])[concat_dim]; // Create a dummy const node - py::object eval_module = py::module_::import("pybuda.op.eval"); + py::object eval_module = py::module_::import("forge.op.eval"); auto const_tensor = make_shared_py_object( eval_module.attr("create_constant_tensor_from_tensor") (std::vector{const_value}, const_shape.as_vector(), false, 
ops[0].second->output_df())); diff --git a/pybuda/csrc/passes/fuse_per_channel_ops.hpp b/forge/csrc/passes/fuse_per_channel_ops.hpp similarity index 100% rename from pybuda/csrc/passes/fuse_per_channel_ops.hpp rename to forge/csrc/passes/fuse_per_channel_ops.hpp diff --git a/pybuda/csrc/passes/fuse_redundant_tm_sequence.cpp b/forge/csrc/passes/fuse_redundant_tm_sequence.cpp similarity index 96% rename from pybuda/csrc/passes/fuse_redundant_tm_sequence.cpp rename to forge/csrc/passes/fuse_redundant_tm_sequence.cpp index 83de8006b..f2d42df11 100644 --- a/pybuda/csrc/passes/fuse_redundant_tm_sequence.cpp +++ b/forge/csrc/passes/fuse_redundant_tm_sequence.cpp @@ -30,16 +30,16 @@ bool equivalent_pattern(const TMPattern& pattern1, const TMPattern& pattern2) { } graphlib::Shape replacement_output_shape(graphlib::Shape input_shape, const TMPattern& pattern) { - py::object eval_module = py::module_::import("pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import("forge.op.eval.forge"); for (uint i = 0; i < pattern.size(); i++) { auto op_name = pattern[i].op_name; auto attrs = pattern[i].attrs; - py::function pybuda_shape = eval_module.attr("get_f_pybuda_shape")(pattern[i].as_op_type()); + py::function forge_shape = eval_module.attr("get_f_forge_shape")(pattern[i].as_op_type()); std::vector> operand_tuples; operand_tuples.push_back(input_shape.as_vector()); - py::tuple ret = pybuda_shape(operand_tuples); + py::tuple ret = forge_shape(operand_tuples); graphlib::Shape shape = graphlib::Shape::create(ret[0].cast>()); input_shape = shape; } @@ -172,7 +172,7 @@ bool replace_pattern_with_new_pattern( bool fuse_tm_sequences(tt::graphlib::Graph* graph,TMPatternPairs& pattern_map) { bool updated = true; - py::object eval_module = py::module_::import("pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import("forge.op.eval.forge"); py::function is_tm = eval_module.attr("is_tm"); bool updated_anything = false; while (updated) { @@ -243,7 +243,7 @@ bool fuse_tm_sequences(tt::graphlib::Graph* graph,TMPatternPairs& pattern_map) { // Check if current pattern matches search pattern bool same_pattern = equivalent_pattern(current_pattern, search_pattern); - // Verify i/o shape by calling pybuda shape function + // Verify i/o shape by calling forge shape function graphlib::Shape output_shape = replacement_output_shape(sequence_input_shape, replace_pattern); // Make sure output shape is the same after replacement diff --git a/pybuda/csrc/passes/fuse_redundant_tm_sequence.hpp b/forge/csrc/passes/fuse_redundant_tm_sequence.hpp similarity index 100% rename from pybuda/csrc/passes/fuse_redundant_tm_sequence.hpp rename to forge/csrc/passes/fuse_redundant_tm_sequence.hpp diff --git a/pybuda/csrc/passes/fuse_reshape_transpose_into_slice.cpp b/forge/csrc/passes/fuse_reshape_transpose_into_slice.cpp similarity index 98% rename from pybuda/csrc/passes/fuse_reshape_transpose_into_slice.cpp rename to forge/csrc/passes/fuse_reshape_transpose_into_slice.cpp index 57a5ba3a7..e839c273e 100644 --- a/pybuda/csrc/passes/fuse_reshape_transpose_into_slice.cpp +++ b/forge/csrc/passes/fuse_reshape_transpose_into_slice.cpp @@ -84,7 +84,7 @@ graphlib::OpNode *find_valid_candidate( } else { - log_debug(LogGraphCompiler, "Invalid PyBuda Stack/Slice type: {}", ref_op_type); + log_debug(LogGraphCompiler, "Invalid Forge Stack/Slice type: {}", ref_op_type); return nullptr; } } @@ -296,7 +296,7 @@ bool valid_commute_through_forks( bool can_commute_past_operand(graphlib::OpNode *op) { // Element-wise ops are allowed as 
they are not tackling with Z dim - py::object eval_module = py::module_::import("pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import("forge.op.eval.forge"); py::function is_eltwise_fun = eval_module.attr("is_eltwise"); bool can_commute = is_eltwise_fun(op->op_type()).cast(); @@ -330,7 +330,7 @@ void commute_through_forks( } else { - log_debug(LogGraphCompiler, "Invalid PyBuda Stack/Slice type: {}", ref_op_type); + log_debug(LogGraphCompiler, "Invalid Forge Stack/Slice type: {}", ref_op_type); } } @@ -416,7 +416,7 @@ void update_shape_during_commute(graphlib::Graph *graph, graphlib::Node *operand if (graphlib::OpNode *op = dynamic_cast(operand_node)) { // Use Python API to check if op is element-wise - py::object eval_module = py::module_::import("pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import("forge.op.eval.forge"); py::function is_eltwise_fun = eval_module.attr("is_eltwise"); bool is_eltwise = is_eltwise_fun(op->op_type()).cast(); // Handle element-wise operands @@ -484,7 +484,7 @@ void fuse_into_slice_or_stack(StackSliceOpType ref_op_type, graphlib::Graph *gra } else { - log_debug(LogGraphCompiler, "Invalid PyBuda Stack/Slice type: {}", ref_op_type); + log_debug(LogGraphCompiler, "Invalid Forge Stack/Slice type: {}", ref_op_type); } } diff --git a/pybuda/csrc/passes/fuse_reshape_transpose_into_slice.hpp b/forge/csrc/passes/fuse_reshape_transpose_into_slice.hpp similarity index 100% rename from pybuda/csrc/passes/fuse_reshape_transpose_into_slice.hpp rename to forge/csrc/passes/fuse_reshape_transpose_into_slice.hpp diff --git a/pybuda/csrc/passes/generate_initial_flops_estimate.cpp b/forge/csrc/passes/generate_initial_flops_estimate.cpp similarity index 86% rename from pybuda/csrc/passes/generate_initial_flops_estimate.cpp rename to forge/csrc/passes/generate_initial_flops_estimate.cpp index a69343957..950dc9f6c 100644 --- a/pybuda/csrc/passes/generate_initial_flops_estimate.cpp +++ b/forge/csrc/passes/generate_initial_flops_estimate.cpp @@ -31,9 +31,9 @@ void generate_initial_flops_estimate(graphlib::Graph *graph) operand_tuples.push_back(data_operand->shape().as_vector()); inputs.push_back(data_operand->shape()); } - py::object eval_module = py::module_::import("pybuda.op.eval.pybuda"); + py::object eval_module = py::module_::import("forge.op.eval.forge"); py::function initial_flops_estimate = - eval_module.attr("get_f_pybuda_initial_flops_estimate")(op->op_type_ptr()); + eval_module.attr("get_f_forge_initial_flops_estimate")(op->op_type_ptr()); py::object ret = initial_flops_estimate(operand_tuples); long flops; @@ -65,10 +65,10 @@ void generate_initial_flops_estimate(graphlib::Graph *graph) { log_trace(LogGraphCompiler, "{}, {}, {}", p.first, std::get<1>(p.second), std::get<0>(p.second)); } - if (env_as("PYBUDA_SHOW_FLOPS_ESTIMATE")) - log_info(LogGraphCompiler, "Initial flops estimate from PyBuda: {}B, total_ops: {}", total_flops / 1e9, total_ops); + if (env_as("FORGE_SHOW_FLOPS_ESTIMATE")) + log_info(LogGraphCompiler, "Initial flops estimate from Forge: {}B, total_ops: {}", total_flops / 1e9, total_ops); else - log_trace(LogGraphCompiler, "Initial flops estimate from PyBuda: {}B, total_ops: {}", total_flops / 1e9, total_ops); + log_trace(LogGraphCompiler, "Initial flops estimate from Forge: {}B, total_ops: {}", total_flops / 1e9, total_ops); } } // namespace tt::passes diff --git a/pybuda/csrc/passes/generate_initial_flops_estimate.hpp b/forge/csrc/passes/generate_initial_flops_estimate.hpp similarity index 100% rename from 
pybuda/csrc/passes/generate_initial_flops_estimate.hpp rename to forge/csrc/passes/generate_initial_flops_estimate.hpp diff --git a/pybuda/csrc/passes/hoist_transforms_to_inputs.cpp b/forge/csrc/passes/hoist_transforms_to_inputs.cpp similarity index 100% rename from pybuda/csrc/passes/hoist_transforms_to_inputs.cpp rename to forge/csrc/passes/hoist_transforms_to_inputs.cpp diff --git a/pybuda/csrc/passes/hoist_transforms_to_inputs.hpp b/forge/csrc/passes/hoist_transforms_to_inputs.hpp similarity index 100% rename from pybuda/csrc/passes/hoist_transforms_to_inputs.hpp rename to forge/csrc/passes/hoist_transforms_to_inputs.hpp diff --git a/pybuda/csrc/passes/insert_inverse_on_io.cpp b/forge/csrc/passes/insert_inverse_on_io.cpp similarity index 100% rename from pybuda/csrc/passes/insert_inverse_on_io.cpp rename to forge/csrc/passes/insert_inverse_on_io.cpp diff --git a/pybuda/csrc/passes/insert_inverse_on_io.hpp b/forge/csrc/passes/insert_inverse_on_io.hpp similarity index 100% rename from pybuda/csrc/passes/insert_inverse_on_io.hpp rename to forge/csrc/passes/insert_inverse_on_io.hpp diff --git a/pybuda/csrc/passes/limit_to_4d_reshape.cpp b/forge/csrc/passes/limit_to_4d_reshape.cpp similarity index 100% rename from pybuda/csrc/passes/limit_to_4d_reshape.cpp rename to forge/csrc/passes/limit_to_4d_reshape.cpp diff --git a/pybuda/csrc/passes/limit_to_4d_reshape.hpp b/forge/csrc/passes/limit_to_4d_reshape.hpp similarity index 100% rename from pybuda/csrc/passes/limit_to_4d_reshape.hpp rename to forge/csrc/passes/limit_to_4d_reshape.hpp diff --git a/pybuda/csrc/passes/link_past_cache_ios.cpp b/forge/csrc/passes/link_past_cache_ios.cpp similarity index 99% rename from pybuda/csrc/passes/link_past_cache_ios.cpp rename to forge/csrc/passes/link_past_cache_ios.cpp index a4abf6f64..73d4ca838 100644 --- a/pybuda/csrc/passes/link_past_cache_ios.cpp +++ b/forge/csrc/passes/link_past_cache_ios.cpp @@ -436,7 +436,7 @@ std::map convert_inputs_to_params(graphlib::Graph *gra graph->remove_node(graph->node_by_id(id)); } - if (env_as("PYBUDA_ROTATE_PAST_CACHE_PARAMS", false)) + if (env_as("FORGE_ROTATE_PAST_CACHE_PARAMS", false)) rotate_params(graph, params_to_rotate); return ret; diff --git a/pybuda/csrc/passes/link_past_cache_ios.hpp b/forge/csrc/passes/link_past_cache_ios.hpp similarity index 100% rename from pybuda/csrc/passes/link_past_cache_ios.hpp rename to forge/csrc/passes/link_past_cache_ios.hpp diff --git a/pybuda/csrc/passes/lower_concat_to_runtime_transform.cpp b/forge/csrc/passes/lower_concat_to_runtime_transform.cpp similarity index 98% rename from pybuda/csrc/passes/lower_concat_to_runtime_transform.cpp rename to forge/csrc/passes/lower_concat_to_runtime_transform.cpp index 11c85c467..61ab67e66 100644 --- a/pybuda/csrc/passes/lower_concat_to_runtime_transform.cpp +++ b/forge/csrc/passes/lower_concat_to_runtime_transform.cpp @@ -14,7 +14,7 @@ namespace tt::passes void lower_concat_to_runtime_transform(graphlib::Graph *graph) { - bool concat_on_host = env_as("PYBUDA_CONCAT_ON_HOST"); + bool concat_on_host = env_as("FORGE_CONCAT_ON_HOST"); if (not concat_on_host) return; diff --git a/pybuda/csrc/passes/lower_concat_to_runtime_transform.hpp b/forge/csrc/passes/lower_concat_to_runtime_transform.hpp similarity index 100% rename from pybuda/csrc/passes/lower_concat_to_runtime_transform.hpp rename to forge/csrc/passes/lower_concat_to_runtime_transform.hpp diff --git a/pybuda/csrc/passes/lower_reinterpret_shape.cpp b/forge/csrc/passes/lower_reinterpret_shape.cpp similarity index 100% rename from 
pybuda/csrc/passes/lower_reinterpret_shape.cpp rename to forge/csrc/passes/lower_reinterpret_shape.cpp diff --git a/pybuda/csrc/passes/lower_reinterpret_shape.hpp b/forge/csrc/passes/lower_reinterpret_shape.hpp similarity index 100% rename from pybuda/csrc/passes/lower_reinterpret_shape.hpp rename to forge/csrc/passes/lower_reinterpret_shape.hpp diff --git a/pybuda/csrc/passes/lower_to_mlir.cpp b/forge/csrc/passes/lower_to_mlir.cpp similarity index 100% rename from pybuda/csrc/passes/lower_to_mlir.cpp rename to forge/csrc/passes/lower_to_mlir.cpp diff --git a/pybuda/csrc/passes/lower_to_mlir.hpp b/forge/csrc/passes/lower_to_mlir.hpp similarity index 100% rename from pybuda/csrc/passes/lower_to_mlir.hpp rename to forge/csrc/passes/lower_to_mlir.hpp diff --git a/pybuda/csrc/passes/lowering_context.cpp b/forge/csrc/passes/lowering_context.cpp similarity index 97% rename from pybuda/csrc/passes/lowering_context.cpp rename to forge/csrc/passes/lowering_context.cpp index 7aeda49ea..84ff0a0ca 100644 --- a/pybuda/csrc/passes/lowering_context.cpp +++ b/forge/csrc/passes/lowering_context.cpp @@ -156,7 +156,7 @@ std::vector LoweringContext::shape(NodeContext node, bool use_new return graph->node_by_id(node.id)->shape().as_vector(); } -std::vector LoweringContext::pybuda_shape() const +std::vector LoweringContext::forge_shape() const { return node->shape().as_vector(); } @@ -252,7 +252,7 @@ int calculate_tile_size(int val) Node *lower_queue(Graph *old_graph, Graph *new_graph, Node *old_node, NodeToNodeMap &old_to_new) { Node *new_node = new_graph->add_node(old_node->clone(), old_graph->get_subgraph_id_for_node(old_node->id())); - if (env_as("PYBUDA_ENABLE_TINY_TILE")) { + if (env_as("FORGE_ENABLE_TINY_TILE")) { graphlib::Shape shape = old_node->shape(); shape = shape.canonical(); int tile_size_r = calculate_tile_size(shape[-2]); @@ -310,13 +310,13 @@ Node *lower_queue(Graph *old_graph, Graph *new_graph, Node *old_node, NodeToNode return new_node; } -// Use python + lowering context to convert PyBuda node to Buda node +// Use python + lowering context to convert Forge node to Buda node void lower_node(const LoweringContext &lc) { graphlib::PyOpNode *node = lc.get_node(); graphlib::OpType type = node->op_type(); - auto eval_module = py::module_::import("pybuda.op.eval.pybuda"); - py::function pybuda_lower = eval_module.attr("get_f_pybuda_lower")(type); + auto eval_module = py::module_::import("forge.op.eval.forge"); + py::function forge_lower = eval_module.attr("get_f_forge_lower")(type); std::vector inputs; for(Edge opedge : lc.get_old_graph()->operand_data_edges(node)) @@ -337,7 +337,7 @@ void lower_node(const LoweringContext &lc) outputs.back().shape = lc.get_old_graph()->node_by_id(user_edge.consumer_node_id)->shape(); } - pybuda_lower(lc, inputs, outputs); + forge_lower(lc, inputs, outputs); } diff --git a/pybuda/csrc/passes/lowering_context.hpp b/forge/csrc/passes/lowering_context.hpp similarity index 98% rename from pybuda/csrc/passes/lowering_context.hpp rename to forge/csrc/passes/lowering_context.hpp index 56b2e4e38..cd5c2b25a 100644 --- a/pybuda/csrc/passes/lowering_context.hpp +++ b/forge/csrc/passes/lowering_context.hpp @@ -70,7 +70,7 @@ class LoweringContext { void set_broadcast_dim(NodeContext src, NodeContext dest, int dim, int factor, bool explicit_bcast = false); void set_runtime_tensor_transform(NodeContext node, graphlib::RuntimeTensorTransform t); std::vector shape(NodeContext node, bool use_new_graph = false) const; - std::vector pybuda_shape() const; + std::vector forge_shape() 
const; Graph *get_old_graph() const { return old_graph; } Graph *get_new_graph() const { return new_graph; } graphlib::PyOpNode *get_node() const { return node; } diff --git a/pybuda/csrc/passes/mlir_compiler.cpp b/forge/csrc/passes/mlir_compiler.cpp similarity index 97% rename from pybuda/csrc/passes/mlir_compiler.cpp rename to forge/csrc/passes/mlir_compiler.cpp index 89487a715..cb66b985d 100644 --- a/pybuda/csrc/passes/mlir_compiler.cpp +++ b/forge/csrc/passes/mlir_compiler.cpp @@ -7,7 +7,7 @@ #include "lower_to_mlir.hpp" #include "mlir_passes.hpp" -// PyBuda headers +// Forge headers #include "graph_lib/graph.hpp" #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments" @@ -57,7 +57,7 @@ namespace tt::passes // Load all available dialects context.loadAllAvailableDialects(); - // Generate MLIR from the PyBuda graph. + // Generate MLIR from the Forge graph. mlir::OwningOpRef mlir_module = lower_to_mlir(graph, context); tt::log_info(LogMLIRCompiler, "MLIR module generated successfully."); diff --git a/pybuda/csrc/passes/mlir_compiler.hpp b/forge/csrc/passes/mlir_compiler.hpp similarity index 100% rename from pybuda/csrc/passes/mlir_compiler.hpp rename to forge/csrc/passes/mlir_compiler.hpp diff --git a/pybuda/csrc/passes/mlir_passes.cpp b/forge/csrc/passes/mlir_passes.cpp similarity index 100% rename from pybuda/csrc/passes/mlir_passes.cpp rename to forge/csrc/passes/mlir_passes.cpp diff --git a/pybuda/csrc/passes/mlir_passes.hpp b/forge/csrc/passes/mlir_passes.hpp similarity index 100% rename from pybuda/csrc/passes/mlir_passes.hpp rename to forge/csrc/passes/mlir_passes.hpp diff --git a/pybuda/csrc/passes/move_index_to_mm_weights.cpp b/forge/csrc/passes/move_index_to_mm_weights.cpp similarity index 100% rename from pybuda/csrc/passes/move_index_to_mm_weights.cpp rename to forge/csrc/passes/move_index_to_mm_weights.cpp diff --git a/pybuda/csrc/passes/move_index_to_mm_weights.hpp b/forge/csrc/passes/move_index_to_mm_weights.hpp similarity index 100% rename from pybuda/csrc/passes/move_index_to_mm_weights.hpp rename to forge/csrc/passes/move_index_to_mm_weights.hpp diff --git a/pybuda/csrc/passes/move_requantize.cpp b/forge/csrc/passes/move_requantize.cpp similarity index 100% rename from pybuda/csrc/passes/move_requantize.cpp rename to forge/csrc/passes/move_requantize.cpp diff --git a/pybuda/csrc/passes/move_requantize.hpp b/forge/csrc/passes/move_requantize.hpp similarity index 100% rename from pybuda/csrc/passes/move_requantize.hpp rename to forge/csrc/passes/move_requantize.hpp diff --git a/pybuda/csrc/passes/move_select_after_matmul_optional.cpp b/forge/csrc/passes/move_select_after_matmul_optional.cpp similarity index 99% rename from pybuda/csrc/passes/move_select_after_matmul_optional.cpp rename to forge/csrc/passes/move_select_after_matmul_optional.cpp index 4a0298fe4..f61aae7dc 100644 --- a/pybuda/csrc/passes/move_select_after_matmul_optional.cpp +++ b/forge/csrc/passes/move_select_after_matmul_optional.cpp @@ -676,9 +676,9 @@ static void internal_merge_identical_user_ops(graphlib::Graph *graph) { void move_select_after_matmul_optional(graphlib::Graph *graph) { - if (getenv("PYBUDA_MANUAL_SPLICE_DECOMP_TH") == nullptr) + if (getenv("FORGE_MANUAL_SPLICE_DECOMP_TH") == nullptr) return; - int manual_splice_decomp_th = env_as("PYBUDA_MANUAL_SPLICE_DECOMP_TH"); + int manual_splice_decomp_th = env_as("FORGE_MANUAL_SPLICE_DECOMP_TH"); // 0. 
merge indentical ops internal_merge_identical_user_ops(graph); diff --git a/pybuda/csrc/passes/move_select_after_matmul_optional.hpp b/forge/csrc/passes/move_select_after_matmul_optional.hpp similarity index 100% rename from pybuda/csrc/passes/move_select_after_matmul_optional.hpp rename to forge/csrc/passes/move_select_after_matmul_optional.hpp diff --git a/pybuda/csrc/passes/nd_slice.hpp b/forge/csrc/passes/nd_slice.hpp similarity index 100% rename from pybuda/csrc/passes/nd_slice.hpp rename to forge/csrc/passes/nd_slice.hpp diff --git a/pybuda/csrc/passes/pad_output_buffer.cpp b/forge/csrc/passes/pad_output_buffer.cpp similarity index 97% rename from pybuda/csrc/passes/pad_output_buffer.cpp rename to forge/csrc/passes/pad_output_buffer.cpp index 9d2f49128..cc5f0272d 100644 --- a/pybuda/csrc/passes/pad_output_buffer.cpp +++ b/forge/csrc/passes/pad_output_buffer.cpp @@ -31,8 +31,8 @@ void insert_buda_pad(graphlib::Graph *graph, graphlib::Edge edge, int rt_pad_amo void pad_output_buffer(graphlib::Graph *graph, const DeviceConfig &device_config) { - bool pad_output_buffer = env_as("PYBUDA_PAD_OUTPUT_BUFFER"); - int pad_threshold = env_as("PYBUDA_PAD_OUTPUT_BUFFER_THRESHOLD_TILES"); + bool pad_output_buffer = env_as("FORGE_PAD_OUTPUT_BUFFER"); + int pad_threshold = env_as("FORGE_PAD_OUTPUT_BUFFER_THRESHOLD_TILES"); if (not pad_output_buffer) return; diff --git a/pybuda/csrc/passes/pad_output_buffer.hpp b/forge/csrc/passes/pad_output_buffer.hpp similarity index 100% rename from pybuda/csrc/passes/pad_output_buffer.hpp rename to forge/csrc/passes/pad_output_buffer.hpp diff --git a/pybuda/csrc/passes/passes_utils.cpp b/forge/csrc/passes/passes_utils.cpp similarity index 99% rename from pybuda/csrc/passes/passes_utils.cpp rename to forge/csrc/passes/passes_utils.cpp index 41fb3ea91..4f546f8c4 100644 --- a/pybuda/csrc/passes/passes_utils.cpp +++ b/forge/csrc/passes/passes_utils.cpp @@ -297,7 +297,7 @@ std::vector get_factors(int num) bool check_unsupported_hw_ops(Graph *graph, bool should_throw) { bool unsupported_hw_ops = false; - py::object eval_module = py::module_::import("pybuda.op.eval.buda"); + py::object eval_module = py::module_::import("forge.op.eval.buda"); std::string message; for (Node *node : graph->nodes()) diff --git a/pybuda/csrc/passes/passes_utils.hpp b/forge/csrc/passes/passes_utils.hpp similarity index 100% rename from pybuda/csrc/passes/passes_utils.hpp rename to forge/csrc/passes/passes_utils.hpp diff --git a/pybuda/csrc/passes/post_autograd_graph_passes.cpp b/forge/csrc/passes/post_autograd_graph_passes.cpp similarity index 100% rename from pybuda/csrc/passes/post_autograd_graph_passes.cpp rename to forge/csrc/passes/post_autograd_graph_passes.cpp diff --git a/pybuda/csrc/passes/post_autograd_graph_passes.hpp b/forge/csrc/passes/post_autograd_graph_passes.hpp similarity index 100% rename from pybuda/csrc/passes/post_autograd_graph_passes.hpp rename to forge/csrc/passes/post_autograd_graph_passes.hpp diff --git a/pybuda/csrc/passes/pre_lowering_passes.cpp b/forge/csrc/passes/pre_lowering_passes.cpp similarity index 99% rename from pybuda/csrc/passes/pre_lowering_passes.cpp rename to forge/csrc/passes/pre_lowering_passes.cpp index a6c81de8d..d5c4b0793 100644 --- a/pybuda/csrc/passes/pre_lowering_passes.cpp +++ b/forge/csrc/passes/pre_lowering_passes.cpp @@ -152,7 +152,7 @@ static void insert_tile_broadcasts( } } - if (try_consteval and not env_as("PYBUDA_DISABLE_TILE_BROADCAST_CONSTEVAL")) + if (try_consteval and not env_as("FORGE_DISABLE_TILE_BROADCAST_CONSTEVAL")) { for 
(auto op : tile_broadcasts) { @@ -501,7 +501,7 @@ static bool swap_reshape(Graph *graph, graphlib::PyOpNode *add, graphlib::PyOpNo if (not commutable_reshape(reshape) and not requant) return false; - if (env_as("PYBUDA_FUSE_MATMUL_GELU")) { + if (env_as("FORGE_FUSE_MATMUL_GELU")) { TT_ASSERT((add->op_type().op == "add") || (add->op_type().op == "gelu") || (add->op_type().op == "buda_requantize")); } else { @@ -571,7 +571,7 @@ static bool has_fusable_upstream_matmul(graphlib::Graph *graph, graphlib::PyOpNo void fuse_bias(Graph *graph) { - if (env_as("PYBUDA_NO_FUSE_MATMUL_BIAS")) + if (env_as("FORGE_NO_FUSE_MATMUL_BIAS")) return; // Find matmul + bias, and merge bias into the matmul diff --git a/pybuda/csrc/passes/pre_lowering_passes.hpp b/forge/csrc/passes/pre_lowering_passes.hpp similarity index 100% rename from pybuda/csrc/passes/pre_lowering_passes.hpp rename to forge/csrc/passes/pre_lowering_passes.hpp diff --git a/pybuda/csrc/passes/pre_placer_buda_passes.cpp b/forge/csrc/passes/pre_placer_buda_passes.cpp similarity index 99% rename from pybuda/csrc/passes/pre_placer_buda_passes.cpp rename to forge/csrc/passes/pre_placer_buda_passes.cpp index f9badff11..0a509f0a8 100644 --- a/pybuda/csrc/passes/pre_placer_buda_passes.cpp +++ b/forge/csrc/passes/pre_placer_buda_passes.cpp @@ -569,10 +569,10 @@ std::vector> update_epoch_breaks_for_partial_datacopy( void calculate_ublock_order(graphlib::Graph *graph) { - auto eval_module = py::module_::import("pybuda.op.eval.buda"); - py::function pybuda_input_ublock_order = eval_module.attr("get_f_pybuda_input_ublock_order"); + auto eval_module = py::module_::import("forge.op.eval.buda"); + py::function forge_input_ublock_order = eval_module.attr("get_f_forge_input_ublock_order"); - auto relate = [pybuda_input_ublock_order]( + auto relate = [forge_input_ublock_order]( graphlib::Graph const *graph, Edge root, graphlib::UBlockOrder ublock_order, @@ -601,7 +601,7 @@ void calculate_ublock_order(graphlib::Graph *graph) continue; std::vector operands = graph->operand_data_edges(node); - auto maybe_input_ublock_order = pybuda_input_ublock_order(op->op_type(), operands.size()) + auto maybe_input_ublock_order = forge_input_ublock_order(op->op_type(), operands.size()) .cast>>(); bool no_input_ublock_reqs = not maybe_input_ublock_order; @@ -642,7 +642,7 @@ void calculate_ublock_order(graphlib::Graph *graph) continue; std::vector operands = graph->operand_data_edges(node); - auto maybe_input_ublock_order = pybuda_input_ublock_order(op->op_type(), operands.size()) + auto maybe_input_ublock_order = forge_input_ublock_order(op->op_type(), operands.size()) .cast>>(); if (not maybe_input_ublock_order) @@ -789,7 +789,7 @@ void calculate_ublock_order(graphlib::Graph *graph) continue; std::vector operands = graph->operand_data_edges(node); - auto maybe_input_ublock_order = pybuda_input_ublock_order(op->op_type(), operands.size()) + auto maybe_input_ublock_order = forge_input_ublock_order(op->op_type(), operands.size()) .cast>>(); std::vector const required_input_ublock_order = maybe_input_ublock_order diff --git a/pybuda/csrc/passes/pre_placer_buda_passes.hpp b/forge/csrc/passes/pre_placer_buda_passes.hpp similarity index 100% rename from pybuda/csrc/passes/pre_placer_buda_passes.hpp rename to forge/csrc/passes/pre_placer_buda_passes.hpp diff --git a/pybuda/csrc/passes/print_graph.cpp b/forge/csrc/passes/print_graph.cpp similarity index 92% rename from pybuda/csrc/passes/print_graph.cpp rename to forge/csrc/passes/print_graph.cpp index d947331e0..db48586d3 100644 --- 
a/pybuda/csrc/passes/print_graph.cpp +++ b/forge/csrc/passes/print_graph.cpp @@ -32,7 +32,7 @@ bool print_opnode(graphlib::Graph *graph, graphlib::Node *node) void print_graph_regular(graphlib::Graph *graph, std::string stage) { - auto stage_to_print = env_as_optional("PYBUDA_PRINT_GRAPH_AT"); + auto stage_to_print = env_as_optional("FORGE_PRINT_GRAPH_AT"); if (not stage_to_print or ((stage_to_print != stage) and (stage_to_print != "ALL"))) { return; @@ -92,7 +92,7 @@ void print_graph_viz_nodes_definitions(graphlib::Node *node) void print_graph_viz_op_nodes_definitions(graphlib::Node *node) { - auto print_bw_graph = env_as_optional("PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"); + auto print_bw_graph = env_as_optional("FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"); auto op = dynamic_cast(node); if (not op) @@ -118,7 +118,7 @@ void print_graph_viz_op_nodes_definitions(graphlib::Node *node) void print_graph_viz_op_nodes_relations(graphlib::Graph *graph, graphlib::Node *node) { - auto print_bw_graph = env_as_optional("PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"); + auto print_bw_graph = env_as_optional("FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"); auto op = dynamic_cast(node); if (not op) @@ -159,7 +159,7 @@ void print_graph_viz_op_nodes_relations(graphlib::Graph *graph, graphlib::Node * */ void print_graph_viz_format(graphlib::Graph *graph, std::string stage) { - auto stage_to_print = env_as_optional("PYBUDA_PRINT_GRAPH_VIZ_FORMAT_AT"); + auto stage_to_print = env_as_optional("FORGE_PRINT_GRAPH_VIZ_FORMAT_AT"); if (not stage_to_print or ((stage_to_print != stage) and (stage_to_print != "ALL"))) return; std::cout << "Graph at: " << stage << std::endl; @@ -205,8 +205,8 @@ void print_graph_viz_format(graphlib::Graph *graph, std::string stage) void print_graph(graphlib::Graph *graph, std::string stage) { - auto regular_print = env_as_optional("PYBUDA_PRINT_GRAPH_AT"); - auto graph_viz_format = env_as_optional("PYBUDA_PRINT_GRAPH_VIZ_FORMAT_AT"); + auto regular_print = env_as_optional("FORGE_PRINT_GRAPH_AT"); + auto graph_viz_format = env_as_optional("FORGE_PRINT_GRAPH_VIZ_FORMAT_AT"); if (regular_print) { diff --git a/pybuda/csrc/passes/print_graph.hpp b/forge/csrc/passes/print_graph.hpp similarity index 100% rename from pybuda/csrc/passes/print_graph.hpp rename to forge/csrc/passes/print_graph.hpp diff --git a/pybuda/csrc/passes/python_bindings.cpp b/forge/csrc/passes/python_bindings.cpp similarity index 98% rename from pybuda/csrc/passes/python_bindings.cpp rename to forge/csrc/passes/python_bindings.cpp index 1a94b4445..197e21cb9 100644 --- a/pybuda/csrc/passes/python_bindings.cpp +++ b/forge/csrc/passes/python_bindings.cpp @@ -16,7 +16,7 @@ namespace tt { static bool has_newstyle_interface(std::string const &op_name, bool is_buda) { py::object eval_module = - is_buda ? py::module_::import("pybuda.op.eval.buda") : py::module_::import("pybuda.op.eval.pybuda"); + is_buda ? 
py::module_::import("forge.op.eval.buda") : py::module_::import("forge.op.eval.forge"); return eval_module.attr("has_newstyle_interface")(op_name).cast(); } @@ -164,7 +164,7 @@ void PassesModule(py::module &m_passes) TT_ASSERT(cnode && cnode->is_tensor(), "Only use for ConstantInputNode of type tensor"); return borrow_shared_py_object(cnode->tensor()); }) - .def("pybuda_shape", &tt::LoweringContext::pybuda_shape); + .def("forge_shape", &tt::LoweringContext::forge_shape); py::class_(m_passes, "DecomposingContext") .def("node_name", &tt::DecomposingContext::get_node_name) diff --git a/pybuda/csrc/passes/python_bindings.hpp b/forge/csrc/passes/python_bindings.hpp similarity index 100% rename from pybuda/csrc/passes/python_bindings.hpp rename to forge/csrc/passes/python_bindings.hpp diff --git a/pybuda/csrc/passes/replace_incommutable_patterns.cpp b/forge/csrc/passes/replace_incommutable_patterns.cpp similarity index 100% rename from pybuda/csrc/passes/replace_incommutable_patterns.cpp rename to forge/csrc/passes/replace_incommutable_patterns.cpp diff --git a/pybuda/csrc/passes/replace_incommutable_patterns.hpp b/forge/csrc/passes/replace_incommutable_patterns.hpp similarity index 100% rename from pybuda/csrc/passes/replace_incommutable_patterns.hpp rename to forge/csrc/passes/replace_incommutable_patterns.hpp diff --git a/pybuda/csrc/passes/set_tile_dim.cpp b/forge/csrc/passes/set_tile_dim.cpp similarity index 100% rename from pybuda/csrc/passes/set_tile_dim.cpp rename to forge/csrc/passes/set_tile_dim.cpp diff --git a/pybuda/csrc/passes/set_tile_dim.hpp b/forge/csrc/passes/set_tile_dim.hpp similarity index 100% rename from pybuda/csrc/passes/set_tile_dim.hpp rename to forge/csrc/passes/set_tile_dim.hpp diff --git a/pybuda/csrc/passes/squeeze_to_reshape.cpp b/forge/csrc/passes/squeeze_to_reshape.cpp similarity index 100% rename from pybuda/csrc/passes/squeeze_to_reshape.cpp rename to forge/csrc/passes/squeeze_to_reshape.cpp diff --git a/pybuda/csrc/passes/squeeze_to_reshape.hpp b/forge/csrc/passes/squeeze_to_reshape.hpp similarity index 100% rename from pybuda/csrc/passes/squeeze_to_reshape.hpp rename to forge/csrc/passes/squeeze_to_reshape.hpp diff --git a/pybuda/csrc/passes/tests/gtest_main.cpp b/forge/csrc/passes/tests/gtest_main.cpp similarity index 100% rename from pybuda/csrc/passes/tests/gtest_main.cpp rename to forge/csrc/passes/tests/gtest_main.cpp diff --git a/pybuda/csrc/passes/tests/test_constant_folding.cpp b/forge/csrc/passes/tests/test_constant_folding.cpp similarity index 94% rename from pybuda/csrc/passes/tests/test_constant_folding.cpp rename to forge/csrc/passes/tests/test_constant_folding.cpp index f99c0bd69..eb6fb2c30 100644 --- a/pybuda/csrc/passes/tests/test_constant_folding.cpp +++ b/forge/csrc/passes/tests/test_constant_folding.cpp @@ -6,7 +6,7 @@ namespace tt::test { -struct ConstantFoldMultiply : public PybudaGraphTest +struct ConstantFoldMultiply : public ForgeGraphTest { protected: virtual std::vector create_graph() override @@ -60,7 +60,7 @@ TEST_F(ConstantFoldMultiply, constant_fold_multiply) EXPECT_LT(add_position, narrow_position); } -struct ConstantFoldMultiplyThroughAdd : public PybudaGraphTest, public testing::WithParamInterface> +struct ConstantFoldMultiplyThroughAdd : public ForgeGraphTest, public testing::WithParamInterface> { protected: virtual std::vector create_graph() override @@ -130,7 +130,7 @@ INSTANTIATE_TEST_SUITE_P( std::make_pair(2, true), std::make_pair(3, true))); -struct ConstantFoldBackToBack : public PybudaGraphTest, public 
testing::WithParamInterface +struct ConstantFoldBackToBack : public ForgeGraphTest, public testing::WithParamInterface { protected: virtual std::vector create_graph() override diff --git a/pybuda/csrc/passes/tests/test_data_formats.cpp b/forge/csrc/passes/tests/test_data_formats.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_data_formats.cpp rename to forge/csrc/passes/tests/test_data_formats.cpp diff --git a/pybuda/csrc/passes/tests/test_erase_inverse_ops.cpp b/forge/csrc/passes/tests/test_erase_inverse_ops.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_erase_inverse_ops.cpp rename to forge/csrc/passes/tests/test_erase_inverse_ops.cpp diff --git a/pybuda/csrc/passes/tests/test_erase_unnecessary_4d_tm_sequence.cpp b/forge/csrc/passes/tests/test_erase_unnecessary_4d_tm_sequence.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_erase_unnecessary_4d_tm_sequence.cpp rename to forge/csrc/passes/tests/test_erase_unnecessary_4d_tm_sequence.cpp diff --git a/pybuda/csrc/passes/tests/test_fracturing.cpp b/forge/csrc/passes/tests/test_fracturing.cpp similarity index 97% rename from pybuda/csrc/passes/tests/test_fracturing.cpp rename to forge/csrc/passes/tests/test_fracturing.cpp index 6c736eff6..deb375b17 100644 --- a/pybuda/csrc/passes/tests/test_fracturing.cpp +++ b/forge/csrc/passes/tests/test_fracturing.cpp @@ -49,7 +49,7 @@ static bool fully_connected(graphlib::Graph* graph) return true; } -struct FractureFF : public PybudaGraphTest +struct FractureFF : public ForgeGraphTest { protected: virtual std::vector create_graph() override @@ -176,7 +176,7 @@ TEST_F(FractureFF, 2d_weight_stationary_mixed_factors) EXPECT_EQ(count_nodes(nodes, e1_name, "matmul"), 2 * 2); } -struct FractureForkJoin : public PybudaGraphTest +struct FractureForkJoin : public ForgeGraphTest { protected: virtual std::vector create_graph() override @@ -236,7 +236,7 @@ TEST_F(FractureForkJoin, fracture_fork_join) } } -struct FractureDimSwitch : public PybudaGraphTest +struct FractureDimSwitch : public ForgeGraphTest { protected: virtual std::vector create_graph() override @@ -276,7 +276,7 @@ TEST_F(FractureDimSwitch, dim_switch) EXPECT_TRUE(fully_connected(graph)); } -struct FractureSimpleMixedFactors : public PybudaGraphTest, public testing::WithParamInterface> +struct FractureSimpleMixedFactors : public ForgeGraphTest, public testing::WithParamInterface> { protected: virtual std::vector create_graph() override @@ -326,7 +326,7 @@ INSTANTIATE_TEST_SUITE_P( std::make_pair(4, 2), std::make_pair(4, 1))); -struct FractureLayers : public PybudaGraphTest +struct FractureLayers : public ForgeGraphTest { protected: virtual std::vector create_graph() override diff --git a/pybuda/csrc/passes/tests/test_fuse_pad_conv2d.cpp b/forge/csrc/passes/tests/test_fuse_pad_conv2d.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_fuse_pad_conv2d.cpp rename to forge/csrc/passes/tests/test_fuse_pad_conv2d.cpp diff --git a/pybuda/csrc/passes/tests/test_fuse_reshape_transpose_pairs_into_slice_or_stack_tm.cpp b/forge/csrc/passes/tests/test_fuse_reshape_transpose_pairs_into_slice_or_stack_tm.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_fuse_reshape_transpose_pairs_into_slice_or_stack_tm.cpp rename to forge/csrc/passes/tests/test_fuse_reshape_transpose_pairs_into_slice_or_stack_tm.cpp diff --git a/pybuda/csrc/passes/tests/test_link_past_cache_ios.cpp b/forge/csrc/passes/tests/test_link_past_cache_ios.cpp similarity index 99% rename from 
pybuda/csrc/passes/tests/test_link_past_cache_ios.cpp rename to forge/csrc/passes/tests/test_link_past_cache_ios.cpp index 15ae6f120..48a3bf786 100644 --- a/pybuda/csrc/passes/tests/test_link_past_cache_ios.cpp +++ b/forge/csrc/passes/tests/test_link_past_cache_ios.cpp @@ -224,9 +224,9 @@ bool single_user_operand(graphlib::Graph *graph, graphlib::Node *n) TEST_F(T5PastCacheRotate, t5_past_cache_rotate) { - setenv("PYBUDA_ROTATE_PAST_CACHE_PARAMS", "1", 1 /* overwrite */); + setenv("FORGE_ROTATE_PAST_CACHE_PARAMS", "1", 1 /* overwrite */); tt::passes::link_past_cache_ios(graph); - unsetenv("PYBUDA_ROTATE_PAST_CACHE_PARAMS"); + unsetenv("FORGE_ROTATE_PAST_CACHE_PARAMS"); // get input/output nodes std::vector output_nodes, input_nodes; diff --git a/pybuda/csrc/passes/tests/test_mm_fuse_bias.cpp b/forge/csrc/passes/tests/test_mm_fuse_bias.cpp similarity index 98% rename from pybuda/csrc/passes/tests/test_mm_fuse_bias.cpp rename to forge/csrc/passes/tests/test_mm_fuse_bias.cpp index de41fd964..90f5f4601 100644 --- a/pybuda/csrc/passes/tests/test_mm_fuse_bias.cpp +++ b/forge/csrc/passes/tests/test_mm_fuse_bias.cpp @@ -7,7 +7,7 @@ namespace tt::test { struct MMFuseBias - : public PybudaGraphTest, + : public ForgeGraphTest, public testing::WithParamInterface, graphlib::NodeType>> { protected: diff --git a/pybuda/csrc/passes/tests/test_move_index_to_mm_weights.cpp b/forge/csrc/passes/tests/test_move_index_to_mm_weights.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_move_index_to_mm_weights.cpp rename to forge/csrc/passes/tests/test_move_index_to_mm_weights.cpp diff --git a/pybuda/csrc/passes/tests/test_move_select_after_matmul_optional.cpp b/forge/csrc/passes/tests/test_move_select_after_matmul_optional.cpp similarity index 92% rename from pybuda/csrc/passes/tests/test_move_select_after_matmul_optional.cpp rename to forge/csrc/passes/tests/test_move_select_after_matmul_optional.cpp index 06fb2deea..2feb44d22 100644 --- a/pybuda/csrc/passes/tests/test_move_select_after_matmul_optional.cpp +++ b/forge/csrc/passes/tests/test_move_select_after_matmul_optional.cpp @@ -36,7 +36,7 @@ struct MoveSelectAfterMatmulOptional : testing::Test TEST_F(MoveSelectAfterMatmulOptional, test_move_select_after_matmul_1) { - setenv("PYBUDA_MANUAL_SPLICE_DECOMP_TH", "158", 1 /* overwrite */); + setenv("FORGE_MANUAL_SPLICE_DECOMP_TH", "158", 1 /* overwrite */); passes::move_select_after_matmul_optional(graph); @@ -56,12 +56,12 @@ TEST_F(MoveSelectAfterMatmulOptional, test_move_select_after_matmul_1) } EXPECT_EQ(graph->nodes().size(), 9); - unsetenv("PYBUDA_MANUAL_SPLICE_DECOMP_TH"); + unsetenv("FORGE_MANUAL_SPLICE_DECOMP_TH"); } TEST_F(MoveSelectAfterMatmulOptional, test_move_select_after_matmul_2) { - setenv("PYBUDA_MANUAL_SPLICE_DECOMP_TH", "158", 1 /* overwrite */); + setenv("FORGE_MANUAL_SPLICE_DECOMP_TH", "158", 1 /* overwrite */); // Expand the base-test case auto weight2 = create_input(*graph, "weight2", graphlib::Shape::create({1, 1, 64, 64})); @@ -87,12 +87,12 @@ TEST_F(MoveSelectAfterMatmulOptional, test_move_select_after_matmul_2) } EXPECT_EQ(graph->nodes().size(), 11); - unsetenv("PYBUDA_MANUAL_SPLICE_DECOMP_TH"); + unsetenv("FORGE_MANUAL_SPLICE_DECOMP_TH"); } TEST_F(MoveSelectAfterMatmulOptional, test_move_select_after_matmul_3) { - setenv("PYBUDA_MANUAL_SPLICE_DECOMP_TH", "158", 1 /* overwrite */); + setenv("FORGE_MANUAL_SPLICE_DECOMP_TH", "158", 1 /* overwrite */); // Expand the base-test case graphlib::Node *select_0 = graph->get_node_by_name("select_0"); @@ -127,6 +127,6 @@ 
TEST_F(MoveSelectAfterMatmulOptional, test_move_select_after_matmul_3) } EXPECT_EQ(graph->nodes().size(), 9); - unsetenv("PYBUDA_MANUAL_SPLICE_DECOMP_TH"); + unsetenv("FORGE_MANUAL_SPLICE_DECOMP_TH"); } diff --git a/pybuda/csrc/passes/tests/test_past_cache_ublock_order.cpp b/forge/csrc/passes/tests/test_past_cache_ublock_order.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_past_cache_ublock_order.cpp rename to forge/csrc/passes/tests/test_past_cache_ublock_order.cpp diff --git a/pybuda/csrc/passes/tests/test_split_unsupp_ops.cpp b/forge/csrc/passes/tests/test_split_unsupp_ops.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_split_unsupp_ops.cpp rename to forge/csrc/passes/tests/test_split_unsupp_ops.cpp diff --git a/pybuda/csrc/passes/tests/test_tilize.cpp b/forge/csrc/passes/tests/test_tilize.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_tilize.cpp rename to forge/csrc/passes/tests/test_tilize.cpp diff --git a/pybuda/csrc/passes/tests/test_transpose_srca.cpp b/forge/csrc/passes/tests/test_transpose_srca.cpp similarity index 100% rename from pybuda/csrc/passes/tests/test_transpose_srca.cpp rename to forge/csrc/passes/tests/test_transpose_srca.cpp diff --git a/pybuda/csrc/python_bindings_common.hpp b/forge/csrc/python_bindings_common.hpp similarity index 100% rename from pybuda/csrc/python_bindings_common.hpp rename to forge/csrc/python_bindings_common.hpp diff --git a/pybuda/csrc/reportify/CMakeLists.txt b/forge/csrc/reportify/CMakeLists.txt similarity index 100% rename from pybuda/csrc/reportify/CMakeLists.txt rename to forge/csrc/reportify/CMakeLists.txt diff --git a/pybuda/csrc/reportify/paths.cpp b/forge/csrc/reportify/paths.cpp similarity index 96% rename from pybuda/csrc/reportify/paths.cpp rename to forge/csrc/reportify/paths.cpp index 1c492809f..f0e97fd91 100644 --- a/pybuda/csrc/reportify/paths.cpp +++ b/forge/csrc/reportify/paths.cpp @@ -17,7 +17,7 @@ static std::string get_variant(std::string const& test_name) { std::string variant; - if (auto maybe = env_as_optional("PYBUDA_REPORTIFY_POSTFIX"); maybe and not test_name.empty()) + if (auto maybe = env_as_optional("FORGE_REPORTIFY_POSTFIX"); maybe and not test_name.empty()) { variant = "." 
+ *maybe; } diff --git a/pybuda/csrc/reportify/paths.hpp b/forge/csrc/reportify/paths.hpp similarity index 100% rename from pybuda/csrc/reportify/paths.hpp rename to forge/csrc/reportify/paths.hpp diff --git a/pybuda/csrc/reportify/reportify.cpp b/forge/csrc/reportify/reportify.cpp similarity index 98% rename from pybuda/csrc/reportify/reportify.cpp rename to forge/csrc/reportify/reportify.cpp index 3ec0ebf79..7db50b39e 100644 --- a/pybuda/csrc/reportify/reportify.cpp +++ b/forge/csrc/reportify/reportify.cpp @@ -63,7 +63,7 @@ std::vector tt_nodes_to_name_strings(const std::vectorname(); ret_json["unique_id"] = node->id(); std::vector input_nodes; @@ -188,7 +188,7 @@ json node_to_json(const graphlib::Node* node, const graphlib::Graph* graph) else if (node->node_type() == graphlib::NodeType::kPyOp) { const graphlib::PyOpNode* opnode = node->as(); - ret_json["ir"] = "pybuda"; + ret_json["ir"] = "forge"; ret_json["class"] = opnode->op_type().as_string(); ret_json["type"] = opnode->op_type().op; to_json(ret_json, opnode->op_type()); @@ -284,7 +284,7 @@ void dump_graph( const graphlib::Graph* graph, const std::string& report_path) { - if (env_as("PYBUDA_DISABLE_REPORTIFY_DUMP")) + if (env_as("FORGE_DISABLE_REPORTIFY_DUMP")) return; JsonNamePairs json_pairs = create_jsons_for_graph(graph_prefix, graph); @@ -316,7 +316,7 @@ void dump_epoch_type_graphs( const graphlib::Graph* graph, const std::string& directory_path) { - if (env_as("PYBUDA_DISABLE_REPORTIFY_DUMP")) + if (env_as("FORGE_DISABLE_REPORTIFY_DUMP")) return; std::function epoch_type_filter = diff --git a/pybuda/csrc/reportify/reportify.hpp b/forge/csrc/reportify/reportify.hpp similarity index 100% rename from pybuda/csrc/reportify/reportify.hpp rename to forge/csrc/reportify/reportify.hpp diff --git a/pybuda/csrc/reportify/to_json.cpp b/forge/csrc/reportify/to_json.cpp similarity index 100% rename from pybuda/csrc/reportify/to_json.cpp rename to forge/csrc/reportify/to_json.cpp diff --git a/pybuda/csrc/reportify/to_json.hpp b/forge/csrc/reportify/to_json.hpp similarity index 100% rename from pybuda/csrc/reportify/to_json.hpp rename to forge/csrc/reportify/to_json.hpp diff --git a/pybuda/csrc/runtime/CMakeLists.txt b/forge/csrc/runtime/CMakeLists.txt similarity index 100% rename from pybuda/csrc/runtime/CMakeLists.txt rename to forge/csrc/runtime/CMakeLists.txt diff --git a/pybuda/csrc/runtime/python_bindings.cpp b/forge/csrc/runtime/python_bindings.cpp similarity index 100% rename from pybuda/csrc/runtime/python_bindings.cpp rename to forge/csrc/runtime/python_bindings.cpp diff --git a/pybuda/csrc/runtime/python_bindings.hpp b/forge/csrc/runtime/python_bindings.hpp similarity index 100% rename from pybuda/csrc/runtime/python_bindings.hpp rename to forge/csrc/runtime/python_bindings.hpp diff --git a/pybuda/csrc/runtime/runtime.cpp b/forge/csrc/runtime/runtime.cpp similarity index 100% rename from pybuda/csrc/runtime/runtime.cpp rename to forge/csrc/runtime/runtime.cpp diff --git a/pybuda/csrc/runtime/runtime.hpp b/forge/csrc/runtime/runtime.hpp similarity index 100% rename from pybuda/csrc/runtime/runtime.hpp rename to forge/csrc/runtime/runtime.hpp diff --git a/pybuda/csrc/runtime/tt_device.cpp b/forge/csrc/runtime/tt_device.cpp similarity index 100% rename from pybuda/csrc/runtime/tt_device.cpp rename to forge/csrc/runtime/tt_device.cpp diff --git a/pybuda/csrc/runtime/tt_device.hpp b/forge/csrc/runtime/tt_device.hpp similarity index 97% rename from pybuda/csrc/runtime/tt_device.hpp rename to forge/csrc/runtime/tt_device.hpp index 
338453022..13b68d89c 100644 --- a/pybuda/csrc/runtime/tt_device.hpp +++ b/forge/csrc/runtime/tt_device.hpp @@ -5,7 +5,7 @@ #include -#include "pybuda/csrc/backend_api/arch_type.hpp" +#include "forge/csrc/backend_api/arch_type.hpp" #include "tt/runtime/types.h" namespace tt diff --git a/pybuda/csrc/shared_utils/CMakeLists.txt b/forge/csrc/shared_utils/CMakeLists.txt similarity index 100% rename from pybuda/csrc/shared_utils/CMakeLists.txt rename to forge/csrc/shared_utils/CMakeLists.txt diff --git a/pybuda/csrc/shared_utils/json_extension.hpp b/forge/csrc/shared_utils/json_extension.hpp similarity index 100% rename from pybuda/csrc/shared_utils/json_extension.hpp rename to forge/csrc/shared_utils/json_extension.hpp diff --git a/pybuda/csrc/shared_utils/placement_printer.cpp b/forge/csrc/shared_utils/placement_printer.cpp similarity index 100% rename from pybuda/csrc/shared_utils/placement_printer.cpp rename to forge/csrc/shared_utils/placement_printer.cpp diff --git a/pybuda/csrc/shared_utils/placement_printer.hpp b/forge/csrc/shared_utils/placement_printer.hpp similarity index 100% rename from pybuda/csrc/shared_utils/placement_printer.hpp rename to forge/csrc/shared_utils/placement_printer.hpp diff --git a/pybuda/csrc/shared_utils/pretty_table.cpp b/forge/csrc/shared_utils/pretty_table.cpp similarity index 100% rename from pybuda/csrc/shared_utils/pretty_table.cpp rename to forge/csrc/shared_utils/pretty_table.cpp diff --git a/pybuda/csrc/shared_utils/pretty_table.hpp b/forge/csrc/shared_utils/pretty_table.hpp similarity index 100% rename from pybuda/csrc/shared_utils/pretty_table.hpp rename to forge/csrc/shared_utils/pretty_table.hpp diff --git a/pybuda/csrc/shared_utils/sparse_matmul_utils.cpp b/forge/csrc/shared_utils/sparse_matmul_utils.cpp similarity index 98% rename from pybuda/csrc/shared_utils/sparse_matmul_utils.cpp rename to forge/csrc/shared_utils/sparse_matmul_utils.cpp index 8e91ebe32..50fa047ff 100644 --- a/pybuda/csrc/shared_utils/sparse_matmul_utils.cpp +++ b/forge/csrc/shared_utils/sparse_matmul_utils.cpp @@ -156,7 +156,7 @@ static std::pair, std::vector> compress_unique_t add_tile(zero_tile); - if (env_as("PYBUDA_SPARSE_NO_MATH")) + if (env_as("FORGE_SPARSE_NO_MATH")) return std::make_pair(indices, tiles); for (int z = 0; z < zdim; ++z) @@ -376,13 +376,13 @@ static std::pair, int> encode_strips( int t_factor_r, int t_factor_c) { - if (env_as("PYBUDA_SPARSE_MM_ENCODE_ALL_STRIPS")) + if (env_as("FORGE_SPARSE_MM_ENCODE_ALL_STRIPS")) { throw std::runtime_error("Encoding all strips for sparse matmul is unsupported currently."); // svuckovic } - // Remove when this is resolved: tenstorrent/pybuda#842 - bool allow_illegal_sparse_pars = env_as("PYBUDA_ALLOW_ILLEGAL_SPARSE_PARS"); + // Remove when this is resolved: tenstorrent/forge#842 + bool allow_illegal_sparse_pars = env_as("FORGE_ALLOW_ILLEGAL_SPARSE_PARS"); std::int64_t kt = (dimc + TILE_DIM - 1) / TILE_DIM; std::int64_t m_k = kt / u_kt; @@ -704,7 +704,7 @@ int SparseBUDA::get_max_u_kt(int grid_r, int t_factor_r, int u_rt, int sparse_ti SparseBUDA::Layout SparseBUDA::create_layout(bool z_major, int fracture_factor) { Layout layout = Layout::Default; - if (z_major and (fracture_factor == 1) and not env_as("PYBUDA_SPARSE_DISABLE_LAYOUT_DATAFLOW")) + if (z_major and (fracture_factor == 1) and not env_as("FORGE_SPARSE_DISABLE_LAYOUT_DATAFLOW")) layout = Layout::ZMajorDataflow; else if (z_major) layout = Layout::ZMajor; @@ -787,8 +787,8 @@ std::unordered_map> SparseBUDA::get_par_t_values( return {{1, {}}}; } - // Remove when 
this is resolved: tenstorrent/pybuda#842 - bool allow_illegal_sparse_pars = env_as("PYBUDA_ALLOW_ILLEGAL_SPARSE_PARS"); + // Remove when this is resolved: tenstorrent/forge#842 + bool allow_illegal_sparse_pars = env_as("FORGE_ALLOW_ILLEGAL_SPARSE_PARS"); // Sort descending, we want to test the bigger ts first // If a t is valid, all its factors are valid too - we can skip testing them @@ -994,7 +994,7 @@ SparseBUDA::get_sparse_tiles_and_encodings( t_factor_c); buda_indices.push_back(encoded); num_strips_per_row.push_back(num_strips); - if (env_as("PYBUDA_SPARSE_PRINT_INDICES")) + if (env_as("FORGE_SPARSE_PRINT_INDICES")) { fmt::print("Grid_r[{}] {} {}\n", g_r, layout, t_factor_r); print_info_indices(buda_indices.back(), sparse_ublock_idx_bits); @@ -1004,7 +1004,7 @@ SparseBUDA::get_sparse_tiles_and_encodings( if (not visualize_sparse_path.empty()) { namespace py = pybind11; - auto sparse_utils_module = py::module_::import("pybuda.op.eval.sparse_utils"); + auto sparse_utils_module = py::module_::import("forge.op.eval.sparse_utils"); py::function visualize_sparse = sparse_utils_module.attr("visualize_sparse"); visualize_sparse(visualize_sparse_tensors, visualize_sparse_path, virtual_grid_r, zdim * t_factor_r); } @@ -1056,7 +1056,7 @@ int SparseBUDA::get_encoding_tiles_per_core_estimate(int grid_r, int t_factor_r, // ublock_index -> 10, num_matmuls -> 6 // tile_index -> 6, sparse_index -> 10 - bool encode_all_strips = env_as("PYBUDA_SPARSE_MM_ENCODE_ALL_STRIPS"); + bool encode_all_strips = env_as("FORGE_SPARSE_MM_ENCODE_ALL_STRIPS"); int tile_bytes = TILE_DIM * TILE_DIM * 4; // Using RawUInt32 for encoding tiles TT_ASSERT(sparse_shape[0] % TILE_DIM == 0); diff --git a/pybuda/csrc/shared_utils/sparse_matmul_utils.hpp b/forge/csrc/shared_utils/sparse_matmul_utils.hpp similarity index 100% rename from pybuda/csrc/shared_utils/sparse_matmul_utils.hpp rename to forge/csrc/shared_utils/sparse_matmul_utils.hpp diff --git a/pybuda/csrc/shared_utils/string_extension.cpp b/forge/csrc/shared_utils/string_extension.cpp similarity index 100% rename from pybuda/csrc/shared_utils/string_extension.cpp rename to forge/csrc/shared_utils/string_extension.cpp diff --git a/pybuda/csrc/shared_utils/string_extension.hpp b/forge/csrc/shared_utils/string_extension.hpp similarity index 100% rename from pybuda/csrc/shared_utils/string_extension.hpp rename to forge/csrc/shared_utils/string_extension.hpp diff --git a/pybuda/csrc/test/common.hpp b/forge/csrc/test/common.hpp similarity index 99% rename from pybuda/csrc/test/common.hpp rename to forge/csrc/test/common.hpp index 3dc23f97f..b06c9cfcc 100644 --- a/pybuda/csrc/test/common.hpp +++ b/forge/csrc/test/common.hpp @@ -277,7 +277,7 @@ class GraphTest : public ::testing::Test std::unordered_map op_name_id; }; -using PybudaGraphTest = GraphTest; +using ForgeGraphTest = GraphTest; using BudaGraphTest = GraphTest; inline DeviceConfig create_device_config( diff --git a/pybuda/csrc/test/graph_api.hpp b/forge/csrc/test/graph_api.hpp similarity index 100% rename from pybuda/csrc/test/graph_api.hpp rename to forge/csrc/test/graph_api.hpp diff --git a/pybuda/csrc/tt_torch_device/CMakeLists.txt b/forge/csrc/tt_torch_device/CMakeLists.txt similarity index 100% rename from pybuda/csrc/tt_torch_device/CMakeLists.txt rename to forge/csrc/tt_torch_device/CMakeLists.txt diff --git a/pybuda/csrc/tt_torch_device/python_bindings.cpp b/forge/csrc/tt_torch_device/python_bindings.cpp similarity index 95% rename from pybuda/csrc/tt_torch_device/python_bindings.cpp rename to 
forge/csrc/tt_torch_device/python_bindings.cpp index 832059cc4..485c86a3e 100644 --- a/pybuda/csrc/tt_torch_device/python_bindings.cpp +++ b/forge/csrc/tt_torch_device/python_bindings.cpp @@ -3,8 +3,8 @@ // SPDX-License-Identifier: Apache-2.0 #include "tt_torch_device/python_bindings.hpp" #include "tt_torch_device/tt_device.hpp" -#include "pybuda/csrc/python_bindings_common.hpp" -#include "pybuda/csrc/backend_api/arch_type.hpp" +#include "forge/csrc/python_bindings_common.hpp" +#include "forge/csrc/backend_api/arch_type.hpp" namespace tt { diff --git a/pybuda/csrc/tt_torch_device/python_bindings.hpp b/forge/csrc/tt_torch_device/python_bindings.hpp similarity index 100% rename from pybuda/csrc/tt_torch_device/python_bindings.hpp rename to forge/csrc/tt_torch_device/python_bindings.hpp diff --git a/pybuda/csrc/tt_torch_device/torch_device_impl.cpp b/forge/csrc/tt_torch_device/torch_device_impl.cpp similarity index 99% rename from pybuda/csrc/tt_torch_device/torch_device_impl.cpp rename to forge/csrc/tt_torch_device/torch_device_impl.cpp index f926097a3..483fd3b31 100644 --- a/pybuda/csrc/tt_torch_device/torch_device_impl.cpp +++ b/forge/csrc/tt_torch_device/torch_device_impl.cpp @@ -10,9 +10,9 @@ #include -#include "pybuda/csrc/lower_to_buda/common.hpp" -#include "pybuda/csrc/tt_torch_device/python_bindings.hpp" -#include "pybuda/csrc/tt_torch_device/tt_device.hpp" +#include "forge/csrc/lower_to_buda/common.hpp" +#include "forge/csrc/tt_torch_device/python_bindings.hpp" +#include "forge/csrc/tt_torch_device/tt_device.hpp" #include "utils/assert.hpp" #include "utils/logger.hpp" diff --git a/pybuda/csrc/tt_torch_device/tt_device.cpp b/forge/csrc/tt_torch_device/tt_device.cpp similarity index 98% rename from pybuda/csrc/tt_torch_device/tt_device.cpp rename to forge/csrc/tt_torch_device/tt_device.cpp index 09be661c7..2ce57b5cb 100644 --- a/pybuda/csrc/tt_torch_device/tt_device.cpp +++ b/forge/csrc/tt_torch_device/tt_device.cpp @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC // // SPDX-License-Identifier: Apache-2.0 -#include "pybuda/csrc/tt_torch_device/tt_device.hpp" +#include "forge/csrc/tt_torch_device/tt_device.hpp" #include #include @@ -9,7 +9,7 @@ #include #include -#include "pybuda/csrc/lower_to_buda/common.hpp" +#include "forge/csrc/lower_to_buda/common.hpp" #include "tt/runtime/runtime.h" #include "tt/runtime/types.h" #include "utils/assert.hpp" @@ -265,7 +265,7 @@ torch::Tensor eval_runtime_transform( py::object py_tensor = py::reinterpret_steal(THPVariable_Wrap(tensor)); PyGILState_STATE gstate=PyGILState_Ensure(); - auto module = py::module_::import("pybuda.tensor"); + auto module = py::module_::import("forge.tensor"); py::function eval_transform = module.attr("eval_runtime_transform"); py::tuple py_result = eval_transform(transform, py_tensor, tile_bcast_dims); PyGILState_Release(gstate); @@ -279,7 +279,7 @@ torch::Tensor narrow_to_pytorch(const torch::Tensor& tensor, std::string transfo py::object py_tensor = py::reinterpret_steal(THPVariable_Wrap(tensor)); PyGILState_STATE gstate=PyGILState_Ensure(); - auto module = py::module_::import("pybuda.tensor"); + auto module = py::module_::import("forge.tensor"); py::function eval_transform = module.attr("eval_runtime_transform"); //TODO: update py::object py_result = eval_transform(transform, py_tensor); PyGILState_Release(gstate); diff --git a/pybuda/csrc/tt_torch_device/tt_device.hpp b/forge/csrc/tt_torch_device/tt_device.hpp similarity index 98% rename from pybuda/csrc/tt_torch_device/tt_device.hpp rename to 
forge/csrc/tt_torch_device/tt_device.hpp index af18ef0f8..eba28e832 100644 --- a/pybuda/csrc/tt_torch_device/tt_device.hpp +++ b/forge/csrc/tt_torch_device/tt_device.hpp @@ -15,7 +15,7 @@ #include #include -#include "pybuda/csrc/backend_api/arch_type.hpp" +#include "forge/csrc/backend_api/arch_type.hpp" #include "runtime/tt_device.hpp" #include "tt/runtime/types.h" #include "utils/assert.hpp" diff --git a/pybuda/pybuda/CMakeLists.txt b/forge/forge/CMakeLists.txt similarity index 68% rename from pybuda/pybuda/CMakeLists.txt rename to forge/forge/CMakeLists.txt index 40dcf221b..5c448efbb 100644 --- a/pybuda/pybuda/CMakeLists.txt +++ b/forge/forge/CMakeLists.txt @@ -1,5 +1,5 @@ add_custom_target(install_ttforge ALL - COMMAND cd "${CMAKE_SOURCE_DIR}/pybuda" && pip install -e . + COMMAND cd "${CMAKE_SOURCE_DIR}/forge" && pip install -e . COMMENT "Installing ttforge module") add_dependencies(install_ttforge run_after_ttforge_csrc) diff --git a/pybuda/pybuda/_C.pyi b/forge/forge/_C.pyi similarity index 100% rename from pybuda/pybuda/_C.pyi rename to forge/forge/_C.pyi diff --git a/pybuda/pybuda/_C/__init__.pyi b/forge/forge/_C/__init__.pyi similarity index 100% rename from pybuda/pybuda/_C/__init__.pyi rename to forge/forge/_C/__init__.pyi diff --git a/forge/forge/_C/autograd.pyi b/forge/forge/_C/autograd.pyi new file mode 100644 index 000000000..30d7763f7 --- /dev/null +++ b/forge/forge/_C/autograd.pyi @@ -0,0 +1,24 @@ +import forge._C.graph +from typing import overload + +class AutogradConfig: + def __init__(self, recompute: bool = ..., optimizer: object = ...) -> None: ... + +class AutogradContext: + def __init__(self, *args, **kwargs) -> None: ... + @overload + def constant(self, arg0: int) -> forge._C.graph.NodeContext: ... + @overload + def constant(self, arg0: float) -> forge._C.graph.NodeContext: ... + def create_optimizer_op(self, type: str, operands: list[forge._C.graph.NodeContext], attributes=...) -> forge._C.graph.NodeContext: ... + def get_operands(self, arg0: forge._C.graph.NodeContext) -> list[forge._C.graph.NodeContext]: ... + def get_pytorch_tensor(self, arg0: forge._C.graph.NodeContext) -> object: ... + def get_shape(self, arg0: forge._C.graph.NodeContext) -> list[int]: ... + def input(self, *args, **kwargs): ... + def loopback(self, arg0: forge._C.graph.NodeContext, arg1: forge._C.graph.NodeContext) -> None: ... + def op(self, type: str | object, operands: list[forge._C.graph.NodeContext], attributes=...) -> forge._C.graph.NodeContext: ... + def tensor(self, arg0: object) -> forge._C.graph.NodeContext: ... + +class AutogradEngine: + def __init__(self, arg0: forge._C.graph.Graph, arg1: AutogradConfig) -> None: ... + def run(self) -> forge._C.graph.Graph: ... diff --git a/pybuda/pybuda/_C/graph.pyi b/forge/forge/_C/graph.pyi similarity index 94% rename from pybuda/pybuda/_C/graph.pyi rename to forge/forge/_C/graph.pyi index c12514d80..6978ae6d0 100644 --- a/pybuda/pybuda/_C/graph.pyi +++ b/forge/forge/_C/graph.pyi @@ -1,4 +1,4 @@ -import pybuda._C +import forge._C from typing import ClassVar, Iterator, overload C: UBlockOrder @@ -71,7 +71,7 @@ class InputNode: @property def node_type(self): ... @property - def output_df(self) -> pybuda._C.DataFormat: ... + def output_df(self) -> forge._C.DataFormat: ... @property def shape(self) -> Shape: ... @@ -84,7 +84,7 @@ class Node: @property def node_type(self): ... @property - def output_df(self) -> pybuda._C.DataFormat: ... + def output_df(self) -> forge._C.DataFormat: ... @property def shape(self) -> Shape: ... 
@@ -97,7 +97,7 @@ class NodeContext: @property def node_type(self): ... @property - def output_df(self) -> pybuda._C.DataFormat: ... + def output_df(self) -> forge._C.DataFormat: ... @property def shape(self) -> Shape: ... @property @@ -246,17 +246,17 @@ class UBlockOrder: def add_partial_datacopy_edge(arg0: Graph, arg1: int, arg2: int, arg3: int, arg4: int) -> None: ... def add_subgraph_io_link_edge(arg0: Graph, arg1: int, arg2: int, arg3: int, arg4: int) -> None: ... -def create_activation_input(arg0: Graph, arg1: str, arg2: list[int], arg3: bool, arg4: pybuda._C.DataFormat, arg5: int) -> int: ... +def create_activation_input(arg0: Graph, arg1: str, arg2: list[int], arg3: bool, arg4: forge._C.DataFormat, arg5: int) -> int: ... @overload -def create_constant_input(arg0: Graph, arg1: str, arg2: float, arg3: pybuda._C.DataFormat, arg4: int) -> int: ... +def create_constant_input(arg0: Graph, arg1: str, arg2: float, arg3: forge._C.DataFormat, arg4: int) -> int: ... @overload -def create_constant_input(arg0: Graph, arg1: str, arg2: object, arg3: list[int], arg4: pybuda._C.DataFormat, arg5: int) -> int: ... +def create_constant_input(arg0: Graph, arg1: str, arg2: object, arg3: list[int], arg4: forge._C.DataFormat, arg5: int) -> int: ... def create_control_edge(arg0: Graph, arg1: int, arg2: int, arg3: int, arg4: int) -> None: ... def create_data_edge(arg0: Graph, arg1: int, arg2: int, arg3: int, arg4: int, arg5: list[tuple]) -> None: ... -def create_op_node(arg0: Graph, arg1: str, arg2: OpType, arg3: list[int], arg4: pybuda._C.DataFormat, arg5: int, arg6: dict[str, bool | int | str]) -> int: ... -def create_output(arg0: Graph, arg1: str, arg2: list[int], arg3: pybuda._C.DataFormat, arg4: bool, arg5: int) -> int: ... -def create_parameter_input(arg0: Graph, arg1: str, arg2: list[int], arg3: bool, arg4: pybuda._C.DataFormat, arg5: int) -> int: ... -def create_target_input(arg0: Graph, arg1: str, arg2: list[int], arg3: bool, arg4: pybuda._C.DataFormat, arg5: int) -> int: ... +def create_op_node(arg0: Graph, arg1: str, arg2: OpType, arg3: list[int], arg4: forge._C.DataFormat, arg5: int, arg6: dict[str, bool | int | str]) -> int: ... +def create_output(arg0: Graph, arg1: str, arg2: list[int], arg3: forge._C.DataFormat, arg4: bool, arg5: int) -> int: ... +def create_parameter_input(arg0: Graph, arg1: str, arg2: list[int], arg3: bool, arg4: forge._C.DataFormat, arg5: int) -> int: ... +def create_target_input(arg0: Graph, arg1: str, arg2: list[int], arg3: bool, arg4: forge._C.DataFormat, arg5: int) -> int: ... def eval(graph: Graph, inputs: list[object], parameters: dict[str, object], tt_device: object, relative_atol: float, pcc: float, intermediate_golden_tensors: dict[int, object] = ..., losses: list[object] = ..., targets: list[object] = ..., dump_tensors_path: str = ..., allow_modified_shapes: bool = ...) -> tuple[list[object], dict[str, object], list[object], dict[str, object]]: ... def get_constant_input_value(arg0: Node, arg1: bool) -> object: ... def get_intermediate_tensors(graph: Graph, inputs: list[object], parameters: dict[str, object], tt_device: object, relative_atol: float, pcc: float, intermediate_golden_tensors: dict[int, object] = ..., losses: list[object] = ..., targets: list[object] = ..., dump_tensors_path: str = ..., allow_modified_shapes: bool = ...) -> dict[str, object]: ... 
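For context on the _C/graph.pyi stub changes above: downstream annotations that referenced the old pybuda._C types pick up the same symbols from forge._C after this rename. A minimal sketch of hypothetical caller code (not part of this patch; it assumes the compiled forge._C extension is installed and exposes the names shown in the stubs):

    # Before the rename (hypothetical downstream annotation):
    #   from pybuda._C import DataFormat
    #   from pybuda._C.graph import Graph, Shape
    # After the rename, the same symbols come from forge._C:
    from forge._C import DataFormat
    from forge._C.graph import Graph, Shape

    def describe(shape: Shape, df: DataFormat) -> str:
        # Format a node's shape and data format for logging.
        return f"shape={shape}, df={df}"
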
diff --git a/pybuda/pybuda/_C/runtime.pyi b/forge/forge/_C/runtime.pyi similarity index 100% rename from pybuda/pybuda/_C/runtime.pyi rename to forge/forge/_C/runtime.pyi diff --git a/pybuda/pybuda/_C/torch_device.pyi b/forge/forge/_C/torch_device.pyi similarity index 96% rename from pybuda/pybuda/_C/torch_device.pyi rename to forge/forge/_C/torch_device.pyi index 60ee5ff84..6838702b5 100644 --- a/pybuda/pybuda/_C/torch_device.pyi +++ b/forge/forge/_C/torch_device.pyi @@ -1,4 +1,4 @@ -import pybuda._C +import forge._C import torch class TTDevice: @@ -7,7 +7,7 @@ class TTDevice: def str(self) -> str: ... def torch_device(self) -> torch.device: ... @property - def arch(self) -> pybuda._C.Arch: ... + def arch(self) -> forge._C.Arch: ... @property def cluster_yaml(self) -> str: ... @property diff --git a/pybuda/pybuda/__init__.py b/forge/forge/__init__.py similarity index 75% rename from pybuda/pybuda/__init__.py rename to forge/forge/__init__.py index 0ade90cf6..1fafa2959 100644 --- a/pybuda/pybuda/__init__.py +++ b/forge/forge/__init__.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 import os -# Set home directory paths for pybuda and buda +# Set home directory paths for forge and buda def set_home_paths(): import sys import pathlib from loguru import logger - pybuda_path = pathlib.Path(__file__).parent.parent.resolve() + forge_path = pathlib.Path(__file__).parent.parent.resolve() # deployment path - base_path = str(pybuda_path) + base_path = str(forge_path) out_path = "." - if "PYBUDA_HOME" not in os.environ: - os.environ["PYBUDA_HOME"] = str(pybuda_path) + if "FORGE_HOME" not in os.environ: + os.environ["FORGE_HOME"] = str(forge_path) if "TVM_HOME" not in os.environ: os.environ["TVM_HOME"] = str(base_path) + "/tvm" if "BUDA_OUT" not in os.environ: @@ -31,23 +31,23 @@ def set_home_paths(): os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0" -from .module import Module, PyTorchModule, PyBudaModule, TFGraphDefModule, OnnxModule, JaxModule, TFLiteModule +from .module import Module, PyTorchModule, ForgeModule, TFGraphDefModule, OnnxModule, JaxModule, TFLiteModule from .torch_compile import compile_torch from .compiled_graph_state import CompiledGraphState from .config import CompilerConfig, CompileDepth, set_configuration_options, set_epoch_break, set_chip_break, override_op_size, PerfTraceLevel, insert_buffering_nop, insert_nop, _internal_insert_fj_buffering_nop, override_dram_queue_placement, configure_mixed_precision from .verify import VerifyConfig -from .pybudaglobal import pybuda_reset, set_device_pipeline, is_silicon, get_tenstorrent_device +from .forgeglobal import forge_reset, set_device_pipeline, is_silicon, get_tenstorrent_device from .parameter import Parameter from .tensor import Tensor, SomeTensor, TensorShape from .optimizers import SGD, Adam, AdamW, LAMB, LARS from ._C import DataFormat, MathFidelity from ._C import k_dim -import pybuda.op as op -import pybuda.transformers +import forge.op as op +import forge.transformers -import pybuda.typing -from .compile import pybuda_compile_torch, compile_main as compile +import forge.typing +from .compile import forge_compile_torch, compile_main as compile # Torch backend registration # TODO: move this in a separate file / module. 
diff --git a/pybuda/pybuda/ci.py b/forge/forge/ci.py similarity index 90% rename from pybuda/pybuda/ci.py rename to forge/forge/ci.py index f41087fa6..a12a60f32 100644 --- a/pybuda/pybuda/ci.py +++ b/forge/forge/ci.py @@ -8,8 +8,8 @@ import shutil from pathlib import Path -from pybuda.utils import ( - get_pybuda_git_hash, +from forge.utils import ( + get_forge_git_hash, resolve_output_build_directory, write_buda_envs_configs, clear_test_output_build_directory, @@ -20,18 +20,18 @@ def enabled(): - return "PYBUDA_CI_DIR" in os.environ + return "FORGE_CI_DIR" in os.environ def capture_tensors(): - return enabled() and os.environ.get("PYBUDA_CI_CAPTURE_TENSORS", "0") != "0" + return enabled() and os.environ.get("FORGE_CI_CAPTURE_TENSORS", "0") != "0" def get_netlist_dir(): if not enabled(): return resolve_output_build_directory() - base_dir = os.environ.get("PYBUDA_CI_DIR") + base_dir = os.environ.get("FORGE_CI_DIR") netlist_dir = os.path.join( base_dir, ( @@ -53,7 +53,7 @@ def get_netlist_dir(): def comment_test_info(net) -> str: try: - git_hash = get_pybuda_git_hash() + git_hash = get_forge_git_hash() net.append_comment(f"git checkout {git_hash}") except: pass @@ -96,7 +96,7 @@ def initialize_output_build_directory(backend_output_directory: str): create_test_output_build_directory(backend_output_directory) logger.info( - f"Pybuda output build directory for compiled artifacts: {backend_output_directory}" + f"Forge output build directory for compiled artifacts: {backend_output_directory}" ) if not enabled(): create_symlink( diff --git a/pybuda/pybuda/compile.py b/forge/forge/compile.py similarity index 93% rename from pybuda/pybuda/compile.py rename to forge/forge/compile.py index eb71022c2..06f9bc349 100644 --- a/pybuda/pybuda/compile.py +++ b/forge/forge/compile.py @@ -9,14 +9,14 @@ import tensorflow as tf from loguru import logger -import pybuda -from pybuda.compiled_graph_state import CompiledGraphState, CompiledModel, CompileResults -from pybuda.config import ( +import forge +from forge.compiled_graph_state import CompiledGraphState, CompiledModel, CompileResults +from forge.config import ( CompilerConfig, CompileDepth, _get_global_compiler_config, ) -from pybuda._C import ( +from forge._C import ( link_past_cache_ios, move_index_to_mm_weights, run_post_initial_graph_passes, @@ -27,18 +27,18 @@ run_pre_lowering_passes, dump_graph, ) -import pybuda._C.autograd as pyautograd -import pybuda._C.graph as pygraph -from pybuda._C.graph import Graph -from pybuda._C.runtime import Binary -import pybuda.ci as ci -from pybuda.module import Module, PyBudaModule, wrap_module -from pybuda.parameter import Parameter -from pybuda.pybudaglobal import state_changed, clear_state_changed -import pybuda.query as query -from pybuda.tensor import Tensor, to_pt_tensors -from pybuda.typing import * -from pybuda.verify import VerifyConfig, do_verify, _generate_random_losses, _run_pytorch_backward +import forge._C.autograd as pyautograd +import forge._C.graph as pygraph +from forge._C.graph import Graph +from forge._C.runtime import Binary +import forge.ci as ci +from forge.module import Module, ForgeModule, wrap_module +from forge.parameter import Parameter +from forge.forgeglobal import state_changed, clear_state_changed +import forge.query as query +from forge.tensor import Tensor, to_pt_tensors +from forge.typing import * +from forge.verify import VerifyConfig, do_verify, _generate_random_losses, _run_pytorch_backward LAST_SUCCESSFUL_STAGE = None @@ -50,10 +50,10 @@ def init_log_last_successful_compile_stage(): 
def dump_compiler_cfg(backend_output_directory, compiler_cfg, graph_name): import yaml try: - int(os.environ["PYBUDA_DUMP_CONFIG"]) + int(os.environ["FORGE_DUMP_CONFIG"]) path = f"{graph_name}_config.yaml" except ValueError: - path = os.environ["PYBUDA_DUMP_CONFIG"] + path = os.environ["FORGE_DUMP_CONFIG"] with open(os.path.join(backend_output_directory, path), "w") as fd: yaml.dump(compiler_cfg.to_dict(), fd, indent=2) @@ -61,7 +61,7 @@ def dump_compiler_cfg(backend_output_directory, compiler_cfg, graph_name): def load_compiler_cfg(compiler_cfg, clobber=False): import yaml import json - path = os.environ["PYBUDA_LOAD_CONFIG"] + path = os.environ["FORGE_LOAD_CONFIG"] loader = json.load if os.path.splitext(path)[1] == ".json" else lambda f: yaml.load(f, yaml.SafeLoader) with open(path) as fd: d = compiler_cfg.to_dict() @@ -74,10 +74,10 @@ def load_compiler_cfg(compiler_cfg, clobber=False): def generate_override_config(graph, balancer_solution, placer_solution, nop_instructions, graph_name): import yaml try: - int(os.environ["PYBUDA_GENERATE_OVERRIDE_CONFIG"]) + int(os.environ["FORGE_GENERATE_OVERRIDE_CONFIG"]) path = f"{graph_name}_override_config.yaml" except ValueError: - path = os.environ["PYBUDA_GENERATE_OVERRIDE_CONFIG"] + path = os.environ["FORGE_GENERATE_OVERRIDE_CONFIG"] overrides = {} overrides["balancer_op_overrides"] = {k: { @@ -167,7 +167,7 @@ def compile_main( module: AnyModule, sample_inputs: List[torch.Tensor], module_name: Optional[str] = None, - loss: Optional[torch.nn.Module | PyBudaModule] = None, + loss: Optional[torch.nn.Module | ForgeModule] = None, optimizer: Optional[torch.optim.Optimizer] = None, ) -> CompiledModel: """ @@ -176,7 +176,7 @@ def compile_main( Parameters ---------- module: AnyModule - Torch, TensorFlow, or PyBuda module to compile + Torch, TensorFlow, or Forge module to compile sample_inputs: List[torch.Tensor] List of sample inputs for the module (used to infer shapes) @@ -184,7 +184,7 @@ def compile_main( module_name: Optional[str] Name of the module. If not provided, the class name of the provided module will be used. - loss: Optional[torch.nn.Module | PyBudaModule] + loss: Optional[torch.nn.Module | ForgeModule] Loss module for training. optimizer: Optional[torch.optim.Optimizer] @@ -196,7 +196,7 @@ def compile_main( """ - assert isinstance(module, AnyModule), "Only PyTorch, TensorFlow, and PyBuda modules are supported." + assert isinstance(module, AnyModule), "Only PyTorch, TensorFlow, and Forge modules are supported." compiler_cfg = _get_global_compiler_config() compiler_cfg.apply_env_config_overrides() @@ -232,10 +232,10 @@ def compile_main( training=wrapped_loss is not None, ) - return pybuda_compile_from_context(compile_context) + return forge_compile_from_context(compile_context) -def pybuda_compile_from_context(context: CompileContext) -> CompiledModel: +def forge_compile_from_context(context: CompileContext) -> CompiledModel: """ Run front-end compile passes and generate a Buda netlist, with a given compile context. @@ -317,14 +317,14 @@ def pybuda_compile_from_context(context: CompileContext) -> CompiledModel: return compiled_module -def pybuda_compile_torch( +def forge_compile_torch( module_name: str, module: torch.fx.GraphModule, graph: Graph, *inputs: Union[Tensor, List[Any], Dict[str, Any]] ): """ - Entry point for pybuda compile for torch 2.0 api. + Entry point for forge compile for torch 2.0 api. 
Parameters --------- @@ -361,9 +361,9 @@ def pybuda_compile_torch( graph=graph, ) - return pybuda_compile_from_context(compile_context) + return forge_compile_from_context(compile_context) -def pybuda_compile( +def forge_compile( graph_name: str, *inputs: Union[Tensor, List[Any], Dict[str, Any]], targets: List[Tensor] = [], @@ -425,7 +425,7 @@ def pybuda_compile( losses=losses, ) - return pybuda_compile_from_context(compile_context) + return forge_compile_from_context(compile_context) def check_for_compilation_early_stop(desired_stage, current_stage): """ @@ -536,7 +536,7 @@ def init_compile(context: CompileContext) -> CompileDepth: compiler_cfg = context.compiler_cfg graph_name = context.graph_name - force_full = bool(int(os.environ.get("PYBUDA_FORCE_FULL_COMPILE_DEPTH", "0"))) + force_full = bool(int(os.environ.get("FORGE_FORCE_FULL_COMPILE_DEPTH", "0"))) if force_full: compiler_cfg.compile_depth = CompileDepth.FULL @@ -544,9 +544,9 @@ def init_compile(context: CompileContext) -> CompileDepth: ci.initialize_output_build_directory(context.backend_output_directory) # compiler_cfg is fully formed - if "PYBUDA_LOAD_CONFIG" in os.environ: + if "FORGE_LOAD_CONFIG" in os.environ: compiler_cfg = load_compiler_cfg(compiler_cfg) - elif "PYBUDA_DUMP_CONFIG" in os.environ: + elif "FORGE_DUMP_CONFIG" in os.environ: dump_compiler_cfg(context.backend_output_directory, compiler_cfg, graph_name) init_log_last_successful_compile_stage() @@ -571,9 +571,9 @@ def generate_initial_graph(context: CompileContext) -> CompileDepth: if context.compiler_cfg.compile_tvm_to_python and context.graph is None: module_inputs = context.inputs for module in context.modules: - if not isinstance(module, PyBudaModule): + if not isinstance(module, ForgeModule): module, module_inputs = convert_to_forge_module(module, module_inputs, context.compiler_cfg, context.verify_cfg) - assert isinstance(module, PyBudaModule) + assert isinstance(module, ForgeModule) context.inputs = module_inputs @@ -602,7 +602,7 @@ def generate_initial_graph(context: CompileContext) -> CompileDepth: context.parameter_dict = {} for module in context.modules: - if isinstance(module, pybuda.module.Module): + if isinstance(module, forge.module.Module): for p in module.get_parameters(): context.parameter_dict[p.get_name()] = p.value(is_buda=False) elif isinstance(module, torch.fx.GraphModule): @@ -691,7 +691,7 @@ def run_post_pattern_matcher(context: CompileContext) -> CompileDepth: graph = context.graph graph_name = context.graph_name - graph, match_result = pypattern_matcher.lower_pybuda_to_pattern_matcher(graph, compiler_cfg.match_subgraph_patterns) + graph, match_result = pypattern_matcher.lower_forge_to_pattern_matcher(graph, compiler_cfg.match_subgraph_patterns) context.output_kwargs["match_result"] = match_result if match_result.is_subgraph_loopable: @@ -823,7 +823,7 @@ def run_pre_lowering_pass(context: CompileContext) -> CompileDepth: def run_mlir_compiler(context: CompileContext) -> CompileDepth: graph = context.graph - context.compiled_binary = pybuda._C.run_mlir_compiler(graph) + context.compiled_binary = forge._C.run_mlir_compiler(graph) return CompileDepth.FINISH_COMPILE @@ -847,23 +847,23 @@ def finish_compile(context: CompileContext) -> CompileDepth: return CompileDepth.FULL -def convert_to_forge_module(module: AnyModule, module_inputs: Union[AnyTensor, List[AnyTensor]], compiler_cfg: CompilerConfig, verify_cfg: VerifyConfig) -> PyBudaModule: +def convert_to_forge_module(module: AnyModule, module_inputs: Union[AnyTensor, List[AnyTensor]], 
compiler_cfg: CompilerConfig, verify_cfg: VerifyConfig) -> ForgeModule: """ Converts given module to a Forge module, along with the module_inputs (which will be converted to Forge tensors). Returns ------- - PyBudaModule, Tuple[Tensor, ...] + ForgeModule, Tuple[Tensor, ...] """ - from .tvm_to_python import generate_pybuda_module + from .tvm_to_python import generate_forge_module prev_state = state_changed() if module_inputs is None: logger.error("No inputs provided for module {}", module.name) assert False - forge_module, dev_types, module_inputs = generate_pybuda_module(module, to_pt_tensors(module_inputs), compiler_cfg, module.name, verify_cfg,) + forge_module, dev_types, module_inputs = generate_forge_module(module, to_pt_tensors(module_inputs), compiler_cfg, module.name, verify_cfg,) assert len(forge_module) == 1, "Attemping to load split model onto single devices" if not(prev_state): @@ -911,12 +911,12 @@ def generate_graph( TODO: This function was copied over from ttdevice.py with some modifications. Probably needs to be refactored (possibly moved to cpp) ''' - from .pybudaglobal import start_tracing, stop_tracing - from pybuda.tvm_utils import flatten_inputs + from .forgeglobal import start_tracing, stop_tracing + from forge.tvm_utils import flatten_inputs from collections import deque import inspect - from pybuda._C.graph import create_output, create_parameter_input, create_data_edge, create_activation_input, create_constant_input, create_op_node, create_target_input + from forge._C.graph import create_output, create_parameter_input, create_data_edge, create_activation_input, create_constant_input, create_op_node, create_target_input output_to_module_name_prefix = {} output_to_subgraph_index = {} @@ -933,7 +933,7 @@ def generate_graph( outputs = inputs for idx, module in enumerate(modules): - assert isinstance(module, PyBudaModule), "This function only supports PyBudaModule instances" + assert isinstance(module, ForgeModule), "This function only supports ForgeModule instances" if compiler_cfg.compile_subgraphs: outputs = inputs[idx] @@ -970,7 +970,7 @@ def generate_graph( if isinstance(inputs[0], Tensor): inputs = (inputs,) for index, (module, submodule_input) in enumerate(zip(modules, inputs)): - submodule_input_node_names = list(inspect.signature(super(PyBudaModule, module).__getattribute__("forward")).parameters.keys()) + submodule_input_node_names = list(inspect.signature(super(ForgeModule, module).__getattribute__("forward")).parameters.keys()) if len(modules) > 1: submodule_input_node_names = [f"{input_name}_{index}" for input_name in submodule_input_node_names] input_node_names += submodule_input_node_names diff --git a/pybuda/pybuda/compiled_graph_state.py b/forge/forge/compiled_graph_state.py similarity index 98% rename from pybuda/pybuda/compiled_graph_state.py rename to forge/forge/compiled_graph_state.py index babb2adf0..97bb82dc1 100644 --- a/pybuda/pybuda/compiled_graph_state.py +++ b/forge/forge/compiled_graph_state.py @@ -9,12 +9,12 @@ from dataclasses import dataclass, field from dataclasses_json import dataclass_json, config -from pybuda._C import DataFormat -from pybuda._C.graph import Graph, RuntimeTensorTransform -from pybuda._C.runtime import run_binary, Binary -from pybuda.utils import list_as_json -from pybuda.tensor import Tensor, get_post_const_eval_tensors -from pybuda.module import Module +from forge._C import DataFormat +from forge._C.graph import Graph, RuntimeTensorTransform +from forge._C.runtime import run_binary, Binary +from forge.utils import 
list_as_json +from forge.tensor import Tensor, get_post_const_eval_tensors +from forge.module import Module import torch diff --git a/pybuda/pybuda/config.py b/forge/forge/config.py similarity index 92% rename from pybuda/pybuda/config.py rename to forge/forge/config.py index 45aa6f32e..784147b30 100644 --- a/pybuda/pybuda/config.py +++ b/forge/forge/config.py @@ -8,11 +8,11 @@ from typing import Tuple, Dict, List, Optional, Union, Set from collections.abc import Iterable from dataclasses import dataclass, field -from pybuda._C import DataFormat, MathFidelity, AMPNodeProperties -import pybuda.query as query +from forge._C import DataFormat, MathFidelity, AMPNodeProperties +import forge.query as query from dataclasses_json import dataclass_json, config -from pybuda.utils import as_json, dict_as_json, list_as_json, optional_as_json, resolve_output_build_directory, resolve_device_descriptor_path +from forge.utils import as_json, dict_as_json, list_as_json, optional_as_json, resolve_output_build_directory, resolve_device_descriptor_path from loguru import logger @@ -121,13 +121,13 @@ class CompilerConfig: compile_depth: int = field(default=CompileDepth.FULL, metadata=as_json(CompileDepth)) # Defines compilation depth. Used to limit scope of some unit tests - enable_tvm_cpu_fallback: bool = True # Create cpu device for unsupported pybuda ops + enable_tvm_cpu_fallback: bool = True # Create cpu device for unsupported forge ops cpu_fallback_ops: Set[str] = field(default_factory=lambda: set(["embedding"])) # Types of ops to fall back to CPU for enable_tm_cpu_fallback: bool = False # Extend CPU fallback for TM ops tm_cpu_fallback_max_depth: int = 10 # Max search depth for extended CPU fallback enable_tvm_dropout: bool = False # (Temporary): Remove when buda supports dropout - enable_tvm_unsupported_ops: bool = False# Create "unsupported" pybuda ops in python file, allowing user to modify later + enable_tvm_unsupported_ops: bool = False# Create "unsupported" forge ops in python file, allowing user to modify later enable_op_level_comparision: bool = False # Should we need to compare every op with framework output at each compilation stage. enable_tvm_constant_prop: bool = False # Should we constant prop in tvm convert_framework_params_to_tvm: bool = True # Convert framework params to relay params @@ -135,7 +135,7 @@ class CompilerConfig: enable_tvm_jax_freeze_large_model: bool = True # When model param is larger than 2GB, Protobuf will error out. This flag will enable large model tracing framework_model_output_names: List[str] = field(default_factory=lambda: list()) # List of output names specified by framework tvm_constnat_prop_mask: Set[str] = field(default_factory=lambda: set()) # Which parameters should be constant propped by tvm - compile_tvm_to_python: bool = True # instead of generating a direct pybuda graph from TVM, generate a pybuda python class + compile_tvm_to_python: bool = True # instead of generating a direct forge graph from TVM, generate a forge python class retain_tvm_python_files: bool = False # Whether to keep generated python code, or load and delete tvm_graph_store_path: str = "" # Defines store path of serilized TVM graphs. tvm_graph_load_path: str = "" # Defines load path of serilized TVM graphs. @@ -171,7 +171,7 @@ class CompilerConfig: # # Have in mind that in each AMP level, non-mentioned op types are left with default data format (usually set by user; i.e. FP32). 
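The FORGE_* switches handled below map onto the CompilerConfig fields defined above. As a sketch, the same knobs can be set directly on the global config object that compile.py consumes; field and enum names are taken from this file, while the "cumsum" op name is purely illustrative.

from forge.config import CompileDepth, _get_global_compiler_config

cfg = _get_global_compiler_config()              # singleton read by forge.compile / compile.py
cfg.compile_depth = CompileDepth.INIT_COMPILE    # stop after the first stage, e.g. in unit tests
cfg.enable_tvm_cpu_fallback = True               # create a CPU device for unsupported forge ops
cfg.cpu_fallback_ops.add("cumsum")               # illustrative op name, added to the default {"embedding"}

# Equivalently via the environment, picked up by apply_env_config_overrides():
#   FORGE_COMPILE_DEPTH=init_compile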
harvesting_mask: int = 0 # List of harvested rows (same across all chips) - enable_auto_transposing_placement: bool = ("PYBUDA_ENABLE_AUTO_TRANSPOSE" in os.environ) # compiler automatically detects ops to transpose on placement when the flag is set + enable_auto_transposing_placement: bool = ("FORGE_ENABLE_AUTO_TRANSPOSE" in os.environ) # compiler automatically detects ops to transpose on placement when the flag is set fracture_groups: List[Tuple[List[Tuple[str, int, int]], List[str], List[int]]] = field(default_factory=lambda: list()) # see insert_fracture_group conv_multi_op_fracture_factor_override: Dict[str, int] = field(default_factory=lambda: dict()) # override multi op fracture factor for conv enable_single_buffer_fallback: bool = False @@ -195,20 +195,20 @@ class CompilerConfig: # TODO: add reportify dir def apply_env_config_overrides(self): - if "PYBUDA_OVERRIDE_NUM_CHIPS" in os.environ: - self.chip_ids = list(range(int(os.environ.get('PYBUDA_OVERRIDE_NUM_CHIPS')))) + if "FORGE_OVERRIDE_NUM_CHIPS" in os.environ: + self.chip_ids = list(range(int(os.environ.get('FORGE_OVERRIDE_NUM_CHIPS')))) - if "PYBUDA_DISABLE_OP_FUSING" in os.environ: + if "FORGE_DISABLE_OP_FUSING" in os.environ: self.enable_auto_fusing = False - if "PYBUDA_PERFORMANCE_TRACE" in os.environ: + if "FORGE_PERFORMANCE_TRACE" in os.environ: self.performance_trace = { "none": PerfTraceLevel.NONE, "light": PerfTraceLevel.LIGHT, "verbose": PerfTraceLevel.VERBOSE, - }[os.environ["PYBUDA_PERFORMANCE_TRACE"].lower()] + }[os.environ["FORGE_PERFORMANCE_TRACE"].lower()] - if "PYBUDA_COMPILE_DEPTH" in os.environ: + if "FORGE_COMPILE_DEPTH" in os.environ: self.compile_depth = { "full": CompileDepth.FULL, "init_compile": CompileDepth.INIT_COMPILE, @@ -220,51 +220,51 @@ def apply_env_config_overrides(self): "generate_netlist": CompileDepth.GENERATE_NETLIST, "post_pattern_matcher": CompileDepth.POST_PATTERN_MATCHER, "backend_golden_verify": CompileDepth.BACKEND_GOLDEN_VERIFY, - }[os.environ["PYBUDA_COMPILE_DEPTH"].lower()] + }[os.environ["FORGE_COMPILE_DEPTH"].lower()] - if "PYBUDA_ENABLE_INPUT_QUEUES_ON_HOST" in os.environ: - self.input_queues_on_host = bool(int(os.environ["PYBUDA_ENABLE_INPUT_QUEUES_ON_HOST"])) + if "FORGE_ENABLE_INPUT_QUEUES_ON_HOST" in os.environ: + self.input_queues_on_host = bool(int(os.environ["FORGE_ENABLE_INPUT_QUEUES_ON_HOST"])) - if "PYBUDA_ENABLE_OUTPUT_QUEUES_ON_HOST" in os.environ: - self.output_queues_on_host = bool(int(os.environ["PYBUDA_ENABLE_OUTPUT_QUEUES_ON_HOST"])) + if "FORGE_ENABLE_OUTPUT_QUEUES_ON_HOST" in os.environ: + self.output_queues_on_host = bool(int(os.environ["FORGE_ENABLE_OUTPUT_QUEUES_ON_HOST"])) - if "PYBUDA_DEFAULT_DRAM_PARAMETERS" in os.environ: - self.default_dram_parameters = bool(int(os.environ["PYBUDA_DEFAULT_DRAM_PARAMETERS"])) + if "FORGE_DEFAULT_DRAM_PARAMETERS" in os.environ: + self.default_dram_parameters = bool(int(os.environ["FORGE_DEFAULT_DRAM_PARAMETERS"])) - if "PYBUDA_PRESTRIDE_DISABLE" in os.environ: - self.enable_conv_prestride = not bool(int(os.environ["PYBUDA_PRESTRIDE_DISABLE"])) + if "FORGE_PRESTRIDE_DISABLE" in os.environ: + self.enable_conv_prestride = not bool(int(os.environ["FORGE_PRESTRIDE_DISABLE"])) - if "PYBUDA_CONVERT_PARAMS_TO_TVM" in os.environ: - self.convert_framework_params_to_tvm = bool(int(os.environ["PYBUDA_CONVERT_PARAMS_TO_TVM"])) + if "FORGE_CONVERT_PARAMS_TO_TVM" in os.environ: + self.convert_framework_params_to_tvm = bool(int(os.environ["FORGE_CONVERT_PARAMS_TO_TVM"])) - if "PYBUDA_DEFAULT_DF" in os.environ: - self.default_df_override 
= DataFormat.from_json(os.environ["PYBUDA_DEFAULT_DF"]) + if "FORGE_DEFAULT_DF" in os.environ: + self.default_df_override = DataFormat.from_json(os.environ["FORGE_DEFAULT_DF"]) - if "PYBUDA_DISABLE_ENUMERATE_U_KT" in os.environ: - self.enable_enumerate_u_kt = not bool(int(os.environ["PYBUDA_DISABLE_ENUMERATE_U_KT"])) + if "FORGE_DISABLE_ENUMERATE_U_KT" in os.environ: + self.enable_enumerate_u_kt = not bool(int(os.environ["FORGE_DISABLE_ENUMERATE_U_KT"])) - if "PYBUDA_ENABLE_SINGLE_BUFFER_FALLBACK" in os.environ: - self.enable_single_buffer_fallback = bool(int(os.environ["PYBUDA_ENABLE_SINGLE_BUFFER_FALLBACK"])) + if "FORGE_ENABLE_SINGLE_BUFFER_FALLBACK" in os.environ: + self.enable_single_buffer_fallback = bool(int(os.environ["FORGE_ENABLE_SINGLE_BUFFER_FALLBACK"])) - if "PYBUDA_TTI_BACKEND_FORMAT" in os.environ: + if "FORGE_TTI_BACKEND_FORMAT" in os.environ: self.tti_dump_format = TTIDumpFormat.BACKEND - elif "PYBUDA_TTI_BACKEND_TILIZED_FORMAT" in os.environ: + elif "FORGE_TTI_BACKEND_TILIZED_FORMAT" in os.environ: self.tti_dump_format = TTIDumpFormat.BACKEND_TILIZED - if "PYBUDA_AMP_LIGHT" in os.environ: - self.enable_amp_light(level=int(os.environ["PYBUDA_AMP_LIGHT"])) + if "FORGE_AMP_LIGHT" in os.environ: + self.enable_amp_light(level=int(os.environ["FORGE_AMP_LIGHT"])) - if "PYBUDA_ENABLE_DEVICE_TILIZE" in os.environ: - self.enable_device_tilize = bool(int(os.environ["PYBUDA_ENABLE_DEVICE_TILIZE"])) - if "PYBUDA_ENABLE_FORKED_DRAM_INPUTS" in os.environ: - self.enable_forked_dram_inputs = bool(int(os.environ["PYBUDA_ENABLE_FORKED_DRAM_INPUTS"])) + if "FORGE_ENABLE_DEVICE_TILIZE" in os.environ: + self.enable_device_tilize = bool(int(os.environ["FORGE_ENABLE_DEVICE_TILIZE"])) + if "FORGE_ENABLE_FORKED_DRAM_INPUTS" in os.environ: + self.enable_forked_dram_inputs = bool(int(os.environ["FORGE_ENABLE_FORKED_DRAM_INPUTS"])) - if "PYBUDA_SCHEDULER_POLICY" in os.environ: - self.scheduler_policy = os.environ["PYBUDA_SCHEDULER_POLICY"] + if "FORGE_SCHEDULER_POLICY" in os.environ: + self.scheduler_policy = os.environ["FORGE_SCHEDULER_POLICY"] - if "PYBUDA_OVERRIDE_DEVICE_YAML" in os.environ and os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] != "": - self.backend_device_descriptor_path = resolve_device_descriptor_path(os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"]) + if "FORGE_OVERRIDE_DEVICE_YAML" in os.environ and os.environ["FORGE_OVERRIDE_DEVICE_YAML"] != "": + self.backend_device_descriptor_path = resolve_device_descriptor_path(os.environ["FORGE_OVERRIDE_DEVICE_YAML"]) def __post_init__(self): self.apply_env_config_overrides() @@ -777,7 +777,7 @@ def remove_cpu_fallback_ops(op_types: Union[str, List[str]]): def insert_fracture_group(nodes: List[Union[str, Tuple[str, Union[int, List[int]], Union[int, List[int]]]]], chip_ids: Union[List[int], Dict[str, List[int]]] = []): """ - Insert a fracture group, where a fracture group describes pybuda a subgraph + Insert a fracture group, where a fracture group describes forge a subgraph to be fractured and along which dimension(s). Parameters @@ -860,7 +860,7 @@ def __insert_nop_impl( def insert_nop(src_op: str, dest_ops: Union[str, List[str]], *, hoist_tms: bool = True, nop_count: int = 1, daisy_chain: bool = False): """ - Instruct pybuda compiler to insert a NOP instruction on the edge identified by the named src/dest pair. + Instruct forge compiler to insert a NOP instruction on the edge identified by the named src/dest pair. 
Parameters ---------- @@ -893,7 +893,7 @@ def insert_nop(src_op: str, dest_ops: Union[str, List[str]], *, hoist_tms: bool def _internal_insert_fj_buffering_nop(src_op: str, dest_ops: Union[str, List[str]], *, hoist_tms: bool = True, nop_count: int = 1, daisy_chain: bool = False): """ - Instruct pybuda compiler to insert a fork-join buffering NOP instruction on the edge identified by the named src/dest pair. + Instruct forge compiler to insert a fork-join buffering NOP instruction on the edge identified by the named src/dest pair. Note: Adding a fork-join buffering NOP instructions may lead to exceptions! Parameters @@ -929,7 +929,7 @@ def insert_buffering_nop(src_op: str, dest_ops: Union[str, List[str]], *, hoist_ "DEPRECATION WARNING! Please use `insert_nop` instead of `insert_buffering_nop`. To add a buffering nop, use the \ internal API `_internal_insert_fj_buffering_nop`." - Instruct pybuda compiler to insert a buffering NOP instruction on the edge identified by the named src/dest pair. + Instruct forge compiler to insert a buffering NOP instruction on the edge identified by the named src/dest pair. Note: Adding buffering NOP instructions may lead to exceptions! Parameters @@ -965,7 +965,7 @@ def insert_buffering_nop(src_op: str, dest_ops: Union[str, List[str]], *, hoist_ def add_schedule_constraint(partial_ordering: List[str]): """ - Instruct pybuda compiler to schedule ops in a way that respects the given partial ordering. + Instruct forge compiler to schedule ops in a way that respects the given partial ordering. The compiler will ensure to schedule op_order[i] before op_order[i+1] in the final schedule. Parameters @@ -1107,13 +1107,13 @@ def _set_global_compiler_config(config: CompilerConfig): g_compiler_config = config -def _set_pybuda_override_veto(general_config_dict, environ_config_dict): +def _set_forge_override_veto(general_config_dict, environ_config_dict): import json - env_dict = {key: value for key, value in os.environ.items() if key.startswith("PYBUDA_") and key != "PYBUDA_OVERRIDES_VETO"} + env_dict = {key: value for key, value in os.environ.items() if key.startswith("FORGE_") and key != "FORGE_OVERRIDES_VETO"} env_dict = {**env_dict, **environ_config_dict} - os.environ["PYBUDA_OVERRIDES_VETO"] = json.dumps({ + os.environ["FORGE_OVERRIDES_VETO"] = json.dumps({ "general_conf": general_config_dict, "environ_conf": env_dict, }) diff --git a/pybuda/pybuda/pybudaglobal.py b/forge/forge/forgeglobal.py similarity index 86% rename from pybuda/pybuda/pybudaglobal.py rename to forge/forge/forgeglobal.py index 4596f0514..101aeccca 100644 --- a/pybuda/pybuda/pybudaglobal.py +++ b/forge/forge/forgeglobal.py @@ -4,7 +4,7 @@ """ 'Singleton' that holds pointers to all devices, modules, multi-processing queues, and other globally used data. 
-Devices and modules register with PyBudaGlobal as they are created +Devices and modules register with ForgeGlobal as they are created """ from typing import Tuple @@ -26,19 +26,19 @@ # Keep track of state changes (devices, modules) g_state_changed = True -# Are we actively tracing a graph, allows forwarding through pybuda modules without creating ops with unique names +# Are we actively tracing a graph, allows forwarding through forge modules without creating ops with unique names g_tracing = False # ID used to uniquefy nodes when no names are provided g_unique_node_id = -1 from pyinstrument import Profiler -profiler = Profiler() if "PYBUDA_PROFILE" in os.environ else None +profiler = Profiler() if "FORGE_PROFILE" in os.environ else None # If true, various defaults will revert to development mode - like, debug logging, # default device will be model instead of silicon, etc. -def PYBUDA_DEVMODE(): - return "PYBUDA_DEVMODE" in os.environ +def FORGE_DEVMODE(): + return "FORGE_DEVMODE" in os.environ def set_device_pipeline(devs: Tuple["Device"]): """ @@ -69,13 +69,13 @@ def get_devices(): return devices def get_tenstorrent_device(): - from pybuda.ttdevice import TTDevice + from forge.ttdevice import TTDevice for device in devices: if isinstance(device, TTDevice): return device return None -def pybuda_reset(): +def forge_reset(): """ Clears global list of devices and modules. Only needed in special circumstances, like testing. """ @@ -86,7 +86,7 @@ def pybuda_reset(): devices = [] modules = [] - from pybuda.config import _clear_global_compiler_config + from forge.config import _clear_global_compiler_config _clear_global_compiler_config() set_state_changed() @@ -125,7 +125,7 @@ def start_tracing(): def stop_tracing(): """ - Indicate that a graph trace has ended, pybuda graph can be forwarded without generating unique names + Indicate that a graph trace has ended, forge graph can be forwarded without generating unique names """ global g_tracing g_tracing = False @@ -173,7 +173,7 @@ def create_queue(mp_context = None) -> queue.Queue: """ Create a multi-processing queue, or if force sequential is set, a regular queue """ - if "PYBUDA_FORCE_SEQUENTIAL" not in os.environ and os.environ.get("PYBUDA_FORCE_THREADS", "0") == "0": + if "FORGE_FORCE_SEQUENTIAL" not in os.environ and os.environ.get("FORGE_FORCE_THREADS", "0") == "0": assert mp_context is not None, "Must provide mp_context" q = mp_context.Queue() q.cancel_join_thread() diff --git a/pybuda/pybuda/fx/__init__.py b/forge/forge/fx/__init__.py similarity index 100% rename from pybuda/pybuda/fx/__init__.py rename to forge/forge/fx/__init__.py diff --git a/pybuda/pybuda/fx/capture.py b/forge/forge/fx/capture.py similarity index 93% rename from pybuda/pybuda/fx/capture.py rename to forge/forge/fx/capture.py index 6da0d452f..e1719925e 100644 --- a/pybuda/pybuda/fx/capture.py +++ b/forge/forge/fx/capture.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 # -# Capture the FX graph and convert to MixedGraph of PyBuda and CPU graphs +# Capture the FX graph and convert to MixedGraph of Forge and CPU graphs # from typing import Dict, List, Optional @@ -12,15 +12,15 @@ import torch from loguru import logger -from pybuda._C.graph import create_op_node, create_data_edge, create_parameter_input, create_activation_input, create_output, create_constant_input, add_subgraph_io_link_edge -from pybuda.tensor import pytorch_dtype_to_buda_dataformat -from pybuda.fx.nodes import get_pybuda_node, torch_constant_ops, is_supported_op, get_unsupported_nodes -from 
pybuda.config import _get_global_compiler_config -from pybuda.fx.mixed_graph import MixedGraph -from pybuda.fx.schedule import TensorSource, Schedule -from pybuda.fx.graph_utils import reduce_graph, graph_lint +from forge._C.graph import create_op_node, create_data_edge, create_parameter_input, create_activation_input, create_output, create_constant_input, add_subgraph_io_link_edge +from forge.tensor import pytorch_dtype_to_buda_dataformat +from forge.fx.nodes import get_forge_node, torch_constant_ops, is_supported_op, get_unsupported_nodes +from forge.config import _get_global_compiler_config +from forge.fx.mixed_graph import MixedGraph +from forge.fx.schedule import TensorSource, Schedule +from forge.fx.graph_utils import reduce_graph, graph_lint -import pybuda +import forge class CaptureFX: def __init__(self): @@ -43,7 +43,7 @@ def capture_sample_outputs(self, outputs: List[torch.Tensor], subgraph_id: int): assert self.graph is not None self.graph.capture_sample_outputs(outputs, subgraph_id) - def get_buda_graph(self) -> pybuda._C.graph.Graph: + def get_buda_graph(self) -> forge._C.graph.Graph: assert self.graph is not None return self.graph.graph @@ -77,12 +77,12 @@ def eval_node(self, node): return node.target(*eval_args, **kwargs) - def add_op(self, node, name, pybuda_node, subgraph_idx): - shape = node.meta['tensor_meta'].shape if pybuda_node.shape is None else pybuda_node.shape - dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'].dtype) if pybuda_node.dtype is None else pybuda_node.dtype + def add_op(self, node, name, forge_node, subgraph_idx): + shape = node.meta['tensor_meta'].shape if forge_node.shape is None else forge_node.shape + dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'].dtype) if forge_node.dtype is None else forge_node.dtype logger.trace("add_op: {} shape: {} dtype: {}", name, shape, dtype) - self.add_constants_if_necessary(pybuda_node.args, subgraph_idx) + self.add_constants_if_necessary(forge_node.args, subgraph_idx) if "nn_module_stack" in node.meta: tags = { "layer": list(node.meta["nn_module_stack"].values())[-1][0], @@ -95,13 +95,13 @@ def add_op(self, node, name, pybuda_node, subgraph_idx): nid = create_op_node( self.get_buda_graph(), f"{name}_{subgraph_idx}", - pybuda_node.op, + forge_node.op, [int(dim) for dim in shape], pytorch_dtype_to_buda_dataformat(dtype), subgraph_idx, tags) - for i, input_node in enumerate(pybuda_node.args): + for i, input_node in enumerate(forge_node.args): create_data_edge(self.get_buda_graph(), self.node_to_id[input_node], 0, nid, i, []) if isinstance(node.target, torch._ops.OpOverloadPacket): @@ -109,7 +109,7 @@ def add_op(self, node, name, pybuda_node, subgraph_idx): # For input nodes, node.target is str self.id_to_intermed[nid] = self.eval_node(node) - if (pybuda_node.wrap_tuple): + if (forge_node.wrap_tuple): nid = (nid,) return nid @@ -234,8 +234,8 @@ def process_function(self, node, subgraph_idx): self.node_to_id[node] = self.node_to_id[node.args[0]][node.args[1]] self.id_to_intermed[self.node_to_id[node]] = self.id_to_intermed[self.node_to_id[node]][node.args[1]] elif is_supported_op(op_name, node): - pybuda_node = get_pybuda_node(op_name, node) - self.node_to_id[node] = self.add_op(node, node.name, pybuda_node, subgraph_idx) + forge_node = get_forge_node(op_name, node) + self.node_to_id[node] = self.add_op(node, node.name, forge_node, subgraph_idx) else: # Unsupported function, fall back to CPU assert False, f"Unsupported function {op_name}" diff --git a/pybuda/pybuda/fx/graph_utils.py 
b/forge/forge/fx/graph_utils.py similarity index 98% rename from pybuda/pybuda/fx/graph_utils.py rename to forge/forge/fx/graph_utils.py index b278321ff..ff5621f3b 100644 --- a/pybuda/pybuda/fx/graph_utils.py +++ b/forge/forge/fx/graph_utils.py @@ -10,7 +10,7 @@ import torch from loguru import logger -from pybuda.fx.nodes import call_function_is_nop, call_function_is_reshape +from forge.fx.nodes import call_function_is_nop, call_function_is_reshape def reduce_graph(module_or_graph: Union[torch.fx.Graph, torch.fx.GraphModule]): # Reduce the graph to only the nodes that are used diff --git a/pybuda/pybuda/fx/mixed_graph.py b/forge/forge/fx/mixed_graph.py similarity index 97% rename from pybuda/pybuda/fx/mixed_graph.py rename to forge/forge/fx/mixed_graph.py index 36562a443..52a00efd4 100644 --- a/pybuda/pybuda/fx/mixed_graph.py +++ b/forge/forge/fx/mixed_graph.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 # -# Mixed graph contains a pybuda graph, and one or more FX graphs that will be executed on the CPU. It is +# Mixed graph contains a forge graph, and one or more FX graphs that will be executed on the CPU. It is # generated by capturing a FX graph from pt2. Unsupported ops, or arguments will be dropped down to CPU. # @@ -15,16 +15,16 @@ import torch from loguru import logger -import pybuda -from pybuda.fx.nodes import torch_constant_ops, call_function_is_nop, call_function_is_reshape -from pybuda.fx.schedule import Schedule -from pybuda.fx.graph_utils import reduce_graph, get_output_node, append_to_output, move_output_to_end, remove_output_index, graph_lint, is_nop_graph, is_constant_graph, has_output, graph_to_device -from pybuda.fx.trace import IOTracer -from pybuda._C.torch_device import unique_id +import forge +from forge.fx.nodes import torch_constant_ops, call_function_is_nop, call_function_is_reshape +from forge.fx.schedule import Schedule +from forge.fx.graph_utils import reduce_graph, get_output_node, append_to_output, move_output_to_end, remove_output_index, graph_lint, is_nop_graph, is_constant_graph, has_output, graph_to_device +from forge.fx.trace import IOTracer +from forge._C.torch_device import unique_id class MixedGraph: def __init__(self, module_name: str): - self.graph = pybuda._C.graph.Graph(module_name) + self.graph = forge._C.graph.Graph(module_name) self.inputs_per_subgraph : Dict[int, List[int]] = {} self.outputs_per_subgraph : Dict[int, List[int]] = {} diff --git a/pybuda/pybuda/fx/nodes.py b/forge/forge/fx/nodes.py similarity index 83% rename from pybuda/pybuda/fx/nodes.py rename to forge/forge/fx/nodes.py index 6d1ab30f5..dc1c4c548 100644 --- a/pybuda/pybuda/fx/nodes.py +++ b/forge/forge/fx/nodes.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 # -# Functions that convert FX nodes to PyBuda +# Functions that convert FX nodes to Forge # import sys @@ -13,11 +13,11 @@ import torch from loguru import logger -from pybuda._C.graph import OpType -from pybuda.tensor import pytorch_dtype_to_buda_dataformat -from pybuda.config import CompilerConfig, _get_global_compiler_config +from forge._C.graph import OpType +from forge.tensor import pytorch_dtype_to_buda_dataformat +from forge.config import CompilerConfig, _get_global_compiler_config -class PyBudaNode: +class ForgeNode: def __init__(self, op: OpType, args: List[torch.fx.node.Node]): self.op = op self.args = args @@ -25,19 +25,19 @@ def __init__(self, op: OpType, args: List[torch.fx.node.Node]): self.dtype = None self.wrap_tuple = False -def process_dummy_no_attr(node, pybuda_op_name): - return 
PyBudaNode(OpType(pybuda_op_name, []), node.args) +def process_dummy_no_attr(node, forge_op_name): + return ForgeNode(OpType(forge_op_name, []), node.args) -def process_dummy_attr_in_args(node, pybuda_op_name): +def process_dummy_attr_in_args(node, forge_op_name): attrs = node.args[1] if len(node.args) == 2 else node.args[1:] if not isinstance(attrs, (list, tuple)): attrs = [attrs, ] - return PyBudaNode(OpType(pybuda_op_name, attrs), [node.args[0], ]) + return ForgeNode(OpType(forge_op_name, attrs), [node.args[0], ]) -def process_expand(node, pybuda_op_name): - return PyBudaNode(OpType(pybuda_op_name, []), [node.args[0], ]) +def process_expand(node, forge_op_name): + return ForgeNode(OpType(forge_op_name, []), [node.args[0], ]) -def process_clamp(node, pybuda_op_name): +def process_clamp(node, forge_op_name): assert len(node.args) == 3 inputs = [node.args[0],] min_ = node.args[1] @@ -45,23 +45,23 @@ def process_clamp(node, pybuda_op_name): if min_ is None: assert max_ is not None, "Both min and max attributes for clmap are empty" - return PyBudaNode(OpType("relu", [max_, "max"]), inputs) + return ForgeNode(OpType("relu", [max_, "max"]), inputs) elif max_ is None: assert min_ is not None, "Both min and max attributes for clmap are empty" - return PyBudaNode(OpType("relu", [min_, "min"]), inputs) + return ForgeNode(OpType("relu", [min_, "min"]), inputs) else: - return PyBudaNode(OpType(pybuda_op_name, named_attrs = {"min": min_, "max": max_}), inputs) + return ForgeNode(OpType(forge_op_name, named_attrs = {"min": min_, "max": max_}), inputs) -def process_flatten(node, pybuda_op_name): - return PyBudaNode(OpType(pybuda_op_name, [-1, ]), [node.args[0], ]) +def process_flatten(node, forge_op_name): + return ForgeNode(OpType(forge_op_name, [-1, ]), [node.args[0], ]) -def process_gelu(node, pybuda_op_name): - return PyBudaNode(OpType(pybuda_op_name, ["none", ]), node.args) +def process_gelu(node, forge_op_name): + return ForgeNode(OpType(forge_op_name, ["none", ]), node.args) -def process_getitem(node, pybuda_op_name): +def process_getitem(node, forge_op_name): num_dims = sum([(isinstance(dim, slice) and (dim.start is not None or dim.stop is not None)) or (not isinstance(dim, slice) and dim is not None) for dim in node.args[1]]) if num_dims == 0: - return PyBudaNode(OpType("nop", []), [node.args[0], ]) + return ForgeNode(OpType("nop", []), [node.args[0], ]) assert num_dims <= 1, "TODO: Support multi axis getitem" for dim, slice_index in enumerate(node.args[1]): if isinstance(slice_index, slice) and slice_index.start is None and slice_index.stop is None: @@ -83,9 +83,9 @@ def process_getitem(node, pybuda_op_name): if stop < 0: stop += node.args[0].meta['tensor_meta'].shape[dim] - return PyBudaNode(OpType(pybuda_op_name, [dim, start, stop, stride]), [node.args[0], ]) + return ForgeNode(OpType(forge_op_name, [dim, start, stop, stride]), [node.args[0], ]) -def process_interpolate(node, pybuda_op_name): +def process_interpolate(node, forge_op_name): assert all([arg in node.kwargs for arg in ["size", "mode", "align_corners"]]) output_size = node.kwargs["size"] @@ -99,9 +99,9 @@ def process_interpolate(node, pybuda_op_name): assert False, f"Unsupported interpolate mode: {mode_str}" attrs = [output_size, output_size, mode, align_corners, 0] # channel-last is false for pt - return PyBudaNode(OpType(pybuda_op_name, attrs), [node.args[0], ]) + return ForgeNode(OpType(forge_op_name, attrs), [node.args[0], ]) -def process_transpose(node, pybuda_op_name): +def process_transpose(node, forge_op_name): 
torch_op_name = node.target.__name__ if torch_op_name == "permute": dim0 = None @@ -113,7 +113,7 @@ def process_transpose(node, pybuda_op_name): elif dim1 is None: dim1 = i else: - assert False, "Multi axis permute needs to be added to pybuda" + assert False, "Multi axis permute needs to be added to forge" elif torch_op_name == "transpose": dim0 = node.args[1] @@ -129,9 +129,9 @@ def process_transpose(node, pybuda_op_name): named_attrs = {"dim0": dim0, "dim1": dim1, "z_dim_slice": -1} - return PyBudaNode(OpType(pybuda_op_name, named_attrs=named_attrs), [node.args[0], ]) + return ForgeNode(OpType(forge_op_name, named_attrs=named_attrs), [node.args[0], ]) -def process_softmax(node, pybuda_op_name): +def process_softmax(node, forge_op_name): if len(node.args) == 1: assert "dim" in node.kwargs, "dim must be specified" dim = node.kwargs["dim"] @@ -142,9 +142,9 @@ def process_softmax(node, pybuda_op_name): dim -= len(node.args[0].meta['tensor_meta'].shape) stable = 1 attrs = [dim, stable] - return PyBudaNode(OpType(pybuda_op_name, attrs), [node.args[0], ]) + return ForgeNode(OpType(forge_op_name, attrs), [node.args[0], ]) -def process_conv2d(node, pybuda_op_name): +def process_conv2d(node, forge_op_name): assert len(node.args) == 9 inputs = [node.args[0], node.args[1]] @@ -164,9 +164,9 @@ def process_conv2d(node, pybuda_op_name): assert all([d == dilation[0] for d in dilation]), "Dilation is not same for all-dim, not supported" attrs = strides + [dilation[0], group] + padding + [False, 0, 0, 0, False] # channel-last = false for pt - return PyBudaNode(OpType(pybuda_op_name, attrs), inputs) + return ForgeNode(OpType(forge_op_name, attrs), inputs) -def process_maxpool2d(node, pybuda_op_name): +def process_maxpool2d(node, forge_op_name): assert len(node.args) >= 2 and len(node.args) <= 7, f"Maxpool-2d supposed to have 2~7 args: #args = {len(node.args)}" inputs = [node.args[0],] kernel_size = node.args[1] @@ -198,13 +198,13 @@ def process_maxpool2d(node, pybuda_op_name): add_sub_surround_value = compiler_cfg.max_pool_add_sub_surround_value attrs = kernel_size + strides + [dilation, ceil_mode] + padding + [add_sub_surround, add_sub_surround_value, False] # channel-last = False for pt - pybuda_node = PyBudaNode(OpType(pybuda_op_name, attrs), inputs) - pybuda_node.shape = node.meta['tensor_meta'][0].shape - pybuda_node.dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'][0].dtype) - pybuda_node.wrap_tuple = True - return pybuda_node + forge_node = ForgeNode(OpType(forge_op_name, attrs), inputs) + forge_node.shape = node.meta['tensor_meta'][0].shape + forge_node.dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'][0].dtype) + forge_node.wrap_tuple = True + return forge_node -def process_matmul(node, pybuda_op_name): +def process_matmul(node, forge_op_name): assert len(node.args) == 2 or len(node.args) == 3 if len(node.args) == 3: # Torch addmm inputs are bias, LHS, RHS @@ -212,48 +212,48 @@ def process_matmul(node, pybuda_op_name): else: args = node.args - return PyBudaNode(OpType(pybuda_op_name, []), args) + return ForgeNode(OpType(forge_op_name, []), args) -def process_embedding(node, pybuda_op_name): +def process_embedding(node, forge_op_name): assert len(node.args) == 2 or len(node.args) == 3 #TODO Handle padding index (arg 2) args = [node.args[0], node.args[1]] - return PyBudaNode(OpType(pybuda_op_name, []), args) + return ForgeNode(OpType(forge_op_name, []), args) -def process_mean(node, pybuda_op_name): +def process_mean(node, forge_op_name): assert len(node.args) >= 2 
dim = node.args[1] attrs = [dim,] args = [node.args[0],] - return PyBudaNode(OpType(pybuda_op_name, attrs), args) + return ForgeNode(OpType(forge_op_name, attrs), args) -def process_layernorm(node, pybuda_op_name): +def process_layernorm(node, forge_op_name): assert len(node.args) == 5 dim = -1 epsilon = node.args[4] attrs = [dim, epsilon] args = [node.args[0], node.args[2], node.args[3]] - pybuda_node = PyBudaNode(OpType(pybuda_op_name, attrs), args) - pybuda_node.shape = node.meta['tensor_meta'][0].shape - pybuda_node.dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'][0].dtype) - pybuda_node.wrap_tuple = True - return pybuda_node + forge_node = ForgeNode(OpType(forge_op_name, attrs), args) + forge_node.shape = node.meta['tensor_meta'][0].shape + forge_node.dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'][0].dtype) + forge_node.wrap_tuple = True + return forge_node -def process_batchnorm(node, pybuda_op_name): +def process_batchnorm(node, forge_op_name): assert len(node.args) == 7 epsilon = node.args[-1] attrs = [epsilon] args = [node.args[0], node.args[1], node.args[2], node.args[3], node.args[4]] - pybuda_node = PyBudaNode(OpType(pybuda_op_name, attrs), args) + forge_node = ForgeNode(OpType(forge_op_name, attrs), args) - pybuda_node.shape = node.meta['tensor_meta'][0].shape - pybuda_node.dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'][0].dtype) - pybuda_node.wrap_tuple = True - return pybuda_node + forge_node.shape = node.meta['tensor_meta'][0].shape + forge_node.dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'][0].dtype) + forge_node.wrap_tuple = True + return forge_node -def process_select(node, pybuda_op_name): +def process_select(node, forge_op_name): assert len(node.args) == 3 dim = node.args[1] @@ -262,9 +262,9 @@ def process_select(node, pybuda_op_name): index = node.args[2] attrs = [dim, index, index+1, 1] args = [node.args[0], ] - return PyBudaNode(OpType(pybuda_op_name, attrs), args) + return ForgeNode(OpType(forge_op_name, attrs), args) -def process_slice(node, pybuda_op_name): +def process_slice(node, forge_op_name): assert len(node.args) == 4 dim = node.args[1] @@ -273,15 +273,15 @@ def process_slice(node, pybuda_op_name): if dim >= 0: dim -= len(node.args[0].meta['tensor_meta'].shape) if start == 0 and end == sys.maxsize: - pybuda_node = PyBudaNode(OpType("nop", []), [node.args[0], ]) + forge_node = ForgeNode(OpType("nop", []), [node.args[0], ]) else: stride = 1 attrs = [dim, start, end, stride] args = [node.args[0], ] - pybuda_node = PyBudaNode(OpType(pybuda_op_name, attrs), args) - return pybuda_node + forge_node = ForgeNode(OpType(forge_op_name, attrs), args) + return forge_node -def process_unsqueeze(node, pybuda_op_name): +def process_unsqueeze(node, forge_op_name): assert len(node.args) == 2 dim = node.args[1] input_ndim = len(node.meta['tensor_meta'].shape) - 1 # supopsed to feed input ndim @@ -290,9 +290,9 @@ def process_unsqueeze(node, pybuda_op_name): dim -= len(node.meta['tensor_meta'].shape) attrs = [dim, input_ndim] - return PyBudaNode(OpType(pybuda_op_name, attrs), [node.args[0], ]) + return ForgeNode(OpType(forge_op_name, attrs), [node.args[0], ]) -def process_reshape(node, pybuda_op_name): +def process_reshape(node, forge_op_name): attrs = node.args[1].copy() if len(node.args) == 2 else node.args[1:].copy() if not isinstance(attrs, (list, tuple)): attrs = [attrs, ] @@ -314,32 +314,32 @@ def process_reshape(node, pybuda_op_name): attrs[blank_index] = input_volume//reshape_volume input_volume = 
node.args[0].meta['tensor_meta'].shape[0] - return PyBudaNode(OpType(pybuda_op_name, attrs), [node.args[0], ]) + return ForgeNode(OpType(forge_op_name, attrs), [node.args[0], ]) -def process_power(node, pybuda_op_name): +def process_power(node, forge_op_name): if isinstance(node.args[1], int) or isinstance(node.args[1], float) and math.isclose(node.args[1] / int(node.args[1]), 1.0): attrs = [int(node.args[1]), ] - pybuda_node = PyBudaNode(OpType("pow", attrs), [node.args[0], ]) + forge_node = ForgeNode(OpType("pow", attrs), [node.args[0], ]) else: - pybuda_node = PyBudaNode(OpType("power", []), node.args) - return pybuda_node + forge_node = ForgeNode(OpType("power", []), node.args) + return forge_node -def process_cat(node, pybuda_op_name): +def process_cat(node, forge_op_name): dim = node.args[1] if dim >= 0: dim -= len(node.meta['tensor_meta'].shape) - pybuda_node = PyBudaNode(OpType(pybuda_op_name, [dim, ]), node.args[0]) - return pybuda_node + forge_node = ForgeNode(OpType(forge_op_name, [dim, ]), node.args[0]) + return forge_node -def process_constant_pad_nd(node, pybuda_op_name): +def process_constant_pad_nd(node, forge_op_name): padding = node.args[1] value = node.args[2] if value != 0.0: raise ValueError("Buda only supports zero padding") # TODO: add to cpu fallback if padding is not 0 - pybuda_node = PyBudaNode(OpType(pybuda_op_name, [*padding, 0, False]), [node.args[0], ]) # mode index 0 = constant - return pybuda_node + forge_node = ForgeNode(OpType(forge_op_name, [*padding, 0, False]), [node.args[0], ]) # mode index 0 = constant + return forge_node -dynamo_to_pybuda_function = { +dynamo_to_forge_function = { "_softmax" : (process_softmax, "softmax"), "add" : (process_dummy_no_attr, "add"), "add_" : (process_dummy_no_attr, "add"), @@ -406,7 +406,7 @@ def process_constant_pad_nd(node, pybuda_op_name): def is_supported_op(torch_op_name, node: torch.fx.Node): - if torch_op_name not in dynamo_to_pybuda_function: + if torch_op_name not in dynamo_to_forge_function: return False # Check for special cases @@ -417,13 +417,13 @@ def is_supported_op(torch_op_name, node: torch.fx.Node): return True -def get_pybuda_node(torch_op_name, node): +def get_forge_node(torch_op_name, node): if not is_supported_op(torch_op_name, node): print(f"Unsupported op {torch_op_name}") breakpoint() assert False, f"Unsupported op {torch_op_name}" - return dynamo_to_pybuda_function[torch_op_name][0](node, dynamo_to_pybuda_function[torch_op_name][1]) + return dynamo_to_forge_function[torch_op_name][0](node, dynamo_to_forge_function[torch_op_name][1]) # Check to see if subgraph is already on device def is_on_device(subgraph_idx: int): @@ -433,12 +433,12 @@ def is_on_device(subgraph_idx: int): def remove_subgraph(subgraph_idx: int): pass -def add_op(graph, node, name, pybuda_node, subgraph_idx): +def add_op(graph, node, name, forge_node, subgraph_idx): global node_to_id - shape = node.meta['tensor_meta'].shape if pybuda_node.shape is None else pybuda_node.shape - dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'].dtype) if pybuda_node.dtype is None else pybuda_node.dtype + shape = node.meta['tensor_meta'].shape if forge_node.shape is None else forge_node.shape + dtype = pytorch_dtype_to_buda_dataformat(node.meta['tensor_meta'].dtype) if forge_node.dtype is None else forge_node.dtype - add_constants_if_necessary(graph, pybuda_node.args, subgraph_idx) + add_constants_if_necessary(graph, forge_node.args, subgraph_idx) if "nn_module_stack" in node.meta: tags = { "layer": 
list(node.meta["nn_module_stack"].values())[-1][0], @@ -451,13 +451,13 @@ def add_op(graph, node, name, pybuda_node, subgraph_idx): nid = create_op_node( graph, f"{name}_{subgraph_idx}", - pybuda_node.op, + forge_node.op, [int(dim) for dim in shape], pytorch_dtype_to_buda_dataformat(dtype), subgraph_idx, tags) - for i, input_node in enumerate(pybuda_node.args): + for i, input_node in enumerate(forge_node.args): create_data_edge(graph, node_to_id[input_node], 0, nid, i, []) eval_args = [id_to_intermed[node_to_id[arg]] if isinstance(arg, torch.fx.node.Node) else arg for arg in node.args] @@ -470,7 +470,7 @@ def add_op(graph, node, name, pybuda_node, subgraph_idx): # We will add NOP in cases where input to current subgraph is left on device # For input nodes, node.target is str id_to_intermed[nid] = node.target(*eval_args, **kwargs) - if (pybuda_node.wrap_tuple): + if (forge_node.wrap_tuple): nid = (nid,) return nid @@ -616,8 +616,8 @@ def process_function(node): node_to_id[node] = node_to_id[node.args[0]][node.args[1]] id_to_intermed[node_to_id[node]] = id_to_intermed[node_to_id[node]][node.args[1]] else: - pybuda_node = get_pybuda_node(op_name, node) - node_to_id[node] = add_op(graph, node, node.name, pybuda_node, subgraph_idx) + forge_node = get_forge_node(op_name, node) + node_to_id[node] = add_op(graph, node, node.name, forge_node, subgraph_idx) # Traverse up the graph from output nodes to populate consumed nodes set consumed = set() @@ -682,16 +682,16 @@ def process_function(node): def call_function_is_nop(node): assert node.op == "call_function" op_name = node.target.__name__ - if op_name in dynamo_to_pybuda_function: - return dynamo_to_pybuda_function[op_name][1] == "nop" + if op_name in dynamo_to_forge_function: + return dynamo_to_forge_function[op_name][1] == "nop" else: return False def call_function_is_reshape(node): assert node.op == "call_function" op_name = node.target.__name__ - if op_name in dynamo_to_pybuda_function: - return dynamo_to_pybuda_function[op_name][1] == "reshape" + if op_name in dynamo_to_forge_function: + return dynamo_to_forge_function[op_name][1] == "reshape" else: return False diff --git a/pybuda/pybuda/fx/schedule.py b/forge/forge/fx/schedule.py similarity index 99% rename from pybuda/pybuda/fx/schedule.py rename to forge/forge/fx/schedule.py index 46c59ded7..77b475ef1 100644 --- a/pybuda/pybuda/fx/schedule.py +++ b/forge/forge/fx/schedule.py @@ -12,7 +12,7 @@ import torch from loguru import logger -from pybuda.fx.graph_utils import get_output_node +from forge.fx.graph_utils import get_output_node # Enum to hold the source of a tensor class TensorSource(Enum): diff --git a/pybuda/pybuda/fx/torch_decomp_reconstruct.py b/forge/forge/fx/torch_decomp_reconstruct.py similarity index 97% rename from pybuda/pybuda/fx/torch_decomp_reconstruct.py rename to forge/forge/fx/torch_decomp_reconstruct.py index f3c7f50b4..a72565cc5 100644 --- a/pybuda/pybuda/fx/torch_decomp_reconstruct.py +++ b/forge/forge/fx/torch_decomp_reconstruct.py @@ -20,13 +20,13 @@ def decompose_matmul(bias, input, weight) -> torch.Tensor: res = torch.add(res, bias) return res -pybuda_decompositions = { +forge_decompositions = { torch.ops.aten.split.Tensor: decompose_split, torch.ops.aten.addmm.default: decompose_matmul, } -def get_pybuda_decompositions(): - return pybuda_decompositions +def get_forge_decompositions(): + return forge_decompositions # Reconstruct class ReconstructBilinearResize2d(): diff --git a/pybuda/pybuda/fx/trace.py b/forge/forge/fx/trace.py similarity index 100% rename from 
pybuda/pybuda/fx/trace.py rename to forge/forge/fx/trace.py diff --git a/pybuda/pybuda/module.py b/forge/forge/module.py similarity index 93% rename from pybuda/pybuda/module.py rename to forge/forge/module.py index cd5051d05..55f3d7732 100644 --- a/pybuda/pybuda/module.py +++ b/forge/forge/module.py @@ -10,8 +10,8 @@ import tensorflow as tf from loguru import logger -import pybuda -from .pybudaglobal import register_module, lazy_trace_data +import forge +from .forgeglobal import register_module, lazy_trace_data from .tensor import SomeTensor, Tensor, to_pt_tensors, to_tf_tensors, to_tf_variables, pytorch_dtype_to_buda_dataformat, buda_dataformat_to_pytorch_dtype from .parameter import Parameter import onnx @@ -19,17 +19,17 @@ import jax.numpy as jnp import numpy as np -from pybuda.tvm_utils import map_pt_dtype_to_tf, flatten_structured_output +from forge.tvm_utils import map_pt_dtype_to_tf, flatten_structured_output class Module: """ - Module class contains a workload that can be assigned to a single device. The workload can be implemented in PyTorch or in PyBuda. + Module class contains a workload that can be assigned to a single device. The workload can be implemented in PyTorch or in Forge. """ def __init__(self, name: str): - if "PYBUDA_GRAPH_NAME_SUFFIX" in os.environ and os.environ["PYBUDA_GRAPH_NAME_SUFFIX"] != "": - self.name = os.environ["PYBUDA_GRAPH_NAME_SUFFIX"] + "_" + name + if "FORGE_GRAPH_NAME_SUFFIX" in os.environ and os.environ["FORGE_GRAPH_NAME_SUFFIX"] != "": + self.name = os.environ["FORGE_GRAPH_NAME_SUFFIX"] + "_" + name else: self.name: str = name self.device: Optional["Device"] = None @@ -91,7 +91,7 @@ def run(self, *args) -> Tuple: Tuple[tensor,....] Outputs of inference """ - output_q = pybuda.run_inference(self, inputs=[args]) + output_q = forge.run_inference(self, inputs=[args]) return output_q.get() def __getstate__(self): @@ -103,7 +103,7 @@ def __getstate__(self): class PyTorchModule(Module): """ A wrapper around a PyTorch module. If placed on a CPU device, PyTorchModules will be executed as is, and if placed - on a TT device, modules will be lowered to PyBuda. + on a TT device, modules will be lowered to Forge. """ def __init__(self, name: str, module: torch.nn.Module, redirect_forward: bool = True): @@ -200,7 +200,7 @@ def add_parameter(self, name: str, parameter: Parameter): Whether to prepend module name to parameter name """ - if isinstance(parameter, pybuda.parameter.Parameter): + if isinstance(parameter, forge.parameter.Parameter): parameter = torch.nn.Parameter(parameter.value(), requires_grad=False) if name in self.module._parameters: raise RuntimeError(f"Module {self.name} already has parameter '{name}'") @@ -242,11 +242,11 @@ def get_parameters(self) -> List[Parameter]: continue if param == None: continue - pybuda_param = Parameter( + forge_param = Parameter( param.cpu(), requires_grad = param.requires_grad, name=name) - params.append(pybuda_param) + params.append(forge_param) recorded_names.append(name) return params @@ -352,11 +352,11 @@ def get_parameters(self) -> List[Parameter]: name = param.name data = param.numpy() - pybuda_param = Parameter( + forge_param = Parameter( torch.Tensor(data), requires_grad = True, name=name) - params.append(pybuda_param) + params.append(forge_param) return params @@ -626,9 +626,9 @@ def get_parameters(self) -> List[Parameter]: return [] # TODO -class PyBudaModule(Module): +class ForgeModule(Module): """ - A base class for all PyBuda modules. 
User should extend this class and implement `forward` function with workload implementation. + A base class for all Forge modules. User should extend this class and implement `forward` function with workload implementation. """ def __init__(self, name: str): super().__init__(name) @@ -636,20 +636,20 @@ def __init__(self, name: str): # Parameters in this module. This is auto-managed by __setattr__ self._parameters: Dict[str, Parameter] = {} # Constants that do not require gradients - self._constants: Dict[str, pybuda.Tensor] = {} + self._constants: Dict[str, forge.Tensor] = {} # Sub-modules - self._submodulelists: List[List[Dict[str, "PyBudaModule"]]] = [] - self._submodules: Dict[str, "PyBudaModule"] = {} + self._submodulelists: List[List[Dict[str, "ForgeModule"]]] = [] + self._submodules: Dict[str, "ForgeModule"] = {} self._user_inserted_tapout_queues: List[Tuple[str, int]] = [] self.subgraph_idx = 0 - def get_submodules(self) -> Dict[str, "PyBudaModule"]: + def get_submodules(self) -> Dict[str, "ForgeModule"]: submodules = self._submodules for submodulelist in self._submodulelists: - if not all([isinstance(sm, PyBudaModule) for sm in submodulelist]): + if not all([isinstance(sm, ForgeModule) for sm in submodulelist]): continue for submodule in submodulelist: submodules[submodule.name] = submodule @@ -658,7 +658,7 @@ def get_submodules(self) -> Dict[str, "PyBudaModule"]: def __getattribute__(self, name: str): if name == "forward": - orig_forward = super(PyBudaModule, self).__getattribute__("forward") + orig_forward = super(ForgeModule, self).__getattribute__("forward") if callable(orig_forward): def wrap_forward(*args, **kwargs): if len(self.input_names): @@ -669,7 +669,7 @@ def wrap_forward(*args, **kwargs): return orig_forward(*args, **kwargs) return wrap_forward - return super(PyBudaModule, self).__getattribute__(name) + return super(ForgeModule, self).__getattribute__(name) def pre_forward(self, *args, **kwargs): """ @@ -737,7 +737,7 @@ def get_constant(self, name) -> Tensor: Returns ------- - pybuda.Tensor + forge.Tensor constant in module """ @@ -770,13 +770,13 @@ def set_constant(self, name: str, data: SomeTensor): lazy_trace_data(data) if isinstance(data, torch.Tensor): - data = pybuda.Tensor.create_from_torch(data, constant=True, dev_data_format=pytorch_dtype_to_buda_dataformat(data.dtype)) + data = forge.Tensor.create_from_torch(data, constant=True, dev_data_format=pytorch_dtype_to_buda_dataformat(data.dtype)) import numpy as np if isinstance(data, np.ndarray): - data = pybuda.Tensor.create_from_torch(torch.Tensor(data), constant=True, dev_data_format=pytorch_dtype_to_buda_dataformat(data.dtype)) + data = forge.Tensor.create_from_torch(torch.Tensor(data), constant=True, dev_data_format=pytorch_dtype_to_buda_dataformat(data.dtype)) - assert isinstance(data, pybuda.Tensor) + assert isinstance(data, forge.Tensor) self._constants[name] = data @@ -869,7 +869,7 @@ def __setattr__(self, name: str, value): value._set_auto_name(self.name + "." + name) else: value._set_auto_name(name) - elif isinstance(value, PyBudaModule): + elif isinstance(value, ForgeModule): self._submodules[name] = value value.name = self.name + "." 
+ name elif isinstance(value, dict): @@ -888,7 +888,7 @@ def __setattr__(self, name: str, value): self._submodulelists.append(value) object.__setattr__(self, name, value) # default set - if isinstance(value, PyBudaModule): + if isinstance(value, ForgeModule): value.initialize_parameters() def __delattr__(self, name: str): @@ -947,14 +947,14 @@ class IntQueueHandle: """ Handle for an intermediate queue, a debug device for reading out intermediate operation results from the device """ - def __init__(self, module: PyBudaModule, op_name: str, output_index: int): + def __init__(self, module: ForgeModule, op_name: str, output_index: int): self.module = module self.op_name = op_name self.output_index = output_index def wrap_module(module, name: str)-> Module: """ - Wrap a module in a PyBuda module + Wrap a module in a Forge module Parameters ---------- @@ -973,7 +973,7 @@ def wrap_module(module, name: str)-> Module: return PyTorchModule(name, module) elif isinstance(module, tf.keras.Model): return TFModule(name, module) - elif isinstance(module, PyBudaModule): + elif isinstance(module, ForgeModule): return module else: raise RuntimeError("Unsupported module type: " + str(type(module))) diff --git a/pybuda/pybuda/op/__init__.py b/forge/forge/op/__init__.py similarity index 98% rename from pybuda/pybuda/op/__init__.py rename to forge/forge/op/__init__.py index 4b4d3726c..20183680b 100644 --- a/pybuda/pybuda/op/__init__.py +++ b/forge/forge/op/__init__.py @@ -17,4 +17,4 @@ from .embedding import Embedding from .dram_queue import DRAMQueue from .quantize import Quantize, Dequantize, Requantize, BudaRequantize -import pybuda.op.loss +import forge.op.loss diff --git a/pybuda/pybuda/op/common.py b/forge/forge/op/common.py similarity index 84% rename from pybuda/pybuda/op/common.py rename to forge/forge/op/common.py index 2a8b1c558..9320600a9 100644 --- a/pybuda/pybuda/op/common.py +++ b/forge/forge/op/common.py @@ -6,15 +6,15 @@ from ..tensor import Tensor from ..parameter import Parameter -from pybuda.op.eval.pybuda import get_f_pybuda_eval, get_f_pybuda_shape -from pybuda._C import DataFormat -from pybuda._C.graph import OpType -import pybuda -from pybuda.pybudaglobal import get_unique_node_id, tracing +from forge.op.eval.forge import get_f_forge_eval, get_f_forge_shape +from forge._C import DataFormat +from forge._C.graph import OpType +import forge +from forge.forgeglobal import get_unique_node_id, tracing depracated_name_dict = {} deprecated_op_id = 0 -class PyBudaOp: +class ForgeOp: def __init__( self, @@ -39,7 +39,7 @@ def __init__( depracated_name_dict[f"{op_type}_{deprecated_op_id}"] = self.name deprecated_op_id += 1 - operands = tuple(pybuda.op.Constant("", constant=operand) if isinstance(operand, (int, float)) else operand for operand in operands) + operands = tuple(forge.op.Constant("", constant=operand) if isinstance(operand, (int, float)) else operand for operand in operands) self.operands = operands self.attrs = attrs self.named_attrs = named_attrs @@ -51,7 +51,7 @@ def get_tensor(self, out_df=None) -> Tensor: """ #shapes = [o.shape.get_pytorch_shape() if isinstance(o, (Tensor, Parameter)) else o for o in self.operands] shapes = [o.shape.get_pytorch_shape() for o in self.operands] - shape, self.operand_broadcast = get_f_pybuda_shape(self.cpp_op_type)(shapes) + shape, self.operand_broadcast = get_f_forge_shape(self.cpp_op_type)(shapes) # TODO: pick data formats in some way when mismatched inputs are coming... 
if out_df is not None: @@ -83,7 +83,7 @@ def get_tensor(self, out_df=None) -> Tensor: # Calculate reference if there's one if all([o.has_value() if isinstance(o, (Tensor, Parameter)) else True for o in self.operands]): values = [o.value() if isinstance(o, (Tensor, Parameter)) else o for o in self.operands] - result.set_value(get_f_pybuda_eval(self.cpp_op_type)(values)) + result.set_value(get_f_forge_eval(self.cpp_op_type)(values)) return result diff --git a/pybuda/pybuda/op/constant.py b/forge/forge/op/constant.py similarity index 93% rename from pybuda/pybuda/op/constant.py rename to forge/forge/op/constant.py index a27067f95..b8fb01e08 100644 --- a/pybuda/pybuda/op/constant.py +++ b/forge/forge/op/constant.py @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from ..tensor import Tensor -from .common import PyBudaOp as op +from .common import ForgeOp as op from typing import Union, Tuple, List def Constant(name: str, *, constant: float) -> Tensor: diff --git a/pybuda/pybuda/op/convolution.py b/forge/forge/op/convolution.py similarity index 97% rename from pybuda/pybuda/op/convolution.py rename to forge/forge/op/convolution.py index 8a7b757ff..1f4c53096 100644 --- a/pybuda/pybuda/op/convolution.py +++ b/forge/forge/op/convolution.py @@ -5,9 +5,9 @@ from ..tensor import Tensor from ..parameter import Parameter -from .common import PyBudaOp as op +from .common import ForgeOp as op -from pybuda.op.eval.sparse_utils import conv2d_padding_to_canonical, conv3d_padding_to_canonical +from forge.op.eval.sparse_utils import conv2d_padding_to_canonical, conv3d_padding_to_canonical def Conv2d( diff --git a/pybuda/pybuda/op/dram_queue.py b/forge/forge/op/dram_queue.py similarity index 96% rename from pybuda/pybuda/op/dram_queue.py rename to forge/forge/op/dram_queue.py index 24a3cc3e5..8fb0f40a6 100644 --- a/pybuda/pybuda/op/dram_queue.py +++ b/forge/forge/op/dram_queue.py @@ -4,7 +4,7 @@ from typing import Optional from ..tensor import Tensor -from .common import PyBudaOp as op +from .common import ForgeOp as op def DRAMQueue(name: str, operandA: Tensor, *, num_entries: int) -> Tensor: diff --git a/pybuda/pybuda/op/eltwise_binary.py b/forge/forge/op/eltwise_binary.py similarity index 99% rename from pybuda/pybuda/op/eltwise_binary.py rename to forge/forge/op/eltwise_binary.py index 774edc36d..b19801033 100644 --- a/pybuda/pybuda/op/eltwise_binary.py +++ b/forge/forge/op/eltwise_binary.py @@ -5,7 +5,7 @@ from ..tensor import Tensor from ..parameter import Parameter -from .common import PyBudaOp as op +from .common import ForgeOp as op def Add( name: str, diff --git a/pybuda/pybuda/op/eltwise_nary.py b/forge/forge/op/eltwise_nary.py similarity index 98% rename from pybuda/pybuda/op/eltwise_nary.py rename to forge/forge/op/eltwise_nary.py index 1297f07d8..43f6c2ea3 100644 --- a/pybuda/pybuda/op/eltwise_nary.py +++ b/forge/forge/op/eltwise_nary.py @@ -5,7 +5,7 @@ from ..tensor import Tensor from ..parameter import Parameter -from .common import PyBudaOp as op +from .common import ForgeOp as op def Concatenate( name: str, diff --git a/pybuda/pybuda/op/eltwise_unary.py b/forge/forge/op/eltwise_unary.py similarity index 99% rename from pybuda/pybuda/op/eltwise_unary.py rename to forge/forge/op/eltwise_unary.py index dbaed489d..8fbe3e4b8 100644 --- a/pybuda/pybuda/op/eltwise_unary.py +++ b/forge/forge/op/eltwise_unary.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 from typing import Union, Tuple from ..tensor import Tensor -from .common import PyBudaOp as op +from .common import ForgeOp as op 
def Abs( name: str, diff --git a/pybuda/pybuda/op/embedding.py b/forge/forge/op/embedding.py similarity index 95% rename from pybuda/pybuda/op/embedding.py rename to forge/forge/op/embedding.py index 495f573a4..bdb9e0c50 100644 --- a/pybuda/pybuda/op/embedding.py +++ b/forge/forge/op/embedding.py @@ -5,7 +5,7 @@ from ..tensor import Tensor from ..parameter import Parameter -from .common import PyBudaOp as op +from .common import ForgeOp as op def Embedding( name: str, diff --git a/pybuda/pybuda/op/eval/__init__.py b/forge/forge/op/eval/__init__.py similarity index 100% rename from pybuda/pybuda/op/eval/__init__.py rename to forge/forge/op/eval/__init__.py diff --git a/pybuda/pybuda/op/eval/buda/__init__.py b/forge/forge/op/eval/buda/__init__.py similarity index 95% rename from pybuda/pybuda/op/eval/buda/__init__.py rename to forge/forge/op/eval/buda/__init__.py index 12e6a5a97..465e6f49a 100644 --- a/pybuda/pybuda/op/eval/buda/__init__.py +++ b/forge/forge/op/eval/buda/__init__.py @@ -134,7 +134,7 @@ def _get_module_or_class(op_name): assert op_name in op_to_module_map, f"Buda op module not defined for {op_name}" module_name_or_cls = op_to_module_map[op_name] if type(module_name_or_cls) is str: - return importlib.import_module("." + module_name_or_cls, package="pybuda.op.eval.buda") + return importlib.import_module("." + module_name_or_cls, package="forge.op.eval.buda") else: return module_name_or_cls @@ -143,35 +143,35 @@ def get_f_instance(op_type): assert not isinstance(module_or_class, ModuleType) return module_or_class(op_type) -def get_f_pybuda_shape(op_type, tile_height, tile_width): +def get_f_forge_shape(op_type, tile_height, tile_width): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): return lambda *args: module_or_class.shape(op_type.op, op_type.attr, *args, tile_height, tile_width) else: return lambda *args: module_or_class(op_type).shape(*args, tile_height, tile_width) -def get_f_pybuda_eval(op_type): +def get_f_forge_eval(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): return lambda *args: module_or_class.eval(op_type.op, op_type.attr, *args) else: return module_or_class(op_type).eval -def get_f_pybuda_parallelization(op_type): +def get_f_forge_parallelization(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): return lambda *args: module_or_class.parallelization(op_type.op, op_type.attr, *args) else: return module_or_class(op_type).parallelization -def get_f_pybuda_input_ublock_order(op_type, num_operands): +def get_f_forge_input_ublock_order(op_type, num_operands): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): return module_or_class.input_ublock_order(op_type.op, op_type.attr, num_operands) else: return module_or_class(op_type).input_ublock_order(num_operands) -def get_f_pybuda_execution_cycles(op_type): +def get_f_forge_execution_cycles(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): return lambda *args: module_or_class.execution_cycles(op_type.op, *args) diff --git a/pybuda/pybuda/op/eval/buda/abs.py b/forge/forge/op/eval/buda/abs.py similarity index 90% rename from pybuda/pybuda/op/eval/buda/abs.py rename to forge/forge/op/eval/buda/abs.py index b68457345..d64cb9cc0 100644 --- a/pybuda/pybuda/op/eval/buda/abs.py +++ b/forge/forge/op/eval/buda/abs.py @@ -7,11 +7,11 @@ from ..interface import BudaEltwiseUnaryOp 
import torch -import pybuda -from pybuda.utils import align_up_tile, round_up_div +import forge +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Abs(BudaEltwiseUnaryOp): @@ -60,7 +60,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/buffer.py b/forge/forge/op/eval/buda/buffer.py similarity index 90% rename from pybuda/pybuda/op/eval/buda/buffer.py rename to forge/forge/op/eval/buda/buffer.py index c15c28e22..39cf1d9be 100644 --- a/pybuda/pybuda/op/eval/buda/buffer.py +++ b/forge/forge/op/eval/buda/buffer.py @@ -5,10 +5,10 @@ from ..interface import BudaEltwiseUnaryOp import torch -from pybuda.utils import align_up_tile, round_up_div +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Buffer(BudaEltwiseUnaryOp): @@ -55,7 +55,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/clip.py b/forge/forge/op/eval/buda/clip.py similarity index 91% rename from pybuda/pybuda/op/eval/buda/clip.py rename to forge/forge/op/eval/buda/clip.py index 97a077dd9..225caf2c0 100644 --- a/pybuda/pybuda/op/eval/buda/clip.py +++ b/forge/forge/op/eval/buda/clip.py @@ -7,10 +7,10 @@ from ..interface import BudaEltwiseUnaryOp import torch -from pybuda.utils import align_up_tile, round_up_div +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Clip(BudaEltwiseUnaryOp): @@ -59,7 +59,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/constant.py b/forge/forge/op/eval/buda/constant.py similarity index 100% rename from pybuda/pybuda/op/eval/buda/constant.py rename to forge/forge/op/eval/buda/constant.py diff --git a/pybuda/pybuda/op/eval/buda/cosine.py b/forge/forge/op/eval/buda/cosine.py similarity index 88% rename from pybuda/pybuda/op/eval/buda/cosine.py rename to forge/forge/op/eval/buda/cosine.py index 335b9fb54..f3d1c4f6f 100644 --- a/pybuda/pybuda/op/eval/buda/cosine.py +++ b/forge/forge/op/eval/buda/cosine.py @@ -7,13 +7,13 @@ from ..interface import BudaEltwiseUnaryOp import torch -import pybuda +import forge -from pybuda.utils import align_up_tile, round_up_div +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval -from pybuda.tensor import pad_pytorch_tensor_to_buda -from 
pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.tensor import pad_pytorch_tensor_to_buda +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Cosine(BudaEltwiseUnaryOp): @@ -62,7 +62,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/cyclenet.py b/forge/forge/op/eval/buda/cyclenet.py similarity index 100% rename from pybuda/pybuda/op/eval/buda/cyclenet.py rename to forge/forge/op/eval/buda/cyclenet.py diff --git a/pybuda/pybuda/op/eval/buda/depthwise.py b/forge/forge/op/eval/buda/depthwise.py similarity index 91% rename from pybuda/pybuda/op/eval/buda/depthwise.py rename to forge/forge/op/eval/buda/depthwise.py index 942a00f3e..06a3f8b4a 100644 --- a/pybuda/pybuda/op/eval/buda/depthwise.py +++ b/forge/forge/op/eval/buda/depthwise.py @@ -10,12 +10,12 @@ from loguru import logger -import pybuda._C.balancer as balancer -from pybuda._C import DataFormat, MathFidelity -from pybuda._C.backend_api import get_op_model_execution_cycles -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile -from pybuda._C.graph import UBlockOrder +import forge._C.balancer as balancer +from forge._C import DataFormat, MathFidelity +from forge._C.backend_api import get_op_model_execution_cycles +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile +from forge._C.graph import UBlockOrder from ..common import to_torch_operands, math_fidelity_to_multiplier, data_format_to_int, op_model_to_desc, cast_for_cpu_eval diff --git a/pybuda/pybuda/op/eval/buda/dram_queue.py b/forge/forge/op/eval/buda/dram_queue.py similarity index 94% rename from pybuda/pybuda/op/eval/buda/dram_queue.py rename to forge/forge/op/eval/buda/dram_queue.py index 230bcd34f..37d86aecc 100644 --- a/pybuda/pybuda/op/eval/buda/dram_queue.py +++ b/forge/forge/op/eval/buda/dram_queue.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 from argparse import ArgumentError -import pybuda._C.balancer as balancer +import forge._C.balancer as balancer from ..common import to_torch_operands diff --git a/pybuda/pybuda/op/eval/buda/eltwise_binary.py b/forge/forge/op/eval/buda/eltwise_binary.py similarity index 93% rename from pybuda/pybuda/op/eval/buda/eltwise_binary.py rename to forge/forge/op/eval/buda/eltwise_binary.py index b9570a19a..8b2a89491 100644 --- a/pybuda/pybuda/op/eval/buda/eltwise_binary.py +++ b/forge/forge/op/eval/buda/eltwise_binary.py @@ -4,15 +4,15 @@ from loguru import logger from typing import List, Tuple -from pybuda.utils import align_up_tile, round_up_div, align_up +from forge.utils import align_up_tile, round_up_div, align_up -import pybuda._C.balancer as balancer +import forge._C.balancer as balancer import torch -from pybuda._C import DataFormat, MathFidelity -from pybuda._C.graph import UBlockOrder -from pybuda._C.backend_api import get_op_model_execution_cycles +from forge._C import DataFormat, MathFidelity +from forge._C.graph import UBlockOrder +from forge._C.backend_api import get_op_model_execution_cycles -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ..common import to_torch_operands, math_fidelity_to_multiplier, op_model_to_desc, get_compiler_cached_cycles diff --git 
a/pybuda/pybuda/op/eval/buda/eltwise_nary.py b/forge/forge/op/eval/buda/eltwise_nary.py similarity index 94% rename from pybuda/pybuda/op/eval/buda/eltwise_nary.py rename to forge/forge/op/eval/buda/eltwise_nary.py index 682f5b5e0..d047a791a 100644 --- a/pybuda/pybuda/op/eval/buda/eltwise_nary.py +++ b/forge/forge/op/eval/buda/eltwise_nary.py @@ -4,12 +4,12 @@ from typing import List, Tuple import torch -import pybuda -import pybuda._C.balancer as balancer -from pybuda._C.backend_api import get_op_model_execution_cycles -from ....pybudaglobal import TILE_DIM +import forge +import forge._C.balancer as balancer +from forge._C.backend_api import get_op_model_execution_cycles +from ....forgeglobal import TILE_DIM from ..common import to_torch_operands, op_model_to_desc, get_compiler_cached_cycles -from pybuda.utils import align_up_tile, round_up_div +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval @@ -29,7 +29,7 @@ def eval(type, attr, ops): for t_op in t_ops: assert len(t_op.shape) == 4, f'Tensor must have 4 dimensions, given {len(t_op.shape)}' - # To pybuda shape + # To forge shape for i in range(len(t_ops)): t_ops[i] = t_ops[i][:, :, :originalY*originalX, :] t_ops[i] = t_ops[i].transpose(2, 3) diff --git a/pybuda/pybuda/op/eval/buda/eltwise_unary.py b/forge/forge/op/eval/buda/eltwise_unary.py similarity index 93% rename from pybuda/pybuda/op/eval/buda/eltwise_unary.py rename to forge/forge/op/eval/buda/eltwise_unary.py index 8885f9f01..0f84dded8 100644 --- a/pybuda/pybuda/op/eval/buda/eltwise_unary.py +++ b/forge/forge/op/eval/buda/eltwise_unary.py @@ -5,17 +5,17 @@ import os from loguru import logger -import pybuda._C.balancer as balancer -from pybuda.pybudaglobal import TILE_DIM +import forge._C.balancer as balancer +from forge.forgeglobal import TILE_DIM import torch import torch.nn.functional -from pybuda.utils import align_up_tile -from pybuda._C import DataFormat, MathFidelity -from pybuda._C.graph import UBlockOrder, Shape -from pybuda._C.backend_api import get_op_model_param +from forge.utils import align_up_tile +from forge._C import DataFormat, MathFidelity +from forge._C.graph import UBlockOrder, Shape +from forge._C.backend_api import get_op_model_param from .tm import eval as tm_eval -from pybuda.tensor import pad_pytorch_tensor_to_buda -from pybuda._C.backend_api import get_op_model_execution_cycles +from forge.tensor import pad_pytorch_tensor_to_buda +from forge._C.backend_api import get_op_model_execution_cycles from ..common import to_torch_operands, op_model_to_desc, get_compiler_cached_cycles @@ -49,7 +49,7 @@ def gelu_forward(x, approximate): def eval(type, attr, ops): assert len(ops) == 1, "Eltwise unary should have one input" - # assert (type != "ethernet_datacopy" or (len(attr) == 1 or len(attr) == 2)), f"Ethernet datacopy must only have 1 or 2 attributes. Attrs = {attr}" tenstorrent/pybuda#1085 + # assert (type != "ethernet_datacopy" or (len(attr) == 1 or len(attr) == 2)), f"Ethernet datacopy must only have 1 or 2 attributes. 
Attrs = {attr}" tenstorrent/forge#1085 t_ops = to_torch_operands(*ops) @@ -213,7 +213,7 @@ def execution_cycles(type, arch_name, op_model) -> int: if compiler_cache_cycles is not None: return compiler_cache_cycles - use_legacy_path = bool(int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0"))) + use_legacy_path = bool(int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0"))) # Some ops don't yet have implemented cycles, approximate cycles here # Additionally, always use the BBE path for `reduce` op diff --git a/pybuda/pybuda/op/eval/buda/embedding.py b/forge/forge/op/eval/buda/embedding.py similarity index 90% rename from pybuda/pybuda/op/eval/buda/embedding.py rename to forge/forge/op/eval/buda/embedding.py index c402f57a3..73c121c24 100644 --- a/pybuda/pybuda/op/eval/buda/embedding.py +++ b/forge/forge/op/eval/buda/embedding.py @@ -2,9 +2,9 @@ # SPDX-License-Identifier: Apache-2.0 import torch -from pybuda._C.graph import UBlockOrder +from forge._C.graph import UBlockOrder from ..common import to_torch_operands -from pybuda.tensor import pad_pytorch_tensor_to_buda, align_up_tile +from forge.tensor import pad_pytorch_tensor_to_buda, align_up_tile def eval(type, attr, ops): diff --git a/pybuda/pybuda/op/eval/buda/ethernet_datacopy.py b/forge/forge/op/eval/buda/ethernet_datacopy.py similarity index 90% rename from pybuda/pybuda/op/eval/buda/ethernet_datacopy.py rename to forge/forge/op/eval/buda/ethernet_datacopy.py index c722d244e..da3961035 100644 --- a/pybuda/pybuda/op/eval/buda/ethernet_datacopy.py +++ b/forge/forge/op/eval/buda/ethernet_datacopy.py @@ -6,13 +6,13 @@ from ..interface import BudaEltwiseUnaryOp import torch -import pybuda -from pybuda.utils import align_up_tile, round_up_div +import forge +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval from ..common import to_torch_operands -from pybuda.tensor import pad_pytorch_tensor_to_buda -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.tensor import pad_pytorch_tensor_to_buda +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class EthernetDatacopy(BudaEltwiseUnaryOp): diff --git a/pybuda/pybuda/op/eval/buda/exp.py b/forge/forge/op/eval/buda/exp.py similarity index 87% rename from pybuda/pybuda/op/eval/buda/exp.py rename to forge/forge/op/eval/buda/exp.py index b881a541a..cbb7eda65 100644 --- a/pybuda/pybuda/op/eval/buda/exp.py +++ b/forge/forge/op/eval/buda/exp.py @@ -7,13 +7,13 @@ from ..interface import BudaEltwiseUnaryOp import torch -import pybuda -from pybuda._C import UnsupportedHWOpsError -from pybuda.utils import align_up_tile, round_up_div +import forge +from forge._C import UnsupportedHWOpsError +from forge.utils import align_up_tile, round_up_div from ..common import to_torch_operands -from pybuda.tensor import pad_pytorch_tensor_to_buda -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.tensor import pad_pytorch_tensor_to_buda +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Exp(BudaEltwiseUnaryOp): @@ -63,7 +63,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/fused_ops.py b/forge/forge/op/eval/buda/fused_ops.py similarity 
index 96% rename from pybuda/pybuda/op/eval/buda/fused_ops.py rename to forge/forge/op/eval/buda/fused_ops.py index b67d69872..dff26232d 100644 --- a/pybuda/pybuda/op/eval/buda/fused_ops.py +++ b/forge/forge/op/eval/buda/fused_ops.py @@ -2,8 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 from ..common import op_model_to_desc, get_compiler_cached_cycles -from pybuda._C.graph import UBlockOrder -from pybuda._C.backend_api import get_op_model_execution_cycles +from forge._C.graph import UBlockOrder +from forge._C.backend_api import get_op_model_execution_cycles DEST_INPUT_OR_OUTPUT_COEFF = 0.5 DEST_INPUT_AND_OUTPUT_COEFF = 0.2 diff --git a/pybuda/pybuda/op/eval/buda/log.py b/forge/forge/op/eval/buda/log.py similarity index 91% rename from pybuda/pybuda/op/eval/buda/log.py rename to forge/forge/op/eval/buda/log.py index 4306aaa79..79e55679f 100644 --- a/pybuda/pybuda/op/eval/buda/log.py +++ b/forge/forge/op/eval/buda/log.py @@ -7,10 +7,10 @@ from ..interface import BudaEltwiseUnaryOp import torch -from pybuda.utils import align_up_tile, round_up_div +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Log(BudaEltwiseUnaryOp): @@ -61,7 +61,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/matmul.pth b/forge/forge/op/eval/buda/matmul.pth similarity index 100% rename from pybuda/pybuda/op/eval/buda/matmul.pth rename to forge/forge/op/eval/buda/matmul.pth diff --git a/pybuda/pybuda/op/eval/buda/matmul.py b/forge/forge/op/eval/buda/matmul.py similarity index 97% rename from pybuda/pybuda/op/eval/buda/matmul.py rename to forge/forge/op/eval/buda/matmul.py index 3e38b077b..db5b50ad6 100644 --- a/pybuda/pybuda/op/eval/buda/matmul.py +++ b/forge/forge/op/eval/buda/matmul.py @@ -10,12 +10,12 @@ from loguru import logger -import pybuda._C.balancer as balancer -from pybuda._C import DataFormat, MathFidelity -from pybuda._C.backend_api import get_op_model_execution_cycles -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile, align_up -from pybuda._C.graph import UBlockOrder +import forge._C.balancer as balancer +from forge._C import DataFormat, MathFidelity +from forge._C.backend_api import get_op_model_execution_cycles +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile, align_up +from forge._C.graph import UBlockOrder from ..common import to_torch_operands, cast_for_cpu_eval, math_fidelity_to_multiplier, data_format_to_int, op_model_to_desc, get_compiler_cached_cycles @@ -396,8 +396,8 @@ def execution_cycles(type, arch_name, op_model, theoretical) -> int: # Calculate cycles per core # if ( - os.environ.get("PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS", False) - and os.environ.get("PYBUDA_TEMP_SPARSE_ESTIMATE_ARGS_PER_CORE", False) + os.environ.get("FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS", False) + and os.environ.get("FORGE_TEMP_SPARSE_ESTIMATE_ARGS_PER_CORE", False) ): cycles_to_return = 0 for r in range(op_model.grid_shape.r): @@ -450,7 +450,7 @@ def execution_cycles(type, arch_name, op_model, theoretical) -> int: mblock_executions = m_k * mblock_m * mblock_n ublock_executions = 
mblock_executions * u_kt * ublock_rt * ublock_ct - is_cyclenet = "PYBUDA_CYCLENET" in os.environ + is_cyclenet = "FORGE_CYCLENET" in os.environ if is_cyclenet: input0_df = data_format_to_int(op_model.input_buffers[0].data_format) output_df = data_format_to_int(op_model.output_buffers[0].data_format) diff --git a/pybuda/pybuda/op/eval/buda/nop.py b/forge/forge/op/eval/buda/nop.py similarity index 94% rename from pybuda/pybuda/op/eval/buda/nop.py rename to forge/forge/op/eval/buda/nop.py index a7372a6b9..f28362cff 100644 --- a/pybuda/pybuda/op/eval/buda/nop.py +++ b/forge/forge/op/eval/buda/nop.py @@ -7,10 +7,10 @@ from ..interface import BudaEltwiseUnaryOp import torch -from pybuda.utils import align_up_tile, round_up_div +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Nop(BudaEltwiseUnaryOp): @@ -91,7 +91,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/quantize.py b/forge/forge/op/eval/buda/quantize.py similarity index 89% rename from pybuda/pybuda/op/eval/buda/quantize.py rename to forge/forge/op/eval/buda/quantize.py index bc5299ae0..88c23d92f 100644 --- a/pybuda/pybuda/op/eval/buda/quantize.py +++ b/forge/forge/op/eval/buda/quantize.py @@ -10,15 +10,15 @@ from loguru import logger -import pybuda._C.balancer as balancer -from pybuda._C import DataFormat, MathFidelity -from pybuda._C.backend_api import get_op_model_execution_cycles -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile, align_up -from pybuda._C.graph import UBlockOrder +import forge._C.balancer as balancer +from forge._C import DataFormat, MathFidelity +from forge._C.backend_api import get_op_model_execution_cycles +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile, align_up +from forge._C.graph import UBlockOrder from ..common import op_model_to_desc, get_compiler_cached_cycles -from pybuda.op.eval.pybuda.quantize import STRING_TO_LOWER_LIMIT, STRING_TO_UPPER_LIMIT, STRING_TO_TORCH_DTYPE +from forge.op.eval.forge.quantize import STRING_TO_LOWER_LIMIT, STRING_TO_UPPER_LIMIT, STRING_TO_TORCH_DTYPE def eval(type, attr, ops): if type == "quantization": diff --git a/pybuda/pybuda/op/eval/buda/reciprocal.py b/forge/forge/op/eval/buda/reciprocal.py similarity index 91% rename from pybuda/pybuda/op/eval/buda/reciprocal.py rename to forge/forge/op/eval/buda/reciprocal.py index 6cadc40d7..458770fb0 100644 --- a/pybuda/pybuda/op/eval/buda/reciprocal.py +++ b/forge/forge/op/eval/buda/reciprocal.py @@ -7,11 +7,11 @@ from ..interface import BudaEltwiseUnaryOp import torch -import pybuda -from pybuda.utils import align_up_tile, round_up_div +import forge +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Reciprocal(BudaEltwiseUnaryOp): @@ -64,7 +64,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - 
int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/splice.py b/forge/forge/op/eval/buda/splice.py similarity index 98% rename from pybuda/pybuda/op/eval/buda/splice.py rename to forge/forge/op/eval/buda/splice.py index a8e43651a..4fc284167 100644 --- a/pybuda/pybuda/op/eval/buda/splice.py +++ b/forge/forge/op/eval/buda/splice.py @@ -4,10 +4,10 @@ from ..interface import BudaEltwiseNaryOp import torch -import pybuda -from pybuda._C import UnsupportedHWOpsError -from ....pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile, round_up_div +import forge +from forge._C import UnsupportedHWOpsError +from ....forgeglobal import TILE_DIM +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval diff --git a/pybuda/pybuda/op/eval/buda/sqrt.py b/forge/forge/op/eval/buda/sqrt.py similarity index 89% rename from pybuda/pybuda/op/eval/buda/sqrt.py rename to forge/forge/op/eval/buda/sqrt.py index 567d7c99c..98a86d2fe 100644 --- a/pybuda/pybuda/op/eval/buda/sqrt.py +++ b/forge/forge/op/eval/buda/sqrt.py @@ -7,10 +7,10 @@ from ..interface import BudaEltwiseUnaryOp import torch -from pybuda.utils import align_up_tile, round_up_div +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Sqrt(BudaEltwiseUnaryOp): @classmethod @@ -55,7 +55,7 @@ def execution_cycles(self, arch_name, op_model) -> int: if compiler_cache_cycles is not None: return compiler_cache_cycles - use_legacy_path = bool(int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0"))) + use_legacy_path = bool(int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0"))) if (use_legacy_path ): tile_weight = get_op_model_param(op_model_desc, "tile_weight") diff --git a/pybuda/pybuda/op/eval/buda/tanh.py b/forge/forge/op/eval/buda/tanh.py similarity index 91% rename from pybuda/pybuda/op/eval/buda/tanh.py rename to forge/forge/op/eval/buda/tanh.py index 403cec567..f19d3d249 100644 --- a/pybuda/pybuda/op/eval/buda/tanh.py +++ b/forge/forge/op/eval/buda/tanh.py @@ -7,10 +7,10 @@ from ..interface import BudaEltwiseUnaryOp import torch -from pybuda.utils import align_up_tile, round_up_div +from forge.utils import align_up_tile, round_up_div from .tm import eval as tm_eval -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Tanh(BudaEltwiseUnaryOp): @@ -61,7 +61,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/tilizer.py b/forge/forge/op/eval/buda/tilizer.py similarity index 90% rename from pybuda/pybuda/op/eval/buda/tilizer.py rename to forge/forge/op/eval/buda/tilizer.py index c14218388..7881619e1 100644 --- a/pybuda/pybuda/op/eval/buda/tilizer.py +++ b/forge/forge/op/eval/buda/tilizer.py @@ -7,10 +7,10 @@ from ..interface import BudaEltwiseUnaryOp import torch -from pybuda.utils import align_up_tile, round_up_div +from forge.utils import align_up_tile, 
round_up_div from .tm import eval as tm_eval -from pybuda.pybudaglobal import TILE_DIM -from pybuda._C.graph import UBlockOrder, Shape +from forge.forgeglobal import TILE_DIM +from forge._C.graph import UBlockOrder, Shape class Tilizer(BudaEltwiseUnaryOp): @classmethod @@ -56,7 +56,7 @@ def execution_cycles(self, arch_name, op_model) -> int: return compiler_cache_cycles use_legacy_path = bool( - int(os.environ.get("PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) + int(os.environ.get("FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY", "0")) ) if use_legacy_path: diff --git a/pybuda/pybuda/op/eval/buda/tm.py b/forge/forge/op/eval/buda/tm.py similarity index 98% rename from pybuda/pybuda/op/eval/buda/tm.py rename to forge/forge/op/eval/buda/tm.py index 187adaaa4..cf63b1c20 100644 --- a/pybuda/pybuda/op/eval/buda/tm.py +++ b/forge/forge/op/eval/buda/tm.py @@ -4,9 +4,9 @@ from ..common import to_torch_operands import torch -import pybuda -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile, round_up_div, align_up +import forge +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile, round_up_div, align_up from ..sparse_utils import bcast_sparse_picker_matrix @@ -81,7 +81,7 @@ def eval(type, attr, ops): result.append(t_ops[0].select(dim, offset + i)) else: result.append(zero_slice) - return pybuda.tensor.pad_pytorch_tensor_to_buda(torch.stack(result, dim=dim), []) + return forge.tensor.pad_pytorch_tensor_to_buda(torch.stack(result, dim=dim), []) if type == "gather": assert len(attr) == 5, "Gather should have 5 attributes" @@ -99,7 +99,7 @@ def eval(type, attr, ops): offset += 1 else: result.append(zero_slice) - return pybuda.tensor.pad_pytorch_tensor_to_buda(torch.stack(result, dim=dim), []) + return forge.tensor.pad_pytorch_tensor_to_buda(torch.stack(result, dim=dim), []) if type == "hslice": assert len(attr) == 1, "HSlice should have one attribute, the slice size" diff --git a/pybuda/pybuda/op/eval/buda/transpose.py b/forge/forge/op/eval/buda/transpose.py similarity index 100% rename from pybuda/pybuda/op/eval/buda/transpose.py rename to forge/forge/op/eval/buda/transpose.py diff --git a/pybuda/pybuda/op/eval/buda/void.py b/forge/forge/op/eval/buda/void.py similarity index 98% rename from pybuda/pybuda/op/eval/buda/void.py rename to forge/forge/op/eval/buda/void.py index d0a5b08d9..9cc3b6bcf 100644 --- a/pybuda/pybuda/op/eval/buda/void.py +++ b/forge/forge/op/eval/buda/void.py @@ -4,7 +4,7 @@ # Void op is for testing purposes only import torch -import pybuda +import forge def eval(type, attr, ops): diff --git a/pybuda/pybuda/op/eval/common.py b/forge/forge/op/eval/common.py similarity index 96% rename from pybuda/pybuda/op/eval/common.py rename to forge/forge/op/eval/common.py index b04740c60..7e91aaa55 100644 --- a/pybuda/pybuda/op/eval/common.py +++ b/forge/forge/op/eval/common.py @@ -16,10 +16,10 @@ from loguru import logger -from ...pybudaglobal import TILE_DIM +from ...forgeglobal import TILE_DIM from ...tensor import narrow_buda_tensor_to_pytorch, pad_pytorch_tensor_to_buda, buda_dataformat_to_pytorch_dtype -from pybuda import DataFormat, MathFidelity +from forge import DataFormat, MathFidelity def to_torch_operands(*ops): """ @@ -378,7 +378,7 @@ def data_format_to_int(df: DataFormat) -> int: # elif (desc.type == "reduce"): # desc.op_attr = sub_op_model.reduce_dim -# desc.approx_mode = "PYBUDA_EXP_APPROX" in os.environ +# desc.approx_mode = "FORGE_EXP_APPROX" in os.environ # else: # desc.type = type # desc.mblock_m = 
op_model.output_buffers[0].block_shape.mblock_m @@ -391,8 +391,8 @@ def data_format_to_int(df: DataFormat) -> int: # desc.ublock_kt = op_model.input_buffers[1].block_shape.ublock.rt # desc.mblock_k = op_model.op_shape.inputs[1].rt // desc.ublock_kt # desc.sparse_indices = op_model.sparse_indices -# scale_sparse_args = bool(int(os.environ.get("PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS", True))) -# if bool(int(os.environ.get("PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES", True))): +# scale_sparse_args = bool(int(os.environ.get("FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS", True))) +# if bool(int(os.environ.get("FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES", True))): # sparse_metadata = op_model.get_sparse_metadata() # desc.sparse_indices = sum(sparse_metadata.nz_tiles) # desc.sparse_nz_ublocks = sum(sparse_metadata.nz_ublocks) @@ -403,17 +403,17 @@ def data_format_to_int(df: DataFormat) -> int: # # number of cores. However, not all the cores perform the same amount of work, so we need to # # calculate parameters per core. We keep both of these modes in this transition period. # # -# # PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS (scale_sparse_args) must be set to true to enable any of +# # FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS (scale_sparse_args) must be set to true to enable any of # # the mentioned modes. # # # # Mode 1: # # Average the parameters (by default) # # Mode 2: # # Scale the parameters by the number of cores (needs the env var -# # "PYBUDA_TEMP_SPARSE_ESTIMATE_ARGS_PER_CORE" to be set to true) +# # "FORGE_TEMP_SPARSE_ESTIMATE_ARGS_PER_CORE" to be set to true) # # # if scale_sparse_args: -# per_core_mode = os.environ.get("PYBUDA_TEMP_SPARSE_ESTIMATE_ARGS_PER_CORE", False) +# per_core_mode = os.environ.get("FORGE_TEMP_SPARSE_ESTIMATE_ARGS_PER_CORE", False) # if not per_core_mode: # # Average mode # # @@ -510,9 +510,9 @@ def calculate_tile_size(val): # global g_compiler_perf_cache # if not g_compiler_perf_cache: -# cache_file = os.environ.get("PYBUDA_COMPILER_CACHE", None) +# cache_file = os.environ.get("FORGE_COMPILER_CACHE", None) # if cache_file is not None and os.path.exists(cache_file): -# with open(os.environ["PYBUDA_COMPILER_CACHE"], 'rb') as file: +# with open(os.environ["FORGE_COMPILER_CACHE"], 'rb') as file: # import pickle # g_compiler_perf_cache = pickle.load(file) # else: diff --git a/pybuda/pybuda/op/eval/pybuda/__init__.py b/forge/forge/op/eval/forge/__init__.py similarity index 95% rename from pybuda/pybuda/op/eval/pybuda/__init__.py rename to forge/forge/op/eval/forge/__init__.py index b1371bec7..f46710114 100644 --- a/pybuda/pybuda/op/eval/pybuda/__init__.py +++ b/forge/forge/op/eval/forge/__init__.py @@ -184,10 +184,10 @@ def is_eltwise_nary(op_type): @lru_cache(maxsize=len(op_to_module_map)) def _get_module_or_class(op_name): - assert op_name in op_to_module_map, f"Pybuda op module not defined for {op_name}" + assert op_name in op_to_module_map, f"Forge op module not defined for {op_name}" module_name_or_cls = op_to_module_map[op_name] if type(module_name_or_cls) is str: - return importlib.import_module("." + module_name_or_cls, package="pybuda.op.eval.pybuda") + return importlib.import_module("." 
+ module_name_or_cls, package="forge.op.eval.forge") else: return module_name_or_cls @@ -199,28 +199,28 @@ def get_f_instance(op_type): def empty_function(*inputs): pass -def get_f_pybuda_backward(op_type): +def get_f_forge_backward(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): return lambda *inputs : module_or_class.backward(op_type.op, op_type.attr, *inputs) else: return module_or_class(op_type).backward -def get_f_pybuda_shape(op_type): +def get_f_forge_shape(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): return lambda *inputs : module_or_class.shape(op_type.op, op_type.attr, *inputs) else: return module_or_class(op_type).shape -def get_f_pybuda_eval(op_type): +def get_f_forge_eval(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): return lambda *inputs : module_or_class.eval(op_type.op, op_type.attr, *inputs) else: return module_or_class(op_type).eval -def get_f_pybuda_lower(op_type): +def get_f_forge_lower(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): if op_type.op == "matmul" or op_type.op == "sparse_matmul": @@ -229,7 +229,7 @@ def get_f_pybuda_lower(op_type): else: return module_or_class(op_type).lower -def get_f_pybuda_decompose(op_type): +def get_f_forge_decompose(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): if hasattr(module_or_class, "decompose"): @@ -239,7 +239,7 @@ def get_f_pybuda_decompose(op_type): else: return module_or_class(op_type).decompose -def get_f_pybuda_decompose_post_autograd(op_type): +def get_f_forge_decompose_post_autograd(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): if hasattr(module_or_class, "decompose_post_autograd"): @@ -249,7 +249,7 @@ def get_f_pybuda_decompose_post_autograd(op_type): else: return module_or_class(op_type).decompose_post_autograd -def get_f_pybuda_decompose_post_optimize(op_type): +def get_f_forge_decompose_post_optimize(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): if hasattr(module_or_class, "decompose_post_optimize"): @@ -259,7 +259,7 @@ def get_f_pybuda_decompose_post_optimize(op_type): else: return module_or_class(op_type).decompose_post_optimize -def get_f_pybuda_initial_flops_estimate(op_type): +def get_f_forge_initial_flops_estimate(op_type): module_or_class = _get_module_or_class(op_type.op) if isinstance(module_or_class, ModuleType): if hasattr(module_or_class, "initial_flops_estimate"): diff --git a/pybuda/pybuda/op/eval/pybuda/abs.py b/forge/forge/op/eval/forge/abs.py similarity index 93% rename from pybuda/pybuda/op/eval/pybuda/abs.py rename to forge/forge/op/eval/forge/abs.py index 426f02057..b4623afe8 100644 --- a/pybuda/pybuda/op/eval/pybuda/abs.py +++ b/forge/forge/op/eval/forge/abs.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.abs import Abs as BudaAbs @@ -49,7 +49,7 @@ def backward(self, ac, operand, inputs, output, grad): def lower(self, lc, tensors, outputs): assert 
len(tensors) == 1, "Abs should have one input" - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): node_shape = list(tensors[0].shape) tile_height = calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) diff --git a/pybuda/pybuda/op/eval/pybuda/argmax.py b/forge/forge/op/eval/forge/argmax.py similarity index 97% rename from pybuda/pybuda/op/eval/pybuda/argmax.py rename to forge/forge/op/eval/forge/argmax.py index e5607e45f..fd2b77cfc 100644 --- a/pybuda/pybuda/op/eval/pybuda/argmax.py +++ b/forge/forge/op/eval/forge/argmax.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.abs import Abs as BudaAbs diff --git a/pybuda/pybuda/op/eval/pybuda/buffer.py b/forge/forge/op/eval/forge/buffer.py similarity index 91% rename from pybuda/pybuda/op/eval/pybuda/buffer.py rename to forge/forge/op/eval/forge/buffer.py index 8d275fbf4..b8d67a236 100644 --- a/pybuda/pybuda/op/eval/pybuda/buffer.py +++ b/forge/forge/op/eval/forge/buffer.py @@ -9,9 +9,9 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.buffer import Buffer as BudaBuffer @@ -47,7 +47,7 @@ def backward(self, ac, operand, inputs, output, grad): def lower(self, lc, tensors, outputs): assert len(tensors) == 1, "Buffer should have one input" - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): node_shape = list(tensors[0].shape) tile_height = calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) diff --git a/pybuda/pybuda/op/eval/pybuda/clip.py b/forge/forge/op/eval/forge/clip.py similarity index 97% rename from pybuda/pybuda/op/eval/pybuda/clip.py rename to forge/forge/op/eval/forge/clip.py index b71921f43..afd5c8cf2 100644 --- a/pybuda/pybuda/op/eval/pybuda/clip.py +++ b/forge/forge/op/eval/forge/clip.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.nop import Nop as BudaNop diff --git a/pybuda/pybuda/op/eval/pybuda/constant.py b/forge/forge/op/eval/forge/constant.py similarity index 100% rename from pybuda/pybuda/op/eval/pybuda/constant.py rename to forge/forge/op/eval/forge/constant.py diff --git a/pybuda/pybuda/op/eval/pybuda/convolution.py b/forge/forge/op/eval/forge/convolution.py similarity index 97% rename from pybuda/pybuda/op/eval/pybuda/convolution.py rename to forge/forge/op/eval/forge/convolution.py index 476a87fd2..aae1f0c04 100644 --- a/pybuda/pybuda/op/eval/pybuda/convolution.py +++ b/forge/forge/op/eval/forge/convolution.py @@ -5,11 +5,11 @@ import ast import torch 
-from pybuda._C.graph import NodeType -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile, round_up_div, clamp -from pybuda import Tensor -from pybuda.config import _get_global_compiler_config +from forge._C.graph import NodeType +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile, round_up_div, clamp +from forge import Tensor +from forge.config import _get_global_compiler_config from .transpose import TransposeTM from .buffer import Buffer @@ -304,10 +304,10 @@ def decompose_conv2d_sparse_first(attr, dc, inputs): padding_same = (padding == [(kW // 2), (kW // 2), (kH // 2), (kH // 2)]) pad_for_factorization = False - manual_splice_decomp_th = os.environ.get('PYBUDA_MANUAL_SPLICE_DECOMP_TH') - sparse_r_padding = ast.literal_eval(os.environ.get('PYBUDA_PAD_SPARSE_MM', "{}")) - sparse_weight_padding_mm = ast.literal_eval(os.environ.get('PYBUDA_PAD_SPARSE_MM_WEIGHT_MM', "{}")) - sparse_weight_padding_concat = ast.literal_eval(os.environ.get('PYBUDA_PAD_SPARSE_MM_WEIGHT_CONCAT', "{}")) + manual_splice_decomp_th = os.environ.get('FORGE_MANUAL_SPLICE_DECOMP_TH') + sparse_r_padding = ast.literal_eval(os.environ.get('FORGE_PAD_SPARSE_MM', "{}")) + sparse_weight_padding_mm = ast.literal_eval(os.environ.get('FORGE_PAD_SPARSE_MM_WEIGHT_MM', "{}")) + sparse_weight_padding_concat = ast.literal_eval(os.environ.get('FORGE_PAD_SPARSE_MM_WEIGHT_CONCAT', "{}")) if kH * kW > 1 or (stride[0] > 1 or stride[1] > 1) or not padding_same: sparse_r = align_up_tile(yout * xout) // 32 sparse_c = align_up_tile(result.shape[-2]) // 32 @@ -460,7 +460,7 @@ def decompose_conv2d_sparse_second(attr, dc, inputs): if (kH * kW) > 1: result = dc.op("hslice", [result], (kH * kW,)) - if "PYBUDA_MIDDLE_CNN_BUFFER" in os.environ: # most workloads are ok without it, and perf is much better... so enable only where needed + if "FORGE_MIDDLE_CNN_BUFFER" in os.environ: # most workloads are ok without it, and perf is much better... 
so enable only where needed result = dc.op(Buffer.create(), [result]) # HW workaround for: tenstorrent/budabackend#656 result = dc.op("vstack", [result], (kH * kW,)) result = dc.op(Buffer.create(), [result]) # HW workaround for: tenstorrent/budabackend#656 @@ -557,7 +557,7 @@ def vstack(x, factor): #grouped_conv = groups > 1 #depthwise = cin == groups and cin == cout - #depthwise_env_enabled = "PYBUDA_ENABLE_DEPTHWISE" in os.environ and os.environ["PYBUDA_ENABLE_DEPTHWISE"] == "1" + #depthwise_env_enabled = "FORGE_ENABLE_DEPTHWISE" in os.environ and os.environ["FORGE_ENABLE_DEPTHWISE"] == "1" # Disallow depthwise path when training, needs BW ops implementation #depthwise = depthwise and depthwise_env_enabled and not dc.is_training_enabled() @@ -779,7 +779,7 @@ def decompose(type, attr, dc, inputs): elif should_fracture_conv_at_op_level(attr, dc, inputs): # Fracture decompose_fracture_conv2d_at_op_level(attr, dc, inputs) - elif bool(int(os.environ.get("PYBUDA_CONV2D_SPARSE_SECOND", "0"))): + elif bool(int(os.environ.get("FORGE_CONV2D_SPARSE_SECOND", "0"))): # Sparse second decompose_conv2d_sparse_second(attr, dc, inputs) else: diff --git a/pybuda/pybuda/op/eval/pybuda/cosine.py b/forge/forge/op/eval/forge/cosine.py similarity index 92% rename from pybuda/pybuda/op/eval/pybuda/cosine.py rename to forge/forge/op/eval/forge/cosine.py index 5d3a8061e..908608d16 100644 --- a/pybuda/pybuda/op/eval/pybuda/cosine.py +++ b/forge/forge/op/eval/forge/cosine.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.cosine import Cosine as BudaCosine @@ -44,7 +44,7 @@ def backward(self, ac, operand, inputs, output, grad): def lower(self, lc, tensors, outputs): assert len(tensors) == 1, "Cosine should have one input" - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): node_shape = list(tensors[0].shape) tile_height = calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) diff --git a/pybuda/pybuda/op/eval/pybuda/cumulativesum.py b/forge/forge/op/eval/forge/cumulativesum.py similarity index 95% rename from pybuda/pybuda/op/eval/pybuda/cumulativesum.py rename to forge/forge/op/eval/forge/cumulativesum.py index 2599bb41a..1b771085c 100644 --- a/pybuda/pybuda/op/eval/pybuda/cumulativesum.py +++ b/forge/forge/op/eval/forge/cumulativesum.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.abs import Abs as BudaAbs from .nop import Nop diff --git a/pybuda/pybuda/op/eval/pybuda/depthwise.py b/forge/forge/op/eval/forge/depthwise.py similarity index 98% rename from pybuda/pybuda/op/eval/pybuda/depthwise.py rename to forge/forge/op/eval/forge/depthwise.py index e77daaa50..dc2b3498a 100644 --- a/pybuda/pybuda/op/eval/pybuda/depthwise.py +++ b/forge/forge/op/eval/forge/depthwise.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 
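The padding overrides renamed in the conv2d decomposition above are dictionaries passed through the environment as Python literals and parsed with ast.literal_eval. A minimal sketch of how those variables are read (the read_sparse_mm_overrides helper and the example value are hypothetical; the variable names and parsing come from the diff):

import ast
import os

def read_sparse_mm_overrides():
    # Each FORGE_PAD_SPARSE_MM* variable holds a dict literal, e.g. '{48: 56}',
    # mapping an original row-tile count to the padded count used to make the
    # sparse matmul factorizable.
    sparse_r_padding = ast.literal_eval(os.environ.get("FORGE_PAD_SPARSE_MM", "{}"))
    weight_mm_padding = ast.literal_eval(os.environ.get("FORGE_PAD_SPARSE_MM_WEIGHT_MM", "{}"))
    weight_concat_padding = ast.literal_eval(os.environ.get("FORGE_PAD_SPARSE_MM_WEIGHT_CONCAT", "{}"))
    # FORGE_MANUAL_SPLICE_DECOMP_TH is a plain integer threshold, or None when unset.
    splice_threshold = os.environ.get("FORGE_MANUAL_SPLICE_DECOMP_TH")
    return sparse_r_padding, weight_mm_padding, weight_concat_padding, splice_threshold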
import torch -from pybuda.pybudaglobal import TILE_DIM +from forge.forgeglobal import TILE_DIM from ..common import to_torch_operands, cast_for_cpu_eval diff --git a/pybuda/pybuda/op/eval/pybuda/dram_queue.py b/forge/forge/op/eval/forge/dram_queue.py similarity index 100% rename from pybuda/pybuda/op/eval/pybuda/dram_queue.py rename to forge/forge/op/eval/forge/dram_queue.py diff --git a/pybuda/pybuda/op/eval/pybuda/eltwise_binary.py b/forge/forge/op/eval/forge/eltwise_binary.py similarity index 97% rename from pybuda/pybuda/op/eval/pybuda/eltwise_binary.py rename to forge/forge/op/eval/forge/eltwise_binary.py index f90ead13c..bdbec7f43 100644 --- a/pybuda/pybuda/op/eval/pybuda/eltwise_binary.py +++ b/forge/forge/op/eval/forge/eltwise_binary.py @@ -4,8 +4,8 @@ import os from typing import List, Tuple -from pybuda.pybudaglobal import TILE_DIM -from pybuda.tensor import Tensor +from forge.forgeglobal import TILE_DIM +from forge.tensor import Tensor import numpy as np import torch from .transpose import TransposeTM @@ -17,8 +17,8 @@ from ..buda.nop import Nop as BudaNop from ..common import to_torch_operands -from pybuda.utils import align_up_tile -from pybuda.op.eval.common import calculate_tile_size +from forge.utils import align_up_tile +from forge.op.eval.common import calculate_tile_size def eval(type, attr, ops): @@ -169,12 +169,12 @@ def eq(A, B): #lc.op("power_binary", ops, attr) # 'power' backend op is unary ln_x = lc.op(BudaLog.create(), [ops[0]]) y_ln_x = lc.op("multiply", (ops[1], ln_x)) - approximate_mode = "true" if "PYBUDA_EXP_APPROX" in os.environ else "false" + approximate_mode = "true" if "FORGE_EXP_APPROX" in os.environ else "false" lc.op(BudaExp.create(approximate_mode=approximate_mode), [y_ln_x]) else: # Find proper tile sizes - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): - node_shape = lc.pybuda_shape() + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): + node_shape = lc.forge_shape() tile_height = calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) else: @@ -398,7 +398,7 @@ def decompose_post_autograd(op_type, attr, dc, inputs): res = dc.op("add", (res, x_gt)) dc.fuse(res) return - elif op_type == "maximum" and os.environ.get("PYBUDA_ENABLE_MAXIMUM_DECOMPOSITION", "0") == "1": + elif op_type == "maximum" and os.environ.get("FORGE_ENABLE_MAXIMUM_DECOMPOSITION", "0") == "1": operand0, operand1 = inputs[0], inputs[1] orig_op0_shape = operand0.shape.as_list() orig_op1_shape = operand1.shape.as_list() diff --git a/pybuda/pybuda/op/eval/pybuda/eltwise_nary.py b/forge/forge/op/eval/forge/eltwise_nary.py similarity index 96% rename from pybuda/pybuda/op/eval/pybuda/eltwise_nary.py rename to forge/forge/op/eval/forge/eltwise_nary.py index c95af595a..946fc66ef 100644 --- a/pybuda/pybuda/op/eval/pybuda/eltwise_nary.py +++ b/forge/forge/op/eval/forge/eltwise_nary.py @@ -7,13 +7,13 @@ import os import torch import math -import pybuda +import forge from ..common import to_torch_operands from .transpose import TransposeTM from .nop import Nop from .buffer import Buffer from ..buda.splice import Splice -from pybuda.pybudaglobal import TILE_DIM, align_up_tile, is_tile_dim_aligned +from forge.forgeglobal import TILE_DIM, align_up_tile, is_tile_dim_aligned from ..sparse_utils import ( create_flattened_padding_removal_sparse_picker_matrix, ) @@ -37,7 +37,7 @@ def eval(type, attr, ops): for t_op in t_ops: assert len(t_op.shape) == 4, f'Tensor must have 4 dimensions, given {len(t_op.shape)}' - # To pybuda shape + # To forge shape 
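Worth noting for the power lowering just above: because the backend "power" op is unary, a binary power is rewritten as exp(y · ln x), and FORGE_EXP_APPROX only selects the approximate exp mode. A quick, self-contained check of that identity in torch:

import os
import torch

x = torch.rand(2, 3) + 0.1   # keep the base positive so ln(x) is defined
y = torch.rand(2, 3)

# x ** y == exp(y * ln x): the rewrite used when lowering binary power to unary log/exp ops
assert torch.allclose(x ** y, torch.exp(y * torch.log(x)), atol=1e-6)

# The renamed flag only toggles the exp op's approximate mode; it does not change the math.
approximate_mode = "true" if "FORGE_EXP_APPROX" in os.environ else "false"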
for i in range(len(t_ops)): t_ops[i] = t_ops[i][:, :, :originalY*originalX, :] t_ops[i] = t_ops[i].transpose(2, 3) @@ -272,9 +272,9 @@ def decompose_post_optimize(type, attr, dc, inputs): # pass # maximum number of inputs is 8 - max_inputs = int(os.environ.get("PYBUDA_MAX_CONCAT_INPUTS", "8")) + max_inputs = int(os.environ.get("FORGE_MAX_CONCAT_INPUTS", "8")) if len(inputs) > max_inputs: - # TODO: use max_num_inputs 8 when tenstorrent/pybuda#316 is resolved + # TODO: use max_num_inputs 8 when tenstorrent/forge#316 is resolved max_num_inputs = min(6, max_inputs) idx = 0 concats = [] @@ -287,13 +287,13 @@ def decompose_post_optimize(type, attr, dc, inputs): return if ( - int(os.environ.get("PYBUDA_CONCAT_SLICE_Y", "0")) != 0 + int(os.environ.get("FORGE_CONCAT_SLICE_Y", "0")) != 0 and axis == -1 and inputs[0].shape[-2] > 10000 and inputs[0].shape[-2] % TILE_DIM == 0 and all([len(inp.shape) >= 2 for inp in inputs]) and all([inp.shape[-2] == in1.shape[-2] for inp in inputs]) ): - num_split_r = int(os.environ.get("PYBUDA_CONCAT_SLICE_Y", "0")) + num_split_r = int(os.environ.get("FORGE_CONCAT_SLICE_Y", "0")) rows_per_split = inputs[0].shape[-2] // num_split_r concats = [] @@ -323,7 +323,7 @@ def decompose_post_optimize(type, attr, dc, inputs): padding_removal_needed = True # Insert slice only when concat on last dim && requires sparse matmul - insert_slice = (padding_removal_needed and axis == -1) or bool(int(os.environ.get("PYBUDA_INSERT_SLICE_FOR_CONCAT", "0"))) + insert_slice = (padding_removal_needed and axis == -1) or bool(int(os.environ.get("FORGE_INSERT_SLICE_FOR_CONCAT", "0"))) if insert_slice: length_at_dim = [inp.shape[axis] for inp in inputs] # large concats on x&y need to be sliced and streamed @@ -380,7 +380,7 @@ def decompose_post_optimize(type, attr, dc, inputs): cols = torch.tensor(cols) rows = torch.arange(len(cols)) pad_for_factorization = False - sparse_r_padding = ast.literal_eval(os.environ.get('PYBUDA_PAD_SPARSE_MM', "{}")) + sparse_r_padding = ast.literal_eval(os.environ.get('FORGE_PAD_SPARSE_MM', "{}")) sparse_r = unpadded_shape_len // 32 if sparse_r in sparse_r_padding: pad_for_factorization = True diff --git a/pybuda/pybuda/op/eval/pybuda/eltwise_unary.py b/forge/forge/op/eval/forge/eltwise_unary.py similarity index 96% rename from pybuda/pybuda/op/eval/pybuda/eltwise_unary.py rename to forge/forge/op/eval/forge/eltwise_unary.py index 1d5dad84c..923bdb554 100644 --- a/pybuda/pybuda/op/eval/pybuda/eltwise_unary.py +++ b/forge/forge/op/eval/forge/eltwise_unary.py @@ -7,10 +7,10 @@ import torch.nn.functional from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from .tanh import Tanh from ..buda.log import Log as BudaLog from .nop import Nop @@ -217,8 +217,8 @@ def lower(type, attr, lc, ops, outputs): assert dim in [2, 3], f"Tile broadcast is only valid on the last two dims (R/C): {shape_size}, {dim}" assert size <= TILE_DIM and size > 1, f"Tile broadcast can only broadcast within one tile" - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))) and dim == 2: - node_shape = lc.pybuda_shape() + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))) and dim == 2: + node_shape = lc.forge_shape() tile_height = calculate_tile_size(node_shape[-2]) if node_shape[-2] % tile_height == 0: 
lc.op(BudaNop.create(), ops, tile_height=tile_height,tile_width=TILE_DIM) @@ -264,10 +264,10 @@ def lower(type, attr, lc, ops, outputs): lc.op("matmul", (ops[0], const), tag="tile_broadcast_c") elif type == "exp": - lc.op("exp", ops, [], {"approximate_mode": "true" if "PYBUDA_EXP_APPROX" in os.environ else "false"}) + lc.op("exp", ops, [], {"approximate_mode": "true" if "FORGE_EXP_APPROX" in os.environ else "false"}) elif type == "reciprocal": - lc.op("reciprocal", ops, [], {"approximate_mode": "true" if "PYBUDA_EXP_APPROX" in os.environ else "false"}) + lc.op("reciprocal", ops, [], {"approximate_mode": "true" if "FORGE_EXP_APPROX" in os.environ else "false"}) elif type == "dropout": p, training, seed = attr @@ -281,7 +281,7 @@ def lower(type, attr, lc, ops, outputs): elif type == "gelu": lc.op("gelu", ops, attr, {"approximate_mode": "true" if attr[0] == "tanh" else "false"}) elif type == "gelu_derivative": - lc.op("gelu_derivative", ops, attr, {"approximate_mode": "true" if "PYBUDA_EXP_APPROX" in os.environ else "false"}) + lc.op("gelu_derivative", ops, attr, {"approximate_mode": "true" if "FORGE_EXP_APPROX" in os.environ else "false"}) elif type == "clip": @@ -335,12 +335,12 @@ def lower(type, attr, lc, ops, outputs): shape = list(ops[0].shape.as_list()) ln_x = lc.op(BudaLog.create(), ops) y_ln_x = lc.op("multiply", (lc.tensor(torch.zeros(shape) + exponent_value), ln_x)) - approximate_mode = "true" if "PYBUDA_EXP_APPROX" in os.environ else "false" + approximate_mode = "true" if "FORGE_EXP_APPROX" in os.environ else "false" lc.op(BudaExp.create(approximate_mode=approximate_mode), [y_ln_x]) else: # Find proper tile sizes - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): node_shape = list(ops[0].shape) tile_height = calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) @@ -558,7 +558,7 @@ def decompose(type, attr, dc, inputs): dc.fuse(argmax) - elif type == "sigmoid" and bool(int(os.environ.get("PYBUDA_DECOMPOSE_SIGMOID", "0"))): + elif type == "sigmoid" and bool(int(os.environ.get("FORGE_DECOMPOSE_SIGMOID", "0"))): inp = inputs[0] minus_one = dc.tensor(torch.ones([1,1]) * -1) plus_one = dc.tensor(torch.ones([1,1])) @@ -568,7 +568,7 @@ def decompose(type, attr, dc, inputs): result = dc.op(Reciprocal.create(), [result]) dc.fuse(result) - elif type == "gelu" and bool(int(os.environ.get("PYBUDA_DECOMPOSE_GELU", "0"))): + elif type == "gelu" and bool(int(os.environ.get("FORGE_DECOMPOSE_GELU", "0"))): inp_node = inputs[0] data_type = buda_dataformat_to_pytorch_dtype(inp_node.output_df) one_half = dc.tensor(torch.ones((1), dtype=data_type) * 0.5) diff --git a/pybuda/pybuda/op/eval/pybuda/embedding.py b/forge/forge/op/eval/forge/embedding.py similarity index 89% rename from pybuda/pybuda/op/eval/pybuda/embedding.py rename to forge/forge/op/eval/forge/embedding.py index ec631f1bb..0af6729d4 100644 --- a/pybuda/pybuda/op/eval/pybuda/embedding.py +++ b/forge/forge/op/eval/forge/embedding.py @@ -3,9 +3,9 @@ # SPDX-License-Identifier: Apache-2.0 import torch from ..common import to_torch_operands -from pybuda._C import DataFormat -from pybuda._C.graph import RuntimeTensorTransform, RuntimeTensorTransformType -from ....pybudaglobal import TILE_DIM +from forge._C import DataFormat +from forge._C.graph import RuntimeTensorTransform, RuntimeTensorTransformType +from ....forgeglobal import TILE_DIM def eval(type, attr, ops): diff --git a/pybuda/pybuda/op/eval/pybuda/ethernet_datacopy.py 
b/forge/forge/op/eval/forge/ethernet_datacopy.py similarity index 91% rename from pybuda/pybuda/op/eval/pybuda/ethernet_datacopy.py rename to forge/forge/op/eval/forge/ethernet_datacopy.py index 283586405..188a62ea9 100644 --- a/pybuda/pybuda/op/eval/pybuda/ethernet_datacopy.py +++ b/forge/forge/op/eval/forge/ethernet_datacopy.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda import ethernet_datacopy as BudaEthernetDataCopy @@ -44,7 +44,7 @@ def backward(self, ac, operand, inputs, output, grad): def lower(self, lc, tensors, outputs): assert len(tensors) == 1, "ethernet_datacopy should have one input" # Find proper tile sizes - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): node_shape = list(tensors[0].shape) tile_height = calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) diff --git a/pybuda/pybuda/op/eval/pybuda/exp.py b/forge/forge/op/eval/forge/exp.py similarity index 89% rename from pybuda/pybuda/op/eval/pybuda/exp.py rename to forge/forge/op/eval/forge/exp.py index 424303332..ad6e230f7 100644 --- a/pybuda/pybuda/op/eval/pybuda/exp.py +++ b/forge/forge/op/eval/forge/exp.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.exp import Exp as BudaExp @@ -45,7 +45,7 @@ def backward(self, ac, operand, inputs, output, grad): def lower(self, lc, tensors, outputs): assert len(tensors) == 1, "Exp should have one input" - approximate_mode = "true" if "PYBUDA_EXP_APPROX" in os.environ else "false" + approximate_mode = "true" if "FORGE_EXP_APPROX" in os.environ else "false" lc.op(BudaExp.create(approximate_mode=approximate_mode), tensors) def initial_flops_estimate(self, tensor_shapes): diff --git a/pybuda/pybuda/op/eval/pybuda/log.py b/forge/forge/op/eval/forge/log.py similarity index 92% rename from pybuda/pybuda/op/eval/pybuda/log.py rename to forge/forge/op/eval/forge/log.py index 23a2c424f..28d7ee95a 100644 --- a/pybuda/pybuda/op/eval/pybuda/log.py +++ b/forge/forge/op/eval/forge/log.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.log import Log as BudaLog from .reciprocal import Reciprocal @@ -48,7 +48,7 @@ def backward(self, ac, operand, inputs, output, grad): def lower(self, lc, tensors, outputs): assert len(tensors) == 1, "Log should have one input" - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): node_shape = list(tensors[0].shape) tile_height = 
calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) diff --git a/pybuda/pybuda/op/eval/pybuda/mask.py b/forge/forge/op/eval/forge/mask.py similarity index 86% rename from pybuda/pybuda/op/eval/pybuda/mask.py rename to forge/forge/op/eval/forge/mask.py index 76681a827..f676a4c28 100644 --- a/pybuda/pybuda/op/eval/pybuda/mask.py +++ b/forge/forge/op/eval/forge/mask.py @@ -2,9 +2,9 @@ # SPDX-License-Identifier: Apache-2.0 import torch -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile, round_up_div, clamp -from pybuda import Tensor +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile, round_up_div, clamp +from forge import Tensor def eval(type, attr, ops): diff --git a/pybuda/pybuda/op/eval/pybuda/matmul.py b/forge/forge/op/eval/forge/matmul.py similarity index 96% rename from pybuda/pybuda/op/eval/pybuda/matmul.py rename to forge/forge/op/eval/forge/matmul.py index 1ddc48f3e..0385e0531 100644 --- a/pybuda/pybuda/op/eval/pybuda/matmul.py +++ b/forge/forge/op/eval/forge/matmul.py @@ -6,14 +6,14 @@ from math import sqrt import os -from pybuda._C import DataFormat +from forge._C import DataFormat import torch -from pybuda.pybudaglobal import TILE_DIM +from forge.forgeglobal import TILE_DIM from ..common import to_torch_operands, cast_for_cpu_eval from ..sparse_utils import transpose_sparse_picker_matrix, create_sparse_buda, shapeify_sparse_tiles_and_encodings, is_kernel_fracturing_candidate -from pybuda.utils import round_up_div -from pybuda.op.eval.common import calculate_tile_size +from forge.utils import round_up_div +from forge.op.eval.common import calculate_tile_size from .transpose import TransposeTM def eval(type, attr, ops): @@ -157,7 +157,7 @@ def lower(type, attr, buda_attr, lc, ops, outputs): accumulate = (len(attr) >= 2) and bool(attr[0]) if has_requant else (len(attr) >= 1) and bool(attr[0]) buda_attrs = {} - if 'sfpu_op' in buda_attr and os.environ.get("PYBUDA_FUSE_MATMUL_GELU", "0") != "0": + if 'sfpu_op' in buda_attr and os.environ.get("FORGE_FUSE_MATMUL_GELU", "0") != "0": buda_attrs["sfpu_op"] = "gelu" if accumulate: buda_attrs["accumulate"] = True @@ -233,8 +233,8 @@ def lower(type, attr, buda_attr, lc, ops, outputs): lc.op("matmul", [in0, in1, in2], (accumulate, is_sparse, sparse_tile_ptr_bits, 1, zdim, picker.shape[-2], in1.shape[-1], fracture_factor, u_rt, u_kt, u_ct, grid_c, t_factor_r, t_factor_c, sparse_ublock_idx_bits), buda_attrs) else: # Find proper tile sizes - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): - node_shape = lc.pybuda_shape() + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): + node_shape = lc.forge_shape() tile_height = calculate_tile_size(node_shape[-2]) tile_width = TILE_DIM else: diff --git a/pybuda/pybuda/op/eval/pybuda/nn.py b/forge/forge/op/eval/forge/nn.py similarity index 98% rename from pybuda/pybuda/op/eval/pybuda/nn.py rename to forge/forge/op/eval/forge/nn.py index c509d80da..e7cda4d41 100644 --- a/pybuda/pybuda/op/eval/pybuda/nn.py +++ b/forge/forge/op/eval/forge/nn.py @@ -285,8 +285,8 @@ def lower(op_type, attr, lc, ops, outputs): Operator attributes. lc: - Lowering Context, PyBuda C++ API for breaking - Pybuda graph/subgraph into Buda operations. + Lowering Context, Forge C++ API for breaking + Forge graph/subgraph into Buda operations. ops: Input operands, tensors. @@ -314,7 +314,7 @@ def backward(op_type, attr, ac, operand, inputs, output, grad): Operation attributes. 
ac: - Autograd Context, PyBuda C++ API for automatic gradient computation. + Autograd Context, Forge C++ API for automatic gradient computation. operand: Operation operands. @@ -380,8 +380,8 @@ def decompose(op_type, attr, dc, inputs): Operation attributes. dc: - Decomposing Context, PyBuda C++ API for breaking - Pybuda graph/subgraph to simpler, PyBuda graph, too. + Decomposing Context, Forge C++ API for breaking + Forge graph/subgraph to simpler, Forge graph, too. inputs: Operation inputs. @@ -448,8 +448,8 @@ def decompose_post_autograd(op_type, attr, dc, inputs): Operation attributes. dc: - Decomposing Context, PyBuda C++ API for breaking - Pybuda graph/subgraph to simpler, PyBuda graph, too. + Decomposing Context, Forge C++ API for breaking + Forge graph/subgraph to simpler, Forge graph, too. inputs: Operation inputs. diff --git a/pybuda/pybuda/op/eval/pybuda/nop.py b/forge/forge/op/eval/forge/nop.py similarity index 90% rename from pybuda/pybuda/op/eval/pybuda/nop.py rename to forge/forge/op/eval/forge/nop.py index 532e21913..4b6d42380 100644 --- a/pybuda/pybuda/op/eval/pybuda/nop.py +++ b/forge/forge/op/eval/forge/nop.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.nop import Nop as BudaNop @@ -45,7 +45,7 @@ def backward(self, ac, operand, inputs, output, grad): def lower(self, lc, tensors, outputs): assert len(tensors) == 1, "Nop should have one input" - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): node_shape = list(tensors[0].shape) tile_height = calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) diff --git a/pybuda/pybuda/op/eval/pybuda/pooling.py b/forge/forge/op/eval/forge/pooling.py similarity index 98% rename from pybuda/pybuda/op/eval/pybuda/pooling.py rename to forge/forge/op/eval/forge/pooling.py index a4cb14f85..648ff4731 100644 --- a/pybuda/pybuda/op/eval/pybuda/pooling.py +++ b/forge/forge/op/eval/forge/pooling.py @@ -6,8 +6,8 @@ import os import math import torch.nn.functional as F -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile from .transpose import TransposeTM from .nop import Nop @@ -440,7 +440,7 @@ def decompose(type, attr, dc, inputs): pickers = [] sparse_r = align_up_tile(yout * xout) // 32 padded_r = 0 - sparse_r_padding = ast.literal_eval(os.environ.get('PYBUDA_PAD_SPARSE_MM', "{}")) + sparse_r_padding = ast.literal_eval(os.environ.get('FORGE_PAD_SPARSE_MM', "{}")) pad_for_factorization = False if sparse_r in sparse_r_padding: pad_for_factorization = True @@ -456,7 +456,7 @@ def decompose(type, attr, dc, inputs): picker = torch.stack(pickers).unsqueeze(0) picker_tensor = dc.tensor(picker) - result_c_padding = ast.literal_eval(os.environ.get('PYBUDA_PAD_SPARSE_MM_WEIGHT_CONCAT', "{}")) + result_c_padding = ast.literal_eval(os.environ.get('FORGE_PAD_SPARSE_MM_WEIGHT_CONCAT', "{}")) result_c = align_up_tile(result.shape[-1]) // TILE_DIM if result_c in result_c_padding: pad_for_factorization = True @@ -471,7 +471,7 @@ def decompose(type, attr, dc, inputs): if pad_for_factorization: if sparse_r in 
sparse_r_padding: # temporarily add decompotion that manually insert vslice/vstack around splice op - manual_splice_decomp_th = os.environ.get('PYBUDA_MANUAL_SPLICE_DECOMP_TH') + manual_splice_decomp_th = os.environ.get('FORGE_MANUAL_SPLICE_DECOMP_TH') if manual_splice_decomp_th is not None: if sparse_r >= int(manual_splice_decomp_th): result = dc.op("vslice", [result], (sparse_r_padding[sparse_r],)) diff --git a/pybuda/pybuda/op/eval/pybuda/quantize.py b/forge/forge/op/eval/forge/quantize.py similarity index 98% rename from pybuda/pybuda/op/eval/pybuda/quantize.py rename to forge/forge/op/eval/forge/quantize.py index c80011b24..22978bb0a 100644 --- a/pybuda/pybuda/op/eval/pybuda/quantize.py +++ b/forge/forge/op/eval/forge/quantize.py @@ -6,11 +6,11 @@ import os import math import torch.nn.functional as F -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile import numpy as np from ..common import to_torch_operands -from pybuda.tensor import pytorch_dtype_to_buda_dataformat +from forge.tensor import pytorch_dtype_to_buda_dataformat from .reciprocal import Reciprocal STRING_TO_TORCH_DTYPE = { diff --git a/pybuda/pybuda/op/eval/pybuda/reciprocal.py b/forge/forge/op/eval/forge/reciprocal.py similarity index 90% rename from pybuda/pybuda/op/eval/pybuda/reciprocal.py rename to forge/forge/op/eval/forge/reciprocal.py index bc3f7e21f..bc072d6ff 100644 --- a/pybuda/pybuda/op/eval/pybuda/reciprocal.py +++ b/forge/forge/op/eval/forge/reciprocal.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.reciprocal import Reciprocal as BudaReciprocal @@ -41,7 +41,7 @@ def shape(self, tensor_shapes): def lower(self, lc, tensors, outputs): assert len(tensors) == 1, "Reciprocal should have one input" - approximate_mode = "true" if "PYBUDA_EXP_APPROX" in os.environ else "false" + approximate_mode = "true" if "FORGE_EXP_APPROX" in os.environ else "false" lc.op(BudaReciprocal.create(approximate_mode=approximate_mode), tensors) def backward(self, ac, operand, inputs, output, grad): diff --git a/pybuda/pybuda/op/eval/pybuda/reduce.py b/forge/forge/op/eval/forge/reduce.py similarity index 99% rename from pybuda/pybuda/op/eval/pybuda/reduce.py rename to forge/forge/op/eval/forge/reduce.py index 18b1070b5..2fa4b42e7 100644 --- a/pybuda/pybuda/op/eval/pybuda/reduce.py +++ b/forge/forge/op/eval/forge/reduce.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM, align_up_tile +from ....forgeglobal import TILE_DIM, align_up_tile from ....tensor import buda_dataformat_to_pytorch_dtype from .transpose import TransposeTM from .nop import Nop diff --git a/pybuda/pybuda/op/eval/pybuda/resize.py b/forge/forge/op/eval/forge/resize.py similarity index 97% rename from pybuda/pybuda/op/eval/pybuda/resize.py rename to forge/forge/op/eval/forge/resize.py index fd75cf7cd..007e5014d 100644 --- a/pybuda/pybuda/op/eval/pybuda/resize.py +++ b/forge/forge/op/eval/forge/resize.py @@ -2,10 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 import torch -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import 
align_up_tile, round_up_div, clamp -from pybuda import Tensor -from pybuda.op.resize import INT_TO_RESIZE2d_METHOD +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile, round_up_div, clamp +from forge import Tensor +from forge.op.resize import INT_TO_RESIZE2d_METHOD from .transpose import TransposeTM from .nop import Nop @@ -205,18 +205,18 @@ def decompose_upsample_2d(attr, dc, inputs, resize_method): elif resize_method == "bilinear": dident = create_bilinear_upsample_picker_matrix(scale_factor, shape, align_corners=attr[-2], channel_last=channel_last) dident_dense = dident.unsqueeze(0).unsqueeze(0).to_dense() - if (int(os.environ.get('PYBUDA_SPLIT_RESIZE2D', '0')) == inputs[0].shape[-2]): + if (int(os.environ.get('FORGE_SPLIT_RESIZE2D', '0')) == inputs[0].shape[-2]): dd = [] split_factor = 8 for s in range(split_factor): dd.append(create_bilinear_upsample_picker_matrix(scale_factor, shape, align_corners=attr[-2], channel_last=channel_last, split_idx=s, split_factor=split_factor)) # Choose whether to use sparse or dense matmul based on sparsity of dident - if torch.count_nonzero(dident_dense) > (torch.numel(dident_dense) // 2) or int(os.environ.get('PYBUDA_FORCE_RESIZE_DENSE_MM', '0')): + if torch.count_nonzero(dident_dense) > (torch.numel(dident_dense) // 2) or int(os.environ.get('FORGE_FORCE_RESIZE_DENSE_MM', '0')): dident_tensor = dc.tensor(dident_dense) result = dc.op("matmul", [dident_tensor, activations]) else: - if (int(os.environ.get('PYBUDA_SPLIT_RESIZE2D', '0')) == inputs[0].shape[-2]): + if (int(os.environ.get('FORGE_SPLIT_RESIZE2D', '0')) == inputs[0].shape[-2]): dd_tensor = [dc.tensor(d) for d in dd] res = [] for d in dd_tensor: diff --git a/pybuda/pybuda/op/eval/pybuda/sqrt.py b/forge/forge/op/eval/forge/sqrt.py similarity index 92% rename from pybuda/pybuda/op/eval/pybuda/sqrt.py rename to forge/forge/op/eval/forge/sqrt.py index 770e51b23..294417593 100644 --- a/pybuda/pybuda/op/eval/pybuda/sqrt.py +++ b/forge/forge/op/eval/forge/sqrt.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from ..buda.sqrt import Sqrt as BudaSqrt from .reciprocal import Reciprocal @@ -50,7 +50,7 @@ def backward(self, ac, operand, inputs, output, grad): def lower(self, lc, tensors, outputs): assert len(tensors) == 1, "Sqrt should have one input" - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): node_shape = list(tensors[0].shape) tile_height = calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) diff --git a/pybuda/pybuda/op/eval/pybuda/tanh.py b/forge/forge/op/eval/forge/tanh.py similarity index 92% rename from pybuda/pybuda/op/eval/pybuda/tanh.py rename to forge/forge/op/eval/forge/tanh.py index 4e5d837e1..4b58e4605 100644 --- a/pybuda/pybuda/op/eval/pybuda/tanh.py +++ b/forge/forge/op/eval/forge/tanh.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size 
+from forge.op.eval.common import calculate_tile_size from ..buda.tanh import Tanh as BudaTanh @@ -47,7 +47,7 @@ def backward(self, ac, operand, inputs, output, grad): def lower(self, lc, tensors, outputs): assert len(tensors) == 1, "Tanh should have one input" - if bool(int(os.environ.get("PYBUDA_ENABLE_TINY_TILE", "0"))): + if bool(int(os.environ.get("FORGE_ENABLE_TINY_TILE", "0"))): node_shape = list(tensors[0].shape) tile_height = calculate_tile_size(node_shape[-2]) tile_width = calculate_tile_size(node_shape[-1]) diff --git a/pybuda/pybuda/op/eval/pybuda/tilizer.py b/forge/forge/op/eval/forge/tilizer.py similarity index 92% rename from pybuda/pybuda/op/eval/pybuda/tilizer.py rename to forge/forge/op/eval/forge/tilizer.py index 3a4f18ee2..b435fecb8 100644 --- a/pybuda/pybuda/op/eval/pybuda/tilizer.py +++ b/forge/forge/op/eval/forge/tilizer.py @@ -9,10 +9,10 @@ from ..interface import PyEltwiseUnaryOp from loguru import logger from ..common import to_torch_operands -from ....pybudaglobal import TILE_DIM +from ....forgeglobal import TILE_DIM from ....tensor import buda_dataformat_to_pytorch_dtype import numpy as np -from pybuda.op.eval.common import calculate_tile_size +from forge.op.eval.common import calculate_tile_size from .nop import Nop class Tilizer(PyEltwiseUnaryOp): diff --git a/pybuda/pybuda/op/eval/pybuda/tm.py b/forge/forge/op/eval/forge/tm.py similarity index 99% rename from pybuda/pybuda/op/eval/pybuda/tm.py rename to forge/forge/op/eval/forge/tm.py index 26c019fc8..b4cd92c37 100644 --- a/pybuda/pybuda/op/eval/pybuda/tm.py +++ b/forge/forge/op/eval/forge/tm.py @@ -25,10 +25,10 @@ import ast import os from loguru import logger -import pybuda -from pybuda.tensor import change_rank -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile, round_up_div, align_up +import forge +from forge.tensor import change_rank +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile, round_up_div, align_up from .transpose import TransposeTM from ..buda.splice import Splice from .nop import Nop @@ -1100,8 +1100,8 @@ def decompose(type, attr, dc, inputs): result = dc.op(Nop.create(), [inputs[0]]) dc.fuse(result) return - elif dim == -2 and stride == 1 and length == stop and "PYBUDA_PAD_MM" in os.environ: - sparse_r_padding = ast.literal_eval(os.environ.get('PYBUDA_PAD_MM', "{}")) + elif dim == -2 and stride == 1 and length == stop and "FORGE_PAD_MM" in os.environ: + sparse_r_padding = ast.literal_eval(os.environ.get('FORGE_PAD_MM', "{}")) sparse_r = align_up_tile(attr[-2]) // 32 if sparse_r in sparse_r_padding: padded_r = sparse_r_padding[sparse_r] - sparse_r @@ -1195,7 +1195,7 @@ def decompose(type, attr, dc, inputs): total_padding_r = attr[2] + attr[3] all_around_padding = attr[:-2] else: - raise RuntimeError("Pybuda only support Pad with either 2 or 4 attributes") + raise RuntimeError("Forge only support Pad with either 2 or 4 attributes") if (((len(attr) == 4 and attr[0] == 0) or (len(attr) == 6 and attr[0] == 0 and attr[2] == 0)) and @@ -1217,7 +1217,7 @@ def decompose(type, attr, dc, inputs): elif len(attr) == 6: left, right, top, bottom, _, _ = attr else: - raise RuntimeError("Pybuda only support Pad with either 3 or 5 attributes") + raise RuntimeError("Forge only support Pad with either 3 or 5 attributes") if mode_idx == 1: # 'replicate' mode result = activations @@ -1498,7 +1498,7 @@ def decompose_xy_flatten_reshape(inputs, dc, orig_shape, attr): padded_shape = result.shape r_new = padded_shape[-1] * orig_shape[-2] // (padded_shape[-1] // 
TILE_DIM) pad_for_factrization = False - sparse_r_padding = ast.literal_eval(os.environ.get('PYBUDA_PAD_SPARSE_MM', "{}")) + sparse_r_padding = ast.literal_eval(os.environ.get('FORGE_PAD_SPARSE_MM', "{}")) if orig_shape[-2] % TILE_DIM and orig_shape[-1] % TILE_DIM and orig_shape[-2] in sparse_r_padding: pad_for_factrization = True padded_r_new = sparse_r_padding[orig_shape[-2]] * TILE_DIM diff --git a/pybuda/pybuda/op/eval/pybuda/transpose.py b/forge/forge/op/eval/forge/transpose.py similarity index 99% rename from pybuda/pybuda/op/eval/pybuda/transpose.py rename to forge/forge/op/eval/forge/transpose.py index ea1e88983..4d2112bdd 100644 --- a/pybuda/pybuda/op/eval/pybuda/transpose.py +++ b/forge/forge/op/eval/forge/transpose.py @@ -5,7 +5,7 @@ from ..interface import PyTM from ..buda.transpose import TransposeTM as BudaTransposeTM from .. import sparse_utils -from pybuda._C import UnsupportedHWOpsError +from forge._C import UnsupportedHWOpsError class TransposeTM(PyTM): diff --git a/pybuda/pybuda/op/eval/interface.py b/forge/forge/op/eval/interface.py similarity index 95% rename from pybuda/pybuda/op/eval/interface.py rename to forge/forge/op/eval/interface.py index 2a43b09ab..f8b3e41d0 100644 --- a/pybuda/pybuda/op/eval/interface.py +++ b/forge/forge/op/eval/interface.py @@ -3,9 +3,9 @@ # SPDX-License-Identifier: Apache-2.0 import torch from typing import List, Tuple, Dict, Union, Optional -from pybuda._C.graph import NodeContext, OpType -from pybuda._C.passes import LoweringContext, DecomposingContext -from pybuda._C.autograd import AutogradContext +from forge._C.graph import NodeContext, OpType +from forge._C.passes import LoweringContext, DecomposingContext +from forge._C.autograd import AutogradContext class OpTypeWrapper: @@ -42,9 +42,9 @@ def __repr__(self): class PyOp(OpTypeWrapper): """ - Pybuda IR Op interface + Forge IR Op interface - All Pybuda IR ops must inherit and implement this interface + All Forge IR ops must inherit and implement this interface """ @classmethod @@ -130,9 +130,9 @@ def is_eltwise_nary(self) -> bool: class PyTM(PyOp): """ - Pybuda IR TM interface + Forge IR TM interface - All Pybuda IR tms must inherit and implement this interface + All Forge IR tms must inherit and implement this interface """ def is_tm(self) -> bool: diff --git a/pybuda/pybuda/op/eval/sparse_utils.py b/forge/forge/op/eval/sparse_utils.py similarity index 98% rename from pybuda/pybuda/op/eval/sparse_utils.py rename to forge/forge/op/eval/sparse_utils.py index 59986161e..ee6ee3f58 100644 --- a/pybuda/pybuda/op/eval/sparse_utils.py +++ b/forge/forge/op/eval/sparse_utils.py @@ -6,11 +6,11 @@ import os import torch from loguru import logger -import pybuda -from pybuda.utils import align_up_tile, align_up, round_up_div, clamp -from ...pybudaglobal import TILE_DIM +import forge +from forge.utils import align_up_tile, align_up, round_up_div, clamp +from ...forgeglobal import TILE_DIM from ...tensor import narrow_buda_tensor_to_pytorch, pad_pytorch_tensor_to_buda -from pybuda._C import DataFormat, compress_sparse_tensor_and_strip_info, SparseCOO, SparseBUDA, MathFidelity +from forge._C import DataFormat, compress_sparse_tensor_and_strip_info, SparseCOO, SparseBUDA, MathFidelity from math import gcd @@ -1221,10 +1221,10 @@ def calculate_total_sparse_tile_util(slices, grid_r, ts, bcast_factor, verbose=F def is_kernel_fracturing_candidate(operands, z_bcast_factor): # In production, we want to fracture convs with very specific properties # For testing, we use this mechanism to enable fracturing for 
all convs - force_allow_fracturing = bool(int(os.environ.get("PYBUDA_FORCE_ALLOW_FRACTURING", "0"))) - force_disallow_fracturing = bool(int(os.environ.get("PYBUDA_FORCE_DISALLOW_FRACTURING", "0"))) + force_allow_fracturing = bool(int(os.environ.get("FORGE_FORCE_ALLOW_FRACTURING", "0"))) + force_disallow_fracturing = bool(int(os.environ.get("FORGE_FORCE_DISALLOW_FRACTURING", "0"))) - assert not (force_allow_fracturing and force_disallow_fracturing), "Both PYBUDA_FORCE_ALLOW_FRACTURING and PYBUDA_FORCE_DISALLOW_FRACTURING set to non-zero" + assert not (force_allow_fracturing and force_disallow_fracturing), "Both FORGE_FORCE_ALLOW_FRACTURING and FORGE_FORCE_DISALLOW_FRACTURING set to non-zero" if force_allow_fracturing: return True @@ -1297,7 +1297,7 @@ def does_prestriding_improve_perf(act_shape, weights_shape, ps_weights, stride): def can_fracture_conv_at_op_level(attr, dc, inputs): # Can't fracture if disabled (unless manual op-level override) - if bool(int(os.environ.get("PYBUDA_DISABLE_CONV_MULTI_OP_FRACTURE", "0"))): + if bool(int(os.environ.get("FORGE_DISABLE_CONV_MULTI_OP_FRACTURE", "0"))): return False return inputs[1].shape.r > 1 @@ -1340,13 +1340,13 @@ def does_fracturing_conv_at_op_level_improve_perf(attr, dc, inputs): def should_fracture_conv_at_op_level(attr, dc, inputs): # We currently can't fracture when training, due to `index` op not having bw op defined - # tenstorrent/pybuda#972 + # tenstorrent/forge#972 if dc.is_training_enabled(): return False # Check if fracturing is forced - if bool(int(os.environ.get("PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE", "0"))) and inputs[1].shape.r > 1: - logger.warning("Environment variable \"PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE\" will be deprecated soon, please do NOT use.") + if bool(int(os.environ.get("FORGE_FORCE_CONV_MULTI_OP_FRACTURE", "0"))) and inputs[1].shape.r > 1: + logger.warning("Environment variable \"FORGE_FORCE_CONV_MULTI_OP_FRACTURE\" will be deprecated soon, please do NOT use.") return True # Check if user overriden @@ -1519,7 +1519,7 @@ def conv2d_out_shape(type, attr, ops): ) # TODO: the existence of this `if` block is a but confusing, should be fixed once this proposal is implemented: - # tenstorrent/pybuda#1761 + # tenstorrent/forge#1761 if is_convtranspose2d: # if transposed conv, the output is calculated by `calculate_conv2d_transpose_output_dimensions()` # however, we can't call this function on conv2d, as some attributes have been changed to fit the style of diff --git a/pybuda/pybuda/op/loss.py b/forge/forge/op/loss.py similarity index 95% rename from pybuda/pybuda/op/loss.py rename to forge/forge/op/loss.py index 9dce2320d..992e9d974 100644 --- a/pybuda/pybuda/op/loss.py +++ b/forge/forge/op/loss.py @@ -1,13 +1,13 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from ..module import PyBudaModule +from ..module import ForgeModule from .reduce import ReduceSum, ReduceAvg from .eltwise_unary import Exp, Log, Abs from .eltwise_binary import Subtract, Multiply from .constant import Constant -class CrossEntropyLoss(PyBudaModule): +class CrossEntropyLoss(ForgeModule): """ Cross-Entropy Loss """ @@ -39,7 +39,7 @@ def forward(self, predictions, labels): return out -class L1Loss(PyBudaModule): +class L1Loss(ForgeModule): """ L1Loss diff --git a/pybuda/pybuda/op/matmul.py b/forge/forge/op/matmul.py similarity index 93% rename from pybuda/pybuda/op/matmul.py rename to forge/forge/op/matmul.py index 1c518c871..fce0f92b0 100644 --- a/pybuda/pybuda/op/matmul.py +++ b/forge/forge/op/matmul.py 
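The fracturing flags renamed in sparse_utils above act as mutually exclusive overrides of the kernel-fracturing heuristic. A small sketch of that control flow (the fracturing_override wrapper is hypothetical; the flag names, the assertion, and the short-circuit behaviour come from is_kernel_fracturing_candidate in the diff):

import os

def fracturing_override():
    force_allow = bool(int(os.environ.get("FORGE_FORCE_ALLOW_FRACTURING", "0")))
    force_disallow = bool(int(os.environ.get("FORGE_FORCE_DISALLOW_FRACTURING", "0")))
    # The two flags are mutually exclusive; setting both is a configuration error.
    assert not (force_allow and force_disallow), \
        "Both FORGE_FORCE_ALLOW_FRACTURING and FORGE_FORCE_DISALLOW_FRACTURING set to non-zero"
    if force_allow:
        return True      # always fracture
    if force_disallow:
        return False     # never fracture
    return None          # fall through to the shape-based heuristics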
@@ -5,9 +5,9 @@ from ..tensor import Tensor from ..parameter import Parameter -from .common import PyBudaOp as op -from pybuda.pybudaglobal import get_unique_node_id -from pybuda import DataFormat +from .common import ForgeOp as op +from forge.forgeglobal import get_unique_node_id +from forge import DataFormat def Matmul( name: str, diff --git a/pybuda/pybuda/op/nn.py b/forge/forge/op/nn.py similarity index 96% rename from pybuda/pybuda/op/nn.py rename to forge/forge/op/nn.py index 54228ed8a..997d47e83 100644 --- a/pybuda/pybuda/op/nn.py +++ b/forge/forge/op/nn.py @@ -1,15 +1,15 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.op.matmul import SparseMatmul +from forge.op.matmul import SparseMatmul import math import torch from typing import Union from ..tensor import Tensor, TensorShape from ..parameter import Parameter -from ..module import PyBudaModule -from .common import PyBudaOp as op +from ..module import ForgeModule +from .common import ForgeOp as op from .eltwise_unary import Exp, Reciprocal, Sqrt from .eltwise_binary import Multiply, Subtract, Add @@ -19,7 +19,7 @@ from .pooling import MaxPool1d, MaxPool2d, AvgPool2d from .matmul import Matmul from .tm import Reshape, Transpose, Unsqueeze -from pybuda.pybudaglobal import get_unique_node_id +from forge.forgeglobal import get_unique_node_id import os @@ -183,7 +183,7 @@ def Batchnorm( return Add(name + "_bias", Multiply(name + "_weights", out, weights), bias) -class Linear(PyBudaModule): +class Linear(ForgeModule): """ Linear transformation module. @@ -241,7 +241,7 @@ def forward(self, activations): ) -class Conv2dModule(PyBudaModule): +class Conv2dModule(ForgeModule): """ Conv2dModule """ @@ -305,7 +305,7 @@ def forward(self, activations): return m1 -class ConvTranspose2dModule(PyBudaModule): +class ConvTranspose2dModule(ForgeModule): """ ConvTranspose2dModule """ @@ -374,7 +374,7 @@ def forward(self, activations): return m1 -class MaxPool1dModule(PyBudaModule): +class MaxPool1dModule(ForgeModule): """ MaxPool1dModule """ @@ -403,7 +403,7 @@ def forward(self, activations): return MaxPool1d(self.name, activations, **self.kwargs) -class MaxPool2dModule(PyBudaModule): +class MaxPool2dModule(ForgeModule): """ MaxPool2dModule """ @@ -432,7 +432,7 @@ def forward(self, activations): return MaxPool2d(self.name, activations, **self.kwargs) -class AvgPool2dModule(PyBudaModule): +class AvgPool2dModule(ForgeModule): """ AvgPool2dModule """ @@ -461,7 +461,7 @@ def forward(self, activations): return AvgPool2d(self.name, activations, **self.kwargs) -class SparseMatmulModule(PyBudaModule): +class SparseMatmulModule(ForgeModule): """ SparseMatmulModule """ diff --git a/pybuda/pybuda/op/pooling.py b/forge/forge/op/pooling.py similarity index 99% rename from pybuda/pybuda/op/pooling.py rename to forge/forge/op/pooling.py index 402b4e492..924b11c84 100644 --- a/pybuda/pybuda/op/pooling.py +++ b/forge/forge/op/pooling.py @@ -5,7 +5,7 @@ from ..tensor import Tensor from ..parameter import Parameter -from .common import PyBudaOp as op +from .common import ForgeOp as op def MaxPool1d( diff --git a/pybuda/pybuda/op/quantize.py b/forge/forge/op/quantize.py similarity index 99% rename from pybuda/pybuda/op/quantize.py rename to forge/forge/op/quantize.py index 7f992cccf..f9e9c69ad 100644 --- a/pybuda/pybuda/op/quantize.py +++ b/forge/forge/op/quantize.py @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from ..tensor import Tensor, pytorch_dtype_to_buda_dataformat -from .common import PyBudaOp as 
op +from .common import ForgeOp as op import torch def Quantize( diff --git a/pybuda/pybuda/op/reduce.py b/forge/forge/op/reduce.py similarity index 99% rename from pybuda/pybuda/op/reduce.py rename to forge/forge/op/reduce.py index adc48ce26..09aacfe8b 100644 --- a/pybuda/pybuda/op/reduce.py +++ b/forge/forge/op/reduce.py @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from ..tensor import Tensor -from .common import PyBudaOp as op +from .common import ForgeOp as op def ReduceSum( name: str, diff --git a/pybuda/pybuda/op/resize.py b/forge/forge/op/resize.py similarity index 98% rename from pybuda/pybuda/op/resize.py rename to forge/forge/op/resize.py index ce6d42dff..09af3882d 100644 --- a/pybuda/pybuda/op/resize.py +++ b/forge/forge/op/resize.py @@ -5,7 +5,7 @@ from ..tensor import Tensor from ..parameter import Parameter -from .common import PyBudaOp as op +from .common import ForgeOp as op RESIZE2d_METHOD_TO_INT = { "nearest_neighbor" : 0, diff --git a/pybuda/pybuda/op/tm.py b/forge/forge/op/tm.py similarity index 99% rename from pybuda/pybuda/op/tm.py rename to forge/forge/op/tm.py index 27a04e9c0..379864d40 100644 --- a/pybuda/pybuda/op/tm.py +++ b/forge/forge/op/tm.py @@ -2,8 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 from typing import Union, Tuple, List, Dict -from ..pybudaglobal import TILE_DIM -from .common import PyBudaOp as op +from ..forgeglobal import TILE_DIM +from .common import ForgeOp as op from ..tensor import Tensor, pytorch_dtype_to_buda_dataformat import torch diff --git a/pybuda/pybuda/optimizers.py b/forge/forge/optimizers.py similarity index 98% rename from pybuda/pybuda/optimizers.py rename to forge/forge/optimizers.py index 8e25086b5..d2b87903f 100644 --- a/pybuda/pybuda/optimizers.py +++ b/forge/forge/optimizers.py @@ -11,10 +11,10 @@ import numpy as np import torch -from pybuda.tensor import Tensor -from pybuda.parameter import Parameter -import pybuda.torch_optimizers -from pybuda.torch_optimizers import AdamNoBiasCorrection +from forge.tensor import Tensor +from forge.parameter import Parameter +import forge.torch_optimizers +from forge.torch_optimizers import AdamNoBiasCorrection class Optimizer: """ @@ -327,7 +327,7 @@ def generate_op_trace(self, ac, parameter, gradient): else: weight_decay = None ## import locally to avoid circular dependency from Dataformat, fix it later - from pybuda.op.eval.pybuda.buffer import Buffer + from forge.op.eval.forge.buffer import Buffer # we copy the grad accum. 
queue since it only accepts a single consumer/pop gradient_copy = ac.op(Buffer.create(), (gradient,)) @@ -356,9 +356,9 @@ def generate_op_trace(self, ac, parameter, gradient): updated_variance = ac.op( "add", (variance_times_beta2, gradient_squared_times_one_minus_beta2) ) - from pybuda.op.eval.pybuda.reciprocal import Reciprocal + from forge.op.eval.forge.reciprocal import Reciprocal #import Sqrt module locally to avoid circular dependency - from pybuda.op.eval.pybuda.sqrt import Sqrt + from forge.op.eval.forge.sqrt import Sqrt if self.bias_correction: # bias_correction1 = 1 - beta1 ** step beta1_one = ac.constant(1.0) @@ -553,7 +553,7 @@ def set_parameters_to_optimize(self, parameters: List[Parameter]): # For each Parameter, we register its associated set of optimizer parameters for parameter in parameters: if parameter.requires_grad: - # PyBuda + # Forge self.parameter_to_opt_inputs[ parameter.get_name() ] = self.get_param_dict( @@ -652,7 +652,7 @@ def generate_op_trace(self, ac, parameter, gradient): # g(t) -> gradient at current timestep #temp fix to avoid circular dependency by importing locally - from pybuda.op.eval.pybuda.buffer import Buffer + from forge.op.eval.forge.buffer import Buffer grad = ac.op(Buffer.create(), (gradient, )) # m(t) <- beta1 * m(t - 1) + (1 - beta1) * g(t) @@ -696,7 +696,7 @@ def generate_op_trace(self, ac, parameter, gradient): phi_norm = ac.op("reduce_sum", (phi_norm, ), (-1, )) #importing locally to avoid circular dependency from Dataformats - from pybuda.op.eval.pybuda.sqrt import Sqrt + from forge.op.eval.forge.sqrt import Sqrt phi_norm = ac.op(Sqrt.create(), (phi_norm, )) epsilon = ac.tensor(torch.zeros(param_shape) + self.eps) @@ -705,7 +705,7 @@ def generate_op_trace(self, ac, parameter, gradient): # adam ratio, ratio of corrected mean and corrected variance stabilized with epsilon r_t = ac.op(Sqrt.create(), (updated_variance, )) r_t = ac.op("add", (r_t, epsilon)) - from pybuda.op.eval.pybuda.reciprocal import Reciprocal + from forge.op.eval.forge.reciprocal import Reciprocal r_t = ac.op("multiply", (updated_mean, ac.op(Reciprocal.create(), (r_t, )))) if self.weight_decay != 0: @@ -748,7 +748,7 @@ def generate_op_trace(self, ac, parameter, gradient): r_t_norm_eq = ac.op("equal", (r_t_norm, zero)) trust_ratio = ac.op(Reciprocal.create(), (r_t_norm, )) trust_ratio = ac.op("multiply", (phi_norm, trust_ratio)) - from pybuda.op.eval.pybuda.clip import Clip + from forge.op.eval.forge.clip import Clip trust_ratio = ac.op(Clip.create(min=self.clip_value[0], max=self.clip_value[1]), (trust_ratio, )) trust_ratio = ac.op("multiply", (trust_ratio, r_t_norm_ne)) trust_ratio = ac.op("add", (trust_ratio, r_t_norm_eq)) @@ -815,7 +815,7 @@ def get_pytorch_optimizer(self, parameters: Dict[str, torch.Tensor], lr = None) Return an equivalent pytorch optimizer, used for verification. 
""" if not self.torch_optimizer: - self.torch_optimizer = pybuda.torch_optimizers.LAMB( + self.torch_optimizer = forge.torch_optimizers.LAMB( params=[p for p in parameters.values()], lr=self.learning_rate, betas=(self.beta1, self.beta2), @@ -875,7 +875,7 @@ def set_parameters_to_optimize(self, parameters: List[Parameter]): # For each Parameter, we register its associated set of optimizer parameters for parameter in parameters: if parameter.requires_grad: - # PyBuda + # Forge self.parameter_to_opt_inputs[ parameter.get_name() ] = self.get_param_dict( @@ -971,7 +971,7 @@ def generate_op_trace(self, ac, parameter, gradient): # g(t) -> gradient at current timestep #temp fix for circular dependency - from pybuda.op.eval.pybuda.buffer import Buffer + from forge.op.eval.forge.buffer import Buffer grad = ac.op(Buffer.create(), (gradient, )) # lambda <- || w(t) || / (|| g(t) || + beta * || w(t) ||) @@ -981,7 +981,7 @@ def generate_op_trace(self, ac, parameter, gradient): weight_norm = ac.op("reduce_sum", (weight_norm, ), (-2, )) weight_norm = ac.op("reduce_sum", (weight_norm, ), (-1, )) #importing locally to avoid circular dependency from Dataformats - from pybuda.op.eval.pybuda.sqrt import Sqrt + from forge.op.eval.forge.sqrt import Sqrt weight_norm = ac.op(Sqrt.create(), (weight_norm, )) grad_norm = ac.op("multiply", (grad, grad)) @@ -1030,7 +1030,7 @@ def generate_op_trace(self, ac, parameter, gradient): local_learning_rate = ac.op("multiply", (weight_decay, weight_norm)) local_learning_rate = ac.op("add", (grad_norm, local_learning_rate)) local_learning_rate = ac.op("add", (epsilon, local_learning_rate)) - from pybuda.op.eval.pybuda.reciprocal import Reciprocal + from forge.op.eval.forge.reciprocal import Reciprocal local_learning_rate = ac.op(Reciprocal.create(), (local_learning_rate, )) local_learning_rate = ac.op("multiply", (weight_norm, local_learning_rate)) local_learning_rate = ac.op("multiply", (lars_coeff, local_learning_rate)) @@ -1114,7 +1114,7 @@ def get_pytorch_optimizer(self, parameters: Dict[str, torch.Tensor], lr = None) Return an equivalent pytorch optimizer, used for verification. 
""" if not self.torch_optimizer: - self.torch_optimizer = pybuda.torch_optimizers.LARS( + self.torch_optimizer = forge.torch_optimizers.LARS( params=[p for p in parameters.values()], lr=self.learning_rate, momentum=self.momentum, diff --git a/pybuda/pybuda/parameter.py b/forge/forge/parameter.py similarity index 91% rename from pybuda/pybuda/parameter.py rename to forge/forge/parameter.py index 2e88fafd8..53a7522fa 100644 --- a/pybuda/pybuda/parameter.py +++ b/forge/forge/parameter.py @@ -7,9 +7,9 @@ from loguru import logger from .tensor import Tensor, TensorShape, TensorBase, pytorch_dtype_to_buda_dataformat, pad_pytorch_tensor_to_buda, buda_dataformat_to_pytorch_dtype -from .pybudaglobal import lazy_trace_data -from pybuda._C import DataFormat -import pybuda +from .forgeglobal import lazy_trace_data +from forge._C import DataFormat +import forge class Parameter(TensorBase): """ @@ -174,7 +174,7 @@ def _set_fp32_fallback(self, fp32_fallback: DataFormat): @property def data_format(self) -> DataFormat: """ - Return this parameter's PyBuda data format + Return this parameter's Forge data format """ assert self._data_format is not None, "No data type set for parameter yet" return self._data_format @@ -194,7 +194,7 @@ def set_data_format(self, df: DataFormat): Parameters ---------- df: DataFormat - PyBuda data format + Forge data format """ self._data_format = df if self._value is not None: @@ -212,10 +212,10 @@ def create_from_torch(cls, torch_tensor: torch.Tensor) -> "Parameter": def _create_const_tensor(self, value): assert isinstance(value, (int, float)), f"Automatic constant tensor creation for {type(value)} not supported" - return pybuda.op.Constant("", constant=value) + return forge.op.Constant("", constant=value) def _handle_binary_op(self, other, op, is_r=False): - if not isinstance(other, (pybuda.Tensor, pybuda.Parameter)): + if not isinstance(other, (forge.Tensor, forge.Parameter)): other = self._create_const_tensor(other) if not is_r: return op("", self, other) @@ -223,19 +223,19 @@ def _handle_binary_op(self, other, op, is_r=False): return op("", other, self) def __add__(self, other): - return self._handle_binary_op(other, pybuda.op.Add) + return self._handle_binary_op(other, forge.op.Add) def __radd__(self, other): - return self._handle_binary_op(other, pybuda.op.Add, is_r=True) + return self._handle_binary_op(other, forge.op.Add, is_r=True) def __sub__(self, other): - return self._handle_binary_op(other, pybuda.op.Subtract) + return self._handle_binary_op(other, forge.op.Subtract) def __rsub__(self, other): - return self._handle_binary_op(other, pybuda.op.Subtract, is_r=True) + return self._handle_binary_op(other, forge.op.Subtract, is_r=True) def __mul__(self, other): - return self._handle_binary_op(other, pybuda.op.Multiply) + return self._handle_binary_op(other, forge.op.Multiply) def __rmul__(self, other): - return self._handle_binary_op(other, pybuda.op.Multiply, is_r=True) + return self._handle_binary_op(other, forge.op.Multiply, is_r=True) diff --git a/pybuda/pybuda/python_codegen.py b/forge/forge/python_codegen.py similarity index 97% rename from pybuda/pybuda/python_codegen.py rename to forge/forge/python_codegen.py index 565813a09..d8b838864 100644 --- a/pybuda/pybuda/python_codegen.py +++ b/forge/forge/python_codegen.py @@ -8,50 +8,50 @@ import tensorflow as tf from loguru import logger -def pybuda_df_str_from_str(df: str, name: str): +def forge_df_str_from_str(df: str, name: str): df = df.lower() if df == "bfp2": - return "pybuda.DataFormat.Bfp2" + return 
"forge.DataFormat.Bfp2" elif df == "bfp2_b": - return "pybuda.DataFormat.Bfp2_b" + return "forge.DataFormat.Bfp2_b" elif df == "bfp4": - return "pybuda.DataFormat.Bfp4" + return "forge.DataFormat.Bfp4" elif df == "bfp4_b": - return "pybuda.DataFormat.Bfp4_b" + return "forge.DataFormat.Bfp4_b" elif df == "bfp8": - return "pybuda.DataFormat.Bfp8" + return "forge.DataFormat.Bfp8" elif df == "bfp8_b": - return "pybuda.DataFormat.Bfp8_b" + return "forge.DataFormat.Bfp8_b" elif df == "float16": - return "pybuda.DataFormat.Float16" + return "forge.DataFormat.Float16" elif df in ["float16_b", "bfloat16"]: - return "pybuda.DataFormat.Float16_b" + return "forge.DataFormat.Float16_b" elif df == "float32": - return "pybuda.DataFormat.Float32" + return "forge.DataFormat.Float32" elif df == "int8": - return "pybuda.DataFormat.Int8" + return "forge.DataFormat.Int8" elif df == "invalid": - return "pybuda.DataFormat.Invalid" + return "forge.DataFormat.Invalid" elif df == "lf8": - return "pybuda.DataFormat.Lf8" + return "forge.DataFormat.Lf8" elif df == "raw_uint16": - return "pybuda.DataFormat.RawUInt16" + return "forge.DataFormat.RawUInt16" elif df == "raw_uint32": - return "pybuda.DataFormat.RawUInt32" + return "forge.DataFormat.RawUInt32" elif df == "raw_uint8": - return "pybuda.DataFormat.RawUInt8" + return "forge.DataFormat.RawUInt8" elif df == "uint16": - return "pybuda.DataFormat.UInt16" + return "forge.DataFormat.UInt16" elif df == "uint8": - return "pybuda.DataFormat.UInt8" + return "forge.DataFormat.UInt8" elif df == "int8": - return "pybuda.DataFormat.Int8" + return "forge.DataFormat.Int8" elif df == "int32": - return "pybuda.DataFormat.Int32" + return "forge.DataFormat.Int32" else: logger.warning(f"Invalid data format: {df} for constant/parameter \'{name}\', defaulting to float32") - return "pybuda.DataFormat.Float32" + return "forge.DataFormat.Float32" def pytorch_df_str_from_str(df: str, name): df = df.lower() @@ -104,7 +104,7 @@ def close_file(self): def import_module_path(self): return self.module_directory + f".{self.module_name}" -class PyBudaWriter(PythonWriter): +class ForgeWriter(PythonWriter): incompatible_np_float_types = [tf.bfloat16, ] def __init__(self, module_name, framework, contains_incompatible_np_floats=False, delete_inputs=True): @@ -119,9 +119,9 @@ def __init__(self, module_name, framework, contains_incompatible_np_floats=False self.dev = "TTDevice" def write_header(self): - self.wl("import pybuda") - self.wl("import pybuda.op") - self.wl("from pybuda import PyBudaModule") + self.wl("import forge") + self.wl("import forge.op") + self.wl("from forge import ForgeModule") self.wl("") self.wl("from loguru import logger") @@ -129,7 +129,7 @@ def write_header(self): self.wl("import torch") if self.framework == "tensorflow": self.wl("import tensorflow as tf") - self.wl("from pybuda.tvm_utils import map_tf_dtype_to_pt") + self.wl("from forge.tvm_utils import map_tf_dtype_to_pt") if self.framework == "jax": self.wl("import flax") @@ -144,7 +144,7 @@ def write_class_definition(self, params, constants, class_name=None, num_submode if class_name is None: class_name = self.class_name self.num_submodels = num_submodels - self.wl(f"class {class_name}(PyBudaModule):") + self.wl(f"class {class_name}(ForgeModule):") self.indent += 1 self.wl("def __init__(self, name):") self.indent += 1 @@ -163,9 +163,9 @@ def write_class_definition(self, params, constants, class_name=None, num_submode continue self.param_names.append(name) if is_submodel: - self.wl(f"self.add_parameter(\"{name}\", 
pybuda.Parameter(*{shape}, requires_grad={requires_grad}, dev_data_format={pybuda_df_str_from_str(dtype, name)}), prepend_name=True)") + self.wl(f"self.add_parameter(\"{name}\", forge.Parameter(*{shape}, requires_grad={requires_grad}, dev_data_format={forge_df_str_from_str(dtype, name)}), prepend_name=True)") else: - self.wl(f"self.add_parameter(\"{name}\", pybuda.Parameter(*{shape}, requires_grad={requires_grad}, dev_data_format={pybuda_df_str_from_str(dtype, name)}))") + self.wl(f"self.add_parameter(\"{name}\", forge.Parameter(*{shape}, requires_grad={requires_grad}, dev_data_format={forge_df_str_from_str(dtype, name)}))") for const in constants.values(): @@ -360,7 +360,7 @@ def write_param_parser(self, param_names, param_file_name): self.wl("}") if self.contains_incompatible_np_floats: - self.wl(f"incompatible_np_float_types = {PyBudaWriter.incompatible_np_float_types}") + self.wl(f"incompatible_np_float_types = {ForgeWriter.incompatible_np_float_types}") self.wl("for weight in weights:") self.indent += 1 diff --git a/pybuda/pybuda/query.py b/forge/forge/query.py similarity index 99% rename from pybuda/pybuda/query.py rename to forge/forge/query.py index a7e5df74b..5c2afbf4e 100644 --- a/pybuda/pybuda/query.py +++ b/forge/forge/query.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 from enum import Enum -import pybuda._C.graph.query as query +import forge._C.graph.query as query class NodePredicateType(Enum): diff --git a/pybuda/pybuda/schedulers.py b/forge/forge/schedulers.py similarity index 96% rename from pybuda/pybuda/schedulers.py rename to forge/forge/schedulers.py index 93d1bf9cb..4f0435647 100644 --- a/pybuda/pybuda/schedulers.py +++ b/forge/forge/schedulers.py @@ -7,7 +7,7 @@ import torch from .optimizers import Optimizer -from pybuda.torch_schedulers import TorchLearningRateScheduler +from forge.torch_schedulers import TorchLearningRateScheduler class LearningRateScheduler: diff --git a/pybuda/pybuda/tensor.py b/forge/forge/tensor.py similarity index 96% rename from pybuda/pybuda/tensor.py rename to forge/forge/tensor.py index 035389df4..8f4bdd400 100644 --- a/pybuda/pybuda/tensor.py +++ b/forge/forge/tensor.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 from typing import Union, Tuple, List, Optional, Dict -from pybuda.tvm_utils import map_tf_dtype_to_pt +from forge.tvm_utils import map_tf_dtype_to_pt import torch import tensorflow as tf @@ -15,22 +15,22 @@ import jax.numpy as jnp import json -from .pybudaglobal import TILE_DIM, align_up_tile, round_up_div -from pybuda._C import DataFormat -from pybuda._C.graph import OpType, RuntimeTensorTransform, RuntimeTensorTransformType, get_constant_input_value -from pybuda.utils import detach_tensors +from .forgeglobal import TILE_DIM, align_up_tile, round_up_div +from forge._C import DataFormat +from forge._C.graph import OpType, RuntimeTensorTransform, RuntimeTensorTransformType, get_constant_input_value +from forge.utils import detach_tensors from functools import reduce from operator import mul from .utils import align_up -from pybuda.tvm_utils import map_tf_dtype_to_pt, map_pt_dtype_to_tf +from forge.tvm_utils import map_tf_dtype_to_pt, map_pt_dtype_to_tf -import pybuda +import forge SomeTensor = Union[torch.Tensor, "Tensor", np.ndarray] class TensorShape: """ - Convenience wrapper for tensor dimensions. All Buda tensors are fixed to 4 dimensions - rows, columns, Z, W. PyBuda tensors are free to have any dimensions. + Convenience wrapper for tensor dimensions. 
All Buda tensors are fixed to 4 dimensions - rows, columns, Z, W. Forge tensors are free to have any dimensions. """ def __init__(self, *dims): @@ -116,10 +116,10 @@ def __getitem__(self, i): class TensorBase: def _create_const_tensor(self, value): assert isinstance(value, (int, float)), f"Automatic constant tensor creation for {type(value)} not supported" - return pybuda.op.Constant("", constant=value) + return forge.op.Constant("", constant=value) def _handle_binary_op(self, other, op, is_r=False): - if not isinstance(other, (pybuda.Tensor, pybuda.Parameter)): + if not isinstance(other, (forge.Tensor, forge.Parameter)): other = self._create_const_tensor(other) if not is_r: return op("", self, other) @@ -127,22 +127,22 @@ def _handle_binary_op(self, other, op, is_r=False): return op("", other, self) def __add__(self, other): - return self._handle_binary_op(other, pybuda.op.Add) + return self._handle_binary_op(other, forge.op.Add) def __radd__(self, other): - return self._handle_binary_op(other, pybuda.op.Add, is_r=True) + return self._handle_binary_op(other, forge.op.Add, is_r=True) def __sub__(self, other): - return self._handle_binary_op(other, pybuda.op.Subtract) + return self._handle_binary_op(other, forge.op.Subtract) def __rsub__(self, other): - return self._handle_binary_op(other, pybuda.op.Subtract, is_r=True) + return self._handle_binary_op(other, forge.op.Subtract, is_r=True) def __mul__(self, other): - return self._handle_binary_op(other, pybuda.op.Multiply) + return self._handle_binary_op(other, forge.op.Multiply) def __rmul__(self, other): - return self._handle_binary_op(other, pybuda.op.Multiply, is_r=True) + return self._handle_binary_op(other, forge.op.Multiply, is_r=True) class Tensor(TensorBase): """ @@ -215,8 +215,8 @@ def is_constant(self) -> bool: def __repr__(self): if self.has_value(): - return f"PyBuda Tensor: {self.value()}, {self.data_format}" - return f"PyBuda Empty Tensor: {self.shape}" + return f"Forge Tensor: {self.value()}, {self.data_format}" + return f"Forge Empty Tensor: {self.shape}" @property def pt_data_format(self) -> torch.dtype: @@ -260,7 +260,7 @@ def create_from_torch(cls, torch_tensor: torch.Tensor, dev_data_format: Optional return TensorFromPytorch(torch_tensor, dev_data_format, constant) @classmethod - def create_from_trace(cls, src_op: "PyBudaOp", shape: Tuple[int, ...], data_format: DataFormat) -> "TensorFromTrace": + def create_from_trace(cls, src_op: "ForgeOp", shape: Tuple[int, ...], data_format: DataFormat) -> "TensorFromTrace": """ New path to creating front-end Tensor """ @@ -365,7 +365,7 @@ class TensorFromTrace(Tensor): """ Tensor wrapper created by tracing model graph """ - def __init__(self, src_op: "PyBudaOp", shape: Tuple[int, ...], data_format: DataFormat): + def __init__(self, src_op: "ForgeOp", shape: Tuple[int, ...], data_format: DataFormat): super().__init__() self.tensor_shape = TensorShape(*shape) self.src_op = src_op @@ -427,7 +427,7 @@ def to_format(self, data_format: DataFormat) -> "Tensor": # Parameters # ---------- # t: Tensor - # Pybuda tensor to be turned into a descriptor + # Forge tensor to be turned into a descriptor # batch: int, optional # If batch != 0, set batch dimension to given value @@ -675,7 +675,7 @@ def is_equivalent_data_format(pt_df: torch.dtype, tt_df: DataFormat) -> bool: # format = df # # Before we push the tensors to the queue, we need to make sure that the -# # tensors are in the right format and aligned between PyBuda and PyTorch. 
+# # tensors are in the right format and aligned between Forge and PyTorch. # # If this isn't the case, expected shapes on the queues will be invalid # # and the runtime will crash. # # @@ -1205,7 +1205,7 @@ def consteval_tensor( is_buda: bool, epoch_type: str ) -> torch.Tensor: - import pybuda.op.eval.pybuda as eval_module + import forge.op.eval.forge as eval_module consteval_graph = consteval_trace.get(name, None) @@ -1220,8 +1220,8 @@ def get_loss_node(): assert False, "Loss node not found" def eval_op(op_type, inputs): - pybuda_eval = eval_module.get_f_pybuda_eval(OpType(op_type["type"], op_type["attrs"], op_type["named_attrs"])) - return pybuda_eval(inputs) + forge_eval = eval_module.get_f_forge_eval(OpType(op_type["type"], op_type["attrs"], op_type["named_attrs"])) + return forge_eval(inputs) logger.debug("ConstEval graph: {}", name) node_to_tensor: Dict[str, torch.Tensor] = {} @@ -1241,7 +1241,7 @@ def eval_op(op_type, inputs): input_value = narrow_buda_tensor_to_pytorch(input_value, node["cache"]["shape"], has_microbatch_dim=False) node_to_tensor[node_name] = input_value - elif node["opcode"] in {"BudaOp", "PyBudaOp"}: + elif node["opcode"] in {"BudaOp", "ForgeOp"}: inputs_after_tms: List[torch.Tensor] = [] for input_index, operand in enumerate(node["input_nodes"]): operand_tensor = node_to_tensor[operand] diff --git a/pybuda/pybuda/tools/__init__.py b/forge/forge/tools/__init__.py similarity index 100% rename from pybuda/pybuda/tools/__init__.py rename to forge/forge/tools/__init__.py diff --git a/pybuda/pybuda/tools/autotune.py b/forge/forge/tools/autotune.py similarity index 94% rename from pybuda/pybuda/tools/autotune.py rename to forge/forge/tools/autotune.py index 4ed5ead63..82dbef98c 100755 --- a/pybuda/pybuda/tools/autotune.py +++ b/forge/forge/tools/autotune.py @@ -62,7 +62,7 @@ def main(command, num_loops, cache_path, perf_path): env_vars = os.environ.copy() # Copy the current environment env_vars["LOGGER_LEVEL"] = "None" - env_vars["PYBUDA_COMPILER_CACHE"] = cache_path + env_vars["FORGE_COMPILER_CACHE"] = cache_path # Run the original command subprocess.run(command + " -o " + perf_path, shell=True, env=env_vars) @@ -70,7 +70,7 @@ def main(command, num_loops, cache_path, perf_path): # Run the autotuning loop for i in range(num_loops): subprocess.run(command + " --perf_analysis --loop_count 1", shell=True, env=env_vars) - subprocess.run(f"pybuda/pybuda/tools/perf_analysis.py --cache {cache_path}", shell=True, env=env_vars) + subprocess.run(f"forge/forge/tools/perf_analysis.py --cache {cache_path}", shell=True, env=env_vars) subprocess.run(command + " -o " + perf_path, shell=True, env=env_vars) # Make a backup of the cache file at the end of each loop @@ -89,7 +89,7 @@ def main(command, num_loops, cache_path, perf_path): best_result_cache = f"{backup_cache_path}{best_result_index-1}" shutil.copyfile(best_result_cache, cache_path) # copy the snapshot from the best iteration to cache path for repro - repro_command = f"PYBUDA_COMPILER_CACHE={cache_path} " + command + " -o " + perf_path + repro_command = f"FORGE_COMPILER_CACHE={cache_path} " + command + " -o " + perf_path logger.info(f"repro: {repro_command}") diff --git a/pybuda/pybuda/tools/autotune.sh b/forge/forge/tools/autotune.sh similarity index 71% rename from pybuda/pybuda/tools/autotune.sh rename to forge/forge/tools/autotune.sh index b568c85ce..173fd6c82 100755 --- a/pybuda/pybuda/tools/autotune.sh +++ b/forge/forge/tools/autotune.sh @@ -6,11 +6,11 @@ LOGFILE="autotune.log" extract_commands() { - grep 
"pybuda/test/benchmark/benchmark.py" "$1" | sed 's/ -o perf.json//' + grep "forge/test/benchmark/benchmark.py" "$1" | sed 's/ -o perf.json//' } # Read commands into an array -mapfile -t commands < <(extract_commands "pybuda/test/benchmark/run_benchmark") +mapfile -t commands < <(extract_commands "forge/test/benchmark/run_benchmark") for cmd in "${commands[@]}"; do # Extract the model name for the cache file @@ -21,7 +21,7 @@ for cmd in "${commands[@]}"; do /mnt/motor/syseng/bin/tt-smi/wh/stable -wr all wait # Autotune the model - pybuda/pybuda/tools/autotune.py --cache ".cache/${model}_${config}.ttc" "$cmd" + forge/forge/tools/autotune.py --cache ".cache/${model}_${config}.ttc" "$cmd" done # Dump the results diff --git a/pybuda/pybuda/tools/net2reportify.py b/forge/forge/tools/net2reportify.py similarity index 96% rename from pybuda/pybuda/tools/net2reportify.py rename to forge/forge/tools/net2reportify.py index af021f7e4..79d94ac86 100755 --- a/pybuda/pybuda/tools/net2reportify.py +++ b/forge/forge/tools/net2reportify.py @@ -31,7 +31,7 @@ def write_reportify_graph(netlist_name, graph, report_name, verbose=False): def net2reportify(netlist_name, netlist, extract_graphs=[], verbose=False): - if bool(int(os.environ.get("PYBUDA_DISABLE_REPORTIFY_DUMP", "0"))): + if bool(int(os.environ.get("FORGE_DISABLE_REPORTIFY_DUMP", "0"))): return if type(netlist) is str: with open(netlist) as fd: @@ -46,7 +46,7 @@ def node_shape(node): def emit_queue(reportify_graph, queue_name, queue): reportify_graph["nodes"][queue_name] = queue - reportify_graph["nodes"][queue_name]["pybuda"] = 1 + reportify_graph["nodes"][queue_name]["forge"] = 1 reportify_graph["nodes"][queue_name]["epoch"] = 0 reportify_graph["nodes"][queue_name]["name"] = queue_name reportify_graph["nodes"][queue_name]["cache"] = {"shape": node_shape(queue)} @@ -76,7 +76,7 @@ def get_ublock_order(input_idx): reportify_graph["nodes"][node_name] = node reportify_graph["nodes"][node_name]["class"] = node["type"] - reportify_graph["nodes"][node_name]["pybuda"] = 1 + reportify_graph["nodes"][node_name]["forge"] = 1 reportify_graph["nodes"][node_name]["epoch"] = epoch reportify_graph["nodes"][node_name]["epoch_type"] = epoch_type reportify_graph["nodes"][node_name]["name"] = node_name @@ -126,7 +126,7 @@ def net2placement( device_yaml=None, verbose=False, ): - if bool(int(os.environ.get("PYBUDA_DISABLE_REPORTIFY_DUMP", "0"))): + if bool(int(os.environ.get("FORGE_DISABLE_REPORTIFY_DUMP", "0"))): return if type(netlist) is str: with open(netlist) as fd: diff --git a/pybuda/pybuda/tools/perf_analysis.py b/forge/forge/tools/perf_analysis.py similarity index 98% rename from pybuda/pybuda/tools/perf_analysis.py rename to forge/forge/tools/perf_analysis.py index e0a79a8b5..3997bdf79 100755 --- a/pybuda/pybuda/tools/perf_analysis.py +++ b/forge/forge/tools/perf_analysis.py @@ -123,7 +123,7 @@ def as_gb_sec(clock_speed, bytes_cycle): def load_perf_analysis(epoch_count, config) -> List[Dict]: """ Load backend graph perf report for each epoch. Generate per-kernel numbers from totals, since current version - in pybuda only has totals. Remove once BBE is pulled in with new backend perf analyzer that has per-kernel numbers. + in forge only has totals. Remove once BBE is pulled in with new backend perf analyzer that has per-kernel numbers. """ print(f"Loading performance analysis data for {epoch_count} epochs...") @@ -306,7 +306,7 @@ def load_estimated_cycles(): # Check if file exists if not os.path.exists(file_path): - print(f"{file_path} does not exist. 
Run with PYBUDA_OP_PERF=1 to generate it, if running with pybuda. Loading will continue without it.") + print(f"{file_path} does not exist. Run with FORGE_OP_PERF=1 to generate it, if running with forge. Loading will continue without it.") return {} print(f"Loading {file_path}...") @@ -332,7 +332,7 @@ def load_balancer_score(): # Check if file exists if not os.path.exists(file_path): - print(f"{file_path} does not exist. Run with PYBUDA_OP_PERF=1 to generate it, if running with pybuda. Loading will continue without it.") + print(f"{file_path} does not exist. Run with FORGE_OP_PERF=1 to generate it, if running with forge. Loading will continue without it.") return {} print(f"Loading {file_path}...") @@ -1051,7 +1051,7 @@ def main(stdscr, data, save_epoch_screens=False, epoch_sreens_save_dir=None): import argparse parser = argparse.ArgumentParser(description=""" - Perf analyzer collects performance data from various sources and displays it in terminal. To use, run any pybuda test with PYBUDA_OP_PERF=1 and TT_BACKEND_PERF_ANALYZER=1 switches to generate data, and then run this script in pybuda root, providing the netlist. + Perf analyzer collects performance data from various sources and displays it in terminal. To use, run any forge test with FORGE_OP_PERF=1 and TT_BACKEND_PERF_ANALYZER=1 switches to generate data, and then run this script in forge root, providing the netlist. """) parser.add_argument('-n', '--netlist', help='Model netlist') parser.add_argument('-s', '--spatial_epochs', action='store_true', help='Show individual spatial epochs instead of temporal ones. Caution - overall performance estimate on multi-chip runs will not be accurate in this mode.') diff --git a/pybuda/pybuda/tools/run_net2pipe.py b/forge/forge/tools/run_net2pipe.py similarity index 100% rename from pybuda/pybuda/tools/run_net2pipe.py rename to forge/forge/tools/run_net2pipe.py diff --git a/pybuda/pybuda/tools/tti_merge.py b/forge/forge/tools/tti_merge.py similarity index 99% rename from pybuda/pybuda/tools/tti_merge.py rename to forge/forge/tools/tti_merge.py index 178792e46..aab3dd5dd 100644 --- a/pybuda/pybuda/tools/tti_merge.py +++ b/forge/forge/tools/tti_merge.py @@ -7,8 +7,8 @@ import re import shutil import os -import pybuda._C.backend_api as backend_api -from pybuda._C import DataFormat +import forge._C.backend_api as backend_api +from forge._C import DataFormat import subprocess as sp import json import copy diff --git a/pybuda/pybuda/torch_compile.py b/forge/forge/torch_compile.py similarity index 92% rename from pybuda/pybuda/torch_compile.py rename to forge/forge/torch_compile.py index 496044ca0..17c1e17d8 100644 --- a/pybuda/pybuda/torch_compile.py +++ b/forge/forge/torch_compile.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import hashlib import os -import pybuda +import forge import torch import io import json @@ -12,16 +12,16 @@ from loguru import logger -from pybuda._C.torch_device import get_default_device, unique_id, TTForgeTensorDesc -from pybuda.compiled_graph_state import CompiledGraphState -from pybuda.fx.capture import CaptureFX -from pybuda.fx.schedule import TensorSource -from pybuda.fx.mixed_graph import MixedGraph +from forge._C.torch_device import get_default_device, unique_id, TTForgeTensorDesc +from forge.compiled_graph_state import CompiledGraphState +from forge.fx.capture import CaptureFX +from forge.fx.schedule import TensorSource +from forge.fx.mixed_graph import MixedGraph _tt0 = None _compile_cache = None -_compile_cache_dir = 
os.environ.get("PYBUDA_COMPILE_CACHE_DIR", "tt_build") +_compile_cache_dir = os.environ.get("FORGE_COMPILE_CACHE_DIR", "tt_build") _capture: CaptureFX = CaptureFX() _subgraph_index = 0 _module_index = 0 @@ -89,10 +89,10 @@ def _build_backend_compile_request(device, compiler_cfg, compiled_graph_state, s ) # Backend Compile - bcfg = pybuda._C.backend_api.BackendConfig( + bcfg = forge._C.backend_api.BackendConfig( device.type, device.arch, - pybuda._C.backend_api.DeviceMode.CompileAndRun, + forge._C.backend_api.DeviceMode.CompileAndRun, compiler_cfg.backend_opt_level, compiler_cfg.backend_output_dir, soc_desc_yaml, @@ -202,11 +202,11 @@ def _compile(module, aten_module, module_name, sample_inputs, device, compiler_c logger.debug("Frontend Compile") module = module.to("cpu") - fe_compile_result = pybuda.pybuda_compile_torch( + fe_compile_result = forge.forge_compile_torch( module_name, module, _capture.get_buda_graph(), - *[pybuda.Tensor.create_from_torch(sample_input.to("cpu")) for sample_input in [g for gs in graph_inputs for g in gs]] + *[forge.Tensor.create_from_torch(sample_input.to("cpu")) for sample_input in [g for gs in graph_inputs for g in gs]] ) # Backend Compile @@ -256,10 +256,10 @@ def _compile_cached(module, aten_module, module_name, sample_inputs, device, com return _compile_cache[key] elif cache and not default_output_dir: logger.warning( - "PyBuda compile cache disabled because of user compiler_cfg.backend_output_dir path override" + "Forge compile cache disabled because of user compiler_cfg.backend_output_dir path override" ) else: - compiler_cfg.backend_output_dir = pybuda.utils.resolve_output_build_directory() + compiler_cfg.backend_output_dir = forge.utils.resolve_output_build_directory() workload, compiled_graph_state, schedule = _compile(module, aten_module, module_name, sample_inputs, device, compiler_cfg) @@ -396,7 +396,7 @@ def to(self, dev): from torch.fx.experimental.proxy_tensor import make_fx from torch._functorch.compile_utils import strip_overloads -from pybuda.fx.torch_decomp_reconstruct import get_pybuda_decompositions, apply_torch_reconstruct_patterns +from forge.fx.torch_decomp_reconstruct import get_forge_decompositions, apply_torch_reconstruct_patterns def compile_torch( module, @@ -405,8 +405,8 @@ def compile_torch( ): torch_device = list(module.parameters())[0].device if len(list(module.parameters())) > 0 else "tt" with torch.no_grad(): - pybuda_decompositions = get_pybuda_decompositions() - decompositions = {**core_aten_decompositions(), **pybuda_decompositions} + forge_decompositions = get_forge_decompositions() + decompositions = {**core_aten_decompositions(), **forge_decompositions} fake_tensor_mode = torch._dynamo.utils.detect_fake_mode(sample_inputs) fake_tensor_mode.allow_non_fake_inputs = True aten = make_fx(module, tracing_mode="symbolic", decomposition_table=decompositions, _allow_non_fake_inputs=True)(*sample_inputs) @@ -437,7 +437,7 @@ def _torch_compile( device = _device if compiler_cfg is None: - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.compile_subgraphs = True if module_name is None: @@ -449,7 +449,7 @@ def _torch_compile( module_name = f"{module.__class__.__name__}_{_module_index}" _module_index += 1 - cache &= not bool(os.environ.get("PYBUDA_DISABLE_COMPILE_CACHE", "0")) + cache &= not bool(os.environ.get("FORGE_DISABLE_COMPILE_CACHE", "0")) workload, compiled_graph_state, schedule = _compile_cached( module, aten_module, module_name, 
sample_inputs, device, compiler_cfg, cache @@ -464,4 +464,4 @@ def _torch_compile( return compiled_model -# compile_torch = aot_autograd(fw_compiler=_torch_compile, decompositions={**core_aten_decompositions(), **pybuda_decompositions}) +# compile_torch = aot_autograd(fw_compiler=_torch_compile, decompositions={**core_aten_decompositions(), **forge_decompositions}) diff --git a/pybuda/pybuda/torch_optimizers.py b/forge/forge/torch_optimizers.py similarity index 100% rename from pybuda/pybuda/torch_optimizers.py rename to forge/forge/torch_optimizers.py diff --git a/pybuda/pybuda/torch_schedulers.py b/forge/forge/torch_schedulers.py similarity index 100% rename from pybuda/pybuda/torch_schedulers.py rename to forge/forge/torch_schedulers.py diff --git a/pybuda/pybuda/transformers/__init__.py b/forge/forge/transformers/__init__.py similarity index 100% rename from pybuda/pybuda/transformers/__init__.py rename to forge/forge/transformers/__init__.py diff --git a/pybuda/pybuda/transformers/pipeline.py b/forge/forge/transformers/pipeline.py similarity index 93% rename from pybuda/pybuda/transformers/pipeline.py rename to forge/forge/transformers/pipeline.py index db36cfcfa..3ce73ae74 100644 --- a/pybuda/pybuda/transformers/pipeline.py +++ b/forge/forge/transformers/pipeline.py @@ -3,31 +3,31 @@ # SPDX-License-Identifier: Apache-2.0 import inspect -import pybuda +import forge from loguru import logger import torch from collections import OrderedDict import transformers from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions from transformers.models.auto.tokenization_auto import AutoTokenizer -from pybuda.pybudaglobal import align_up_tile -from pybuda.tensor import remove_microbatch +from forge.forgeglobal import align_up_tile +from forge.tensor import remove_microbatch class NLPPipelineWrapper(torch.nn.Module): """ Wrapper for transformers nlp pipeline. Provide to pipeline(...) call as model. 
""" - def __init__(self, model, tokenizer, name="pb_model", use_cache=None, fp32_fallback=pybuda.DataFormat.Float16_b, forward_fn=None, max_length=None): + def __init__(self, model, tokenizer, name="pb_model", use_cache=None, fp32_fallback=forge.DataFormat.Float16_b, forward_fn=None, max_length=None): super().__init__() - #pybuda.config._get_global_compiler_config().verify_pybuda_codegen_vs_framework = False + #forge.config._get_global_compiler_config().verify_forge_codegen_vs_framework = False self.original_fwd = model.forward self.forward_args = list(inspect.signature(model.forward).parameters.keys()) self.forward_args_dict = list(inspect.signature(model.forward).parameters.items()) self.forward_fn = forward_fn if forward_fn is None: - self.module = pybuda.PyTorchModule(name, model, redirect_forward=False) - self.ttdevice = pybuda.TTDevice("tt0", module=self.module, fp32_fallback=fp32_fallback) + self.module = forge.PyTorchModule(name, model, redirect_forward=False) + self.ttdevice = forge.TTDevice("tt0", module=self.module, fp32_fallback=fp32_fallback) self.pad_token_id = tokenizer.pad_token_id self.config = model.config self.model = model @@ -60,7 +60,7 @@ def tt_forward(self, *inputs, **kwargs): inputs = list(inputs) inputs = [i.int() if isinstance(i, torch.Tensor) and not torch.is_floating_point(i) else i for i in inputs] self.ttdevice.push_to_inputs(inputs) - output_q = pybuda.run_inference(_sequential=True) + output_q = forge.run_inference(_sequential=True) logits = output_q.get()[0].value() logits = logits[:, :self.orig_len, :] @@ -216,22 +216,22 @@ def from_pretrained(cls, name, pipeline, use_cache, forward_fn=None, max_length= def pipeline(pipeline_type: str, *args, **kwargs): m = kwargs["model"] forward_fn = None - pybuda_max_length = None + forge_max_length = None if "forward_fn" in kwargs: forward_fn = kwargs.pop("forward_fn") - if "pybuda_max_length" in kwargs: - pybuda_max_length = kwargs.pop("pybuda_max_length") + if "forge_max_length" in kwargs: + forge_max_length = kwargs.pop("forge_max_length") use_cache = None if "use_cache" not in kwargs else kwargs["use_cache"] if isinstance(m, str): - model, tokenizer = NLPPipelineWrapper.from_pretrained(m, pipeline_type, use_cache=use_cache, forward_fn=forward_fn, max_length=pybuda_max_length) + model, tokenizer = NLPPipelineWrapper.from_pretrained(m, pipeline_type, use_cache=use_cache, forward_fn=forward_fn, max_length=forge_max_length) kwargs["model"] = model if "tokenizer" not in kwargs: kwargs["tokenizer"] = tokenizer elif isinstance(m, torch.nn.Module): - kwargs["model"].prepare_inputs_for_generation = NLPPipelineWrapper(m, kwargs["tokenizer"], m.__class__.__name__, use_cache=use_cache, forward_fn=forward_fn, max_length=pybuda_max_length).prepare_inputs_for_generation + kwargs["model"].prepare_inputs_for_generation = NLPPipelineWrapper(m, kwargs["tokenizer"], m.__class__.__name__, use_cache=use_cache, forward_fn=forward_fn, max_length=forge_max_length).prepare_inputs_for_generation else: raise RuntimeError("Unsupported model type") diff --git a/pybuda/pybuda/tvm.py b/forge/forge/tvm.py similarity index 92% rename from pybuda/pybuda/tvm.py rename to forge/forge/tvm.py index 725274b96..191741ce6 100644 --- a/pybuda/pybuda/tvm.py +++ b/forge/forge/tvm.py @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 -from pybuda._C.graph import ( +from forge._C.graph import ( Graph, create_op_node, create_data_edge, @@ -12,14 +12,14 @@ create_constant_input, remove_node, ) -from .pybudaglobal import TILE_DIM -from pybuda.module import 
PyBudaModule -import pybuda -from pybuda.tensor import pytorch_dtype_to_buda_dataformat -from pybuda._C import DataFormat -from pybuda._C.graph import OpType -from pybuda.op.resize import RESIZE2d_METHOD_TO_INT -from tvm.contrib.pybuda_compile import load_tvm_graph +from .forgeglobal import TILE_DIM +from forge.module import ForgeModule +import forge +from forge.tensor import pytorch_dtype_to_buda_dataformat +from forge._C import DataFormat +from forge._C.graph import OpType +from forge.op.resize import RESIZE2d_METHOD_TO_INT +from tvm.contrib.forge_compile import load_tvm_graph from collections import deque from loguru import logger @@ -232,7 +232,7 @@ def populate_conv2d_transpose_attrs(graph, nid, attrs): attrs.append(groups) padding = [int(padding) for padding in node["attrs"]["padding"][0]] - assert all([p == padding[0] for p in padding]), "Pybuda only supports same padding on all sides" + assert all([p == padding[0] for p in padding]), "Forge only supports same padding on all sides" # TVM has padding [top, left, bottom, right] # Convert to [left right top bottom] attrs.append(padding[1]) @@ -246,9 +246,9 @@ def populate_pad_attrs(graph, nid, attrs): shape = node["attrs"]["shape"][0][0] if len(shape) > 2: - # Pybuda Pad only supports padding on last 2 dims + # Forge Pad only supports padding on last 2 dims assert len(pad_width) == len(shape) * 2 - assert all([x == 0 for x in pad_width[0:-4]]), "Pybuda Pad only supports padding on last 2 dims" + assert all([x == 0 for x in pad_width[0:-4]]), "Forge Pad only supports padding on last 2 dims" pad_width = pad_width[-4:] # TVM nn.pad axis start from the last axis, need to swap @@ -327,7 +327,7 @@ def populate_broadcast_attrs(graph, nid, attrs): attrs.append(out_dim) attrs.append(True) input_shape[i] = out_dim - assert input_shape == output_shape, "Pybuda broadcast only support 1 dim" + assert input_shape == output_shape, "Forge broadcast only support 1 dim" def populate_resize2d_attrs(graph, nid, attrs): @@ -353,21 +353,21 @@ def populate_resize2d_attrs(graph, nid, attrs): def expand_compound_op(graph, nid, attrs, buda_graph, intermediate): - class SubGraph(PyBudaModule): + class SubGraph(ForgeModule): def __init__(self, fn_name, attributes): super().__init__("subgraph") self.fn_name = fn_name self.attributes = attributes def forward(self, *act): - import pybuda.op - import pybuda.op.nn + import forge.op + import forge.op.nn if self.fn_name == "softmax": - return pybuda.op.nn.Softmax("softmax", act[0], dim=self.attributes[0]) + return forge.op.nn.Softmax("softmax", act[0], dim=self.attributes[0]) if self.fn_name == "layernorm": - return pybuda.op.nn.Layernorm( + return forge.op.nn.Layernorm( "layernorm", act[0], act[1], @@ -387,7 +387,7 @@ def forward(self, *act): ] inputs = tuple( [ - pybuda.Tensor.create_from_torch(graph["nodes"][input_nid]["tensor"]) + forge.Tensor.create_from_torch(graph["nodes"][input_nid]["tensor"]) for input_nid in input_nids ] ) @@ -395,7 +395,7 @@ def forward(self, *act): output = subgraph.forward(*inputs) # TODO: Handle subgraph with multiple outputs - assert isinstance(output, pybuda.Tensor) + assert isinstance(output, forge.Tensor) pending_tensors = deque() visited_tensors = {} @@ -429,7 +429,7 @@ def set_as_subgraph_output(op, tensor): set_as_subgraph_output(visited_tensors[tensor], tensor) continue - if isinstance(tensor, pybuda.Parameter): + if isinstance(tensor, forge.Parameter): # parameter tensor if tensor.get_name() is not None: name = tensor.get_name() @@ -511,14 +511,14 @@ def set_as_subgraph_output(op, 
tensor): "add" : "add", "argmax" : "argmax", "broadcast_to" : "broadcast", - "pybuda.binary_stack" : "binary_stack", - "pybuda.buda_conv2d_with_bias" : "conv2d", - "pybuda.concatenate" : "concatenate", - "pybuda.hslice" : "hslice", - "pybuda.hstack" : "hstack", - "pybuda.matmul" : "matmul", - "pybuda.vslice" : "vslice", - "pybuda.vstack" : "vstack", + "forge.binary_stack" : "binary_stack", + "forge.buda_conv2d_with_bias" : "conv2d", + "forge.concatenate" : "concatenate", + "forge.hslice" : "hslice", + "forge.hstack" : "hstack", + "forge.matmul" : "matmul", + "forge.vslice" : "vslice", + "forge.vstack" : "vstack", "clip" : "clip", "cos" : "cos", "exp" : "exp", @@ -585,7 +585,7 @@ def set_as_subgraph_output(op, tensor): } -class ModuleWrapper(PyBudaModule): +class ModuleWrapper(ForgeModule): def __init__(self, torchmod, name): super().__init__(name=name) self.torchmod = torchmod @@ -621,12 +621,12 @@ def str_to_dataformat(t: str) -> DataFormat: raise RuntimeError("Unsupported format: " + t) -def compile_tvm_for_pybuda(buda_graph, torchmod, inputs, compiler_cfg, graph_name, verify_cfg=None): - from pybuda.op.eval.pybuda import get_f_pybuda_shape # avoid circular import - from pybuda.op.eval.pybuda import get_f_pybuda_eval # avoid circular import - from pybuda._C.graph import OpType +def compile_tvm_for_forge(buda_graph, torchmod, inputs, compiler_cfg, graph_name, verify_cfg=None): + from forge.op.eval.forge import get_f_forge_shape # avoid circular import + from forge.op.eval.forge import get_f_forge_eval # avoid circular import + from forge._C.graph import OpType - framework = pybuda.tvm_to_python.get_framework(module) + framework = forge.tvm_to_python.get_framework(module) module = torchmod.module json_graph, pytorch_inputs, weights = load_tvm_graph(inputs, module, compiler_cfg, graph_name, framework, verify_cfg=verify_cfg) @@ -715,7 +715,7 @@ def create_output_if_needed(nid, buda_graph, graph): f"Node: {inq} shape: {node['buda_shape']} name: {buda_graph.get_node_name(inq)} type: constant" ) else: - param = pybuda.Parameter( + param = forge.Parameter( tensor, requires_grad=tensor.requires_grad, name=node["name"], @@ -754,7 +754,7 @@ def create_output_if_needed(nid, buda_graph, graph): # so assume we do requires_grad = compiler_cfg.enable_tvm_constant_prop tensor.requires_grad = requires_grad - param = pybuda.Parameter( + param = forge.Parameter( tensor, requires_grad=requires_grad, name=node["name"], @@ -803,7 +803,7 @@ def create_output_if_needed(nid, buda_graph, graph): if node["name"] == "nn.pad" and int(node["attrs"]["num_inputs"]) == 2: pad_value_node = graph["nodes"][node["inputs"][1][0]] assert pad_value_node["tensor"].ndim == 0, "Pad value should be a single element" - assert pad_value_node["tensor"].numpy().item() == 0, "Pybuda only support padding with 0" + assert pad_value_node["tensor"].numpy().item() == 0, "Forge only support padding with 0" remove_node(buda_graph, pad_value_node["bid"]) # Remove from json node["attrs"]["num_inputs"] = '1' @@ -822,8 +822,8 @@ def create_output_if_needed(nid, buda_graph, graph): shapes.append(input_node["buda_shape"]) forward_inputs.append(input_node["tensor"]) - shape, operand_broadcast = get_f_pybuda_shape(OpType(op_type, attrs))(shapes) - node["tensor"] = get_f_pybuda_eval(OpType(op_type, attrs))(forward_inputs) + shape, operand_broadcast = get_f_forge_shape(OpType(op_type, attrs))(shapes) + node["tensor"] = get_f_forge_eval(OpType(op_type, attrs))(forward_inputs) if node["name"] != "nop": intermediate[op] = node["tensor"] @@ -850,13 +850,13 
@@ def create_output_if_needed(nid, buda_graph, graph): buda_inputs = [] for buda_input in input_nodes: buda_inputs.append( - pybuda.Tensor.create_from_torch(graph["nodes"][buda_input]["tensor"]) + forge.Tensor.create_from_torch(graph["nodes"][buda_input]["tensor"]) ) buda_outputs = [] for output in output_nodes: buda_outputs.append( - pybuda.Tensor.create_from_torch(graph["nodes"][output]["tensor"]) + forge.Tensor.create_from_torch(graph["nodes"][output]["tensor"]) ) return buda_graph, buda_module, buda_inputs, buda_outputs, intermediate diff --git a/pybuda/pybuda/tvm_to_python.py b/forge/forge/tvm_to_python.py similarity index 90% rename from pybuda/pybuda/tvm_to_python.py rename to forge/forge/tvm_to_python.py index 21f0c79dc..bef47fdfb 100644 --- a/pybuda/pybuda/tvm_to_python.py +++ b/forge/forge/tvm_to_python.py @@ -10,19 +10,19 @@ import torch import numpy as np -# import pybuda._C.pattern_matcher as pypattern_matcher -from pybuda.module import OnnxModule, PyBudaModule, TFLiteModule -from pybuda.config import _get_global_compiler_config -from pybuda.verify.config import _get_global_verify_config -import pybuda -from pybuda.tensor import to_pt_tensors -from pybuda.tvm_utils import flatten_inputs +# import forge._C.pattern_matcher as pypattern_matcher +from forge.module import OnnxModule, ForgeModule, TFLiteModule +from forge.config import _get_global_compiler_config +from forge.verify.config import _get_global_verify_config +import forge +from forge.tensor import to_pt_tensors +from forge.tvm_utils import flatten_inputs import os import sys import importlib -from pybuda.python_codegen import PyTorchWriter, PyBudaWriter, PythonWriter +from forge.python_codegen import PyTorchWriter, ForgeWriter, PythonWriter def populate_torch_all_to_args(graph, nid, compiler_cfg): @@ -435,8 +435,8 @@ def _populate_torch_init_args(graph, nid): "nn.softmax" : "softmax", "not_equal" : "not_equal", "power" : "power", - "pybuda_cpudevice.adv_index" : "adv_index", - "pybuda_cpudevice.concatenate" : "concatenate", + "forge_cpudevice.adv_index" : "adv_index", + "forge_cpudevice.concatenate" : "concatenate", "reciprocal" : "reciprocal", "reshape" : "reshape", "scatter_elements" : "scatter_add", @@ -455,7 +455,7 @@ def _populate_torch_init_args(graph, nid): # "take" : "take", "where" : "where", "layernorm" : "layernorm", - "pybuda_cpudevice.dropout" : "dropout", + "forge_cpudevice.dropout" : "dropout", } pytorch_op_to_function_name = { @@ -884,7 +884,7 @@ def populate_broadcast_args(graph, nid, compiler_cfg): dim = i shape = out_dim input_shape[i] = out_dim - assert input_shape == output_shape, "Pybuda broadcast only supports 1 dim" + assert input_shape == output_shape, "Forge broadcast only supports 1 dim" dim = dim - len(input_shape) args = [] @@ -895,7 +895,7 @@ def populate_broadcast_args(graph, nid, compiler_cfg): def populate_reduce_args(graph, nid, compiler_cfg): node = graph["nodes"][nid] dim = int(node["attrs"]["axis"][0][0]) - assert len(node['attrs']['axis'][0]) == 1, "PyBuda only supports reduce with a single axis" + assert len(node['attrs']['axis'][0]) == 1, "Forge only supports reduce with a single axis" input_nid = node["inputs"][0][0] input_shape = graph["nodes"][input_nid]["attrs"]["shape"][0][0] @@ -1061,12 +1061,12 @@ def populate_pad_args(graph, nid, compiler_cfg): channel_last = False mode = node['attrs']['pad_mode'][0][0] - assert mode in ["constant", "edge", "reflect"], "PyBuda pad only support constant/replicate/reflect padding for now" + assert mode in ["constant", "edge", "reflect"], 
"Forge pad only support constant/replicate/reflect padding for now" if len(shape) > 2: - # Pybuda Pad only supports padding on last 2 dims + # Forge Pad only supports padding on last 2 dims assert len(pad_width) == len(shape) * 2 - assert all([x == 0 for x in pad_width[0:-6]]), "Pybuda Pad does not support padding on W dim" - assert all([x == 0 for x in pad_width[-6:-4]]) or all([x == 0 for x in pad_width[-2:]]), "Pybuda only support Z dim padding for channel-last inputs" + assert all([x == 0 for x in pad_width[0:-6]]), "Forge Pad does not support padding on W dim" + assert all([x == 0 for x in pad_width[-6:-4]]) or all([x == 0 for x in pad_width[-2:]]), "Forge only support Z dim padding for channel-last inputs" if any([x != 0 for x in pad_width[-6:-4]]): pad_width = pad_width[-6:-2] channel_last = True @@ -1293,17 +1293,17 @@ def populate_requantize_args(graph, nid, compiler_cfg): "pixel_shuffle" : "pixel_shuffle", "power" : "power", "nn.prelu" : "prelu", - "pybuda.adv_index" : "adv_index", - "pybuda.binary_stack" : "binary_stack", - "pybuda.buda_conv2d_transpose_with_bias" : "conv2d_transpose", - "pybuda.buda_conv2d_with_bias" : "conv2d", - "pybuda.concatenate" : "concatenate", - "pybuda.dropout" : "dropout", - "pybuda.hslice" : "hslice", - "pybuda.hstack" : "hstack", - "pybuda.matmul" : "matmul", - "pybuda.vslice" : "vslice", - "pybuda.vstack" : "vstack", + "forge.adv_index" : "adv_index", + "forge.binary_stack" : "binary_stack", + "forge.buda_conv2d_transpose_with_bias" : "conv2d_transpose", + "forge.buda_conv2d_with_bias" : "conv2d", + "forge.concatenate" : "concatenate", + "forge.dropout" : "dropout", + "forge.hslice" : "hslice", + "forge.hstack" : "hstack", + "forge.matmul" : "matmul", + "forge.vslice" : "vslice", + "forge.vstack" : "vstack", "reciprocal" : "reciprocal", "reshape" : "reshape", "scatter" : "index_copy", @@ -1329,85 +1329,85 @@ def populate_requantize_args(graph, nid, compiler_cfg): } buda_op_to_function_name = { - "abs" : "pybuda.op.Abs", - "add" : "pybuda.op.Add", - "adv_index" : "pybuda.op.AdvIndex", - "argmax" : "pybuda.op.Argmax", - "avg_pool1d" : "pybuda.op.AvgPool1d", - "avg_pool2d" : "pybuda.op.AvgPool2d", - "binary_stack" : "pybuda.op.BinaryStack", - "broadcast" : "pybuda.op.Broadcast", - "cast" : "pybuda.op.Identity", # Datatype cast - "clip" : "pybuda.op.Clip", - "concatenate" : "pybuda.op.Concatenate", - "conv2d_transpose" : "pybuda.op.Conv2dTranspose", - "conv2d" : "pybuda.op.Conv2d", - "conv3d" : "pybuda.op.Conv3d", - "cos" : "pybuda.op.Cosine", - "cumsum" : "pybuda.op.CumSum", - "dropout" : "pybuda.op.Identity", # (Temporary): change when buda supports dropout - "embedding" : "pybuda.op.Embedding", - "equal" : "pybuda.op.Equal", - "exp" : "pybuda.op.Exp", - "gelu" : "pybuda.op.Gelu", - "greater_equal" : "pybuda.op.GreaterEqual", - "greater" : "pybuda.op.Greater", - "hslice" : "pybuda.op.HSlice", - "hstack" : "pybuda.op.HStack", - "identity" : "pybuda.op.Identity", - "index_copy" : "pybuda.op.IndexCopy", - "index" : "pybuda.op.Index", - "layernorm" : "pybuda.op.Layernorm", - "leaky_relu" : "pybuda.op.LeakyRelu", - "less_equal" : "pybuda.op.LessEqual", - "less" : "pybuda.op.Less", - "log_softmax" : "pybuda.op.LogSoftmax", - "log" : "pybuda.op.Log", - "logical_and" : "pybuda.op.LogicalAnd", - "logical_not" : "pybuda.op.LogicalNot", - "matmul" : "pybuda.op.Matmul", - "max_pool1d" : "pybuda.op.MaxPool1d", - "max_pool2d" : "pybuda.op.MaxPool2d", - "max_pool3d" : "pybuda.op.MaxPool3d", - "maximum" : "pybuda.op.Max", - "mean" : "pybuda.op.ReduceAvg", - 
"minimum" : "pybuda.op.Min", - "multiply" : "pybuda.op.Multiply", - "not_equal" : "pybuda.op.NotEqual", - "pad" : "pybuda.op.Pad", - "pixel_shuffle" : "pybuda.op.PixelShuffle", - "power" : "pybuda.op.Power", - "prelu" : "pybuda.op.Prelu", - "reciprocal" : "pybuda.op.Reciprocal", - "reduce_avg" : "pybuda.op.ReduceAvg", - "reduce_max" : "pybuda.op.ReduceMax", - "reduce_sum" : "pybuda.op.ReduceSum", - "relu" : "pybuda.op.Relu", - "repeat" : "pybuda.op.Repeat", - "reshape" : "pybuda.op.Reshape", - "resize2d" : "pybuda.op.Resize2d", - "resize3d" : "pybuda.op.Resize3d", - "select" : "pybuda.op.Select", - "sigmoid" : "pybuda.op.Sigmoid", - "sin" : "pybuda.op.Sine", - "softmax" : "pybuda.op.Softmax", - "sqrt" : "pybuda.op.Sqrt", - "stack" : "pybuda.op.Stack", - "subtract" : "pybuda.op.Subtract", - "take" : "pybuda.op.AdvIndex", - "tanh" : "pybuda.op.Tanh", - "transpose" : "pybuda.op.Transpose", + "abs" : "forge.op.Abs", + "add" : "forge.op.Add", + "adv_index" : "forge.op.AdvIndex", + "argmax" : "forge.op.Argmax", + "avg_pool1d" : "forge.op.AvgPool1d", + "avg_pool2d" : "forge.op.AvgPool2d", + "binary_stack" : "forge.op.BinaryStack", + "broadcast" : "forge.op.Broadcast", + "cast" : "forge.op.Identity", # Datatype cast + "clip" : "forge.op.Clip", + "concatenate" : "forge.op.Concatenate", + "conv2d_transpose" : "forge.op.Conv2dTranspose", + "conv2d" : "forge.op.Conv2d", + "conv3d" : "forge.op.Conv3d", + "cos" : "forge.op.Cosine", + "cumsum" : "forge.op.CumSum", + "dropout" : "forge.op.Identity", # (Temporary): change when buda supports dropout + "embedding" : "forge.op.Embedding", + "equal" : "forge.op.Equal", + "exp" : "forge.op.Exp", + "gelu" : "forge.op.Gelu", + "greater_equal" : "forge.op.GreaterEqual", + "greater" : "forge.op.Greater", + "hslice" : "forge.op.HSlice", + "hstack" : "forge.op.HStack", + "identity" : "forge.op.Identity", + "index_copy" : "forge.op.IndexCopy", + "index" : "forge.op.Index", + "layernorm" : "forge.op.Layernorm", + "leaky_relu" : "forge.op.LeakyRelu", + "less_equal" : "forge.op.LessEqual", + "less" : "forge.op.Less", + "log_softmax" : "forge.op.LogSoftmax", + "log" : "forge.op.Log", + "logical_and" : "forge.op.LogicalAnd", + "logical_not" : "forge.op.LogicalNot", + "matmul" : "forge.op.Matmul", + "max_pool1d" : "forge.op.MaxPool1d", + "max_pool2d" : "forge.op.MaxPool2d", + "max_pool3d" : "forge.op.MaxPool3d", + "maximum" : "forge.op.Max", + "mean" : "forge.op.ReduceAvg", + "minimum" : "forge.op.Min", + "multiply" : "forge.op.Multiply", + "not_equal" : "forge.op.NotEqual", + "pad" : "forge.op.Pad", + "pixel_shuffle" : "forge.op.PixelShuffle", + "power" : "forge.op.Power", + "prelu" : "forge.op.Prelu", + "reciprocal" : "forge.op.Reciprocal", + "reduce_avg" : "forge.op.ReduceAvg", + "reduce_max" : "forge.op.ReduceMax", + "reduce_sum" : "forge.op.ReduceSum", + "relu" : "forge.op.Relu", + "repeat" : "forge.op.Repeat", + "reshape" : "forge.op.Reshape", + "resize2d" : "forge.op.Resize2d", + "resize3d" : "forge.op.Resize3d", + "select" : "forge.op.Select", + "sigmoid" : "forge.op.Sigmoid", + "sin" : "forge.op.Sine", + "softmax" : "forge.op.Softmax", + "sqrt" : "forge.op.Sqrt", + "stack" : "forge.op.Stack", + "subtract" : "forge.op.Subtract", + "take" : "forge.op.AdvIndex", + "tanh" : "forge.op.Tanh", + "transpose" : "forge.op.Transpose", "unsupported" : "Unsupported", - "vslice" : "pybuda.op.VSlice", - "vstack" : "pybuda.op.VStack", - "where" : "pybuda.op.Where", + "vslice" : "forge.op.VSlice", + "vstack" : "forge.op.VStack", + "where" : "forge.op.Where", # Quantization ops - 
"quantize" : "pybuda.op.Quantize", - "dequantize" : "pybuda.op.Dequantize", - "requantize" : "pybuda.op.Requantize", + "quantize" : "forge.op.Quantize", + "dequantize" : "forge.op.Dequantize", + "requantize" : "forge.op.Requantize", } -pybuda_ops_needing_arguments = { +forge_ops_needing_arguments = { "argmax" : populate_argmax_args, "avg_pool1d" : populate_avgpool1d_args, "avg_pool2d" : populate_avgpool2d_args, @@ -1456,7 +1456,7 @@ def populate_requantize_args(graph, nid, compiler_cfg): "requantize" : populate_requantize_args, } -class ModuleWrapper(PyBudaModule): +class ModuleWrapper(ForgeModule): def __init__(self, torchmod, name): super().__init__(name=name) self.torchmod = torchmod @@ -1480,19 +1480,19 @@ def __init__(self, function_name, output_name, node_name="", input_names=[], arg def get_framework(module): - if isinstance(module, pybuda.module.PyTorchModule): + if isinstance(module, forge.module.PyTorchModule): framework = "pytorch" - elif isinstance(module, pybuda.module.TFModule): # or isinstance(module, tf.keras.layers.Layer): + elif isinstance(module, forge.module.TFModule): # or isinstance(module, tf.keras.layers.Layer): framework = "tensorflow" - elif isinstance(module, pybuda.module.TFGraphDefModule): + elif isinstance(module, forge.module.TFGraphDefModule): framework = "tf_graphdef" - elif isinstance(module, pybuda.module.OnnxModule): + elif isinstance(module, forge.module.OnnxModule): framework = "onnx" - elif isinstance(module, pybuda.module.MXNetModule): + elif isinstance(module, forge.module.MXNetModule): framework = "mxnet" - elif isinstance(module, pybuda.module.JaxModule): + elif isinstance(module, forge.module.JaxModule): framework = "jax" - elif isinstance(module, pybuda.module.TFLiteModule): + elif isinstance(module, forge.module.TFLiteModule): framework = 'tflite' else: assert False, f"Unsupported framework: {type(module)}" @@ -1511,7 +1511,7 @@ def cleanup_temporary_files(): generated_files = [] def get_buda_outputs(buda_mods, devices, buda_inputs): - from pybuda.tensor import to_buda_tensors, to_pt_tensors + from forge.tensor import to_buda_tensors, to_pt_tensors for i, (mod, dev) in enumerate(zip(buda_mods, devices)): if dev == "CPUDevice": @@ -1525,13 +1525,13 @@ def get_buda_outputs(buda_mods, devices, buda_inputs): return to_buda_tensors(buda_inputs) def verify_framework_vs_buda_codegen(frame_outputs, buda_outputs, verify_cfg): - from pybuda.op.eval import compare_tensor_to_golden + from forge.op.eval import compare_tensor_to_golden test_pass = True for i, (golden, output) in enumerate(zip(frame_outputs, buda_outputs)): - test_pass &= compare_tensor_to_golden(f"Framework vs. Pybuda codegen output {i}", golden, output.value(), + test_pass &= compare_tensor_to_golden(f"Framework vs. 
Forge codegen output {i}", golden, output.value(), is_buda=False, verify_cfg=verify_cfg) - assert test_pass, f"Data mismatch on output {i} between framework and Pybuda codegen" + assert test_pass, f"Data mismatch on output {i} between framework and Forge codegen" logger.info("Verified python codegen agains framework") def save_writers_metadata(modules, inputs, sorted_inputs, module_name): @@ -1566,7 +1566,7 @@ def metadata_path(module_name): def load_writers_metadata(module_name, inputs): filepath = metadata_path(module_name) - assert os.path.exists(filepath), f"{filepath} not found, has the test been run with PYBUDA_RELOAD_GENERATED_MODULES disabled and compiler_cfg.retain_tvm_python_files enabled" + assert os.path.exists(filepath), f"{filepath} not found, has the test been run with FORGE_RELOAD_GENERATED_MODULES disabled and compiler_cfg.retain_tvm_python_files enabled" with open(filepath, "r") as metadata_file: metadata = json.load(metadata_file) @@ -1589,7 +1589,7 @@ def load_writers_metadata(module_name, inputs): return module_writers, ordered_inptus -def generate_pybuda_module(framework_mod, inputs, compiler_cfg=None, graph_name=None, verify_cfg=None, clean_later=False, input_names=[]): +def generate_forge_module(framework_mod, inputs, compiler_cfg=None, graph_name=None, verify_cfg=None, clean_later=False, input_names=[]): global counter if compiler_cfg is None: @@ -1603,13 +1603,13 @@ def generate_pybuda_module(framework_mod, inputs, compiler_cfg=None, graph_name= if graph_name is None: graph_name = framework_mod.name - reload = bool(int(os.environ.get("PYBUDA_RELOAD_GENERATED_MODULES", "0"))) + reload = bool(int(os.environ.get("FORGE_RELOAD_GENERATED_MODULES", "0"))) if reload and not compiler_cfg.retain_tvm_python_files: compiler_cfg.retain_tvm_python_files = True if not os.path.exists(metadata_path(graph_name)): reload = False - if verify_cfg is not None and verify_cfg.verify_pybuda_codegen_vs_framework: + if verify_cfg is not None and verify_cfg.verify_forge_codegen_vs_framework: framework_outputs = framework_mod.cpu_eval_forward(*pytorch_inputs) if not reload: @@ -1639,7 +1639,7 @@ def generate_pybuda_module(framework_mod, inputs, compiler_cfg=None, graph_name= devices.append(writer.dev) if writer.dev == "CPUDevice": - buda_mod = pybuda.PyTorchModule(writer.module_name, TestClass()) + buda_mod = forge.PyTorchModule(writer.module_name, TestClass()) buda_mod.module.process_framework_parameters(framework_mod.module) else: buda_mod = TestClass(writer.module_name) @@ -1659,11 +1659,11 @@ def generate_pybuda_module(framework_mod, inputs, compiler_cfg=None, graph_name= cleanup_temporary_files() if devices[0] == "CPUDevice": - buda_inputs = pybuda.tensor.to_pt_tensors(flattened_inputs) + buda_inputs = forge.tensor.to_pt_tensors(flattened_inputs) else: - buda_inputs = pybuda.tensor.to_buda_tensors(flattened_inputs) + buda_inputs = forge.tensor.to_buda_tensors(flattened_inputs) - if verify_cfg is not None and verify_cfg.verify_pybuda_codegen_vs_framework: + if verify_cfg is not None and verify_cfg.verify_forge_codegen_vs_framework: buda_outputs = get_buda_outputs(buda_mods, devices, buda_inputs) verify_framework_vs_buda_codegen(framework_outputs, buda_outputs, verify_cfg=verify_cfg) @@ -1692,7 +1692,7 @@ def compile_tvm_to_python(framework_mod, graph_name, inputs, module_name=None, c path = framework_mod.tflite_path # Load here to avoid importing tvm unnecessarily when this file is loaded - from tvm.contrib.pybuda_compile import load_tvm_graph + from tvm.contrib.forge_compile import 
load_tvm_graph json_graphs, flattened_pytorch_inputs, weights = load_tvm_graph(inputs, framework_mod.module, compiler_cfg, graph_name, framework, path=path, verify_cfg=verify_cfg, input_names=input_names) def _determine_node_dtype(node): @@ -1735,7 +1735,7 @@ def is_nop_reshape(nid): ops = {} returns = {} returns_requiring_batch_dim_fix = [] - pybuda_inputs = [None] * len(flattened_pytorch_inputs) + forge_inputs = [None] * len(flattened_pytorch_inputs) params_from_tvm = {} def make_parser_friendly_name(node, node_type): @@ -1783,7 +1783,7 @@ def make_parser_friendly_name(node, node_type): inp_idx = nid if "nid_to_input_idx" in json_graph.keys() and len(json_graph["nid_to_input_idx"]) != 0: inp_idx = json_graph["nid_to_input_idx"][nid] - pybuda_inputs[inp_idx] = flattened_pytorch_inputs[inp_idx] + forge_inputs[inp_idx] = flattened_pytorch_inputs[inp_idx] graph_input_names[inp_idx] = node["buda_name"] node["op"] = "*" @@ -1863,7 +1863,7 @@ def make_parser_friendly_name(node, node_type): node["buda_name"] = op_type + f"_{nid}" args = () - argument_getter = pybuda_ops_needing_arguments if json_graph["device"] == "tt" else pytorch_ops_needing_arguments + argument_getter = forge_ops_needing_arguments if json_graph["device"] == "tt" else pytorch_ops_needing_arguments if op_type in argument_getter: if op_type == "dropout" and json_graph["device"] != "tt": if is_training: @@ -1941,7 +1941,7 @@ def make_parser_friendly_name(node, node_type): input_node["users"].append(nid) input_names.append(input_node["buda_name"]) # Handle concatenate case when a single node name in referenced twice in the input list - if node["name"] == "pybuda.concatenate" and len(input_names) == 1: + if node["name"] == "forge.concatenate" and len(input_names) == 1: inp_shape = graph["nodes"][node["inputs"][input_port][0]]["attrs"]["shape"][0][0] out_shape = node["attrs"]["shape"][0][0] @@ -1957,8 +1957,8 @@ def make_parser_friendly_name(node, node_type): src_layer=span_to_src_layer(node), ) - if any([input is None for input in pybuda_inputs]): - pybuda_inputs = flattened_pytorch_inputs + if any([input is None for input in forge_inputs]): + forge_inputs = flattened_pytorch_inputs for output_nid in output_nodes: output_node = graph["nodes"][output_nid] @@ -2096,11 +2096,11 @@ def replace_node_name(orig, new): # replace_node_name(name_to_replace, f"layer_{idx}") # Some float types (e.g. 
tf.bfloat16) are not compatible with numpy - # We must signal to the PyBudaWriter if the model contains these types so it can implement the workaround + # We must signal to the ForgeWriter if the model contains these types so it can implement the workaround contains_incompatible_np_floats = False if framework == "tensorflow": for weight in framework_mod.module.weights: - if weight.dtype in PyBudaWriter.incompatible_np_float_types: + if weight.dtype in ForgeWriter.incompatible_np_float_types: contains_incompatible_np_floats = True current_module_name = module_name @@ -2113,8 +2113,8 @@ def replace_node_name(orig, new): if json_graph["device"] == "tt": delete_inputs = not ((verify_cfg is not None and verify_cfg.verify_all) or compiler_cfg.enable_op_level_comparision) if not delete_inputs: - logger.warning("Preserving Intermediate tensor values in PyBudaModule forward may causes out-of-memory issues") - writer = PyBudaWriter(current_module_name, framework, contains_incompatible_np_floats=contains_incompatible_np_floats, delete_inputs=delete_inputs) + logger.warning("Preserving Intermediate tensor values in ForgeModule forward may causes out-of-memory issues") + writer = ForgeWriter(current_module_name, framework, contains_incompatible_np_floats=contains_incompatible_np_floats, delete_inputs=delete_inputs) else: writer = PyTorchWriter(current_module_name, source_framework=framework) @@ -2161,7 +2161,7 @@ def replace_node_name(orig, new): # generated graph, so we should add dummy variables to cunsume them. This is only needed for # the first module. if graph_index == 0: - for input_index, _ in enumerate(pybuda_inputs): + for input_index, _ in enumerate(forge_inputs): if input_index not in graph_input_names: graph_input_names[input_index] = f"unused_input_{input_index}" @@ -2208,6 +2208,6 @@ def delete_unneeded_outputs(ops, returns): modules.append(writer) if compiler_cfg.retain_tvm_python_files: - save_writers_metadata(modules, flattened_pytorch_inputs, pybuda_inputs, graph_name) + save_writers_metadata(modules, flattened_pytorch_inputs, forge_inputs, graph_name) - return modules, pybuda_inputs + return modules, forge_inputs diff --git a/pybuda/pybuda/tvm_utils.py b/forge/forge/tvm_utils.py similarity index 98% rename from pybuda/pybuda/tvm_utils.py rename to forge/forge/tvm_utils.py index 4121fd0e5..8318fe3ac 100644 --- a/pybuda/pybuda/tvm_utils.py +++ b/forge/forge/tvm_utils.py @@ -46,7 +46,7 @@ def map_pt_dtype_to_tf(pt_dtype): return list(tf_to_pt_type_map.keys())[pt_types.index(pt_dtype)] def flatten_inputs(inputs, names=None, force_float32=False): - from pybuda.tensor import Tensor + from forge.tensor import Tensor new_inputs = [] new_names = [] @@ -98,7 +98,7 @@ def flatten_inputs(inputs, names=None, force_float32=False): return new_inputs, new_names, flattened_name_map def flatten_structured_output(outputs): - from pybuda.tensor import Tensor + from forge.tensor import Tensor new_outputs = [] diff --git a/pybuda/pybuda/typing.py b/forge/forge/typing.py similarity index 79% rename from pybuda/pybuda/typing.py rename to forge/forge/typing.py index a2c1d832c..ba3d8ab45 100644 --- a/pybuda/pybuda/typing.py +++ b/forge/forge/typing.py @@ -5,11 +5,11 @@ import torch import tensorflow as tf -from .module import PyBudaModule +from .module import ForgeModule from .tensor import Tensor FrameworkModule = torch.nn.Module | tf.keras.Model FrameworkTensor = torch.Tensor | tf.Tensor -AnyModule = FrameworkModule | PyBudaModule +AnyModule = FrameworkModule | ForgeModule AnyTensor = FrameworkTensor | 
Tensor diff --git a/pybuda/pybuda/utils.py b/forge/forge/utils.py similarity index 94% rename from pybuda/pybuda/utils.py rename to forge/forge/utils.py index 84fb40e5a..1e9b1dc9a 100644 --- a/pybuda/pybuda/utils.py +++ b/forge/forge/utils.py @@ -19,7 +19,7 @@ import dataclasses_json from loguru import logger -from .pybudaglobal import TILE_DIM +from .forgeglobal import TILE_DIM TILE_WIDTH = TILE_DIM TILE_HEIGHT = TILE_DIM @@ -137,7 +137,7 @@ def from_json(d): def get_padded_tensors(parameters): - """ Pybuda expects activation/parameter tensors to be 4-dimensions R/C-dim being 32-aligned""" + """ Forge expects activation/parameter tensors to be 4-dimensions R/C-dim being 32-aligned""" updated_tensors = {} for parameter_name, parameter_tensor in parameters.items(): @@ -159,16 +159,16 @@ def get_padded_tensors(parameters): return updated_tensors -def get_pybuda_parameters_from_state_dict(state_dict: Dict[str, torch.Tensor]): - from pybuda.parameter import Parameter - pybuda_parameters = {} +def get_forge_parameters_from_state_dict(state_dict: Dict[str, torch.Tensor]): + from forge.parameter import Parameter + forge_parameters = {} torch_parameters = get_padded_tensors(state_dict) for parameter_name, parameter_tensor in torch_parameters.items(): - pybuda_parameters[parameter_name] = Parameter( + forge_parameters[parameter_name] = Parameter( parameter_tensor, requires_grad=parameter_tensor.requires_grad, ) - return pybuda_parameters + return forge_parameters def detach_tensors(tensors: List[torch.Tensor], fix_non_contiguos: bool = False) -> List[torch.Tensor]: """ @@ -184,7 +184,7 @@ def detach_tensors(tensors: List[torch.Tensor], fix_non_contiguos: bool = False) return detached_tensors -def get_pybuda_git_hash() -> Optional[str]: +def get_forge_git_hash() -> Optional[str]: try: git_hash = ( subprocess.check_output(["git", "rev-parse", "--short", "HEAD"], stderr=subprocess.STDOUT) @@ -217,7 +217,7 @@ def budabackend_path() -> str: return os.environ["BUDA_HOME"] if os.path.exists(os.getcwd() + '/third_party/budabackend'): - # must be in pybuda root + # must be in forge root return "third_party/budabackend/" else: return "" @@ -238,10 +238,10 @@ def get_buda_compile_and_runtime_configs() -> Dict[str, str]: Eventually we want to separate out compile-time and runtime environment variables but we don't currently have a good way to do that yet. - The current filter/capture is just a filter for 'PYBUDA_*' and 'TT_BACKEND_*' + The current filter/capture is just a filter for 'FORGE_*' and 'TT_BACKEND_*' """ compile_and_runtime_env_vars = { - config: value for config, value in os.environ.items() if config.startswith(('PYBUDA_', 'TT_BACKEND_')) + config: value for config, value in os.environ.items() if config.startswith(('FORGE_', 'TT_BACKEND_')) } return compile_and_runtime_env_vars @@ -256,7 +256,7 @@ def get_tmp_dir() -> str: def get_output_build_dir() -> str: - user_defined_path = os.environ.get('PYBUDA_BUILD_DIR', None) + user_defined_path = os.environ.get('FORGE_BUILD_DIR', None) output_build_directory = user_defined_path or get_tmp_dir() return output_build_directory @@ -309,7 +309,7 @@ def resolve_output_build_directory(*, directory_prefix: str = None) -> str: """ Return the path to the temp directory where the test output build artifacts will be dumped. Order of path resolution: - 1. Use user-defined path if set: PYBUDA_BUILD_DIR + 1. Use user-defined path if set: FORGE_BUILD_DIR 2. Default to TMPDIR environment variable 3. 
If TMPDIR environment variable is unset, default to /tmp/ """ diff --git a/pybuda/pybuda/verify/__init__.py b/forge/forge/verify/__init__.py similarity index 100% rename from pybuda/pybuda/verify/__init__.py rename to forge/forge/verify/__init__.py diff --git a/pybuda/pybuda/verify/config.py b/forge/forge/verify/config.py similarity index 85% rename from pybuda/pybuda/verify/config.py rename to forge/forge/verify/config.py index a38db5bf0..32d26a0d7 100644 --- a/pybuda/pybuda/verify/config.py +++ b/forge/forge/verify/config.py @@ -8,9 +8,9 @@ import torch -from pybuda._C import DataFormat +from forge._C import DataFormat from dataclasses_json import dataclass_json -from pybuda.utils import as_json +from forge.utils import as_json class TestKind(Enum): @@ -58,7 +58,7 @@ class VerifyConfig: relative_atol: float = 0.1 # set atol at 10% of the max value in tensor pcc: Optional[float] = None # use Pearson Coefficient Check instead of allclose dump_tensors_path: str = "" # dump nodes at final graph evaluation in a format that can be read in Backend - run_golden: bool = "PYBUDA_VERIFY_GOLDEN" in os.environ and os.environ["PYBUDA_VERIFY_GOLDEN"] == '1' # run on back-end golden - Legacy, to be replaced by the path below + run_golden: bool = "FORGE_VERIFY_GOLDEN" in os.environ and os.environ["FORGE_VERIFY_GOLDEN"] == '1' # run on back-end golden - Legacy, to be replaced by the path below run_net2pipe: bool = False # run netlist through net2pipe golden_ignore_df_precision: bool = True # When running golden, run at full FP32 and ignore actual netlist types chip_ids: Union[List[int], List[Tuple[int]]] = None # chip IDs to run on @@ -68,12 +68,12 @@ class VerifyConfig: verify_tvm_compile: bool = False # Should tvm run forward and verify the results verify_pipeline_result_vs_framework: bool = False # Compare Framework output on CPU vs module pipline outputs - verify_pybuda_codegen_vs_framework: bool = False # Compare Framework output on CPU vs pybuda codegen from TVM json graphs + verify_forge_codegen_vs_framework: bool = False # Compare Framework output on CPU vs forge codegen from TVM json graphs - verify_all: bool = "PYBUDA_FORCE_VERIFY_ALL" in os.environ and os.environ["PYBUDA_FORCE_VERIFY_ALL"] == '1' # Whether or not to verify after every compile stage + verify_all: bool = "FORGE_FORCE_VERIFY_ALL" in os.environ and os.environ["FORGE_FORCE_VERIFY_ALL"] == '1' # Whether or not to verify after every compile stage verify_last: bool = True # Whether or not to verify after the final stage (overriden by disabled()) - verify_post_autograd_passes: bool = "PYBUDA_VERIFY_POST_AUTOGRAD_PASSES" in os.environ and os.environ["PYBUDA_VERIFY_POST_AUTOGRAD_PASSES"] == '1'# Whether or not to force verification at post autograd passes (overridden by disabled()) - verify_post_placer: bool = "PYBUDA_VERIFY_POST_PLACER" in os.environ and os.environ["PYBUDA_VERIFY_POST_PLACER"] == '1' # Whether or not to force verification at post placer (overidden by disabled()) + verify_post_autograd_passes: bool = "FORGE_VERIFY_POST_AUTOGRAD_PASSES" in os.environ and os.environ["FORGE_VERIFY_POST_AUTOGRAD_PASSES"] == '1'# Whether or not to force verification at post autograd passes (overridden by disabled()) + verify_post_placer: bool = "FORGE_VERIFY_POST_PLACER" in os.environ and os.environ["FORGE_VERIFY_POST_PLACER"] == '1' # Whether or not to force verification at post placer (overidden by disabled()) # names of parameters for which gradient error will not fail the test. 
Some gradients are so small that # atol/rtol/pcc will never be good enough to pass @@ -104,7 +104,7 @@ class VerifyConfig: _input_gradient_queue: Optional[torch.multiprocessing.Queue] = None _parameter_gradient_queue: Optional[torch.multiprocessing.Queue] = None - if "PYBUDA_VERIFY_RESULTS_OFF_BY_DEFAULT" in os.environ and not ("PYBUDA_FORCE_VERIFY_ALL" in os.environ and os.environ["PYBUDA_FORCE_VERIFY_ALL"] == '1'): + if "FORGE_VERIFY_RESULTS_OFF_BY_DEFAULT" in os.environ and not ("FORGE_FORCE_VERIFY_ALL" in os.environ and os.environ["FORGE_FORCE_VERIFY_ALL"] == '1'): intermediates = False run_golden = False verify_all = False @@ -113,11 +113,11 @@ class VerifyConfig: verify_post_autograd_passes = False verify_tvm_compile = False verify_pipeline_result_vs_framework = False - verify_pybuda_codegen_vs_framework = False + verify_forge_codegen_vs_framework = False if verify_all: verify_pipeline_result_vs_framework = True - verify_pybuda_codegen_vs_framework = True + verify_forge_codegen_vs_framework = True verify_tvm_compile = True verify_each_buda_pass = True diff --git a/pybuda/pybuda/verify/cpueval.py b/forge/forge/verify/cpueval.py similarity index 92% rename from pybuda/pybuda/verify/cpueval.py rename to forge/forge/verify/cpueval.py index 25c20412d..6884815b3 100644 --- a/pybuda/pybuda/verify/cpueval.py +++ b/forge/forge/verify/cpueval.py @@ -12,10 +12,10 @@ import tensorflow as tf from ..tensor import to_pt_tensors -from ..pybudaglobal import get_devices +from ..forgeglobal import get_devices from ..utils import detach_tensors -import pybuda -from pybuda.tvm_utils import map_tf_dtype_to_pt, map_pt_dtype_to_tf +import forge +from forge.tvm_utils import map_tf_dtype_to_pt, map_pt_dtype_to_tf def cpueval_inference( inputs: List[Tuple[torch.Tensor, ...]], @@ -23,7 +23,7 @@ def cpueval_inference( sequential: bool) -> List[Tuple[torch.Tensor, ...]]: """ Use CPU/Pytorch to run inference of the full pipeline of devices, equivalen to what run_inference would do. - This uses the initial graph for pybuda models, or pytorch for pytorch models. + This uses the initial graph for forge models, or pytorch for pytorch models. 
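For readers tracking the environment-variable rename, the VerifyConfig flags above now use the FORGE_ prefix in place of PYBUDA_. A minimal sketch of how those flags are read, using only the variable names shown in the diff; the _env_flag helper itself is illustrative and not part of the codebase:

    import os

    def _env_flag(name: str) -> bool:
        # A flag is "on" only when the variable is present and set to the literal '1',
        # matching the pattern VerifyConfig uses.
        return name in os.environ and os.environ[name] == "1"

    run_golden = _env_flag("FORGE_VERIFY_GOLDEN")
    verify_all = _env_flag("FORGE_FORCE_VERIFY_ALL")
    verify_post_autograd_passes = _env_flag("FORGE_VERIFY_POST_AUTOGRAD_PASSES")
    verify_post_placer = _env_flag("FORGE_VERIFY_POST_PLACER")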
Parameters ---------- @@ -41,8 +41,8 @@ def cpueval_inference( devices = get_devices() assert len(devices) == len(parameters), "Mismatched number of devices and parameters" - from pybuda.run.impl import _run_command - from pybuda.run.commands import Command + from forge.run.impl import _run_command + from forge.run.commands import Command ret = [] for input in inputs: @@ -106,8 +106,8 @@ def cpueval_training( """ devices = get_devices() - from pybuda.run.impl import _run_command - from pybuda.run.commands import Command + from forge.run.impl import _run_command + from forge.run.commands import Command ret = TrainingEvalData() ret.devices = [] @@ -140,7 +140,7 @@ def cpueval_training( ret.devices[i].grad.append(TrainingEvalData.GradData(inputs=bw_input_grads, parameters=bw_parameter_grads)) for i, d in enumerate(devices): - if isinstance(d, pybuda.cpudevice.CPUDevice) and d.framework == "tensorflow": + if isinstance(d, forge.cpudevice.CPUDevice) and d.framework == "tensorflow": assert all([x.name in parameters[i] for x in d.modules[0].module.trainable_variables]) cpu_grads = [tf.Variable(grad.detach().numpy(), dtype=map_pt_dtype_to_tf(grad.dtype)) for grad in ret.devices[i].grad[0].parameters.values()] diff --git a/pybuda/pybuda/verify/utils.py b/forge/forge/verify/utils.py similarity index 100% rename from pybuda/pybuda/verify/utils.py rename to forge/forge/verify/utils.py diff --git a/pybuda/pybuda/verify/verify.py b/forge/forge/verify/verify.py similarity index 96% rename from pybuda/pybuda/verify/verify.py rename to forge/forge/verify/verify.py index 4121cf653..2c248d82a 100644 --- a/pybuda/pybuda/verify/verify.py +++ b/forge/forge/verify/verify.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 """ -Verify by evaluating the pybuda graph +Verify by evaluating the forge graph """ import os from typing import Tuple, Dict, List, Any from loguru import logger -from pybuda.pybudaglobal import align_up_tile +from forge.forgeglobal import align_up_tile import torch from ..tensor import Tensor, TensorShape, pad_pytorch_tensor_to_buda, narrow_buda_tensor_to_pytorch from .config import VerifyConfig, should_waive_gradient from ..config import PerfTraceLevel -import pybuda._C.graph as pygraph -from pybuda.tools.run_net2pipe import net2pipe +import forge._C.graph as pygraph +from forge.tools.run_net2pipe import net2pipe def _generate_random_losses(outputs, is_buda): losses = [] @@ -77,7 +77,7 @@ def do_verify( """ Verify graph vs. 
pytorch golden """ - from pybuda.op.eval import compare_tensor_to_golden # avoid circular import + from forge.op.eval import compare_tensor_to_golden # avoid circular import torch_inputs: List[torch.Tensor] = [i.value() for i in inputs] torch_targets: List[torch.Tensor] = [i.value() for i in targets] @@ -197,7 +197,7 @@ def do_verify( if not ok: logger.error(msg) - continue_on_mismatch = bool(int(os.environ.get("PYBUDA_CONTINUE_ON_MISMATCH", "0"))) + continue_on_mismatch = bool(int(os.environ.get("FORGE_CONTINUE_ON_MISMATCH", "0"))) if not continue_on_mismatch: assert ok, msg return losses diff --git a/pybuda/setup.py b/forge/setup.py similarity index 72% rename from pybuda/setup.py rename to forge/setup.py index 414a660a2..fd8fcdd85 100644 --- a/pybuda/setup.py +++ b/forge/setup.py @@ -5,9 +5,9 @@ setup( - name="pybuda", + name="forge", version="0.1", description="Tenstorrent Python Buda framework", - packages=["pybuda"], - package_dir={"pybuda": "pybuda"}, + packages=["forge"], + package_dir={"forge": "forge"}, ) diff --git a/pybuda/test/README.debug.md b/forge/test/README.debug.md similarity index 91% rename from pybuda/test/README.debug.md rename to forge/test/README.debug.md index 0e13ca546..281341fa4 100644 --- a/pybuda/test/README.debug.md +++ b/forge/test/README.debug.md @@ -1,5 +1,5 @@ -*Test specific environment variables that can be used to fine tune default behavior of PyBuda tests.* +*Test specific environment variables that can be used to fine tune default behavior of Forge tests.* ## Parameters * RANDOM\_TEST\_COUNT: Number of random tests to be generated and executed. The parameter generate test_index in range from 0 to RANDOM\_TEST\_COUNT-1. (default: 5) diff --git a/pybuda/test/__init__.py b/forge/test/__init__.py similarity index 100% rename from pybuda/test/__init__.py rename to forge/test/__init__.py diff --git a/pybuda/test/backend/__init__.py b/forge/test/backend/__init__.py similarity index 100% rename from pybuda/test/backend/__init__.py rename to forge/test/backend/__init__.py diff --git a/pybuda/test/backend/benchmark/test_simple.py b/forge/test/backend/benchmark/test_simple.py similarity index 70% rename from pybuda/test/backend/benchmark/test_simple.py rename to forge/test/backend/benchmark/test_simple.py index b9be6183a..372f63e01 100644 --- a/pybuda/test/backend/benchmark/test_simple.py +++ b/forge/test/backend/benchmark/test_simple.py @@ -5,23 +5,23 @@ # Simple model end-to-end benchmarks # import pytest -import pybuda +import forge import torch from loguru import logger import time def run_benchmark(module, microbatch, input_shapes, cycle_range, fidelity, data_format): - tt0 = pybuda.TTDevice("tt0", module=module, fp32_fallback=data_format) - pybuda.set_configuration_options( + tt0 = forge.TTDevice("tt0", module=module, fp32_fallback=data_format) + forge.set_configuration_options( math_fidelity=fidelity, - performance_trace=pybuda.PerfTraceLevel.VERBOSE) + performance_trace=forge.PerfTraceLevel.VERBOSE) loop_count = 1 inputs = tuple(torch.rand(microbatch, *shape) for shape in input_shapes) tt0.push_to_inputs(inputs) - output_q = pybuda.run_inference(input_count = loop_count + 1, _verify_cfg=pybuda.VerifyConfig.disabled()) + output_q = forge.run_inference(input_count = loop_count + 1, _verify_cfg=forge.VerifyConfig.disabled()) # Wait until compile is done, and first input has gone through output_q.get() @@ -48,20 +48,20 @@ def run_benchmark(module, microbatch, input_shapes, cycle_range, fidelity, data_ assert clocks_per_sample >= cycle_range[0] and 
clocks_per_sample <= cycle_range[1], f"Clocks per sample out of range: {clocks_per_sample} not in {cycle_range}" -class MatmulTest(pybuda.PyBudaModule): +class MatmulTest(forge.ForgeModule): def __init__(self, name, weight_shape): super().__init__(name) - self.weights = pybuda.Parameter(torch.rand(*weight_shape)) + self.weights = forge.Parameter(torch.rand(*weight_shape)) def forward(self, act): - return pybuda.op.Matmul("matmul", act, self.weights) + return forge.op.Matmul("matmul", act, self.weights) @pytest.mark.skip(reason="Perf not close to expected yet") def test_matmul(): weight_shape = (768, 768) act_shape = (128, 768) - pybuda.override_op_size("matmul", (1, 4)) - pybuda.override_op_size("matmul_output_nop_0", (1, 4)) + forge.override_op_size("matmul", (1, 4)) + forge.override_op_size("matmul_output_nop_0", (1, 4)) run_benchmark(MatmulTest("benchmark_matmul", weight_shape), 1024, (act_shape,), - cycle_range=(15000, 25000), fidelity=pybuda.MathFidelity.LoFi, data_format=pybuda.DataFormat.Bfp8_b) + cycle_range=(15000, 25000), fidelity=forge.MathFidelity.LoFi, data_format=forge.DataFormat.Bfp8_b) diff --git a/pybuda/test/backend/models/__init__.py b/forge/test/backend/models/__init__.py similarity index 100% rename from pybuda/test/backend/models/__init__.py rename to forge/test/backend/models/__init__.py diff --git a/pybuda/test/backend/models/gpt2_pybuda.py b/forge/test/backend/models/gpt2_forge.py similarity index 87% rename from pybuda/test/backend/models/gpt2_pybuda.py rename to forge/test/backend/models/gpt2_forge.py index 7028d889c..df14fcfab 100644 --- a/pybuda/test/backend/models/gpt2_pybuda.py +++ b/forge/test/backend/models/gpt2_forge.py @@ -2,16 +2,16 @@ # SPDX-License-Identifier: Apache-2.0 ''' -Doing GPT-2 exploration in Python script instead of notebook because the "edit PyBuda -> rebuild env -> restart notebook" workflow was too painful. +Doing GPT-2 exploration in Python script instead of notebook because the "edit Forge -> rebuild env -> restart notebook" workflow was too painful. 
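The benchmark test above shows the user-facing shape of the rename: PyBudaModule becomes ForgeModule, and pybuda.Parameter / pybuda.op.* become forge.Parameter / forge.op.* with unchanged signatures. A minimal, self-contained sketch of the same pattern; the TinyMatmul module and its shapes are illustrative rather than taken from the repo:

    import torch
    import forge

    class TinyMatmul(forge.ForgeModule):
        def __init__(self, name: str, weight_shape=(64, 64)):
            super().__init__(name)
            # forge.Parameter replaces pybuda.Parameter
            self.weights = forge.Parameter(torch.rand(*weight_shape))

        def forward(self, act):
            # forge.op.Matmul replaces pybuda.op.Matmul
            return forge.op.Matmul("matmul", act, self.weights)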
''' import os -import pybuda -from pybuda import (BackendType, BackendDevice, TTDevice, CPUDevice, PyTorchModule, DataFormat) -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind -from pybuda.verify.utils import CPUCombiner -from pybuda.config import CompilerConfig, _get_global_compiler_config -from pybuda.op.eval.common import compare_tensor_to_golden, calculate_pcc +import forge +from forge import (BackendType, BackendDevice, TTDevice, CPUDevice, PyTorchModule, DataFormat) +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind +from forge.verify.utils import CPUCombiner +from forge.config import CompilerConfig, _get_global_compiler_config +from forge.op.eval.common import compare_tensor_to_golden, calculate_pcc import torch from transformers import GPT2Model, GPT2Config, GPT2Tokenizer, GPT2LMHeadModel @@ -87,7 +87,7 @@ def forward(self, hidden_states): # @pytest.mark.parametrize('test_kind', (TestKind.INFERENCE, TestKind.TRAINING), ids=('inference', 'training')) def test_pt_gpt2_block(devtype, test_device, test_kind): ''' - FP32 GPT2 block in PyBuda + FP32 GPT2 block in Forge ''' model = GPT2Model.from_pretrained("gpt2") block = PyTorchModule("gpt2_block_backend",model.h[0]) @@ -128,7 +128,7 @@ def test_gpt2_block_train_manual_loop(test_device, num_block, sequential): embedding = EmbWrapper(GPT2LMHeadModel.from_pretrained('gpt2')) - pybuda.set_configuration_options(accumulate_df=DataFormat.Float32) + forge.set_configuration_options(accumulate_df=DataFormat.Float32) # Get pretrained GPT2 tokenizer tokenizer = GPT2Tokenizer.from_pretrained("gpt2") @@ -164,7 +164,7 @@ def test_gpt2_block_train_manual_loop(test_device, num_block, sequential): opt_tt = None if not opt_on_cpu: - opt_tt = pybuda.optimizers.SGD(learning_rate=5e-5, device_params=True) + opt_tt = forge.optimizers.SGD(learning_rate=5e-5, device_params=True) fp32_fallback = DataFormat.Float32 if test_device.arch == BackendDevice.Wormhole_B0 else DataFormat.Float16_b @@ -174,13 +174,13 @@ def test_gpt2_block_train_manual_loop(test_device, num_block, sequential): # Whether or not we do optimizer on CPU, we compute Loss on cpu cpu0 = CPUDevice('cpu0', module=PyTorchModule('identity', CPUIdentity())) - cpu0.place_loss_module(pybuda.PyTorchModule('l1loss', torch.nn.L1Loss())) + cpu0.place_loss_module(forge.PyTorchModule('l1loss', torch.nn.L1Loss())) #_get_global_compiler_config().enable_tvm_dropout = False #_get_global_compiler_config().enable_tvm_unsupported_ops = True # Compile - checkpoint_q = pybuda.initialize_pipeline(training=True, + checkpoint_q = forge.initialize_pipeline(training=True, _sequential=sequential, sample_inputs=(input_hidden, input_mask), sample_targets=(targets,), @@ -194,19 +194,19 @@ def test_gpt2_block_train_manual_loop(test_device, num_block, sequential): scale_loss=1.0, )) - loss_q = pybuda.run.get_loss_queue() + loss_q = forge.run.get_loss_queue() - # PyBuda training loop + # Forge training loop for step in range(5): tt0.push_to_inputs((input_hidden, input_mask)) cpu0.push_to_target_inputs(targets) - pybuda.run_forward(input_count=1, _sequential=sequential) - pybuda.run_backward(input_count=1, zero_grad=True, _sequential=sequential) + forge.run_forward(input_count=1, _sequential=sequential) + forge.run_backward(input_count=1, zero_grad=True, _sequential=sequential) if opt_on_cpu: - grads = pybuda.get_parameter_gradients(tt0, _sequential=sequential) - params = pybuda.get_parameter_checkpoint(tt0, _sequential=sequential) + grads = 
forge.get_parameter_gradients(tt0, _sequential=sequential) + params = forge.get_parameter_checkpoint(tt0, _sequential=sequential) for name in params[0].keys(): # Set grad for each torch tensor @@ -222,19 +222,19 @@ def test_gpt2_block_train_manual_loop(test_device, num_block, sequential): opt = torch.optim.SGD([p.value() for p in params[0].values()], lr=5e-5) opt.step() - pybuda.update_device_parameters(tt0, params, _sequential=sequential) + forge.update_device_parameters(tt0, params, _sequential=sequential) else: - pybuda.run_optimizer(_sequential=sequential) + forge.run_optimizer(_sequential=sequential) losses_pb = [] while not loss_q.empty(): losses_pb.append(loss_q.get()[0]) - pybuda.shutdown() + forge.shutdown() - print('pybuda loss history:') + print('forge loss history:') print(losses_pb) print('PyTorch loss history:') print(loss_pt) @@ -251,7 +251,7 @@ def test_gpt2_block_train_manual_loop(test_device, num_block, sequential): assert len(losses_pb) == len(loss_pt) for i, l_pb in enumerate(losses_pb): - print(f"index={i} pybuda loss/pytorch loss={l_pb/loss_pt[i]}") + print(f"index={i} forge loss/pytorch loss={l_pb/loss_pt[i]}") assert math.isclose(l_pb, loss_pt[i], rel_tol=rel_tol, abs_tol=0.03) class GPT2BlockWrapper(torch.nn.Module): @@ -275,7 +275,7 @@ def test_pt_gpt2_blocks(devtype, test_kind): if test_kind.is_training(): pytest.skip() # failing for a while ''' - FP32 GPT2 blocks in PyBuda + FP32 GPT2 blocks in Forge ''' #model = GPT2LMHeadModel.from_pretrained("gpt2") #blocks = PyTorchModule("gpt2_blocks", BlocksWrapper(model)) @@ -285,7 +285,7 @@ def test_pt_gpt2_blocks(devtype, test_kind): torch.manual_seed(0) - pybuda.set_configuration_options(accumulate_df=DataFormat.Float32) + forge.set_configuration_options(accumulate_df=DataFormat.Float32) compiler_cfg = _get_global_compiler_config() relative_atol = 0.3 if test_kind.is_training() else 0.1 @@ -306,7 +306,7 @@ def test_gpt2_inference(devtype, dataformat): if (dataformat == DataFormat.Float16_b): pytest.skip() # failing for a while - pybuda.set_configuration_options(accumulate_df=dataformat) + forge.set_configuration_options(accumulate_df=dataformat) compiler_cfg = _get_global_compiler_config() # Get pretrained GPT2 @@ -335,8 +335,8 @@ def test_gpt2_inference(devtype, dataformat): res_pt = blocks(*embed_out) # Set up TT Device with module - buda_blocks = pybuda.PyTorchModule("gpt2_blocks", blocks) - tt0 = pybuda.TTDevice('tt0', devtype=devtype, arch=BackendDevice.Wormhole_B0, fp32_fallback=dataformat) + buda_blocks = forge.PyTorchModule("gpt2_blocks", blocks) + tt0 = forge.TTDevice('tt0', devtype=devtype, arch=BackendDevice.Wormhole_B0, fp32_fallback=dataformat) tt0.place_module(buda_blocks) hidden_tt = embed_out[0].clone().detach() @@ -344,7 +344,7 @@ def test_gpt2_inference(devtype, dataformat): # Run on TT hardware tt0.push_to_inputs((hidden_tt, mask_tt)) - output_q = pybuda.run_inference() + output_q = forge.run_inference() out_tt = output_q.get() res_tt = out_tt[0].value().detach() @@ -380,7 +380,7 @@ def test_gpt2_inference(devtype, dataformat): @pytest.mark.parametrize('devtype', (BackendType.Golden, BackendType.Silicon), ids=('Golden', 'Silicon')) # @pytest.mark.parametrize('test_kind', (TestKind.INFERENCE, TestKind.TRAINING), ids=('inference', 'training')) def test_pt_gpt2_generate(devtype, test_kind): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() model = GPT2LMHeadModel.from_pretrained("gpt2") tokenizer = 
GPT2Tokenizer.from_pretrained("gpt2") @@ -395,12 +395,12 @@ def test_pt_gpt2_generate(devtype, test_kind): print(blocks.gpt2.h[0].attn.embed_dim) print(blocks.gpt2.h[0].attn.split_size) - buda_blocks = pybuda.PyTorchModule("gpt2_blocks", blocks) + buda_blocks = forge.PyTorchModule("gpt2_blocks", blocks) - cpu0 = pybuda.CPUDevice("cpu0", module=pybuda.PyTorchModule("gpt2_embeddings", embedding)) + cpu0 = forge.CPUDevice("cpu0", module=forge.PyTorchModule("gpt2_embeddings", embedding)) tt0 = TTDevice("tt0", devtype=BackendType.Golden, arch=BackendDevice.Wormhole_B0, num_chips=1, module=buda_blocks) - cpu1 = pybuda.CPUDevice("cpu1", module=pybuda.PyTorchModule("gpt2_lm_head", lm_head)) + cpu1 = forge.CPUDevice("cpu1", module=forge.PyTorchModule("gpt2_lm_head", lm_head)) dummy_text = "Text to generate input tensor for the compiler " dummy_input = tokenizer(dummy_text, return_tensors='pt', max_length=32, padding='max_length', truncation=True) @@ -422,7 +422,7 @@ def test_pt_gpt2_generate(devtype, test_kind): tokens_to_generate = 10 for i in range(tokens_to_generate): cpu0.push_to_inputs((input_ids_tt, attn_mask_tt)) - output_q = pybuda.run_inference() + output_q = forge.run_inference() outputs = output_q.get() lm_head_out = outputs[0].value().detach() next_token = torch.argmax(lm_head_out, dim=-1)[0][last_prefix_token + i] @@ -430,7 +430,7 @@ def test_pt_gpt2_generate(devtype, test_kind): input_ids_tt[0][next_token_index] = next_token attn_mask_tt[0][next_token_index] = 1 - pybuda.shutdown() + forge.shutdown() generated_text_tt = tokenizer.decode(input_ids_tt[0][:next_token_index].numpy().tolist()) diff --git a/pybuda/test/backend/models/test_bert.py b/forge/test/backend/models/test_bert.py similarity index 93% rename from pybuda/test/backend/models/test_bert.py rename to forge/test/backend/models/test_bert.py index 57d1b79d2..8836bcc49 100644 --- a/pybuda/test/backend/models/test_bert.py +++ b/forge/test/backend/models/test_bert.py @@ -12,20 +12,20 @@ import pytest import inspect -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig from test.bert.modules import ( - PyBudaBertMHA, - PyBudaBertEncoder, - PyBudaFeedForward, - PyBudaPredictionHeadDecoder, - PyBudaPredictionHeadTransform, + ForgeBertMHA, + ForgeBertEncoder, + ForgeFeedForward, + ForgePredictionHeadDecoder, + ForgePredictionHeadTransform, get_bert_parameters ) -from pybuda import Tensor, DataFormat, BackendType, BackendDevice -from pybuda.config import _get_global_compiler_config -from pybuda._C.placer import DRAMPlacementAlgorithm +from forge import Tensor, DataFormat, BackendType, BackendDevice +from forge.config import _get_global_compiler_config +from forge._C.placer import DRAMPlacementAlgorithm def get_relaxed_atol_pcc(test_kind, test_device, size = "tiny", microbatch_size = 1): """ @@ -69,7 +69,7 @@ def test_mha(test_kind, cfg, test_device): params = get_bert_parameters("mha", hidden_dim=hidden_dim) config = { "num_heads": num_heads, "encoder_index": 0 } - mod = PyBudaBertMHA("mha", params, config) + mod = ForgeBertMHA("mha", params, config) verify_module(mod, [(microbatch_size, seq_len, hidden_dim), (microbatch_size, 1, seq_len)], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, accumulation_steps=1, @@ -100,7 +100,7 @@ def test_ff(test_kind, cfg, test_device, optimizer): params = get_bert_parameters("ff", hidden_dim=hidden_dim) config = { "encoder_index": 0 } - mod = 
PyBudaFeedForward("ff", params, config) + mod = ForgeFeedForward("ff", params, config) verify_module(mod, [(microbatch_size, seq_len, hidden_dim)], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, accumulation_steps=1, @@ -126,7 +126,7 @@ def test_ff_fp16(test_kind, cfg, test_device): for v in params.values(): v.set_data_format(DataFormat.Float16) config = { "encoder_index": 0 } - mod = PyBudaFeedForward("ff", params, config) + mod = ForgeFeedForward("ff", params, config) relative_atol = 0.3 if test_kind.is_training() and test_device.devtype == BackendType.Silicon else 0.1 verify_module(mod, [(microbatch_size, seq_len, hidden_dim)], @@ -157,7 +157,7 @@ def test_pred_transform(test_kind, cfg, test_device): params = get_bert_parameters("pred_transform", hidden_dim=hidden_dim) config = { } - mod = PyBudaPredictionHeadTransform("pred_transform", params, config) + mod = ForgePredictionHeadTransform("pred_transform", params, config) verify_module(mod, [(microbatch_size, seq_len, hidden_dim)], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, accumulation_steps=1, relative_atol=relative_atol, pcc=pcc), @@ -184,7 +184,7 @@ def test_pred_decoder(test_kind, cfg, test_device): params = get_bert_parameters("pred_decoder", hidden_dim=hidden_dim, vocab_size=vocab_size) config = { } - mod = PyBudaPredictionHeadDecoder("pred_decoder", params, config) + mod = ForgePredictionHeadDecoder("pred_decoder", params, config) verify_module(mod, [(microbatch_size, seq_len, hidden_dim)], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, accumulation_steps=1, relative_atol=relative_atol, pcc=pcc), @@ -218,11 +218,11 @@ def test_encoder(test_kind, cfg, test_device, dram_allocator): params = get_bert_parameters("encoder", hidden_dim=hidden_dim) config = { "num_heads": num_heads, "encoder_index": 0 } - mod = PyBudaBertEncoder("encoder", params, config) + mod = ForgeBertEncoder("encoder", params, config) params["reciprocal_of_sqrt_of_head_size_0"].set_value(torch.full((1, 1, 1, 1), 1/math.sqrt(num_heads))) - pybuda.config.set_configuration_options(dram_placement_algorithm=dram_allocator) + forge.config.set_configuration_options(dram_placement_algorithm=dram_allocator) relative_atol = 0.3 if test_kind.is_training() and test_device.devtype == BackendType.Silicon else 0.1 verify_module(mod, [(microbatch_size, seq_len, hidden_dim), (microbatch_size, 1, seq_len)], @@ -262,7 +262,7 @@ def test_multi_encoder(test_kind, cfg, test_device, encoder_count): config["encoder_index"] = encoder_index config["passthrough_attn_mask"] = bool(encoder_index != (encoder_count - 1)) - mod = PyBudaBertEncoder(f"encoder{encoder_index}", enc_params, config) + mod = ForgeBertEncoder(f"encoder{encoder_index}", enc_params, config) enc_params[f"reciprocal_of_sqrt_of_head_size_{encoder_index}"].set_value(torch.full((1, 1, 1, 1), 1/math.sqrt(num_heads))) @@ -297,14 +297,14 @@ def test_multichip_wormhole_multi_encoder(test_kind, cfg, test_device, encoder_c config["encoder_index"] = encoder_index config["passthrough_attn_mask"] = bool(encoder_index != (encoder_count - 1)) - mod = PyBudaBertEncoder(f"encoder{encoder_index}", enc_params, config) + mod = ForgeBertEncoder(f"encoder{encoder_index}", enc_params, config) enc_params[f"reciprocal_of_sqrt_of_head_size_{encoder_index}"].set_value(torch.full((1, 1, 1, 1), 1/math.sqrt(num_heads))) modules.append(mod) compiler_cfg = _get_global_compiler_config() - # tenstorrent/pybuda#480 + # 
tenstorrent/forge#480 compiler_cfg.use_interactive_placer = False if test_device.arch is BackendDevice.Grayskull else True verify_module_pipeline(modules, [(microbatch_size, 1, seq_len, hidden_dim), (microbatch_size, 1, 1, seq_len)], @@ -340,7 +340,7 @@ def test_multichip_wormhole_split(test_kind, cfg, test_device, encoder_count, nu config["encoder_index"] = encoder_index config["passthrough_attn_mask"] = bool(encoder_index != (encoder_count - 1)) - mod = PyBudaBertEncoder(f"encoder{encoder_index}", enc_params, config) + mod = ForgeBertEncoder(f"encoder{encoder_index}", enc_params, config) enc_params[f"reciprocal_of_sqrt_of_head_size_{encoder_index}"].set_value(torch.full((1, 1, 1, 1), 1/math.sqrt(num_heads))) @@ -360,8 +360,8 @@ def test_multichip_wormhole_split(test_kind, cfg, test_device, encoder_count, nu @pytest.mark.parametrize("encoder_count", [1, 2, 4, 12, 24,], ids=["enc1", "enc2", "enc4", "enc12", "enc24"]) @pytest.mark.parametrize("num_chips", [2, 4, 8, 12,], ids=["chip2", "chip4", "chip8", "chip12"]) def test_multichip_wormhole_multi_encoder_split_concurrent(test_kind, cfg, test_device, encoder_count, num_chips): - # Set pybuda config - pybuda.config.set_configuration_options(default_df_override=DataFormat.Float16_b) + # Set forge config + forge.config.set_configuration_options(default_df_override=DataFormat.Float16_b) # Skip all golden tests if not test_device.is_silicon(): @@ -390,7 +390,7 @@ def test_multichip_wormhole_multi_encoder_split_concurrent(test_kind, cfg, test_ config["encoder_index"] = encoder_index config["passthrough_attn_mask"] = bool(encoder_index != (encoder_count - 1)) - mod = PyBudaBertEncoder(f"encoder{encoder_index}", enc_params, config) + mod = ForgeBertEncoder(f"encoder{encoder_index}", enc_params, config) enc_params[f"reciprocal_of_sqrt_of_head_size_{encoder_index}"].set_value(torch.full((1, 1, 1, 1), 1/math.sqrt(num_heads))) if encoder_index > 0: @@ -408,7 +408,7 @@ def test_multichip_wormhole_multi_encoder_split_concurrent(test_kind, cfg, test_ params_centered_on_zero=True, ) -from pybuda import PyTorchModule +from forge import PyTorchModule from transformers import BertModel, BertConfig, BertForPreTraining, BertTokenizer, BertForQuestionAnswering def test_pt_bert(test_kind, test_device): seq_len = 128 @@ -447,7 +447,7 @@ def test_pt_bert(test_kind, test_device): data being popped from the command response queue is not the same size as what pytorch is expecting. I was not able to figure out why this happens. -It seems as though pybuda/pybuda/verify/backend.py::_verify_training was intended to be run in sequential mode. +It seems as though forge/forge/verify/backend.py::_verify_training was intended to be run in sequential mode. The solution to this problem probably involves generating the ground truth sequentially and comparing afterwards. 
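The BERT tests above exercise the renamed test modules (ForgeBertMHA, ForgeBertEncoder, and so on) through verify_module. A condensed sketch of that flow, assuming the repo's test package is importable; the concrete sizes are illustrative stand-ins for the fixture-provided values:

    from forge.verify import verify_module, VerifyConfig
    from test.bert.modules import ForgeBertMHA, get_bert_parameters

    hidden_dim, num_heads, seq_len, microbatch = 128, 4, 128, 1

    # Build the renamed MHA test module and run it through the standard verification path.
    params = get_bert_parameters("mha", hidden_dim=hidden_dim)
    mod = ForgeBertMHA("mha", params, {"num_heads": num_heads, "encoder_index": 0})
    verify_module(
        mod,
        [(microbatch, seq_len, hidden_dim), (microbatch, 1, seq_len)],
        VerifyConfig(),
    )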
''' @@ -455,15 +455,15 @@ def test_pt_bert(test_kind, test_device): @pytest.mark.parametrize("encoder_count", [1, 2, 4, 12, 24], ids=["enc1", "enc2", "enc4", "enc12", "enc24"]) @pytest.mark.parametrize("num_chips", [1, 2, 4, 8, 12, 32], ids=["chip1", "chip2", "chip4", "chip8", "chip12", "chip32"]) def test_pt_encoder(test_kind, test_device, size, encoder_count, num_chips): - # Set pybuda config - pybuda.config.set_configuration_options(default_df_override=DataFormat.Float16_b) + # Set forge config + forge.config.set_configuration_options(default_df_override=DataFormat.Float16_b) # Skip certain tests in golden CI (redundant) if not test_device.is_silicon() and (num_chips > 1 or encoder_count > 2): pytest.skip() if test_kind.is_training() and test_device.arch == BackendDevice.Grayskull and size == "large": - pytest.skip() # see tenstorrent/pybuda#969 + pytest.skip() # see tenstorrent/forge#969 optimizer = {"type": "sgd", "params": {"learning_rate": 50.0 } } if size == "tiny": @@ -474,7 +474,7 @@ def test_pt_encoder(test_kind, test_device, size, encoder_count, num_chips): seq_len = 128 if test_device.is_silicon() and test_kind.is_training(): _get_global_compiler_config().enable_broadcast_splitting = True # fork error workaround - pybuda.config.override_op_size("bw_in0_matmul_128_matmul_1", (1, 2)) # tenstorrent/budabackend#667 + forge.config.override_op_size("bw_in0_matmul_128_matmul_1", (1, 2)) # tenstorrent/budabackend#667 #pytest.skip("Issue 667") # unsure why, but CI fails even with the workaround above, while it passes in interactive runs elif size == "large": model_name = "bert-large-uncased" @@ -509,9 +509,9 @@ def test_pt_encoder(test_kind, test_device, size, encoder_count, num_chips): import os if test_device.is_silicon() and test_kind.is_training() and size == "large": - # Revert when issue is closed: tenstorrent/pybuda#207 - os.environ["PYBUDA_NO_FUSE_MATMUL_BIAS"] = "1" - os.environ["PYBUDA_ENABLE_BROADCAST_SPLITTING"] = "1" + # Revert when issue is closed: tenstorrent/forge#207 + os.environ["FORGE_NO_FUSE_MATMUL_BIAS"] = "1" + os.environ["FORGE_ENABLE_BROADCAST_SPLITTING"] = "1" if test_kind.is_training() and size == "large": os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{77*1024}" @@ -537,8 +537,8 @@ def test_pt_encoder(test_kind, test_device, size, encoder_count, num_chips): def test_pt_encoder_ethernet_datacopy_serialization(test_kind, test_device, size, encoder_count, num_chips): pytest.skip() # ethernet datacopy support needs to be picked up in BBE first import os - os.environ["PYBUDA_ENABLE_ETH_DATACOPY_SERIALIZATION"] = "1" - os.environ["PYBUDA_DISABLE_INTERACTIVE_PLACER"] = "1" + os.environ["FORGE_ENABLE_ETH_DATACOPY_SERIALIZATION"] = "1" + os.environ["FORGE_DISABLE_INTERACTIVE_PLACER"] = "1" _compiler_config = _get_global_compiler_config() _compiler_config.enable_t_streaming = False @@ -622,7 +622,7 @@ def test_pt_pretrain_heads(test_kind, test_device): ) from transformers.pipelines import pipeline -import pybuda +import forge from loguru import logger class ModelWrapper(torch.nn.Module): @@ -638,12 +638,12 @@ def forward(self, *args, **kwargs): inputs.extend(kwinputs) self.device.push_to_inputs(inputs) - output_q = pybuda.run_inference() + output_q = forge.run_inference() outputs = output_q.get() return [o.value() for o in outputs] -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +from forge.transformers.pipeline import pipeline as forge_pipeline @pytest.mark.parametrize("variant", ["mrm8488/bert-tiny-finetuned-squadv2", 
"phiyodr/bert-base-finetuned-squad2"]) def test_pt_bert_qa_fallback(test_device, variant): # Configurations @@ -665,7 +665,7 @@ def test_pt_bert_qa_fallback(test_device, variant): answer_pt = question_answerer_pt(question=question, context=context) # TT run - question_answerer_tt = pybuda_pipeline('question-answering', model=model, tokenizer=tokenizer) + question_answerer_tt = forge_pipeline('question-answering', model=model, tokenizer=tokenizer) answer_tt = question_answerer_tt(question=question, context=context) logger.info(f"Context: {context}") @@ -733,15 +733,15 @@ def test_pt_bert_qa(test_device, size): # Create pipeline, with encoders on TT - cpu0 = pybuda.CPUDevice("cpu0", module=PyTorchModule("bert_embeddings", EmbWrapper(model.bert))) - tt1 = pybuda.TTDevice("tt1", + cpu0 = forge.CPUDevice("cpu0", module=PyTorchModule("bert_embeddings", EmbWrapper(model.bert))) + tt1 = forge.TTDevice("tt1", devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("encoder", EncoderWrapper(model))) for input in inputs: logger.info("Running on TT") cpu0.push_to_inputs(input["data"]) - output_q = pybuda.run_inference(_verify_cfg=VerifyConfig(relative_atol=0.3), _sequential=True) + output_q = forge.run_inference(_verify_cfg=VerifyConfig(relative_atol=0.3), _sequential=True) outputs = output_q.get() logits = outputs[0].value().detach() diff --git a/pybuda/test/backend/models/test_gpt2.py b/forge/test/backend/models/test_gpt2.py similarity index 90% rename from pybuda/test/backend/models/test_gpt2.py rename to forge/test/backend/models/test_gpt2.py index af230f7ff..3bd6e6b05 100644 --- a/pybuda/test/backend/models/test_gpt2.py +++ b/forge/test/backend/models/test_gpt2.py @@ -12,13 +12,13 @@ import pytest import inspect -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig -from pybuda import Tensor, DataFormat, BackendType +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig +from forge import Tensor, DataFormat, BackendType -import pybuda -from pybuda import PyTorchModule +import forge +from forge import PyTorchModule from transformers import GPT2Model, GPT2Config, GPT2Tokenizer, GPT2LMHeadModel -from pybuda.config import CompilerConfig, _get_global_compiler_config +from forge.config import CompilerConfig, _get_global_compiler_config from loguru import logger @@ -52,7 +52,7 @@ def forward(self, input_ids, attention_mask): from transformers import pipeline, PreTrainedModel from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +from forge.transformers.pipeline import pipeline as forge_pipeline def test_pt_gpt2_fallback(test_kind, test_device): model = GPT2LMHeadModel.from_pretrained("gpt2") tokenizer = GPT2Tokenizer.from_pretrained("gpt2") @@ -71,7 +71,7 @@ def test_pt_gpt2_fallback(test_kind, test_device): no_repeat_ngram_size=2, ) - text_generator = pybuda_pipeline("text-generation", model=model, tokenizer=tokenizer,) + text_generator = forge_pipeline("text-generation", model=model, tokenizer=tokenizer,) torch.manual_seed(42) answer = text_generator( prefix_text, @@ -150,15 +150,15 @@ def test_pt_gpt2(test_kind, test_device): last_prefix_token = inputs["attention_mask"].index(0) - 1 tokens_to_generate = 20 - cpu0 = pybuda.CPUDevice("cpu0", module=PyTorchModule("gpt2_embeddings", embeddings)) - tt1 = pybuda.TTDevice("tt1", + cpu0 = forge.CPUDevice("cpu0", module=PyTorchModule("gpt2_embeddings", embeddings)) + tt1 = forge.TTDevice("tt1", 
devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("gpt2_blocks", blocks)) - cpu1 = pybuda.CPUDevice("cpu1", module=PyTorchModule("gpt2_lm_head", lm_head)) + cpu1 = forge.CPUDevice("cpu1", module=PyTorchModule("gpt2_lm_head", lm_head)) for i in range(tokens_to_generate): cpu0.push_to_inputs((input_ids_tt, attention_mask)) - output_q = pybuda.run_inference() + output_q = forge.run_inference() outputs = output_q.get() lm_head_out = outputs[0].value().detach() next_token = torch.argmax(lm_head_out, dim=-1)[0][last_prefix_token + i] diff --git a/pybuda/test/backend/models/test_mixed_precision.py b/forge/test/backend/models/test_mixed_precision.py similarity index 93% rename from pybuda/test/backend/models/test_mixed_precision.py rename to forge/test/backend/models/test_mixed_precision.py index 5dc4156c8..97dcd485e 100644 --- a/pybuda/test/backend/models/test_mixed_precision.py +++ b/forge/test/backend/models/test_mixed_precision.py @@ -3,9 +3,9 @@ # SPDX-License-Identifier: Apache-2.0 import pytest -import pybuda -from pybuda.verify import verify_module, VerifyConfig -from pybuda import DataFormat, PyTorchModule +import forge +from forge.verify import verify_module, VerifyConfig +from forge import DataFormat, PyTorchModule from transformers import BertModel, BertConfig def get_relaxed_atol_pcc(test_kind, test_device, size = "tiny", microbatch_size = 1): @@ -35,7 +35,7 @@ def test_pt_encoder(test_kind, test_device, encoder_count): pytest.skip() # Set Mixed Precision Settings - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="softmax", output_df=DataFormat.Float16_b ) diff --git a/pybuda/test/backend/test_backend.py b/forge/test/backend/test_backend.py similarity index 82% rename from pybuda/test/backend/test_backend.py rename to forge/test/backend/test_backend.py index 5c5de2453..ccb3d91df 100644 --- a/pybuda/test/backend/test_backend.py +++ b/forge/test/backend/test_backend.py @@ -7,11 +7,11 @@ import pytest -import pybuda -from pybuda import DataFormat, BackendDevice, BackendType -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind +import forge +from forge import DataFormat, BackendDevice, BackendType +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind -class BudaTest(pybuda.PyBudaModule): +class BudaTest(forge.ForgeModule): """ Simple buda module for basic testing """ @@ -21,14 +21,14 @@ class BudaTest(pybuda.PyBudaModule): def __init__(self, name: str, multi_output: bool = False): super().__init__(name) self.multi_output = multi_output - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = pybuda.op.Matmul("matmul2", act2, self.weights2) - m1e = pybuda.op.Sqrt("sqrt", m1) - add = pybuda.op.Add("add", m1e, m2) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act2, self.weights2) + m1e = forge.op.Sqrt("sqrt", m1) + add = forge.op.Add("add", m1e, m2) if self.multi_output: return m1e, add else: @@ -89,15 +89,15 @@ def test_concurrent(test_kind): VerifyConfig(test_kind=test_kind, sequential=False, run_net2pipe=True)) # Test the scenario where tile broadcast folds into an input node -class 
InputFolding(pybuda.PyBudaModule): +class InputFolding(forge.ForgeModule): def __init__(self, name): super().__init__(name) - self.eltwise_param = pybuda.Parameter(64, 64, requires_grad=True) + self.eltwise_param = forge.Parameter(64, 64, requires_grad=True) def forward(self, act): # (1, 1) + (64, 64) - need to scalar-broadcast act to get correct result - add = pybuda.op.Add("add", act, self.eltwise_param) + add = forge.op.Add("add", act, self.eltwise_param) return add def test_input_folding(test_kind): @@ -108,7 +108,7 @@ def test_input_folding(test_kind): # Test a simple PT model -from pybuda import PyTorchModule +from forge import PyTorchModule import torch class MyLinearNoConstEval(torch.nn.Module): @@ -162,35 +162,35 @@ def test_pt_linear_pipeline_no_consteval(test_kind): device_types=["CPUDevice", "TTDevice"] ) -class LargeParam(pybuda.PyBudaModule): +class LargeParam(forge.ForgeModule): def __init__(self, name: str): super().__init__(name) - self.weights = pybuda.Parameter(torch.rand(2048, 2048)) + self.weights = forge.Parameter(torch.rand(2048, 2048)) def forward(self, act): - return pybuda.op.Matmul("matmul", act, self.weights) + return forge.op.Matmul("matmul", act, self.weights) def test_memory_leak_parameter_gradients(): pytest.skip("Random fails in CI due to other processes using memory") - tt0 = pybuda.TTDevice("tt0", module=LargeParam("large_param"), arch=BackendDevice.Grayskull, devtype=BackendType.Golden) - tt0.place_loss_module(pybuda.op.loss.L1Loss("l1_loss")) + tt0 = forge.TTDevice("tt0", module=LargeParam("large_param"), arch=BackendDevice.Grayskull, devtype=BackendType.Golden) + tt0.place_loss_module(forge.op.loss.L1Loss("l1_loss")) inputs = torch.rand(1, 64, 2048) target = torch.rand(1, 64, 2048) tt0.push_to_inputs((inputs,)) tt0.push_to_target_inputs((target,)) - pybuda.run_training() + forge.run_training() print("Reading gradients first time") - pybuda.get_parameter_gradients(tt0, _sequential=True) + forge.get_parameter_gradients(tt0, _sequential=True) import psutil first_mem_use = psutil.virtual_memory().used / (1024*1024) for i in range(100): print(f"Reading gradients ({i+1}/100)") - pybuda.get_parameter_gradients(tt0, _sequential=True) + forge.get_parameter_gradients(tt0, _sequential=True) final_mem_use = psutil.virtual_memory().used / (1024 * 1024) diff --git a/pybuda/test/backend/test_device.py b/forge/test/backend/test_device.py similarity index 83% rename from pybuda/test/backend/test_device.py rename to forge/test/backend/test_device.py index f3db43df6..d44ccf00d 100644 --- a/pybuda/test/backend/test_device.py +++ b/forge/test/backend/test_device.py @@ -10,9 +10,9 @@ import torch import tensorflow as tf -import pybuda -from pybuda import PyTorchModule, TFModule, PyBudaModule -from pybuda.verify import verify_module, VerifyConfig, TestKind +import forge +from forge import PyTorchModule, TFModule, ForgeModule +from forge.verify import verify_module, VerifyConfig, TestKind class PytorchUnary(torch.nn.Module): def forward(self, x): @@ -22,7 +22,7 @@ class TFUnary(tf.keras.Model): def call(self, x): return 1 - x -class PyBudaUnary(PyBudaModule): +class ForgeUnary(ForgeModule): def forward(self, x): return 1 - x @@ -42,13 +42,13 @@ def test_tf_tt(): verify_module(TFModule("test", TFUnary()), [(1, 1, 64, 64)], VerifyConfig(test_kind=TestKind.INFERENCE, pcc=0.99), device_type="TTDevice") -def test_pybuda_tt(): - verify_module(PyBudaUnary("test"), [(1, 1, 64, 64)], +def test_forge_tt(): + verify_module(ForgeUnary("test"), [(1, 1, 64, 64)], 
VerifyConfig(test_kind=TestKind.INFERENCE, pcc=0.99), device_type="TTDevice") @pytest.mark.skip(reason="Not supported yet") -def test_pybuda_cpu(): - verify_module(PyBudaUnary("test"), [(1, 1, 64, 64)], +def test_forge_cpu(): + verify_module(ForgeUnary("test"), [(1, 1, 64, 64)], VerifyConfig(test_kind=TestKind.INFERENCE, pcc=0.99), device_type="CPUDevice") #def test_mix_tt(): diff --git a/pybuda/test/backend/test_e2e.py b/forge/test/backend/test_e2e.py similarity index 69% rename from pybuda/test/backend/test_e2e.py rename to forge/test/backend/test_e2e.py index b5fa18615..29ed11e1b 100644 --- a/pybuda/test/backend/test_e2e.py +++ b/forge/test/backend/test_e2e.py @@ -9,41 +9,41 @@ import pytest -import pybuda -from pybuda.verify import verify_module, VerifyConfig, TestKind +import forge +from forge.verify import verify_module, VerifyConfig, TestKind shape = (256, 256) input_types = ["input", "unary", "matmul"] binary_types = ["eltwise", "matmul"] -class SkipConnections(pybuda.PyBudaModule): +class SkipConnections(forge.ForgeModule): def __init__(self, name, input_type: str, binary_type: str): super().__init__(name) - self.weights_input = pybuda.Parameter(1, *shape, requires_grad=True) if input_type == "matmul" else None + self.weights_input = forge.Parameter(1, *shape, requires_grad=True) if input_type == "matmul" else None self._input_type = input_type self._binary_type = binary_type def forward(self, act1, act2): if self._input_type == "unary": - input1 = pybuda.op.Buffer("input1", act1) + input1 = forge.op.Buffer("input1", act1) elif self._input_type == "matmul": assert self.weights_input - input1 = pybuda.op.Matmul("input1", act1, self.weights_input) + input1 = forge.op.Matmul("input1", act1, self.weights_input) else: input1 = act1 - input2 = pybuda.op.Buffer("input2", act2) + input2 = forge.op.Buffer("input2", act2) - stage2 = pybuda.op.Buffer("stage2", input2) - stage3 = pybuda.op.Buffer("stage3", stage2) + stage2 = forge.op.Buffer("stage2", input2) + stage3 = forge.op.Buffer("stage3", stage2) if self._binary_type == "matmul": - output = pybuda.op.Matmul("binary", input1, stage3) + output = forge.op.Matmul("binary", input1, stage3) else: - output = pybuda.op.Add("binary", input1, stage3) + output = forge.op.Add("binary", input1, stage3) return output @@ -57,7 +57,7 @@ def test_skip_connections(test_kind, test_device, input_type, binary_type): microbatch_count=10, epoch_breaks=["stage2", "stage3", "binary"])) -class VConnection(pybuda.PyBudaModule): +class VConnection(forge.ForgeModule): def __init__(self, name, depth: int, binary_types: List[str]): super().__init__(name) @@ -68,14 +68,14 @@ def forward(self, act): fwd = [act] for i in range(self.depth): - fwd.append( pybuda.op.Buffer(f"down{i}", fwd[i] )) + fwd.append( forge.op.Buffer(f"down{i}", fwd[i] )) bwd = fwd[-1] for i in range(1, self.depth): if self.binary_types[i-1] == "matmul": - bwd = pybuda.op.Matmul(f"up{i-1}", bwd, fwd[-(i+1)]) + bwd = forge.op.Matmul(f"up{i-1}", bwd, fwd[-(i+1)]) else: - bwd = pybuda.op.Add(f"up{i-1}", bwd, fwd[-(i+1)]) + bwd = forge.op.Add(f"up{i-1}", bwd, fwd[-(i+1)]) return bwd @@ -99,11 +99,11 @@ def test_v_connections(test_kind, test_device, depth, pattern): microbatch_count=min(10, 2*depth), epoch_breaks=epoch_breaks), inputs_centered_on_zero=True) -class ForkSkipConnections(pybuda.PyBudaModule): +class ForkSkipConnections(forge.ForgeModule): def __init__(self, name, input_type: str, binary_type1: str, binary_type2): super().__init__(name) - self.weights_input = pybuda.Parameter(1, *shape, 
requires_grad=True) if input_type == "matmul" else None + self.weights_input = forge.Parameter(1, *shape, requires_grad=True) if input_type == "matmul" else None self._input_type = input_type self._binary_type1 = binary_type1 self._binary_type2 = binary_type2 @@ -111,26 +111,26 @@ def __init__(self, name, input_type: str, binary_type1: str, binary_type2): def forward(self, act): if self._input_type == "unary": - input1 = pybuda.op.Buffer("input1", act) + input1 = forge.op.Buffer("input1", act) elif self._input_type == "matmul": assert self.weights_input - input1 = pybuda.op.Matmul("input1", act, self.weights_input) + input1 = forge.op.Matmul("input1", act, self.weights_input) else: input1 = act - stage2 = pybuda.op.Buffer("stage2", input1) + stage2 = forge.op.Buffer("stage2", input1) if self._binary_type1 == "matmul": - stage3 = pybuda.op.Matmul("binary1", input1, stage2) + stage3 = forge.op.Matmul("binary1", input1, stage2) else: - stage3 = pybuda.op.Add("binary1", input1, stage2) + stage3 = forge.op.Add("binary1", input1, stage2) - stage4 = pybuda.op.Buffer("stage4", stage3) + stage4 = forge.op.Buffer("stage4", stage3) if self._binary_type2 == "matmul": - output = pybuda.op.Matmul("binary2", input1, stage4) + output = forge.op.Matmul("binary2", input1, stage4) else: - output = pybuda.op.Add("binary2", input1, stage4) + output = forge.op.Add("binary2", input1, stage4) return output @@ -145,12 +145,12 @@ def test_fork_skip_connections(test_kind, test_device, input_type, binary_type1, VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, microbatch_count=10, epoch_breaks=epoch_breaks)) -class MultiInputRead(pybuda.PyBudaModule): +class MultiInputRead(forge.ForgeModule): def forward(self, act): - const = pybuda.op.Constant("const", constant=1) - stage1 = pybuda.op.Add("add1", act, const) - stage2 = pybuda.op.Add("add2", act, const) - stage3 = pybuda.op.Add("add3", act, const) + const = forge.op.Constant("const", constant=1) + stage1 = forge.op.Add("add1", act, const) + stage2 = forge.op.Add("add2", act, const) + stage3 = forge.op.Add("add3", act, const) return stage1, stage2, stage3 @@ -163,15 +163,15 @@ def test_multi_read_input(): # Test many parameters with adam optimizer, so that we get optimizer e2e queues import torch -class ManyParams(pybuda.PyBudaModule): +class ManyParams(forge.ForgeModule): def __init__(self, name): super().__init__(name) self.size = 10 - self.params = [pybuda.Parameter(torch.normal(mean=0.0, std=0.1, size=(128, 128)), requires_grad=True) for _ in range(self.size)] + self.params = [forge.Parameter(torch.normal(mean=0.0, std=0.1, size=(128, 128)), requires_grad=True) for _ in range(self.size)] def forward(self, act): for i in range(self.size): - act = pybuda.op.Matmul(f"matmul_{i}", act, self.params[i]) + act = forge.op.Matmul(f"matmul_{i}", act, self.params[i]) return act def test_optimizer_e2e(): diff --git a/pybuda/test/backend/test_gpu_device.py b/forge/test/backend/test_gpu_device.py similarity index 77% rename from pybuda/test/backend/test_gpu_device.py rename to forge/test/backend/test_gpu_device.py index ff910210f..81af4d427 100644 --- a/pybuda/test/backend/test_gpu_device.py +++ b/forge/test/backend/test_gpu_device.py @@ -9,11 +9,11 @@ import torch -import pybuda -from pybuda import PyTorchModule -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind +import forge +from forge import PyTorchModule +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind -from 
pybuda._C.backend_api import BackendType +from forge._C.backend_api import BackendType import nvidia_smi @@ -71,16 +71,16 @@ def test_pt_linear_pipeline(): verify_module(PyTorchModule("pt_linear", MyLinear().cuda()), [(1, 128, 64)], VerifyConfig(test_kind=TestKind.INFERENCE, pcc=0.99), device_type="GPUDevice") -# Sample PyBuda module -class PyBudaTestModule(pybuda.PyBudaModule): +# Sample Forge module +class ForgeTestModule(forge.ForgeModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(torch.rand(32, 32), requires_grad=True) - self.weights2 = pybuda.Parameter(torch.rand(32, 32), requires_grad=True) + self.weights1 = forge.Parameter(torch.rand(32, 32), requires_grad=True) + self.weights2 = forge.Parameter(torch.rand(32, 32), requires_grad=True) def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = pybuda.op.Matmul("matmul2", act2, self.weights2) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act2, self.weights2) return m1 + m2, m2 # Sample PyTorch module @@ -103,9 +103,9 @@ def test_training_pipeline_read_back(): if not torch.cuda.is_available(): pytest.skip("Pytorch didn't detect cuda") - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule("stage0"), devtype=BackendType.Golden) - cpu1 = pybuda.GPUDevice("gpu1", module=pybuda.PyTorchModule("stage1", PyTorchTestModuleOneOut().cuda())) - cpu1.place_loss_module(pybuda.PyTorchModule("l1loss", torch.nn.L1Loss().cuda())) + tt0 = forge.TTDevice("tt0", module=ForgeTestModule("stage0"), devtype=BackendType.Golden) + cpu1 = forge.GPUDevice("gpu1", module=forge.PyTorchModule("stage1", PyTorchTestModuleOneOut().cuda())) + cpu1.place_loss_module(forge.PyTorchModule("l1loss", torch.nn.L1Loss().cuda())) import torch.multiprocessing as mp loss_q = mp_context.Queue() @@ -117,7 +117,7 @@ def test_training_pipeline_read_back(): cpu1.push_to_target_inputs(torch.rand(4, 32, 32)) - pybuda.run_training(checkpoint_queue = checkpoint_q, loss_queue=loss_q) + forge.run_training(checkpoint_queue = checkpoint_q, loss_queue=loss_q) print("checkpoint: ", checkpoint_q.get()) print("loss: ", loss_q.get()) diff --git a/pybuda/test/backend/test_large_matmul.py b/forge/test/backend/test_large_matmul.py similarity index 89% rename from pybuda/test/backend/test_large_matmul.py rename to forge/test/backend/test_large_matmul.py index c0836f8a2..8f065f5eb 100644 --- a/pybuda/test/backend/test_large_matmul.py +++ b/forge/test/backend/test_large_matmul.py @@ -7,22 +7,22 @@ """ import pytest -import pybuda -from pybuda.verify import verify_module, VerifyConfig, TestKind +import forge +from forge.verify import verify_module, VerifyConfig, TestKind import torch -class MatmulTest(pybuda.PyBudaModule): +class MatmulTest(forge.ForgeModule): """ Simple matmul test """ def __init__(self, name, shape): super().__init__(name) - self.weight = pybuda.Parameter(*shape, requires_grad=True) + self.weight = forge.Parameter(*shape, requires_grad=True) def forward(self, act): - out = pybuda.op.Matmul("matmul", act, self.weight) + out = forge.op.Matmul("matmul", act, self.weight) return out diff --git a/pybuda/test/backend/test_loss.py b/forge/test/backend/test_loss.py similarity index 61% rename from pybuda/test/backend/test_loss.py rename to forge/test/backend/test_loss.py index 265c88a33..998079ff4 100644 --- a/pybuda/test/backend/test_loss.py +++ b/forge/test/backend/test_loss.py @@ -5,13 +5,13 @@ Test build-in loss """ -import pybuda +import forge -from pybuda.op.loss import 
L1Loss, CrossEntropyLoss -from pybuda.verify import VerifyConfig, verify_module +from forge.op.loss import L1Loss, CrossEntropyLoss +from forge.verify import VerifyConfig, verify_module import os -class BudaTest(pybuda.PyBudaModule): +class BudaTest(forge.ForgeModule): """ Simple buda module for basic testing """ @@ -20,25 +20,25 @@ class BudaTest(pybuda.PyBudaModule): def __init__(self, name: str): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = pybuda.op.Matmul("matmul2", act2, self.weights2) - add = pybuda.op.Add("add", m1, m2) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act2, self.weights2) + add = forge.op.Add("add", m1, m2) return add def test_l1_loss(test_kind, test_device): - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" verify_module(BudaTest("test_l1_loss"), [(1, *BudaTest.shape), (1, *BudaTest.shape)], loss_module=L1Loss("l1_loss"), verify_cfg=VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch)) def test_ce_loss(test_kind, test_device): - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" verify_module(BudaTest("test_ce_loss"), [(1, *BudaTest.shape), (1, *BudaTest.shape)], loss_module=CrossEntropyLoss("ce_loss"), diff --git a/pybuda/test/backend/test_pipeline.py b/forge/test/backend/test_pipeline.py similarity index 91% rename from pybuda/test/backend/test_pipeline.py rename to forge/test/backend/test_pipeline.py index c8e7f0ee5..aeb74c586 100644 --- a/pybuda/test/backend/test_pipeline.py +++ b/forge/test/backend/test_pipeline.py @@ -9,9 +9,9 @@ import pytest import tensorflow as tf -import pybuda -from pybuda import PyTorchModule, BackendType, TFModule -from pybuda.verify import verify_module_pipeline, VerifyConfig, TestKind +import forge +from forge import PyTorchModule, BackendType, TFModule +from forge.verify import verify_module_pipeline, VerifyConfig, TestKind class PytorchUnary(torch.nn.Module): def forward(self, x): @@ -55,13 +55,13 @@ def forward(self, act): act = act.type(torch.float32) # TODO: make this somewhat automatic? 
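For orientation while reading the renames below: the pattern applied across all of these test files can be summarized in a short sketch. The `MyModule` class here is illustrative only (it is not code from this repository); every renamed name in it is taken from the changes in this patch.

```
# Old API (removed by this patch):
#   import pybuda
#   class MyModule(pybuda.PyBudaModule): ...
#   self.weights = pybuda.Parameter(torch.rand(32, 32), requires_grad=True)
#   pybuda.op.Matmul("matmul", act, self.weights)
#   os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1"

# New API (introduced by this patch):
import torch
import forge

class MyModule(forge.ForgeModule):  # PyBudaModule -> ForgeModule
    def __init__(self, name):
        super().__init__(name)
        self.weights = forge.Parameter(torch.rand(32, 32), requires_grad=True)

    def forward(self, act):
        return forge.op.Matmul("matmul", act, self.weights)

# Environment variables swap the PYBUDA_ prefix for FORGE_
# (e.g. os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1"), and module-level helpers move with
# the package rename (e.g. pybuda.pybuda_reset() -> forge.forge_reset()).
```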
return torch.matmul(act, self.weights) -class PybudaMatmul(pybuda.PyBudaModule): +class ForgeMatmul(forge.ForgeModule): def __init__(self, name): super().__init__(name) - self.weights = pybuda.Parameter(32, 32, requires_grad=True) + self.weights = forge.Parameter(32, 32, requires_grad=True) def forward(self, act): - return pybuda.op.Matmul(self.name + ".matmul", act, self.weights) + return forge.op.Matmul(self.name + ".matmul", act, self.weights) @pytest.mark.parametrize("first", ("CPUDevice", "TTDevice")) def test_training_pipeline_1(first): @@ -76,12 +76,12 @@ def test_training_pipeline_2(first, second): @pytest.mark.parametrize("second", ("CPUDevice", "TTDevice")) @pytest.mark.parametrize("first", ("CPUDevice", "TTDevice")) -def test_training_pipeline_2_pybuda(test_device, first, second): +def test_training_pipeline_2_forge(test_device, first, second): #sequential = test_device.devtype == BackendType.Golden sequential = True verify_module_pipeline([ - PyTorchModule("pipe0", PytorchMatmul()) if first == "CPUDevice" else PybudaMatmul("pipe0"), - PyTorchModule("pipe1", PytorchMatmul()) if second == "CPUDevice" else PybudaMatmul("pipe1"), + PyTorchModule("pipe0", PytorchMatmul()) if first == "CPUDevice" else ForgeMatmul("pipe0"), + PyTorchModule("pipe1", PytorchMatmul()) if second == "CPUDevice" else ForgeMatmul("pipe1"), ], [(1, 32, 32)], VerifyConfig( test_kind=TestKind.TRAINING, arch=test_device.arch, devtype=test_device.devtype, sequential=sequential), @@ -101,11 +101,11 @@ def test_training_pipeline_3(first, second, third): @pytest.mark.parametrize("third", ("CPUDevice", "TTDevice")) @pytest.mark.parametrize("second", ("CPUDevice", "TTDevice")) @pytest.mark.parametrize("first", ("CPUDevice", "TTDevice")) -def test_training_pipeline_3_pybuda(first, second, third): +def test_training_pipeline_3_forge(first, second, third): verify_module_pipeline([ - PyTorchModule("pipe0", PytorchMatmul()) if first == "CPUDevice" else PybudaMatmul("pipe0"), - PyTorchModule("pipe1", PytorchMatmul()) if second == "CPUDevice" else PybudaMatmul("pipe1"), - PyTorchModule("pipe2", PytorchMatmul()) if third == "CPUDevice" else PybudaMatmul("pipe2"), + PyTorchModule("pipe0", PytorchMatmul()) if first == "CPUDevice" else ForgeMatmul("pipe0"), + PyTorchModule("pipe1", PytorchMatmul()) if second == "CPUDevice" else ForgeMatmul("pipe1"), + PyTorchModule("pipe2", PytorchMatmul()) if third == "CPUDevice" else ForgeMatmul("pipe2"), ], [(1, 32, 32)], VerifyConfig(test_kind=TestKind.TRAINING, relative_atol=0.35), device_types=[first, second, third]) diff --git a/forge/test/backend/test_random_grids.py b/forge/test/backend/test_random_grids.py new file mode 100644 index 000000000..b13314821 --- /dev/null +++ b/forge/test/backend/test_random_grids.py @@ -0,0 +1,69 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +import forge +import pytest +from forge.verify import verify_module, VerifyConfig + +microbatch_size = 8 + +class MatmulSimple(forge.ForgeModule): + shape = (256, 256) + def __init__(self, name: str): + super().__init__(name) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) + + def forward(self, act1, act2): + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act2, self.weights2) + return forge.op.Add("add", m1, m2) + +class MatmulDramFork(forge.ForgeModule): + shape = (256, 256) + def __init__(self, name: str): + super().__init__(name) + 
self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) + + def forward(self, act1, act2): + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act1, self.weights2) + add = forge.op.Add("add", m1, m2) + return forge.op.Add("add_final", add, act2) + +class EltwiseFork(forge.ForgeModule): + shape = (256, 256) + def __init__(self, name: str): + super().__init__(name) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) + + def forward(self, act1, act2): + add = forge.op.Add("first_add", act1, act2) + m1 = forge.op.Matmul("matmul1", add, self.weights1) + m2 = forge.op.Matmul("matmul2", add, self.weights2) + return forge.op.Add("add", m1, m2) + +class DoubleFork(forge.ForgeModule): + shape = (256, 256) + def __init__(self, name: str): + super().__init__(name) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) + + def forward(self, act1, act2): + add = forge.op.Add("first_add", act1, act2) + weight_add = forge.op.Add("weight_add", self.weights1, self.weights2) + m1 = forge.op.Matmul("matmul1", add, weight_add) + m2 = forge.op.Matmul("matmul2", add, weight_add) + return forge.op.Add("add", m1, m2) + +@pytest.mark.parametrize("model", [MatmulSimple, MatmulDramFork, EltwiseFork, DoubleFork]) +def test(test_kind, test_device, model): + forge.set_configuration_options(balancer_policy="Random") + + verify_module(model("random_grid"), [(microbatch_size, *model.shape), (microbatch_size, *model.shape)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch)) + + diff --git a/pybuda/test/backend/test_silicon.py b/forge/test/backend/test_silicon.py similarity index 82% rename from pybuda/test/backend/test_silicon.py rename to forge/test/backend/test_silicon.py index 117f8c7d4..f7426f3d1 100644 --- a/pybuda/test/backend/test_silicon.py +++ b/forge/test/backend/test_silicon.py @@ -7,14 +7,14 @@ import pytest -import pybuda -from pybuda.verify import verify_module, VerifyConfig, TestKind -from pybuda.ttdevice import get_device_config -from pybuda.config import _get_global_compiler_config -from pybuda.pybudaglobal import pybuda_reset +import forge +from forge.verify import verify_module, VerifyConfig, TestKind +from forge.ttdevice import get_device_config +from forge.config import _get_global_compiler_config +from forge.forgeglobal import forge_reset import torch -class BudaTrain(pybuda.PyBudaModule): +class BudaTrain(forge.ForgeModule): """ Simple buda module for basic testing, with parameters """ @@ -23,17 +23,17 @@ class BudaTrain(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1, act2): - in1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - in2 = pybuda.op.Matmul("matmul2", act2, self.weights2) - sum_sqrt = pybuda.op.Sqrt("sqrt", in1) - sum = pybuda.op.Add("add", sum_sqrt, in2) + in1 = forge.op.Matmul("matmul1", act1, self.weights1) + in2 = forge.op.Matmul("matmul2", act2, self.weights2) + sum_sqrt = forge.op.Sqrt("sqrt", in1) + sum = forge.op.Add("add", sum_sqrt, in2) return sum -class 
BudaTest(pybuda.PyBudaModule): +class BudaTest(forge.ForgeModule): """ Simple buda module for basic testing No parameters for now, to avoid using rams @@ -45,8 +45,8 @@ def __init__(self, name): super().__init__(name) def forward(self, act1, act2): - sum = pybuda.op.Add("add", act1, act2) - sum_sqrt = pybuda.op.Sqrt("sqrt", sum) + sum = forge.op.Add("add", act1, act2) + sum_sqrt = forge.op.Sqrt("sqrt", sum) return sum_sqrt @pytest.mark.parametrize("microbatch_size", (1, 64), ids=("microbatch1", "microbatch64")) @@ -114,7 +114,7 @@ def test_concurrent(test_kind, test_device, microbatch_size, microbatch_count): sequential=False)) import tensorflow as tf -from pybuda import TFModule +from forge import TFModule @pytest.mark.skip(reason="TF and fp32 problems") def test_tf(test_device): @@ -126,19 +126,19 @@ def call(self, act1, act2): VerifyConfig(test_kind=TestKind.INFERENCE, devtype=test_device.devtype, arch=test_device.arch)) -class MultiChipModule(pybuda.PyBudaModule): +class MultiChipModule(forge.ForgeModule): def __init__(self, name: str, num_devices: int): super().__init__(name) self.num_devices = num_devices - self.weights = [pybuda.Parameter(64, 64, name = f"weights_{i}") for i in range(self.num_devices)] + self.weights = [forge.Parameter(64, 64, name = f"weights_{i}") for i in range(self.num_devices)] def forward(self, act): val = act for i in range(self.num_devices): - val = pybuda.op.Matmul(f"matmul_{i}", val, self.weights[i]) - val = pybuda.op.Gelu(f"gelu_{i}", val) - pybuda.set_chip_break(f"matmul_{i}") + val = forge.op.Matmul(f"matmul_{i}", val, self.weights[i]) + val = forge.op.Gelu(f"gelu_{i}", val) + forge.set_chip_break(f"matmul_{i}") return val @@ -146,10 +146,10 @@ def check_for_multi_chip_silicon(test_device): """ Skip the test if there is only one (or fewer) silicon device available. Return the number of devices. """ - if test_device.devtype == pybuda.BackendType.Golden: + if test_device.devtype == forge.BackendType.Golden: pytest.skip("Not meant for golden") - num_devices = len(pybuda.detect_available_devices()) + num_devices = len(forge.detect_available_devices()) if num_devices < 2: pytest.skip("Need at least 2 devices to run multi-chip test") @@ -170,7 +170,7 @@ def test_multi_chip(test_kind, test_device): def test_chip_id(test_kind, test_device): - if test_device.devtype == pybuda.BackendType.Golden: + if test_device.devtype == forge.BackendType.Golden: pytest.skip("Not meant for golden") num_devices = check_for_multi_chip_silicon(test_device) diff --git a/pybuda/test/benchmark/README.md b/forge/test/benchmark/README.md similarity index 92% rename from pybuda/test/benchmark/README.md rename to forge/test/benchmark/README.md index 762b7ffd3..25c173783 100644 --- a/pybuda/test/benchmark/README.md +++ b/forge/test/benchmark/README.md @@ -1,4 +1,4 @@ -PyBuda Benchmark Infrastructure +Forge Benchmark Infrastructure =============================== `benchmark.py` provides an easy way to benchmark the performance of a supported model, while varying configurations and compiler options.
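A supported model is a function registered with the `benchmark_model` decorator. As a rough illustration assembled from the model entries touched later in this patch (the `my_model` function, its import path comment, and the `torch.nn.Linear` stand-in are hypothetical, not repository code), such an entry looks roughly like:

```
import torch
import forge
from benchmark.common import benchmark_model  # illustrative import path; the model files in this patch use `from ..common import benchmark_model`


@benchmark_model(configs=["224"])
def my_model(training: bool, config: str, microbatch: int, devtype: str, arch: str, data_type: str, math_fidelity: str):
    # A script-level balancer policy of "default" lets the entry pick its own policy.
    compiler_cfg = forge.config._get_global_compiler_config()
    if compiler_cfg.balancer_policy == "default":
        compiler_cfg.balancer_policy = "Ribbon"

    model = torch.nn.Linear(224, 224)  # stand-in for a real timm/torchvision/transformers model
    if training:
        model.train()
    else:
        model.eval()

    modules = {"tt": forge.PyTorchModule(f"pt_my_model_{config}", model)}
    inputs = [torch.rand(microbatch, 3, 224, 224)]
    targets = []
    if training:
        modules["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss())
        targets = [torch.rand(1, 100)]

    # benchmark.py consumes: a dict of modules, sample inputs, targets, and extra kwargs.
    return modules, inputs, targets, {}
```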
The script will measure @@ -8,9 +8,9 @@ The script optionally outputs a .json file with benchmark results and the options used, allowing the user to run multiple benchmarks back-to-back, like: ``` -pybuda/test/benchmark/benchmark.py -m bert -c tiny -o perf.json -pybuda/test/benchmark/benchmark.py -m bert -c base -o perf.json -pybuda/test/benchmark/benchmark.py -m bert -c large -o perf.json +forge/test/benchmark/benchmark.py -m bert -c tiny -o perf.json +forge/test/benchmark/benchmark.py -m bert -c base -o perf.json +forge/test/benchmark/benchmark.py -m bert -c large -o perf.json ``` `perf.json` will have performance results for all 3 configurations of bert. @@ -18,7 +18,7 @@ pybuda/test/benchmark/benchmark.py -m bert -c large -o perf.json To see which models and configurations are currently supported, run: ``` -pybuda/test/benchmark/benchmark.py --list +forge/test/benchmark/benchmark.py --list ``` Full Usage diff --git a/pybuda/test/benchmark/benchmark.py b/forge/test/benchmark/benchmark.py similarity index 86% rename from pybuda/test/benchmark/benchmark.py rename to forge/test/benchmark/benchmark.py index ac19bbd59..7bd08f616 100755 --- a/pybuda/test/benchmark/benchmark.py +++ b/forge/test/benchmark/benchmark.py @@ -11,15 +11,15 @@ import queue import socket -import pybuda +import forge import torch from benchmark.common import get_models, df_from_str, mf_from_str, trace_from_str -from pybuda._C.backend_api import BackendDevice, BackendType +from forge._C.backend_api import BackendDevice, BackendType # Resolve imports for functional models import sys -sys.path.insert(1, 'pybuda') +sys.path.insert(1, 'forge') # models import benchmark.models.bert @@ -51,7 +51,7 @@ def single_thread_generative_model_run(args, first_device, last_device, inputs, if args.training: assert False, "Training currently not supported in single-threaded mode" - from pybuda.pybudaglobal import TILE_DIM + from forge.forgeglobal import TILE_DIM # input_ids, encoder_attention_mask, input_length, decoder_input_ids, decoder_attention_mask, # first_current_index, tokenizer.pad_token_id, input_ids = inputs[0] @@ -69,7 +69,7 @@ def single_thread_generative_model_run(args, first_device, last_device, inputs, first_device.push_to_inputs((input_ids, encoder_attention_mask)) else: first_device.push_to_inputs((input_ids,)) - pybuda.run_forward() + forge.run_forward() ans = output_q.get() encoder_last_hidden_state = ans[0].value().detach() generated_tokens = [] @@ -80,7 +80,7 @@ def single_thread_generative_model_run(args, first_device, last_device, inputs, first_device.set_active_subgraph(1) generate_inputs = (decoder_input_ids, decoder_attention_mask, encoder_last_hidden_state, encoder_attention_mask) first_device.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=args.loop_count, write_index=write_index) + forge.run_generate(input_count=args.loop_count, write_index=write_index) ans = output_q.get() else: if current_token_index == 1: @@ -88,7 +88,7 @@ def single_thread_generative_model_run(args, first_device, last_device, inputs, first_device.set_active_subgraph(2) generate_inputs = (decoder_input_ids, decoder_attention_mask, encoder_attention_mask) first_device.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=args.loop_count, write_index=write_index) + forge.run_generate(input_count=args.loop_count, write_index=write_index) ans = output_q.get() if is_text_inputs or current_token_index < 2: @@ -103,7 +103,7 @@ def single_thread_generative_model_run(args, first_device, last_device, inputs, past_cache_pages =
current_token_index // TILE_DIM # after one page of past cache, we have to rotate. first_device.set_active_subgraph(3) - pybuda.run_generate(input_count=0, write_index=0) + forge.run_generate(input_count=0, write_index=0) pages_current = 1 decoder_attention_mask[0, -(past_cache_pages + pages_current) * TILE_DIM:] = 1 @@ -130,11 +130,11 @@ def single_thread_run(args, first_device, last_device, inputs, targets, output_q if num_tokens_to_generate: for _ in range(num_tokens_to_generate): first_device.push_to_inputs(inputs) - pybuda.run_generate(input_count=args.loop_count, write_index=0) + forge.run_generate(input_count=args.loop_count, write_index=0) output_q.get() else: first_device.push_to_inputs(inputs) - pybuda.run_forward(input_count=args.loop_count) + forge.run_forward(input_count=args.loop_count) output_q.get() end_time = time.time() @@ -153,7 +153,7 @@ def multi_thread_run(args, first_device, last_device, inputs, targets, output_q, def push_inputs_thread(): loop_count = num_tokens_to_generate if num_tokens_to_generate else args.loop_count for _ in range(loop_count): - if pybuda.error_raised(): + if forge.error_raised(): print(" * Aborting input thread due to error") return first_device.push_to_inputs(inputs) @@ -171,7 +171,7 @@ def pop_outputs_thread(output_q): output_q.get(timeout=1) break # got data, break out of forever loop except queue.Empty as _: - if pybuda.error_raised(): + if forge.error_raised(): print(" * Aborting output thread due to error") return @@ -179,14 +179,14 @@ def pop_outputs_thread(output_q): # Define input and output threads # input_thread = threading.Thread(target=push_inputs_thread) - output = output_q if not args.training else pybuda.get_loss_queue() + output = output_q if not args.training else forge.get_loss_queue() output_thread = threading.Thread(target=pop_outputs_thread, args=(output, )) output_thread.start() # # Sync - Make sure all process setup, compile, etc. 
is done # - pybuda.sync() + forge.sync() # # Run @@ -202,19 +202,19 @@ def pop_outputs_thread(output_q): print(f'loop_count: {args.loop_count} gives {args.loop_count//args.microbatch_count} training batches of [fwd,bwd]', flush=True) # TODO: use microbatch count / accumulation steps, depending on number of devices in pipeline for _ in range(args.loop_count//args.microbatch_count): - pybuda.run_forward(input_count=args.microbatch_count) - pybuda.run_backward(input_count=args.microbatch_count) + forge.run_forward(input_count=args.microbatch_count) + forge.run_backward(input_count=args.microbatch_count) else: if num_tokens_to_generate: for _ in range(num_tokens_to_generate): - pybuda.run_generate(input_count=args.loop_count, write_index=0) + forge.run_generate(input_count=args.loop_count, write_index=0) else: - pybuda.run_forward(input_count=args.loop_count) + forge.run_forward(input_count=args.loop_count) input_thread.join() output_thread.join() if args.training: - pybuda.sync() # wait for the last backward to finish + forge.sync() # wait for the last backward to finish end_time = time.time() @@ -229,20 +229,20 @@ def print_start_info(): def run( args, - duts: Dict[str, Union[pybuda.PyTorchModule, pybuda.PyBudaModule]], + duts: Dict[str, Union[forge.PyTorchModule, forge.ForgeModule]], inputs: Tuple[torch.Tensor, ...], targets: Tuple[torch.Tensor, ...], other: Dict[str, object]): # Emulate runs on harvested machines - from pybuda._C.backend_api import BackendDevice - available_devices = pybuda.detect_available_devices() + from forge._C.backend_api import BackendDevice + available_devices = forge.detect_available_devices() if available_devices and not args.galaxy: if available_devices[0] == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_FORCE_EMULATE_HARVESTED"] = "1" + os.environ["FORGE_FORCE_EMULATE_HARVESTED"] = "1" # Set default configuration type - pybuda.config.set_configuration_options(default_df_override=df_from_str(args.dataformat), backend_runtime_params_path=args.runtime_params_yaml, device_config=args.device_config) + forge.config.set_configuration_options(default_df_override=df_from_str(args.dataformat), backend_runtime_params_path=args.runtime_params_yaml, device_config=args.device_config) # Override push timeout on slow runs os.environ["TT_BACKEND_PUSH_TIMEOUT"] = "500" @@ -253,8 +253,8 @@ def run( first_device = None last_device = None if "cpu-pre" in duts: - assert isinstance(duts["cpu-pre"], pybuda.PyTorchModule) - cpu_pre = pybuda.CPUDevice("cpu-pre", module=duts["cpu-pre"]) + assert isinstance(duts["cpu-pre"], forge.PyTorchModule) + cpu_pre = forge.CPUDevice("cpu-pre", module=duts["cpu-pre"]) first_device = cpu_pre assert args.arch @@ -263,15 +263,15 @@ def run( assert "tt" in duts if args.save_tti: - tt = pybuda.TTDevice("tt0", module=duts["tt"], fp32_fallback=df_from_str(args.dataformat), num_chips=args.chips, arch=arch, devtype=devtype) + tt = forge.TTDevice("tt0", module=duts["tt"], fp32_fallback=df_from_str(args.dataformat), num_chips=args.chips, arch=arch, devtype=devtype) elif args.load_tti: - img = pybuda.TTDeviceImage.load_from_disk(args.load_tti) + img = forge.TTDeviceImage.load_from_disk(args.load_tti) img.info() - tt = pybuda.TTDevice.load_image(img=img) + tt = forge.TTDevice.load_image(img=img) elif args.galaxy: - tt = pybuda.TTDevice("tt0", module=duts["tt"], arch=arch, devtype=devtype, fp32_fallback=df_from_str(args.dataformat), chip_ids=[0, 11, 10, 9, 8, 7, 19, 20, 21, 22, 23, 24, 6, 5, 14, 13, 12, 16, 15, 3, 4, 26, 25, 32, 31, 30, 29, 28, 27, 1, 2, 18, 
17]) + tt = forge.TTDevice("tt0", module=duts["tt"], arch=arch, devtype=devtype, fp32_fallback=df_from_str(args.dataformat), chip_ids=[0, 11, 10, 9, 8, 7, 19, 20, 21, 22, 23, 24, 6, 5, 14, 13, 12, 16, 15, 3, 4, 26, 25, 32, 31, 30, 29, 28, 27, 1, 2, 18, 17]) else: - tt = pybuda.TTDevice("tt0", module=duts["tt"], arch=arch, devtype=devtype, fp32_fallback=df_from_str(args.dataformat), num_chips=args.chips) + tt = forge.TTDevice("tt0", module=duts["tt"], arch=arch, devtype=devtype, fp32_fallback=df_from_str(args.dataformat), num_chips=args.chips) if first_device is None: first_device = tt @@ -279,27 +279,27 @@ def run( cpu_post = None if "cpu-post" in duts: - assert isinstance(duts["cpu-post"], pybuda.PyTorchModule) - cpu_post = pybuda.CPUDevice("cpu-post", module=duts["cpu-post"]) + assert isinstance(duts["cpu-post"], forge.PyTorchModule) + cpu_post = forge.CPUDevice("cpu-post", module=duts["cpu-post"]) last_device = cpu_post if "cpu-loss" in duts: - assert isinstance(duts["cpu-loss"], pybuda.PyTorchModule) + assert isinstance(duts["cpu-loss"], forge.PyTorchModule) if cpu_post is None: # no cpu-post module - identity = pybuda.PyTorchModule("identity0", torch.nn.Identity()) - cpu_post = pybuda.CPUDevice("cpu-post", module=identity) + identity = forge.PyTorchModule("identity0", torch.nn.Identity()) + cpu_post = forge.CPUDevice("cpu-post", module=identity) cpu_post.place_loss_module(duts["cpu-loss"]) last_device = cpu_post - enable_auto_fusing = "PYBUDA_DISABLE_FUSE_OPS" not in os.environ + enable_auto_fusing = "FORGE_DISABLE_FUSE_OPS" not in os.environ if args.perf_analysis: - if "PYBUDA_OP_PERF" not in os.environ: - os.environ["PYBUDA_OP_PERF"] = "1" + if "FORGE_OP_PERF" not in os.environ: + os.environ["FORGE_OP_PERF"] = "1" if "TT_BACKEND_PERF_ANALYZER" not in os.environ: os.environ["TT_BACKEND_PERF_ANALYZER"] = "1" - pybuda.set_configuration_options( + forge.set_configuration_options( math_fidelity=mf_from_str(args.math_fidelity), performance_trace=trace_from_str(args.trace), backend_opt_level=args.backend_opt_level, @@ -329,7 +329,7 @@ def run( args.loop_count = 1 if args.chips == 0: - args.chips = len(pybuda.detect_available_devices()) + args.chips = len(forge.detect_available_devices()) if args.chips == 0: raise RuntimeError("No tenstorrent devices found.") @@ -341,9 +341,9 @@ def run( print(f'Using loop_count: {args.loop_count} microbatch_count: {args.microbatch_count} microbatch: {args.microbatch} chips: {args.chips}') # TODO: For silicon device runs, it seems that the `tt` from user-side is not - # the one being used with api calls like pybuda.run_forward(..). We'll fetch + # the one being used with api calls like forge.run_forward(..). 
We'll fetch # the arch from the first device-type available - device_list = pybuda.detect_available_devices() + device_list = forge.detect_available_devices() arch = device_list[0] if len(device_list) > 0 else tt.arch # @@ -353,7 +353,7 @@ def run( tt.compile_to_image(img_path=args.save_tti, training=args.training, sample_inputs=compile_inputs, sample_targets=targets) exit(0) - output_q = pybuda.initialize_pipeline(training=args.training, sample_inputs=compile_inputs, microbatch_count=args.microbatch_count, _verify_cfg=pybuda.VerifyConfig.disabled(), sample_targets=targets) + output_q = forge.initialize_pipeline(training=args.training, sample_inputs=compile_inputs, microbatch_count=args.microbatch_count, _verify_cfg=forge.VerifyConfig.disabled(), sample_targets=targets) if args.single_thread: if args.generative: @@ -363,7 +363,7 @@ def run( else: start_time, end_time = multi_thread_run(args, first_device, last_device, inputs, targets, output_q, num_tokens_to_generate) - if pybuda.error_raised(): + if forge.error_raised(): print("*********************************") print(" Error raised, aborting benchmark") print("*********************************") @@ -421,7 +421,7 @@ def run( parser.add_argument( '--layers', default=0, type=int, help='Number of layers to run on models where this is applicable (i.e. nlp encoders/decoders)') parser.add_argument( '--trace', default="none", choices=["none", "light", "verbose"], help='Performance trace to be generated during the run.') parser.add_argument('-l', '--list', action='store_true', help='List available models and configurations') - parser.add_argument('-e', '--env', default="", help='List of environment variable settings, i.e. "PYBUDA_OPT1=1 PYBUDA_OP2=1" to run with.') + parser.add_argument('-e', '--env', default="", help='List of environment variable settings, i.e. "FORGE_OPT1=1 FORGE_OP2=1" to run with.') parser.add_argument('-o', '--output', help='Output json file to write results to, optionally. 
If file already exists, results will be appended.') parser.add_argument( '--disable_output', default=0, type=int, choices=[0, 1], help='Disables the generation of the output json file') parser.add_argument( '--load_tti', default="", type=str, help='Skip compile and load from TTI-archive configured for silicon (specify path to TTI).') @@ -481,7 +481,7 @@ def run( if args.save_tti: print(f"Saving TTDevice Image to: {args.save_tti}") - pybuda.pybuda_reset() + forge.forge_reset() if args.env != "": envs = args.env.split(" ") @@ -495,7 +495,7 @@ def run( # Set bert multichip placement policy if args.model == "bert" and args.chips > 1: - os.environ["PYBUDA_MULTICHIP_BERT"] = str(args.chips) + os.environ["FORGE_MULTICHIP_BERT"] = str(args.chips) kwargs = { "training": args.training, @@ -504,7 +504,7 @@ def run( "math_fidelity": args.math_fidelity } - device_list = pybuda.detect_available_devices() + device_list = forge.detect_available_devices() if device_list: args.arch = device_list[0].name.lower() elif not args.arch: @@ -533,7 +533,7 @@ def run( # Balancer policy can be set thru benchmark script (as argument) and within the test itself # If the benchmark script is set to "default", the test will be able to override it, otherwise the script's balancer policy will have priority if args.balancer_policy != "default": - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = args.balancer_policy model_config = models[args.model]["func"](**kwargs) diff --git a/pybuda/test/benchmark/benchmark/__init__.py b/forge/test/benchmark/benchmark/__init__.py similarity index 100% rename from pybuda/test/benchmark/benchmark/__init__.py rename to forge/test/benchmark/benchmark/__init__.py diff --git a/pybuda/test/benchmark/benchmark/common/__init__.py b/forge/test/benchmark/benchmark/common/__init__.py similarity index 100% rename from pybuda/test/benchmark/benchmark/common/__init__.py rename to forge/test/benchmark/benchmark/common/__init__.py diff --git a/pybuda/test/benchmark/benchmark/common/common.py b/forge/test/benchmark/benchmark/common/common.py similarity index 74% rename from pybuda/test/benchmark/benchmark/common/common.py rename to forge/test/benchmark/benchmark/common/common.py index b49cfcd27..31a184e4d 100644 --- a/pybuda/test/benchmark/benchmark/common/common.py +++ b/forge/test/benchmark/benchmark/common/common.py @@ -7,8 +7,8 @@ import functools from dataclasses import dataclass -import pybuda -from pybuda._C.backend_api import BackendType, BackendDevice +import forge +from forge._C.backend_api import BackendType, BackendDevice MODELS = {} @@ -29,7 +29,7 @@ def wrapper(*args, **kwargs): assert isinstance(ret[0], list), err_msg assert len(ret[0]) > 0 and len(ret[0]) <= 3, err_msg for m in ret[0]: - assert isinstance(m, pybuda.Module), err_msg + assert isinstance(m, forge.Module), err_msg assert isinstance(ret[1], list), err_msg assert len(ret[1]) > 0, err_msg for s in ret[1]: @@ -41,56 +41,56 @@ def wrapper(*args, **kwargs): return benchmark_decorator -def df_from_str(df: str) -> pybuda.DataFormat: +def df_from_str(df: str) -> forge.DataFormat: if (df == "Fp32"): - return pybuda.DataFormat.Float32 + return forge.DataFormat.Float32 if (df == "Fp16"): - return pybuda.DataFormat.Float16 + return forge.DataFormat.Float16 if (df == "Fp16_b"): - return pybuda.DataFormat.Float16_b + return forge.DataFormat.Float16_b if (df == "Bfp8"): - return pybuda.DataFormat.Bfp8 + return forge.DataFormat.Bfp8 if (df == 
"Bfp8_b"): - return pybuda.DataFormat.Bfp8_b + return forge.DataFormat.Bfp8_b if (df == "Bfp4"): - return pybuda.DataFormat.Bfp4 + return forge.DataFormat.Bfp4 if (df == "Bfp4_b"): - return pybuda.DataFormat.Bfp4_b + return forge.DataFormat.Bfp4_b raise RuntimeError("Unknown format: " + df) -def mf_from_str(mf: str) -> pybuda.MathFidelity: +def mf_from_str(mf: str) -> forge.MathFidelity: if (mf == "LoFi"): - return pybuda.MathFidelity.LoFi + return forge.MathFidelity.LoFi if (mf == "HiFi2"): - return pybuda.MathFidelity.HiFi2 + return forge.MathFidelity.HiFi2 if (mf == "HiFi3"): - return pybuda.MathFidelity.HiFi3 + return forge.MathFidelity.HiFi3 if (mf == "HiFi4"): - return pybuda.MathFidelity.HiFi4 + return forge.MathFidelity.HiFi4 raise RuntimeError("Unknown math fidelity: " + mf) -def trace_from_str(trace: str) -> pybuda.PerfTraceLevel: +def trace_from_str(trace: str) -> forge.PerfTraceLevel: if (trace == "none"): - return pybuda.PerfTraceLevel.NONE + return forge.PerfTraceLevel.NONE if (trace == "light"): - return pybuda.PerfTraceLevel.LIGHT + return forge.PerfTraceLevel.LIGHT if (trace == "verbose"): - return pybuda.PerfTraceLevel.VERBOSE + return forge.PerfTraceLevel.VERBOSE raise RuntimeError("Unknown trace type: " + trace) diff --git a/pybuda/test/benchmark/benchmark/models/__init__.py b/forge/test/benchmark/benchmark/models/__init__.py similarity index 100% rename from pybuda/test/benchmark/benchmark/models/__init__.py rename to forge/test/benchmark/benchmark/models/__init__.py diff --git a/pybuda/test/benchmark/benchmark/models/bert.py b/forge/test/benchmark/benchmark/models/bert.py similarity index 73% rename from pybuda/test/benchmark/benchmark/models/bert.py rename to forge/test/benchmark/benchmark/models/bert.py index 04fa14984..6c93ebdaf 100644 --- a/pybuda/test/benchmark/benchmark/models/bert.py +++ b/forge/test/benchmark/benchmark/models/bert.py @@ -3,12 +3,12 @@ # SPDX-License-Identifier: Apache-2.0 import os import math -import pybuda +import forge import torch from typing import Optional -from pybuda import PyTorchModule -from pybuda.config import _get_global_compiler_config +from forge import PyTorchModule +from forge.config import _get_global_compiler_config from transformers import BertModel, BertConfig, BertForSequenceClassification @@ -41,7 +41,7 @@ def forward(self, input_ids, token_type_ids): @benchmark_model(configs=["tiny", "base", "large", "base_tc", "large_tc"]) def bert(training: bool, config: str, microbatch: int, devtype: str, arch: str, data_type: str, math_fidelity: str, force_num_layers: Optional[int] = None): - from pybuda._C.backend_api import BackendDevice + from forge._C.backend_api import BackendDevice compiler_cfg = _get_global_compiler_config() @@ -50,7 +50,7 @@ def bert(training: bool, config: str, microbatch: int, devtype: str, arch: str, seq_len = 128 target_microbatch = 512 compiler_cfg.enable_auto_transposing_placement = True - os.environ["PYBUDA_EXP_APPROX"] = "1" + os.environ["FORGE_EXP_APPROX"] = "1" elif config == "base": model_name = "bert-base-uncased" seq_len = 128 @@ -65,29 +65,29 @@ def bert(training: bool, config: str, microbatch: int, devtype: str, arch: str, target_microbatch = 128 # start each epoch at the beginning of the module - if "PYBUDA_MODULAR_BERT" in os.environ: + if "FORGE_MODULAR_BERT" in os.environ: layers = force_num_layers if force_num_layers else 24 for i in range(layers): - pybuda.config._get_global_compiler_config().place_on_new_epoch(f"matmul_{2+53*i}") + 
forge.config._get_global_compiler_config().place_on_new_epoch(f"matmul_{2+53*i}") - elif "PYBUDA_MULTICHIP_BERT" in os.environ: + elif "FORGE_MULTICHIP_BERT" in os.environ: layers = force_num_layers if force_num_layers else 24 - chips = int(os.environ["PYBUDA_MULTICHIP_BERT"]) + chips = int(os.environ["FORGE_MULTICHIP_BERT"]) chip_breaks = [] for i in range(chips): chip_breaks.append(i*(math.ceil(layers/chips))) for i in range(layers): if i in chip_breaks: - pybuda.config._get_global_compiler_config().place_on_new_chip(f"matmul_{2+53*i}") + forge.config._get_global_compiler_config().place_on_new_chip(f"matmul_{2+53*i}") else: - pybuda.config._get_global_compiler_config().place_on_new_epoch(f"matmul_{2+53*i}") + forge.config._get_global_compiler_config().place_on_new_epoch(f"matmul_{2+53*i}") else: # Trying to avoid 4x output bw - manual for now layers = force_num_layers if force_num_layers else 24 for i in range(layers): if (i%2 == 1): - pybuda.config._get_global_compiler_config().place_on_new_epoch(f"matmul_{41+53*i}") + forge.config._get_global_compiler_config().place_on_new_epoch(f"matmul_{41+53*i}") elif config == "base_tc": model_name = "textattack/bert-base-uncased-SST-2" seq_len = 128 @@ -99,16 +99,16 @@ def bert(training: bool, config: str, microbatch: int, devtype: str, arch: str, compiler_cfg.enable_auto_transposing_placement = True if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" - os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" if data_type == "Bfp8_b": - if pybuda.detect_available_devices()[0] != BackendDevice.Grayskull: - os.environ["PYBUDA_FORK_JOIN_BUF_QUEUES"] = "1" - os.environ["PYBUDA_EXP_APPROX"] = "1" - pybuda.config.configure_mixed_precision(op_type="add", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision(op_type="subtract", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision(op_type="reciprocal", output_df=pybuda.DataFormat.Float16_b) + if forge.detect_available_devices()[0] != BackendDevice.Grayskull: + os.environ["FORGE_FORK_JOIN_BUF_QUEUES"] = "1" + os.environ["FORGE_EXP_APPROX"] = "1" + forge.config.configure_mixed_precision(op_type="add", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="subtract", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="reciprocal", output_df=forge.DataFormat.Float16_b) else: raise RuntimeError("Unknown config") @@ -145,7 +145,7 @@ def bert(training: bool, config: str, microbatch: int, devtype: str, arch: str, torch.randint(high=2, size=(microbatch, seq_len), dtype=torch.int), # token type IDs ] models = {"tt": PyTorchModule("bert", BertEncoderWrapper(model))} - pybuda.config._get_global_compiler_config().cpu_fallback_ops.remove("embedding") + forge.config._get_global_compiler_config().cpu_fallback_ops.remove("embedding") targets = tuple() if training: diff --git a/pybuda/test/benchmark/benchmark/models/custom/custom_resnet_highres.py b/forge/test/benchmark/benchmark/models/custom/custom_resnet_highres.py similarity index 68% rename from pybuda/test/benchmark/benchmark/models/custom/custom_resnet_highres.py rename to forge/test/benchmark/benchmark/models/custom/custom_resnet_highres.py index 
f76aff8e7..67cb2256d 100644 --- a/pybuda/test/benchmark/benchmark/models/custom/custom_resnet_highres.py +++ b/forge/test/benchmark/benchmark/models/custom/custom_resnet_highres.py @@ -6,9 +6,9 @@ import torch from loguru import logger -import pybuda -from pybuda import OnnxModule -from pybuda.config import _get_global_compiler_config +import forge +from forge import OnnxModule +from forge.config import _get_global_compiler_config from ...common import benchmark_model @@ -24,7 +24,7 @@ def custom_resnet_highres(training: bool, config: str, microbatch: int, devtype: # Load ONNX model onnx_model = onnx.load(model_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "CUSTOM_ResNet_HighRes", onnx_model, model_path, @@ -37,17 +37,17 @@ def custom_resnet_highres(training: bool, config: str, microbatch: int, devtype: compiler_cfg.balancer_policy = "Ribbon" # Overrides - os.environ["PYBUDA_RIBBON2"] = "1" - #os.environ["PYBUDA_RIBBON2_DISABLE_CLEANUP_BUF_NOPS"] = "1" - #os.environ["PYBUDA_SPARSE_ENABLE_LAYOUT_DATAFLOW"] = "1" - #os.environ["PYBUDA_MAXMIZE_SPARSE_UBLOCK"] = "1" - #os.environ["PYBUDA_DISABLE_CAP_SPARSE_MM_FIDELITY"] = "1" - #os.environ["PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" + os.environ["FORGE_RIBBON2"] = "1" + #os.environ["FORGE_RIBBON2_DISABLE_CLEANUP_BUF_NOPS"] = "1" + #os.environ["FORGE_SPARSE_ENABLE_LAYOUT_DATAFLOW"] = "1" + #os.environ["FORGE_MAXMIZE_SPARSE_UBLOCK"] = "1" + #os.environ["FORGE_DISABLE_CAP_SPARSE_MM_FIDELITY"] = "1" + #os.environ["FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" #os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{77*1024}" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" - models = {"tt" : pybuda_onnx_model} + models = {"tt" : forge_onnx_model} dimension = onnx_model.graph.input[0].type.tensor_type.shape input_shape = [d.dim_value for d in dimension.dim] inputs = [torch.rand(*input_shape)] diff --git a/pybuda/test/benchmark/benchmark/models/custom/custom_vit_highres.py b/forge/test/benchmark/benchmark/models/custom/custom_vit_highres.py similarity index 84% rename from pybuda/test/benchmark/benchmark/models/custom/custom_vit_highres.py rename to forge/test/benchmark/benchmark/models/custom/custom_vit_highres.py index 886e99818..8eeccda77 100644 --- a/pybuda/test/benchmark/benchmark/models/custom/custom_vit_highres.py +++ b/forge/test/benchmark/benchmark/models/custom/custom_vit_highres.py @@ -6,9 +6,9 @@ import torch from loguru import logger -import pybuda -from pybuda import OnnxModule -from pybuda.config import _get_global_compiler_config +import forge +from forge import OnnxModule +from forge.config import _get_global_compiler_config from ...common import benchmark_model @@ -24,7 +24,7 @@ def custom_vit_highres(training: bool, config: str, microbatch: int, devtype: st # Load ONNX model onnx_model = onnx.load(model_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "CUSTOM_ViT_HighRes", onnx_model, model_path, @@ -35,9 +35,9 @@ def custom_vit_highres(training: bool, config: str, microbatch: int, devtype: st compiler_cfg.balancer_policy = "Ribbon" # Overrides - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - models = {"tt" : pybuda_onnx_model} + models = {"tt" : forge_onnx_model} dimension = 
onnx_model.graph.input[0].type.tensor_type.shape input_shape = [d.dim_value for d in dimension.dim] inputs = [torch.rand(*input_shape)] diff --git a/pybuda/test/benchmark/benchmark/models/deit.py b/forge/test/benchmark/benchmark/models/deit.py similarity index 73% rename from pybuda/test/benchmark/benchmark/models/deit.py rename to forge/test/benchmark/benchmark/models/deit.py index f6c6eba67..c6ed21e76 100644 --- a/pybuda/test/benchmark/benchmark/models/deit.py +++ b/forge/test/benchmark/benchmark/models/deit.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch from ..common import benchmark_model, generate_test_device -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config from test.model_demos.models.deit import generate_model_deit_imgcls_hf_pytorch @@ -19,19 +19,19 @@ def deit(training: bool, config: str, microbatch: int, devtype: str, arch: str, if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # These are about to be enabled by default. # - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" if data_type == "Fp16_b": - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES_APPLY_FILTERING"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES_APPLY_FILTERING"] = "1" if data_type == "Bfp8_b": - os.environ["PYBUDA_FORK_JOIN_BUF_QUEUES"] = "1" - pybuda.config.configure_mixed_precision(op_type="reciprocal", output_df=pybuda.DataFormat.Float16_b) - os.environ["PYBUDA_FUSE_DF_OVERRIDE"] = "0" + os.environ["FORGE_FORK_JOIN_BUF_QUEUES"] = "1" + forge.config.configure_mixed_precision(op_type="reciprocal", output_df=forge.DataFormat.Float16_b) + os.environ["FORGE_FUSE_DF_OVERRIDE"] = "0" # Determine model variant if config == "base": @@ -60,7 +60,7 @@ def deit(training: bool, config: str, microbatch: int, devtype: str, arch: str, # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/hrnet.py b/forge/test/benchmark/benchmark/models/hrnet.py similarity index 78% rename from pybuda/test/benchmark/benchmark/models/hrnet.py rename to forge/test/benchmark/benchmark/models/hrnet.py index de08357e5..34abc1583 100644 --- a/pybuda/test/benchmark/benchmark/models/hrnet.py +++ b/forge/test/benchmark/benchmark/models/hrnet.py @@ -2,14 +2,14 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch import torch.multiprocessing from pytorchcv.model_provider import get_model as ptcv_get_model from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config -from pybuda._C.backend_api import BackendDevice +from forge.config import _get_global_compiler_config +from forge._C.backend_api import BackendDevice torch.multiprocessing.set_sharing_strategy("file_system") @@ -34,14 +34,14 @@ def hrnet(training: bool, config: str, microbatch: int, devtype: str, arch: str, if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - os.environ["PYBUDA_SUPRESS_T_FACTOR_MM"] = "46" # removing causes hang #2139 - 
os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" + os.environ["FORGE_SUPRESS_T_FACTOR_MM"] = "46" # removing causes hang #2139 + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" # These are about to be enabled by default. # - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" if data_type == "Fp16_b": # Hangs with autotranspose on #2542 compiler_cfg.enable_auto_transposing_placement = False @@ -72,10 +72,10 @@ def hrnet(training: bool, config: str, microbatch: int, devtype: str, arch: str, if data_type == "Bfp8_b": if "TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE" not in os.environ: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{10*1024}" - available_devices = pybuda.detect_available_devices() + available_devices = forge.detect_available_devices() if available_devices: if available_devices[0] == BackendDevice.Grayskull: - pybuda.config._internal_insert_fj_buffering_nop('add_312', ['add_341'], nop_count=2) + forge.config._internal_insert_fj_buffering_nop('add_312', ['add_341'], nop_count=2) else: raise RuntimeError("Unknown config") @@ -88,7 +88,7 @@ def hrnet(training: bool, config: str, microbatch: int, devtype: str, arch: str, else: model.eval() - modules = {"tt": pybuda.PyTorchModule(f"pt_hrnet_{config}", model)} + modules = {"tt": forge.PyTorchModule(f"pt_hrnet_{config}", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -96,7 +96,7 @@ def hrnet(training: bool, config: str, microbatch: int, devtype: str, arch: str, # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/utils.py b/forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/utils.py similarity index 100% rename from pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/utils.py rename to forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/utils.py diff --git a/pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolo_layer.py b/forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolo_layer.py similarity index 100% rename from pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolo_layer.py rename to forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolo_layer.py diff --git a/pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3.py b/forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3.py similarity index 100% rename from pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3.py rename to forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3.py diff --git a/pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_base.py b/forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_base.py similarity index 100% rename from pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_base.py rename to forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_base.py diff --git a/pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_tiny.py 
b/forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_tiny.py similarity index 100% rename from pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_tiny.py rename to forge/test/benchmark/benchmark/models/implementations/yolo_v3/holli_src/yolov3_tiny.py diff --git a/pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/license b/forge/test/benchmark/benchmark/models/implementations/yolo_v3/license similarity index 100% rename from pybuda/test/benchmark/benchmark/models/implementations/yolo_v3/license rename to forge/test/benchmark/benchmark/models/implementations/yolo_v3/license diff --git a/pybuda/test/benchmark/benchmark/models/inception_v4.py b/forge/test/benchmark/benchmark/models/inception_v4.py similarity index 82% rename from pybuda/test/benchmark/benchmark/models/inception_v4.py rename to forge/test/benchmark/benchmark/models/inception_v4.py index 3254260da..464f5a57f 100644 --- a/pybuda/test/benchmark/benchmark/models/inception_v4.py +++ b/forge/test/benchmark/benchmark/models/inception_v4.py @@ -2,12 +2,12 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch import timm from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config @benchmark_model(configs=["224"]) @@ -18,7 +18,7 @@ def inception_v4(training: bool, config: str, microbatch: int, devtype: str, arc if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # Set model parameters based on chosen task and model configuration if config == "224": @@ -40,7 +40,7 @@ def inception_v4(training: bool, config: str, microbatch: int, devtype: str, arc else: model.eval() - modules = {"tt": pybuda.PyTorchModule(f"pt_inception_v4_{config}_{compiler_cfg.balancer_policy}", model)} + modules = {"tt": forge.PyTorchModule(f"pt_inception_v4_{config}_{compiler_cfg.balancer_policy}", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -48,7 +48,7 @@ def inception_v4(training: bool, config: str, microbatch: int, devtype: str, arc # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/mobilenet_v1.py b/forge/test/benchmark/benchmark/models/mobilenet_v1.py similarity index 59% rename from pybuda/test/benchmark/benchmark/models/mobilenet_v1.py rename to forge/test/benchmark/benchmark/models/mobilenet_v1.py index 47069a556..a5434aaf1 100644 --- a/pybuda/test/benchmark/benchmark/models/mobilenet_v1.py +++ b/forge/test/benchmark/benchmark/models/mobilenet_v1.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config from transformers import AutoModelForImageClassification @@ -18,24 +18,24 @@ def mobilenet_v1(training: bool, config: str, microbatch: int, devtype: str, arc if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] 
= "1" + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" # These are about to be enabled by default. # - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" if data_type == "Fp16_b": - os.environ["PYBUDA_SUPRESS_T_FACTOR_MM"] = "40" - os.environ["PYBUDA_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" + os.environ["FORGE_SUPRESS_T_FACTOR_MM"] = "40" + os.environ["FORGE_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" if data_type == "Bfp8_b": - os.environ["PYBUDA_FUSE_DF_OVERRIDE"] = "0" - pybuda.config.configure_mixed_precision(name_regex="input.*add.*", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision(op_type="add", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision(op_type="multiply", math_fidelity=pybuda.MathFidelity.HiFi2) - pybuda.config.configure_mixed_precision(op_type="depthwise", output_df=pybuda.DataFormat.Float16_b, math_fidelity=pybuda.MathFidelity.HiFi2) + os.environ["FORGE_FUSE_DF_OVERRIDE"] = "0" + forge.config.configure_mixed_precision(name_regex="input.*add.*", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="add", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="multiply", math_fidelity=forge.MathFidelity.HiFi2) + forge.config.configure_mixed_precision(op_type="depthwise", output_df=forge.DataFormat.Float16_b, math_fidelity=forge.MathFidelity.HiFi2) # Set model parameters based on chosen task and model configuration @@ -58,7 +58,7 @@ def mobilenet_v1(training: bool, config: str, microbatch: int, devtype: str, arc else: model.eval() - modules = {"tt": pybuda.PyTorchModule(f"pt_mobilenet_v1_{config}", model)} + modules = {"tt": forge.PyTorchModule(f"pt_mobilenet_v1_{config}", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -66,7 +66,7 @@ def mobilenet_v1(training: bool, config: str, microbatch: int, devtype: str, arc # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/mobilenet_v2.py b/forge/test/benchmark/benchmark/models/mobilenet_v2.py similarity index 59% rename from pybuda/test/benchmark/benchmark/models/mobilenet_v2.py rename to forge/test/benchmark/benchmark/models/mobilenet_v2.py index 919e1bd56..4511f6f0c 100644 --- a/pybuda/test/benchmark/benchmark/models/mobilenet_v2.py +++ b/forge/test/benchmark/benchmark/models/mobilenet_v2.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config from transformers import AutoModelForImageClassification @@ -18,28 +18,28 @@ def mobilenet_v2(training: bool, config: str, microbatch: int, devtype: str, arc if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" # These are about to be enabled by default. 
# - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" if data_type == "Fp16_b": - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if data_type == "Bfp8_b": - pybuda.config.configure_mixed_precision(name_regex="input.*add.*", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision(op_type="add", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision(name_regex="input.*add.*", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="add", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision( op_type="depthwise", - input_df={1: (pybuda.DataFormat.Float16_b, False),}, - output_df=pybuda.DataFormat.Float16_b, - math_fidelity=pybuda.MathFidelity.HiFi2 + input_df={1: (forge.DataFormat.Float16_b, False),}, + output_df=forge.DataFormat.Float16_b, + math_fidelity=forge.MathFidelity.HiFi2 ) - pybuda.config.configure_mixed_precision(op_type="multiply", math_fidelity=pybuda.MathFidelity.HiFi2) - pybuda.config.configure_mixed_precision(op_type="matmul", math_fidelity=pybuda.MathFidelity.HiFi2) + forge.config.configure_mixed_precision(op_type="multiply", math_fidelity=forge.MathFidelity.HiFi2) + forge.config.configure_mixed_precision(op_type="matmul", math_fidelity=forge.MathFidelity.HiFi2) # Set model parameters based on chosen task and model configuration if config == "224": @@ -63,7 +63,7 @@ def mobilenet_v2(training: bool, config: str, microbatch: int, devtype: str, arc else: model.eval() - modules = {"tt": pybuda.PyTorchModule(f"pt_mobilenet_v2_{config}", model)} + modules = {"tt": forge.PyTorchModule(f"pt_mobilenet_v2_{config}", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -71,7 +71,7 @@ def mobilenet_v2(training: bool, config: str, microbatch: int, devtype: str, arc # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/mobilenet_v3_timm.py b/forge/test/benchmark/benchmark/models/mobilenet_v3_timm.py similarity index 74% rename from pybuda/test/benchmark/benchmark/models/mobilenet_v3_timm.py rename to forge/test/benchmark/benchmark/models/mobilenet_v3_timm.py index 4d6d08075..4f66a32b4 100644 --- a/pybuda/test/benchmark/benchmark/models/mobilenet_v3_timm.py +++ b/forge/test/benchmark/benchmark/models/mobilenet_v3_timm.py @@ -2,12 +2,12 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch import timm from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config @benchmark_model(configs=["small", "large"]) @@ -16,11 +16,11 @@ def mobilenet_v3_timm(training: bool, config: str, microbatch: int, devtype: str if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_BALANCER_PREPASS_DISABLED"] = "1" - os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + 
os.environ["FORGE_BALANCER_PREPASS_DISABLED"] = "1" + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" # Set model parameters based on chosen task and model configuration model_name = "" @@ -42,7 +42,7 @@ def mobilenet_v3_timm(training: bool, config: str, microbatch: int, devtype: str else: model.eval() - modules = {"tt": pybuda.PyTorchModule(f"pt_mobilenet_v3_timm_{config}", model)} + modules = {"tt": forge.PyTorchModule(f"pt_mobilenet_v3_timm_{config}", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -50,7 +50,7 @@ def mobilenet_v3_timm(training: bool, config: str, microbatch: int, devtype: str # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/openpose_body.py b/forge/test/benchmark/benchmark/models/openpose_body.py similarity index 74% rename from pybuda/test/benchmark/benchmark/models/openpose_body.py rename to forge/test/benchmark/benchmark/models/openpose_body.py index f5c19df5b..9890b6e8d 100644 --- a/pybuda/test/benchmark/benchmark/models/openpose_body.py +++ b/forge/test/benchmark/benchmark/models/openpose_body.py @@ -2,26 +2,26 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config from pytorchcv.model_provider import get_model as ptcv_get_model @benchmark_model(configs=["2d", "3d"]) def openpose_osmr_body(training: bool, config: str, microbatch: int, devtype: str, arch: str, data_type: str, math_fidelity: str): # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_auto_transposing_placement = True if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - os.environ["PYBUDA_SUPRESS_T_FACTOR_MM"] = "13" - os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" + os.environ["FORGE_SUPRESS_T_FACTOR_MM"] = "13" + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" # Set model parameters based on chosen task and model configuration model_name = "" @@ -42,7 +42,7 @@ def openpose_osmr_body(training: bool, config: str, microbatch: int, devtype: st else: model.eval() - modules = {"tt": pybuda.PyTorchModule("openpose_body_" + config + "_pt", model)} + modules = {"tt": forge.PyTorchModule("openpose_body_" + config + "_pt", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -50,7 +50,7 @@ def openpose_osmr_body(training: bool, config: str, microbatch: int, devtype: st # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} \ No newline at end of file diff --git a/pybuda/test/benchmark/benchmark/models/openpose_hand.py b/forge/test/benchmark/benchmark/models/openpose_hand.py similarity index 74% rename from pybuda/test/benchmark/benchmark/models/openpose_hand.py rename to forge/test/benchmark/benchmark/models/openpose_hand.py index 26b0d843b..b57f2b0a2 
100644 --- a/pybuda/test/benchmark/benchmark/models/openpose_hand.py +++ b/forge/test/benchmark/benchmark/models/openpose_hand.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch import sys -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice from ..common import benchmark_model @benchmark_model(configs=["basic"]) @@ -16,25 +16,25 @@ def openpose_hand(training: bool, config: str, microbatch: int, devtype: str, ar from benchmarks.openpose import OpenPoseHandModel, transfer # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_auto_transposing_placement = True if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" # These are about to be enabled by default. # - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" # Manually enable amp light for Ribbon if compiler_cfg.balancer_policy == "Ribbon": compiler_cfg.enable_amp_light() - if pybuda.detect_available_devices()[0] == BackendDevice.Grayskull: - pybuda.set_epoch_break("conv2d_99.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2") + if forge.detect_available_devices()[0] == BackendDevice.Grayskull: + forge.set_epoch_break("conv2d_99.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2") # Set model parameters based on chosen task and model configuration model_name = "" @@ -56,7 +56,7 @@ def openpose_hand(training: bool, config: str, microbatch: int, devtype: str, ar else: model.eval() - modules = {"tt": pybuda.PyTorchModule("openpose_hand_" + model_name, model)} + modules = {"tt": forge.PyTorchModule("openpose_hand_" + model_name, model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -64,7 +64,7 @@ def openpose_hand(training: bool, config: str, microbatch: int, devtype: str, ar # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/other.py b/forge/test/benchmark/benchmark/models/other.py similarity index 78% rename from pybuda/test/benchmark/benchmark/models/other.py rename to forge/test/benchmark/benchmark/models/other.py index 4f7fb74e0..08e62c80b 100644 --- a/pybuda/test/benchmark/benchmark/models/other.py +++ b/forge/test/benchmark/benchmark/models/other.py @@ -6,18 +6,18 @@ """ import os -import pybuda +import forge import torch from ..common import benchmark_model -from pybuda.op.common import PyBudaOp -from pybuda.utils import align_up_tile, round_up_div -from pybuda.op.eval.sparse_utils import interleave_tiles, vslice, calculate_conv2d_output_dimensions, create_conv2d_sparse_picker_matrix, conv2d_padding_to_canonical -from pybuda.op.nn import Conv2dModule -from pybuda.config import _get_global_compiler_config +from forge.op.common import ForgeOp +from forge.utils import align_up_tile, round_up_div +from forge.op.eval.sparse_utils import interleave_tiles, vslice, calculate_conv2d_output_dimensions, create_conv2d_sparse_picker_matrix, conv2d_padding_to_canonical +from 
forge.op.nn import Conv2dModule +from forge.config import _get_global_compiler_config -class ConvTModule(pybuda.PyBudaModule): +class ConvTModule(forge.ForgeModule): """ ConvTModule """ @@ -61,13 +61,13 @@ def forward(self, act): outy, outx = calculate_conv2d_output_dimensions(act.shape[-2], act.shape[-1], self.kernel_size, self.stride, self.padding, self.dilation) y = self.conv(act) - y = pybuda.op.Reshape("", y, (1, 1, self.out_channels, outy * outx)) - y = pybuda.op.Transpose("", y, 2, 3) + y = forge.op.Reshape("", y, (1, 1, self.out_channels, outy * outx)) + y = forge.op.Transpose("", y, 2, 3) return y -class SimpleAddModule(pybuda.PyBudaModule): +class SimpleAddModule(forge.ForgeModule): """ Simple add module """ @@ -76,11 +76,11 @@ class SimpleAddModule(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights = forge.Parameter(*self.shape, requires_grad=True) self.set_parameter("weights", torch.rand(*self.shape, requires_grad=True)) def forward(self, x): - return pybuda.op.Add("add1", x, self.weights) + return forge.op.Add("add1", x, self.weights) @benchmark_model(configs=["224"]) @@ -114,7 +114,7 @@ def big_conv(training: bool, config: str, microbatch: int, devtype: str, arch: s compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" models = {"tt": mod} inputs = [torch.rand(microbatch, cin, input_size[0], input_size[1])] @@ -122,7 +122,7 @@ def big_conv(training: bool, config: str, microbatch: int, devtype: str, arch: s # if training: # assert False - # # models["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + # # models["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) return models, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/resnet.py b/forge/test/benchmark/benchmark/models/resnet.py similarity index 84% rename from pybuda/test/benchmark/benchmark/models/resnet.py rename to forge/test/benchmark/benchmark/models/resnet.py index f760b7e46..29387f898 100644 --- a/pybuda/test/benchmark/benchmark/models/resnet.py +++ b/forge/test/benchmark/benchmark/models/resnet.py @@ -7,9 +7,9 @@ import torch.nn as nn from typing import Callable, Optional, Type -import pybuda -from pybuda import PyTorchModule, OnnxModule -from pybuda.config import _get_global_compiler_config +import forge +from forge import PyTorchModule, OnnxModule +from forge.config import _get_global_compiler_config from ..common import benchmark_model from transformers import ResNetForImageClassification @@ -21,20 +21,20 @@ def resnet(training: bool, config: str, microbatch: int, devtype: str, arch: str if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" - os.environ["PYBUDA_ALLOW_MULTICOLUMN_SPARSE_MATMUL"] = "1" + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" + os.environ["FORGE_ALLOW_MULTICOLUMN_SPARSE_MATMUL"] = "1" # These are about to be enabled by default. 
# - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" if data_type == "Fp16_b": - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES_APPLY_FILTERING"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES_APPLY_FILTERING"] = "1" if data_type == "Bfp8_b": - pybuda.config.configure_mixed_precision(name_regex="input.*add.*", output_df=pybuda.DataFormat.Float16_b) + forge.config.configure_mixed_precision(name_regex="input.*add.*", output_df=forge.DataFormat.Float16_b) # Set model parameters based on chosen task and model configuration if config == "resnet18": @@ -67,10 +67,10 @@ def resnet_quant(training: bool, config: str, microbatch: int, devtype: str, arc compiler_cfg.enable_auto_fusing = False compiler_cfg.graph_solver_self_cut_type = "FastCut" - os.environ["PYBUDA_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" - os.environ["PYBUDA_DISABLE_FUSE_OPS"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_DISABLE_FUSE_OPS"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # Set model parameters based on chosen task and model configuration if config == "resnet50": @@ -280,7 +280,7 @@ def resnet50_layer(training: bool, config: str, microbatch: int, devtype: str, a layer = config compiler_cfg = _get_global_compiler_config() - # verify_cfg.verify_pybuda_codegen_vs_framework = False # hacking 7x7 to 1x1 will cause mismatches + # verify_cfg.verify_forge_codegen_vs_framework = False # hacking 7x7 to 1x1 will cause mismatches if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" @@ -296,30 +296,30 @@ def resnet50_layer(training: bool, config: str, microbatch: int, devtype: str, a fractured_conv_sparse_mms = [f"conv2d_0.dc.conv2d.3.dc.conv2d.{1 + i * 2}.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2" for i in range(fracture_factor)] fractured_conv_dense_mms = [f"conv2d_0.dc.conv2d.3.dc.conv2d.{1 + i * 2}.dc.matmul.11" for i in range(fracture_factor)] - pybuda.insert_nop( + forge.insert_nop( "input_1", fractured_conv_sparse_mms, hoist_tms=True) - pybuda.config.override_op_size("buffer_0_input_1_conv2d_0.dc.conv2d.3.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (7, 1)) - pybuda.config.override_t_stream_shape("buffer_0_input_1_conv2d_0.dc.conv2d.3.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_op_size("buffer_0_input_1_conv2d_0.dc.conv2d.3.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (7, 1)) + forge.config.override_t_stream_shape("buffer_0_input_1_conv2d_0.dc.conv2d.3.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_multi_op_fracture_factor("conv2d_0.dc.conv2d.3", fracture_factor) - # pybuda.config.override_multi_op_fracture_factor("conv2d_0", 7) + forge.config.override_multi_op_fracture_factor("conv2d_0.dc.conv2d.3", fracture_factor) + # forge.config.override_multi_op_fracture_factor("conv2d_0", 7) for sparse_mm in fractured_conv_sparse_mms: - pybuda.config.override_op_size(sparse_mm, (7, 1)) - pybuda.config.override_t_stream_shape(sparse_mm, (2, 1)) + forge.config.override_op_size(sparse_mm, (7, 1)) + forge.config.override_t_stream_shape(sparse_mm, (2, 1)) for dense_mm in fractured_conv_dense_mms: - pybuda.config.override_op_size(dense_mm, (7, 1)) - pybuda.config.override_t_stream_shape(dense_mm, (2, 1)) - pybuda.config.override_u_kt(dense_mm, 1) + 
forge.config.override_op_size(dense_mm, (7, 1)) + forge.config.override_t_stream_shape(dense_mm, (2, 1)) + forge.config.override_u_kt(dense_mm, 1) - pybuda.config.set_epoch_break(["_fused_op_0"]) + forge.config.set_epoch_break(["_fused_op_0"]) else: - pybuda.config.override_op_size("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 4)) - pybuda.config.override_fracture_factor("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", 4) - pybuda.config.override_op_size("conv2d_0.dc.conv2d.3.dc.matmul.11", (2, 1)) - pybuda.config.override_op_size("max_pool2d_2.dc.sparse_matmul.5.dc.sparse_matmul.1.lc2", (2, 2)) + forge.config.override_op_size("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 4)) + forge.config.override_fracture_factor("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", 4) + forge.config.override_op_size("conv2d_0.dc.conv2d.3.dc.matmul.11", (2, 1)) + forge.config.override_op_size("max_pool2d_2.dc.sparse_matmul.5.dc.sparse_matmul.1.lc2", (2, 2)) set_env_common(arch) diff --git a/pybuda/test/benchmark/benchmark/models/resnet_bringup.py b/forge/test/benchmark/benchmark/models/resnet_bringup.py similarity index 74% rename from pybuda/test/benchmark/benchmark/models/resnet_bringup.py rename to forge/test/benchmark/benchmark/models/resnet_bringup.py index cc224a81d..8a606a6d8 100644 --- a/pybuda/test/benchmark/benchmark/models/resnet_bringup.py +++ b/forge/test/benchmark/benchmark/models/resnet_bringup.py @@ -6,16 +6,16 @@ """ import numpy as np -import pybuda +import forge import torch from ..common import benchmark_model -from pybuda.op.common import PyBudaOp -from pybuda.utils import align_up_tile, round_up_div -from pybuda.op.eval.sparse_utils import interleave_tiles, vslice, calculate_conv2d_output_dimensions, create_conv2d_sparse_picker_matrix +from forge.op.common import ForgeOp +from forge.utils import align_up_tile, round_up_div +from forge.op.eval.sparse_utils import interleave_tiles, vslice, calculate_conv2d_output_dimensions, create_conv2d_sparse_picker_matrix -class ConvCustomTStreamModule(pybuda.PyBudaModule): +class ConvCustomTStreamModule(forge.ForgeModule): def __init__( self, name: str, @@ -42,7 +42,7 @@ def __init__( self.conv_mm_t = conv_mm_t self.sparse_mm_t = sparse_mm_t - self.weights = pybuda.Parameter(cout, cin, kH, kW) + self.weights = forge.Parameter(cout, cin, kH, kW) self.weights_pt_tensor = (torch.rand(cout, cin, kH, kW, requires_grad=False, dtype=torch.float32) + 0.00001).detach() # self.weights_pt_tensor = (1 + torch.arange(cout * cin * kH * kW, requires_grad=False, dtype=torch.float32).detach().view((cout, cin, kH, kW))) self.weights_pt_tensor.requires_grad_(True) @@ -54,40 +54,40 @@ def forward(self, x): outy, outx = calculate_conv2d_output_dimensions(shape[-2], shape[-1], (self.kH, self.kW), self.stride, self.padding, self.dilation) # activations - x = pybuda.op.Reshape(f"", x, (1, 1, self.cin, shape[2] * shape[3])) - x = pybuda.op.Transpose(f"", x, 2, 3) - x = pybuda.op.PadTile(f"", x, 3, self.cin) - x = pybuda.op.PadTile(f"", x, 2, shape[2] * shape[3]) + x = forge.op.Reshape(f"", x, (1, 1, self.cin, shape[2] * shape[3])) + x = forge.op.Transpose(f"", x, 2, 3) + x = forge.op.PadTile(f"", x, 3, self.cin) + x = forge.op.PadTile(f"", x, 2, shape[2] * shape[3]) # weights w = self.weights - w = pybuda.op.Reshape("", w, (1, self.cout, self.cin, self.kH * self.kW)) - w = pybuda.op.Transpose("", w, 1, 3, self.kH * self.kW) - w = pybuda.op.PadTile("", w, 3, self.cout) - w = 
pybuda.op.Transpose("", w, 1, 2, self.cin) - w = pybuda.op.Transpose("", w, -3, -2, self.kH * self.kW) - w = pybuda.op.HStack("", w, self.kH * self.kW) - w = pybuda.op.PadTile("", w, 2, self.cin) + w = forge.op.Reshape("", w, (1, self.cout, self.cin, self.kH * self.kW)) + w = forge.op.Transpose("", w, 1, 3, self.kH * self.kW) + w = forge.op.PadTile("", w, 3, self.cout) + w = forge.op.Transpose("", w, 1, 2, self.cin) + w = forge.op.Transpose("", w, -3, -2, self.kH * self.kW) + w = forge.op.HStack("", w, self.kH * self.kW) + w = forge.op.PadTile("", w, 2, self.cin) # t stream conv matmul if self.conv_mm_t != 1: assert x.shape[2] % self.conv_mm_t == 0, "invalid vslice" - # x = pybuda.op.VSlice("", x, x.shape.rt) - x = pybuda.op.VSlice("", x, self.conv_mm_t) + # x = forge.op.VSlice("", x, x.shape.rt) + x = forge.op.VSlice("", x, self.conv_mm_t) # conv matmul - x = pybuda.op.Matmul("conv_mm", x, w) + x = forge.op.Matmul("conv_mm", x, w) # maximize t dim - x = pybuda.op.VSlice("", x, x.shape.rt) + x = forge.op.VSlice("", x, x.shape.rt) # Buffer between vslice and hslice - x = pybuda.op.Buffer("", x) # HW workaround for: tenstorrent/budabackend#656 + x = forge.op.Buffer("", x) # HW workaround for: tenstorrent/budabackend#656 # tms before sparse mm - x = pybuda.op.HSlice("", x, self.kH * self.kW) - x = pybuda.op.Buffer("", x) # HW workaround for: tenstorrent/budabackend#656 - x = pybuda.op.VStack("", x, x.shape[-3] // self.sparse_mm_t) + x = forge.op.HSlice("", x, self.kH * self.kW) + x = forge.op.Buffer("", x) # HW workaround for: tenstorrent/budabackend#656 + x = forge.op.VStack("", x, x.shape[-3] // self.sparse_mm_t) # create sparse picker pickers = [] @@ -101,22 +101,22 @@ def forward(self, x): # Split the sparse tensor sparse = interleave_tiles(pickers) # to match max vslice after conv matmul sparse = torch.stack(vslice(sparse, self.sparse_mm_t), dim=0).unsqueeze(0) - sparse_tensor = pybuda.Tensor.create_from_torch(sparse, constant=True) + sparse_tensor = forge.Tensor.create_from_torch(sparse, constant=True) # sparse mm - x = pybuda.op.SparseMatmul("sparse_mm", sparse_tensor, x) + x = forge.op.SparseMatmul("sparse_mm", sparse_tensor, x) # undo t streamed result if x.shape.z > 1: - x = pybuda.op.VStack("", x) + x = forge.op.VStack("", x) - x = pybuda.op.Buffer("", x) # vstack can't be followed by other tm ops (transpose below), need to buffer + x = forge.op.Buffer("", x) # vstack can't be followed by other tm ops (transpose below), need to buffer # remaining tms to match the regular conv test - x = pybuda.op.Narrow("", x, 3, 0, self.cout, x.shape[-1]) - x = pybuda.op.Narrow("", x, 2, 0, outy * outx, x.shape[-2]) - x = pybuda.op.Transpose("", x, 2, 3) - x = pybuda.op.Reshape("", x, (1, self.cout, outy, outx)) + x = forge.op.Narrow("", x, 3, 0, self.cout, x.shape[-1]) + x = forge.op.Narrow("", x, 2, 0, outy * outx, x.shape[-2]) + x = forge.op.Transpose("", x, 2, 3) + x = forge.op.Reshape("", x, (1, self.cout, outy, outx)) return x @@ -130,7 +130,7 @@ def forward_golden(self, x): ) -class ResnetBottleneckReduce(pybuda.PyBudaModule): +class ResnetBottleneckReduce(forge.ForgeModule): def __init__( self, name: str, @@ -146,7 +146,7 @@ def __init__( self.no_reduce = no_reduce # left branch - self.conv_l0 = pybuda.op.nn.Conv2dModule( + self.conv_l0 = forge.op.nn.Conv2dModule( name=name + "_l0", in_channels=ch_mid, out_channels=ch_hi, @@ -159,7 +159,7 @@ def __init__( ) # right branch - self.conv_r0 = pybuda.op.nn.Conv2dModule( + self.conv_r0 = forge.op.nn.Conv2dModule( name=name + "_r0", 
in_channels=ch_mid, out_channels=ch_lo, @@ -171,7 +171,7 @@ def __init__( bias=False, ) - self.conv_r1 = pybuda.op.nn.Conv2dModule( + self.conv_r1 = forge.op.nn.Conv2dModule( name=name + "_r1", in_channels=ch_lo, out_channels=ch_lo, @@ -182,7 +182,7 @@ def __init__( groups=1, bias=False, ) - self.conv_r2 = pybuda.op.nn.Conv2dModule( + self.conv_r2 = forge.op.nn.Conv2dModule( name=name + "_r2", in_channels=ch_lo, out_channels=ch_hi, @@ -195,18 +195,18 @@ def __init__( ) def forward(self, x): - r = pybuda.op.Relu(f"", self.conv_r0(x)) - r = pybuda.op.Relu(f"", self.conv_r1(r)) + r = forge.op.Relu(f"", self.conv_r0(x)) + r = forge.op.Relu(f"", self.conv_r1(r)) r = self.conv_r2(r) if self.use_skip: l = self.conv_l0(x) - r = pybuda.op.Add(f"", l, r) + r = forge.op.Add(f"", l, r) - return pybuda.op.Relu(f"", r) + return forge.op.Relu(f"", r) -class ResnetBottleneck(pybuda.PyBudaModule): +class ResnetBottleneck(forge.ForgeModule): def __init__( self, name: str, @@ -220,7 +220,7 @@ def __init__( self.use_relu = True # right branch - self.conv_r0 = pybuda.op.nn.Conv2dModule( + self.conv_r0 = forge.op.nn.Conv2dModule( name=name + "_r0", in_channels=ch_hi, out_channels=ch_lo, @@ -231,7 +231,7 @@ def __init__( groups=1, bias=False, ) - self.conv_r1 = pybuda.op.nn.Conv2dModule( + self.conv_r1 = forge.op.nn.Conv2dModule( name=name + "_r1", in_channels=ch_lo, out_channels=ch_lo, @@ -242,7 +242,7 @@ def __init__( groups=1, bias=False, ) - self.conv_r2 = pybuda.op.nn.Conv2dModule( + self.conv_r2 = forge.op.nn.Conv2dModule( name=name + "_r2", in_channels=ch_lo, out_channels=ch_hi, @@ -258,20 +258,20 @@ def set_relu(self, use_relu: bool): self.use_relu = use_relu def forward(self, x): - r = pybuda.op.Relu(f"", self.conv_r0(x)) - r = pybuda.op.Relu(f"", self.conv_r1(r)) + r = forge.op.Relu(f"", self.conv_r0(x)) + r = forge.op.Relu(f"", self.conv_r1(r)) r = self.conv_r2(r) if self.use_skip: - r = pybuda.op.Add(f"", x, r) + r = forge.op.Add(f"", x, r) if self.use_relu: - r = pybuda.op.Relu(f"", r) + r = forge.op.Relu(f"", r) return r -class ResnetBlock(pybuda.PyBudaModule): +class ResnetBlock(forge.ForgeModule): def __init__( self, name: str, @@ -315,7 +315,7 @@ def forward(self, x): return y -class ResnetBlock2(pybuda.PyBudaModule): +class ResnetBlock2(forge.ForgeModule): def __init__( self, name: str, @@ -358,7 +358,7 @@ def forward(self, x): return y -class Resnet(pybuda.PyBudaModule): +class Resnet(forge.ForgeModule): def __init__( self, name: str, @@ -379,7 +379,7 @@ def __init__( sparse_mm_t=14, ) - self.big_conv_automatic = pybuda.op.nn.Conv2dModule( + self.big_conv_automatic = forge.op.nn.Conv2dModule( name=name + "_big_conv_automatic", in_channels=3, out_channels=64, @@ -391,7 +391,7 @@ def __init__( bias=False, ) - self.max_pool = pybuda.op.nn.MaxPool2dModule( + self.max_pool = forge.op.nn.MaxPool2dModule( name=name + "_max_pool", kernel_size=3, stride=2, @@ -432,7 +432,7 @@ def __init__( use_relu=False, ) - self.linear = pybuda.op.nn.Linear( + self.linear = forge.op.nn.Linear( name=name + "_linear", in_features=2048, out_features=1000, @@ -444,14 +444,14 @@ def forward(self, x): # TODO: add batchnorms # # # Head: 224x224 conv, relu, maxpool - # y = pybuda.op.Relu(self.name + "_big_conv_relu", self.big_conv(x)) + # y = forge.op.Relu(self.name + "_big_conv_relu", self.big_conv(x)) # y = self.max_pool(y) # # Just maxpool # y = self.max_pool(x) # <---- x # Auto conv - y = pybuda.op.Relu(self.name + "_big_conv_relu", self.big_conv_automatic(x)) + y = forge.op.Relu(self.name + "_big_conv_relu", 
self.big_conv_automatic(x)) y = self.max_pool(y) # 4 blocks of bottlenecks of convs (no batchnorm) @@ -461,11 +461,11 @@ def forward(self, x): y = self.block5(y) # Tail: global avg pool + linear + softmax - y = pybuda.op.Reshape("", y, (1, 1, y.shape[-3], y.shape[-1] * y.shape[-2])) - y = pybuda.op.Transpose("", y, -1, -2) - y = pybuda.op.ReduceAvg("", y, -2) + y = forge.op.Reshape("", y, (1, 1, y.shape[-3], y.shape[-1] * y.shape[-2])) + y = forge.op.Transpose("", y, -1, -2) + y = forge.op.ReduceAvg("", y, -2) y = self.linear(y) - y = pybuda.op.Softmax("", y, dim=-1, stable=True) + y = forge.op.Softmax("", y, dim=-1, stable=True) return y @@ -490,6 +490,6 @@ def resnet(training: bool, config: str, microbatch: int, devtype: str, arch: str if training: assert False - # models["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + # models["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) return models, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/t5.py b/forge/test/benchmark/benchmark/models/t5.py similarity index 62% rename from pybuda/test/benchmark/benchmark/models/t5.py rename to forge/test/benchmark/benchmark/models/t5.py index 3c9ec955f..d1a384401 100644 --- a/pybuda/test/benchmark/benchmark/models/t5.py +++ b/forge/test/benchmark/benchmark/models/t5.py @@ -2,12 +2,12 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice from ..common import benchmark_model, generate_test_device from test.model_demos.models.t5 import generate_t5_past_cache_enc_dec -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config @benchmark_model(configs=["base", "large"]) @@ -17,19 +17,19 @@ def t5(training: bool, config: str, microbatch: int, devtype: str, arch: str, da if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # These are about to be enabled by default. # - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" - os.environ["PYBUDA_EXP_APPROX"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_EXP_APPROX"] = "1" if data_type == "Bfp8_b": - pybuda.config.configure_mixed_precision(op_type="add", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision(op_type="subtract", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision(op_type="reciprocal", output_df=pybuda.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="add", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="subtract", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="reciprocal", output_df=forge.DataFormat.Float16_b) - available_devices = pybuda.detect_available_devices() + available_devices = forge.detect_available_devices() # Determine model variant if config == "base": variant = "t5-base" @@ -57,12 +57,12 @@ def flan_t5(training: bool, config: str, microbatch: int, devtype: str, arch: st if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # These are about to be enabled by default. 
# - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" - os.environ["PYBUDA_EXP_APPROX"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_EXP_APPROX"] = "1" # Determine model variant if config == "base": @@ -73,9 +73,9 @@ def flan_t5(training: bool, config: str, microbatch: int, devtype: str, arch: st raise RuntimeError("Unknown config") if data_type == "Bfp8_b": - pybuda.config.configure_mixed_precision(op_type="add", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision(op_type="subtract", output_df=pybuda.DataFormat.Float16_b) - pybuda.config.configure_mixed_precision(op_type="reciprocal", output_df=pybuda.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="add", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="subtract", output_df=forge.DataFormat.Float16_b) + forge.config.configure_mixed_precision(op_type="reciprocal", output_df=forge.DataFormat.Float16_b) # Load model modules, inputs, other = generate_t5_past_cache_enc_dec( diff --git a/pybuda/test/benchmark/benchmark/models/unet.py b/forge/test/benchmark/benchmark/models/unet.py similarity index 74% rename from pybuda/test/benchmark/benchmark/models/unet.py rename to forge/test/benchmark/benchmark/models/unet.py index 4d20606dc..e55c121de 100644 --- a/pybuda/test/benchmark/benchmark/models/unet.py +++ b/forge/test/benchmark/benchmark/models/unet.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config @benchmark_model(configs=["256"]) @@ -17,19 +17,19 @@ def unet(training: bool, config: str, microbatch: int, devtype: str, arch: str, if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # Manually enable amp light for Ribbon if compiler_cfg.balancer_policy == "Ribbon": compiler_cfg.enable_amp_light() - os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" - os.environ["PYBUDA_ALLOW_MULTICOLUMN_SPARSE_MATMUL"] = "1" - os.environ["PYBUDA_SUPRESS_T_FACTOR_MM"] = "60" + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" + os.environ["FORGE_ALLOW_MULTICOLUMN_SPARSE_MATMUL"] = "1" + os.environ["FORGE_SUPRESS_T_FACTOR_MM"] = "60" # These are about to be enabled by default. 
# - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" # Set model parameters based on chosen task and model configuration if config == "256": @@ -51,7 +51,7 @@ def unet(training: bool, config: str, microbatch: int, devtype: str, arch: str, else: model.eval() - modules = {"tt": pybuda.PyTorchModule(f"th_unet_{config}_{compiler_cfg.balancer_policy}", model)} + modules = {"tt": forge.PyTorchModule(f"th_unet_{config}_{compiler_cfg.balancer_policy}", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -59,7 +59,7 @@ def unet(training: bool, config: str, microbatch: int, devtype: str, arch: str, # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/vit.py b/forge/test/benchmark/benchmark/models/vit.py similarity index 68% rename from pybuda/test/benchmark/benchmark/models/vit.py rename to forge/test/benchmark/benchmark/models/vit.py index 8c93d9ba9..0e35925dd 100644 --- a/pybuda/test/benchmark/benchmark/models/vit.py +++ b/forge/test/benchmark/benchmark/models/vit.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config from transformers import ViTForImageClassification @@ -18,19 +18,19 @@ def vit(training: bool, config: str, microbatch: int, devtype: str, arch: str, d if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # These are about to be enabled by default. 
# - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" if data_type == "Fp16_b": - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES_APPLY_FILTERING"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES_APPLY_FILTERING"] = "1" if data_type == "Bfp8_b": - os.environ["PYBUDA_FORK_JOIN_BUF_QUEUES"] = "1" - pybuda.config.configure_mixed_precision(op_type="reciprocal", output_df=pybuda.DataFormat.Float16_b) - os.environ["PYBUDA_FUSE_DF_OVERRIDE"] = "0" + os.environ["FORGE_FORK_JOIN_BUF_QUEUES"] = "1" + forge.config.configure_mixed_precision(op_type="reciprocal", output_df=forge.DataFormat.Float16_b) + os.environ["FORGE_FUSE_DF_OVERRIDE"] = "0" # Set model parameters based on chosen task and model configuration img_res = 224 @@ -51,7 +51,7 @@ def vit(training: bool, config: str, microbatch: int, devtype: str, arch: str, d else: model.eval() - modules = {"tt": pybuda.PyTorchModule(f"pt_vit_{config}_{compiler_cfg.balancer_policy}", model)} + modules = {"tt": forge.PyTorchModule(f"pt_vit_{config}_{compiler_cfg.balancer_policy}", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -59,7 +59,7 @@ def vit(training: bool, config: str, microbatch: int, devtype: str, arch: str, d # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/vovnet_v2.py b/forge/test/benchmark/benchmark/models/vovnet_v2.py similarity index 72% rename from pybuda/test/benchmark/benchmark/models/vovnet_v2.py rename to forge/test/benchmark/benchmark/models/vovnet_v2.py index 7b5c0a9f7..b6b2587c5 100644 --- a/pybuda/test/benchmark/benchmark/models/vovnet_v2.py +++ b/forge/test/benchmark/benchmark/models/vovnet_v2.py @@ -2,34 +2,34 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import timm import torch from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config @benchmark_model(configs=["19", "39", "99"]) def vovnet_v2(training: bool, config: str, microbatch: int, devtype: str, arch: str, data_type: str, math_fidelity: str): compiler_cfg = _get_global_compiler_config() - from pybuda._C.backend_api import BackendDevice - available_devices = pybuda.detect_available_devices() + from forge._C.backend_api import BackendDevice + available_devices = forge.detect_available_devices() if available_devices[0] != BackendDevice.Grayskull: compiler_cfg.enable_auto_transposing_placement = True if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - os.environ["PYBUDA_ALLOW_MULTICOLUMN_SPARSE_MATMUL"] = "1" - os.environ["PYBUDA_FORK_JOIN_BUF_QUEUES"] = "1" - os.environ["PYBUDA_SUPRESS_T_FACTOR_MM"] = "60" + os.environ["FORGE_ALLOW_MULTICOLUMN_SPARSE_MATMUL"] = "1" + os.environ["FORGE_FORK_JOIN_BUF_QUEUES"] = "1" + os.environ["FORGE_SUPRESS_T_FACTOR_MM"] = "60" # These are about to be enabled by default. 
# - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" if config == "39" and data_type != "Bfp8_b": compiler_cfg.enable_amp_light() @@ -56,7 +56,7 @@ def vovnet_v2(training: bool, config: str, microbatch: int, devtype: str, arch: else: model.eval() - modules = {"tt": pybuda.PyTorchModule(f"pt_vovnet_v2_{config}", model)} + modules = {"tt": forge.PyTorchModule(f"pt_vovnet_v2_{config}", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -64,7 +64,7 @@ def vovnet_v2(training: bool, config: str, microbatch: int, devtype: str, arch: # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/whisper.py b/forge/test/benchmark/benchmark/models/whisper.py similarity index 80% rename from pybuda/test/benchmark/benchmark/models/whisper.py rename to forge/test/benchmark/benchmark/models/whisper.py index d2ea32946..f981d6074 100644 --- a/pybuda/test/benchmark/benchmark/models/whisper.py +++ b/forge/test/benchmark/benchmark/models/whisper.py @@ -1,12 +1,12 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import torch import os from ..common import benchmark_model, generate_test_device -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config from test.model_demos.models.whisper import generate_model_whisper_decoder_past_cache, generate_model_whisper_enc_dec @@ -40,7 +40,7 @@ def whisper(training: bool, config: str, microbatch: int, devtype: str, arch: st if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # Determine model variant if config == "small": @@ -48,13 +48,13 @@ def whisper(training: bool, config: str, microbatch: int, devtype: str, arch: st else: raise RuntimeError("Unknown config") - from pybuda._C.backend_api import BackendDevice + from forge._C.backend_api import BackendDevice - available_devices = pybuda.detect_available_devices() + available_devices = forge.detect_available_devices() if available_devices: if available_devices[0] == BackendDevice.Grayskull: - pybuda.config.set_epoch_break("conv2d_9.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2") - pybuda.config.override_op_size("conv2d_9.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 12)) + forge.config.set_epoch_break("conv2d_9.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2") + forge.config.override_op_size("conv2d_9.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 12)) # Load model model, inputs, other = generate_model_whisper_enc_dec( diff --git a/pybuda/test/benchmark/benchmark/models/yolo_v3.py b/forge/test/benchmark/benchmark/models/yolo_v3.py similarity index 76% rename from pybuda/test/benchmark/benchmark/models/yolo_v3.py rename to forge/test/benchmark/benchmark/models/yolo_v3.py index eff53873e..fc7d6802e 100644 --- a/pybuda/test/benchmark/benchmark/models/yolo_v3.py +++ b/forge/test/benchmark/benchmark/models/yolo_v3.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import torch from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import 
_get_global_compiler_config from .implementations.yolo_v3.holli_src import utils from .implementations.yolo_v3.holli_src.yolo_layer import * from .implementations.yolo_v3.holli_src.yolov3_tiny import * @@ -20,20 +20,20 @@ def yolo_v3(training: bool, config: str, microbatch: int, devtype: str, arch: st if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - os.environ["PYBUDA_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" + os.environ["FORGE_ENABLE_HOST_INPUT_NOP_BUFFERING"] = "1" # These are about to be enabled by default. # - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" if data_type == "Bfp8_b": - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" - os.environ["PYBUDA_ALLOW_MULTICOLUMN_SPARSE_MATMUL"] = "1" + os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_ALLOW_MULTICOLUMN_SPARSE_MATMUL"] = "1" if data_type == "Fp16_b": - os.environ["PYBUDA_OVERRIDE_INPUT_QUEUE_ENTRIES"] = "32" + os.environ["FORGE_OVERRIDE_INPUT_QUEUE_ENTRIES"] = "32" # Set model parameters based on chosen task and model configuration config_name = "" @@ -56,7 +56,7 @@ def yolo_v3(training: bool, config: str, microbatch: int, devtype: str, arch: st else: model.eval() - modules = {"tt": pybuda.PyTorchModule(f"yolov3_holli_{config}_{compiler_cfg.balancer_policy}", model)} + modules = {"tt": forge.PyTorchModule(f"yolov3_holli_{config}_{compiler_cfg.balancer_policy}", model)} input_shape = (microbatch, 3, img_res, img_res) inputs = [torch.rand(*input_shape)] @@ -64,7 +64,7 @@ def yolo_v3(training: bool, config: str, microbatch: int, devtype: str, arch: st # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/benchmark/models/yolo_v5.py b/forge/test/benchmark/benchmark/models/yolo_v5.py similarity index 72% rename from pybuda/test/benchmark/benchmark/models/yolo_v5.py rename to forge/test/benchmark/benchmark/models/yolo_v5.py index 3af749ea5..b6590a8a4 100644 --- a/pybuda/test/benchmark/benchmark/models/yolo_v5.py +++ b/forge/test/benchmark/benchmark/models/yolo_v5.py @@ -1,13 +1,13 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import torch import os -# import yolov5 # use this instead pybuda/test/tvm/cnn/pytorch/tests_C/test_yolov5.py +# import yolov5 # use this instead forge/test/tvm/cnn/pytorch/tests_C/test_yolov5.py from ..common import benchmark_model -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config @benchmark_model(configs=["s", "m"]) @@ -17,28 +17,28 @@ def yolo_v5(training: bool, config: str, microbatch: int, devtype: str, arch: st if compiler_cfg.balancer_policy == "default": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - from pybuda._C.backend_api import BackendDevice - available_devices = pybuda.detect_available_devices() + from forge._C.backend_api import BackendDevice + available_devices = forge.detect_available_devices() # Temp perf workaround for tenstorrent/bbe#2595 - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" if data_type 
== "Fp16_b": if available_devices[0] != BackendDevice.Grayskull: - os.environ["PYBUDA_FORK_JOIN_BUF_QUEUES"] = "1" + os.environ["FORGE_FORK_JOIN_BUF_QUEUES"] = "1" if data_type == "Bfp8_b": - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" # Temp workaround for tenstorrent/bbe#2595, output BW is unpredictable. - os.environ["PYBUDA_DISABLE_STREAM_OUTPUT"] = "1" + os.environ["FORGE_DISABLE_STREAM_OUTPUT"] = "1" if available_devices[0] == BackendDevice.Grayskull: compiler_cfg.enable_tm_cpu_fallback = True compiler_cfg.enable_tm_cpu_fallback = True compiler_cfg.enable_auto_fusing = False # required to fix accuracy - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" # Set model parameters based on chosen task and model configuration config_name = "" @@ -69,14 +69,14 @@ def yolo_v5(training: bool, config: str, microbatch: int, devtype: str, arch: st targets = tuple() # The model is implemented with dynamic shapes as it supports various input sizes... Needs to be run with proper - # input shape on CPU so that the dynamic shapes get resolved properly, before running thru pybuda + # input shape on CPU so that the dynamic shapes get resolved properly, before running thru forge model(inputs[0]) - modules = {"tt": pybuda.PyTorchModule(f"yolov5_{config}_{compiler_cfg.balancer_policy}", model)} + modules = {"tt": forge.PyTorchModule(f"yolov5_{config}_{compiler_cfg.balancer_policy}", model)} # Add loss function, if training if training: - model["cpu-loss"] = pybuda.PyTorchModule("l1loss", torch.nn.L1Loss()) + model["cpu-loss"] = forge.PyTorchModule("l1loss", torch.nn.L1Loss()) targets = [torch.rand(1, 100)] return modules, inputs, targets, {} diff --git a/pybuda/test/benchmark/run_benchmark.py b/forge/test/benchmark/run_benchmark.py similarity index 98% rename from pybuda/test/benchmark/run_benchmark.py rename to forge/test/benchmark/run_benchmark.py index 3f7c77654..cad034d66 100755 --- a/pybuda/test/benchmark/run_benchmark.py +++ b/forge/test/benchmark/run_benchmark.py @@ -127,7 +127,7 @@ def check_for_net2pipe_errors(run_object, log_path): def get_benchmark_commands(benchmark_commands_file_path): abs_path = os.path.join(os.path.dirname(__file__), benchmark_commands_file_path) lines = open(abs_path, "r").readlines() - benchmark_commands = [line.strip() for line in lines if line.startswith("pybuda/test/benchmark/benchmark.py")] + benchmark_commands = [line.strip() for line in lines if line.startswith("forge/test/benchmark/benchmark.py")] return benchmark_commands @@ -166,7 +166,7 @@ def get_config_from_benchmark_command(command): def main(): import shlex - # os.environ["PYBUDA_FORCE_EMULATE_HARVESTED"] = "1" + # os.environ["FORGE_FORCE_EMULATE_HARVESTED"] = "1" # import argparse diff --git a/forge/test/benchmark/run_benchmark_debug b/forge/test/benchmark/run_benchmark_debug new file mode 100644 index 000000000..b5cf0b299 --- /dev/null +++ b/forge/test/benchmark/run_benchmark_debug @@ -0,0 +1,8 @@ +rm perf.json + +# emulate runs on harvested machines +export FORGE_FORCE_EMULATE_HARVESTED=1 + +forge/test/benchmark/benchmark.py -m simple_add -c default -opt 4 -o perf.json --auto_transpose +forge/test/benchmark/benchmark.py -m simple_add -c default1 -opt 4 -o perf.json --auto_transpose +forge/test/benchmark/benchmark.py -m simple_add -c default2 -opt 4 -o perf.json --auto_transpose \ No newline at end of file diff --git a/forge/test/benchmark/run_benchmark_gs_e150_df_bfp8 
b/forge/test/benchmark/run_benchmark_gs_e150_df_bfp8 new file mode 100644 index 000000000..4adc3c16f --- /dev/null +++ b/forge/test/benchmark/run_benchmark_gs_e150_df_bfp8 @@ -0,0 +1,49 @@ +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # +# Data Format Bfp8_b +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # + +# ------------------------------------------------------- # +# Grayskull e150, unharvested chip, grid size: 10x12 +# ------------------------------------------------------- # + +# Resnet +forge/test/benchmark/benchmark.py -m resnet -c resnet50 -o forge-silicon-gs-e150-perf-all-perf.json + +# Mobilenet v1 +forge/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mf HiFi2 -o forge-silicon-gs-e150-perf-all-perf.json + +# Mobilenet v2 +forge/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -o forge-silicon-gs-e150-perf-all-perf.json + +# Mobilenet v3 +forge/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -o forge-silicon-gs-e150-perf-all-perf.json + +# Vovnet v2 +forge/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -o forge-silicon-gs-e150-perf-all-perf.json + +# Openpose Body +forge/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -o forge-silicon-gs-e150-perf-all-perf.json + +# Openpose Hand +forge/test/benchmark/benchmark.py -m openpose_hand -c basic -o forge-silicon-gs-e150-perf-all-perf.json + +# YOLOv3 +forge/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -o forge-silicon-gs-e150-perf-all-perf.json + +# YOLOv5 +forge/test/benchmark/benchmark.py -m yolo_v5 -c s -o forge-silicon-gs-e150-perf-all-perf.json + +# Inception v4 +forge/test/benchmark/benchmark.py -m inception_v4 -c 224 -o forge-silicon-gs-e150-perf-all-perf.json + +# Unet +forge/test/benchmark/benchmark.py -m unet -c 256 -mb 64 -o forge-silicon-gs-e150-perf-all-perf.json + +# Whisper +forge/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 --single-thread --generative -o forge-silicon-gs-e150-perf-all-perf.json + +# T5 +forge/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 --single-thread --generative -o forge-silicon-gs-e150-perf-all-perf.json + +# Flan-T5 +forge/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -mf HiFi2 --single-thread --generative -o forge-silicon-gs-e150-perf-all-perf.json \ No newline at end of file diff --git a/pybuda/test/benchmark/run_benchmark_gs_e150_df_fp16 b/forge/test/benchmark/run_benchmark_gs_e150_df_fp16 similarity index 53% rename from pybuda/test/benchmark/run_benchmark_gs_e150_df_fp16 rename to forge/test/benchmark/run_benchmark_gs_e150_df_fp16 index 7f1e7c1a3..098379e97 100644 --- a/pybuda/test/benchmark/run_benchmark_gs_e150_df_fp16 +++ b/forge/test/benchmark/run_benchmark_gs_e150_df_fp16 @@ -7,13 +7,13 @@ # ------------------------------------------------------- # # Vit -pybuda/test/benchmark/benchmark.py -m vit -c base -df Fp16_b -mf HiFi2 -o pybuda-silicon-gs-e150-perf-all-perf.json +forge/test/benchmark/benchmark.py -m vit -c base -df Fp16_b -mf HiFi2 -o forge-silicon-gs-e150-perf-all-perf.json # Deit -pybuda/test/benchmark/benchmark.py -m deit -c base -df Fp16_b -mf HiFi2 -o pybuda-silicon-gs-e150-perf-all-perf.json +forge/test/benchmark/benchmark.py -m deit -c base -df Fp16_b -mf HiFi2 -o forge-silicon-gs-e150-perf-all-perf.json # Hrnet -pybuda/test/benchmark/benchmark.py -m 
hrnet -c v2_w64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json +forge/test/benchmark/benchmark.py -m hrnet -c v2_w64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json # Bert -pybuda/test/benchmark/benchmark.py -m bert -c large_tc -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json \ No newline at end of file +forge/test/benchmark/benchmark.py -m bert -c large_tc -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json \ No newline at end of file diff --git a/forge/test/benchmark/run_benchmark_gs_e150_release b/forge/test/benchmark/run_benchmark_gs_e150_release new file mode 100644 index 000000000..80f852bfa --- /dev/null +++ b/forge/test/benchmark/run_benchmark_gs_e150_release @@ -0,0 +1,61 @@ +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # +# Models with data Formats that have good accuracy on Grayskull and that we release as official numbers +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # + +# ------------------------------------------------------- # +# Grayskull e150, unharvested chip, grid size: 10x12 +# ------------------------------------------------------- # + +# ResNet fp16_b +forge/test/benchmark/benchmark.py -m resnet -c resnet50 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# Mobilenet v1 fp16_b +forge/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# Mobilenet v2 fp16_b +forge/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# Mobilenet v3 fp16_b +forge/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# Vit bfp8_b +forge/test/benchmark/benchmark.py -m vit -c base -mb 64 -o forge-silicon-gs-e150-perf-all-perf.json + +# Deit bfp8_b +forge/test/benchmark/benchmark.py -m deit -c base -mb 64 -o forge-silicon-gs-e150-perf-all-perf.json + +# VoVNet v2 fp16_b +forge/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# OpenPose Body fp16 +forge/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -mb 64 -df Fp16 -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# OpenPose Hand fp16_b +forge/test/benchmark/benchmark.py -m openpose_hand -c basic -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# HRNet bfp8_b +forge/test/benchmark/benchmark.py -m hrnet -c v2_w64 -mb 64 -o forge-silicon-gs-e150-perf-all-perf.json + +# YOLOv3 fp16_b +forge/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# YOLOv5 fp16_b +forge/test/benchmark/benchmark.py -m yolo_v5 -c s -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# Inception v4 fp16_b +forge/test/benchmark/benchmark.py -m inception_v4 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# UNet fp16_b +forge/test/benchmark/benchmark.py -m unet -c 256 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e150-perf-all-perf.json + +# Bert large bfp8_b +forge/test/benchmark/benchmark.py -m bert -c large_tc -mb 64 -o forge-silicon-gs-e150-perf-all-perf.json + +# Whisper fp16_b 
+forge/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o forge-silicon-gs-e150-perf-all-perf.json + +# T5 fp16_b +forge/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o forge-silicon-gs-e150-perf-all-perf.json + +# Flan-T5 fp16_b +forge/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o forge-silicon-gs-e150-perf-all-perf.json diff --git a/forge/test/benchmark/run_benchmark_gs_e75_df_bfp8 b/forge/test/benchmark/run_benchmark_gs_e75_df_bfp8 new file mode 100644 index 000000000..24426f8cb --- /dev/null +++ b/forge/test/benchmark/run_benchmark_gs_e75_df_bfp8 @@ -0,0 +1,51 @@ +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # +# Data Format Bfp8_b +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # + +# ------------------------------------------------------- # +# Grayskull e75, two-row harvested chip, grid size: 8x12 +# ------------------------------------------------------- # + +export FORGE_FORCE_EMULATE_HARVESTED=2 + +# Resnet +forge/test/benchmark/benchmark.py -m resnet -c resnet50 -o forge-silicon-gs-e75-perf-all-perf.json + +# Mobilenet v1 +forge/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mf HiFi2 -o forge-silicon-gs-e75-perf-all-perf.json + +# Mobilenet v2 +forge/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -o forge-silicon-gs-e75-perf-all-perf.json + +# Mobilenet v3 +forge/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -o forge-silicon-gs-e75-perf-all-perf.json + +# Vovnet v2 +forge/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -o forge-silicon-gs-e75-perf-all-perf.json + +# Openpose Body +forge/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -o forge-silicon-gs-e75-perf-all-perf.json + +# Openpose Hand +forge/test/benchmark/benchmark.py -m openpose_hand -c basic -o forge-silicon-gs-e75-perf-all-perf.json + +# YOLOv3 +forge/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -o forge-silicon-gs-e75-perf-all-perf.json + +# YOLOv5 +forge/test/benchmark/benchmark.py -m yolo_v5 -c s -o forge-silicon-gs-e75-perf-all-perf.json + +# Inception v4 +forge/test/benchmark/benchmark.py -m inception_v4 -c 224 -o forge-silicon-gs-e75-perf-all-perf.json + +# Unet +forge/test/benchmark/benchmark.py -m unet -c 256 -mb 64 -o forge-silicon-gs-e75-perf-all-perf.json + +# Whisper +forge/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 --single-thread --generative -o forge-silicon-gs-e75-perf-all-perf.json + +# T5 +forge/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 --single-thread --generative -o forge-silicon-gs-e75-perf-all-perf.json + +# Flan-T5 +forge/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -mf HiFi2 --single-thread --generative -o forge-silicon-gs-e75-perf-all-perf.json \ No newline at end of file diff --git a/pybuda/test/benchmark/run_benchmark_gs_e75_df_fp16 b/forge/test/benchmark/run_benchmark_gs_e75_df_fp16 similarity index 52% rename from pybuda/test/benchmark/run_benchmark_gs_e75_df_fp16 rename to forge/test/benchmark/run_benchmark_gs_e75_df_fp16 index 7dc2461e7..82475cc15 100644 --- a/pybuda/test/benchmark/run_benchmark_gs_e75_df_fp16 +++ 
b/forge/test/benchmark/run_benchmark_gs_e75_df_fp16 @@ -6,16 +6,16 @@ # Grayskull e75, two-row harvested chip, grid size: 8x12 # ------------------------------------------------------- # -export PYBUDA_FORCE_EMULATE_HARVESTED=2 +export FORGE_FORCE_EMULATE_HARVESTED=2 # Vit -pybuda/test/benchmark/benchmark.py -m vit -c base -df Fp16_b -mf HiFi2 -o pybuda-silicon-gs-e75-perf-all-perf.json +forge/test/benchmark/benchmark.py -m vit -c base -df Fp16_b -mf HiFi2 -o forge-silicon-gs-e75-perf-all-perf.json # Deit -pybuda/test/benchmark/benchmark.py -m deit -c base -df Fp16_b -mf HiFi2 -o pybuda-silicon-gs-e75-perf-all-perf.json +forge/test/benchmark/benchmark.py -m deit -c base -df Fp16_b -mf HiFi2 -o forge-silicon-gs-e75-perf-all-perf.json # Hrnet -pybuda/test/benchmark/benchmark.py -m hrnet -c v2_w64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json +forge/test/benchmark/benchmark.py -m hrnet -c v2_w64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json # Bert -pybuda/test/benchmark/benchmark.py -m bert -c large_tc -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json \ No newline at end of file +forge/test/benchmark/benchmark.py -m bert -c large_tc -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json \ No newline at end of file diff --git a/forge/test/benchmark/run_benchmark_gs_e75_release b/forge/test/benchmark/run_benchmark_gs_e75_release new file mode 100644 index 000000000..05f5c3afc --- /dev/null +++ b/forge/test/benchmark/run_benchmark_gs_e75_release @@ -0,0 +1,63 @@ +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # +# Models with data Formats that have good accuracy on Grayskull and that we release as official numbers +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # + +# ------------------------------------------------------- # +# Grayskull e75, two-row harvested chip, grid size: 8x12 +# ------------------------------------------------------- # + +export FORGE_FORCE_EMULATE_HARVESTED=2 + +# ResNet fp16_b +forge/test/benchmark/benchmark.py -m resnet -c resnet50 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# Mobilenet v1 fp16_b +forge/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# Mobilenet v2 fp16_b +forge/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# Mobilenet v3 fp16_b +forge/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# Vit bfp8_b +forge/test/benchmark/benchmark.py -m vit -c base -mb 64 -o forge-silicon-gs-e75-perf-all-perf.json + +# Deit bfp8_b +forge/test/benchmark/benchmark.py -m deit -c base -mb 64 -o forge-silicon-gs-e75-perf-all-perf.json + +# VoVNet v2 fp16_b +forge/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# OpenPose Body fp16 +forge/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -mb 64 -df Fp16 -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# OpenPose Hand fp16_b +forge/test/benchmark/benchmark.py -m openpose_hand -c basic -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# HRNet bfp8_b 
+forge/test/benchmark/benchmark.py -m hrnet -c v2_w64 -mb 64 -o forge-silicon-gs-e75-perf-all-perf.json + +# YOLOv3 fp16_b +forge/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# YOLOv5 fp16_b +forge/test/benchmark/benchmark.py -m yolo_v5 -c s -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# Inception v4 fp16_b +forge/test/benchmark/benchmark.py -m inception_v4 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# UNet fp16_b +forge/test/benchmark/benchmark.py -m unet -c 256 -mb 64 -df Fp16_b -mf HiFi3 -o forge-silicon-gs-e75-perf-all-perf.json + +# Bert large bfp8_b +forge/test/benchmark/benchmark.py -m bert -c large_tc -mb 64 -o forge-silicon-gs-e75-perf-all-perf.json + +# Whisper fp16_b +forge/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o forge-silicon-gs-e75-perf-all-perf.json + +# T5 fp16_b +forge/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o forge-silicon-gs-e75-perf-all-perf.json + +# Flan-T5 fp16_b +forge/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o forge-silicon-gs-e75-perf-all-perf.json diff --git a/forge/test/benchmark/run_benchmark_tti b/forge/test/benchmark/run_benchmark_tti new file mode 100644 index 000000000..3c97f0039 --- /dev/null +++ b/forge/test/benchmark/run_benchmark_tti @@ -0,0 +1,11 @@ +# emulate runs on harvested machines +export FORGE_FORCE_EMULATE_HARVESTED=1 +unset FORGE_CI_DIR + +# TTI Save +forge/test/benchmark/benchmark.py -m bert -c tiny -opt 4 -o perf.json --env "FORGE_EXP_APPROX=1 FORGE_DISABLE_DYNAMIC_DRAM=1 FORGE_FORCE_INTERMED_TO_OUTPUT_DF=1" --auto_transpose --save_tti device_images/bert_tiny.tti +forge/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -opt 4 --loop_count 32 -mb 64 -bp Ribbon -df Fp16_b -mf HiFi2 --env "FORGE_RIBBON2=1 FORGE_LEGACY_UBLOCK_SHAPE=1 FORGE_MAXIMIZE_SPARSE_UBLOCK=1 FORGE_ENABLE_L1_ACCUMULATE=1 FORGE_EXTRA_L1_MARGIN=65536 FORGE_FUSED_OP_MULTIPLIER=20 FORGE_ENABLE_DEPTHWISE=1" -o perf.json --auto_transpose --save_tti device_images/mobilenet_v1.tti + +# TTI Load +forge/test/benchmark/benchmark.py -m bert -c tiny -opt 4 -o perf.json --env "FORGE_EXP_APPROX=1 FORGE_DISABLE_DYNAMIC_DRAM=1 FORGE_FORCE_INTERMED_TO_OUTPUT_DF=1" --auto_transpose --load_tti device_images/bert_tiny.tti +forge/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -opt 4 --loop_count 32 -mb 64 -bp Ribbon -df Fp16_b -mf HiFi2 --env "FORGE_RIBBON2=1 FORGE_LEGACY_UBLOCK_SHAPE=1 FORGE_MAXIMIZE_SPARSE_UBLOCK=1 FORGE_ENABLE_L1_ACCUMULATE=1 FORGE_EXTRA_L1_MARGIN=65536 FORGE_FUSED_OP_MULTIPLIER=20 FORGE_ENABLE_DEPTHWISE=1" -o perf.json --auto_transpose --load_tti device_images/mobilenet_v1.tti diff --git a/forge/test/benchmark/run_benchmark_wh_df_bfp8 b/forge/test/benchmark/run_benchmark_wh_df_bfp8 new file mode 100644 index 000000000..730c7928d --- /dev/null +++ b/forge/test/benchmark/run_benchmark_wh_df_bfp8 @@ -0,0 +1,25 @@ +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # +# Data Format Bfp8_b +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # + +# Default data format (-df) is Bfp8_b, 
default math fidelity (-mf) is LoFi + +# Mobilenet v3 +forge/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -o forge-silicon-wh-b0-perf-all-bfp8_b-perf.json + +# OpenPose Body +forge/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -o forge-silicon-wh-b0-perf-all-bfp8_b-perf.json + +# YOLOv5 +forge/test/benchmark/benchmark.py -m yolo_v5 -c s -o forge-silicon-wh-b0-perf-all-bfp8_b-perf.json + +# Whisper +forge/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 --single-thread --generative -o forge-silicon-wh-b0-perf-all-bfp8_b-perf.json + +# T5 +# Low accuracy. +forge/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 --single-thread --generative -o forge-silicon-wh-b0-perf-all-bfp8_b-perf.json + +# Flan-T5 +# Low accuracy. +forge/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -mf HiFi2 --single-thread --generative -o forge-silicon-wh-b0-perf-all-bfp8_b-perf.json diff --git a/forge/test/benchmark/run_benchmark_wh_df_fp16 b/forge/test/benchmark/run_benchmark_wh_df_fp16 new file mode 100644 index 000000000..68ef3bc74 --- /dev/null +++ b/forge/test/benchmark/run_benchmark_wh_df_fp16 @@ -0,0 +1,41 @@ +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # +# Data Format Fp16, Fp16_b +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # + +# ResNet +forge/test/benchmark/benchmark.py -m resnet -c resnet50 -df Fp16_b -mf HiFi3 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# Mobilenet v1 +forge/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -df Fp16_b -mf HiFi2 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# Mobilenet v2 +forge/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -df Fp16_b -mf HiFi2 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# Vit +forge/test/benchmark/benchmark.py -m vit -c base -df Fp16_b -mf HiFi2 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# Deit +forge/test/benchmark/benchmark.py -m deit -c base -df Fp16_b -mf HiFi2 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# VoVNet v2 +forge/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -df Fp16_b -mf HiFi3 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# OpenPose Hand +forge/test/benchmark/benchmark.py -m openpose_hand -c basic -df Fp16_b -mf HiFi3 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# HRNet +forge/test/benchmark/benchmark.py -m hrnet -c v2_w64 -df Fp16_b -mf HiFi3 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# YOLOv3 +# Issue to make it run with mb 64 tenstorrent/forge#1298 +# Issue to remove FORGE_OVERRIDE_INPUT_QUEUE_ENTRIES=32 tenstorrent/forge#1299 +forge/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -df Fp16_b -mf HiFi3 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# Inception v4 +forge/test/benchmark/benchmark.py -m inception_v4 -c 224 -df Fp16_b -mf HiFi3 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# UNet +forge/test/benchmark/benchmark.py -m unet -c 256 -df Fp16_b -mf HiFi3 -o forge-silicon-wh-b0-perf-all-fp16-perf.json + +# Bert large +forge/test/benchmark/benchmark.py -m bert -c large_tc -df Fp16_b -mf HiFi3 -o forge-silicon-wh-b0-perf-all-fp16-perf.json diff --git a/forge/test/benchmark/run_benchmark_wh_release b/forge/test/benchmark/run_benchmark_wh_release new file mode 100644 index 000000000..7ad5682d1 --- /dev/null +++ b/forge/test/benchmark/run_benchmark_wh_release @@ 
-0,0 +1,62 @@ +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # +# Models with data Formats that have good accuracy on Wormhole B0 and that we release as official numbers +# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # + + +# ResNet bfp8_b +forge/test/benchmark/benchmark.py -m resnet -c resnet50 -mb 256 -o forge-silicon-wh-b0-perf-all-perf.json + +#ResNet quant fp32 +forge/test/benchmark/benchmark.py -m resnet_quant -c resnet50 -df Fp32 -mf HiFi4 -mb 64 -o forge-silicon-wh-b0-perf-all-perf.json + +# Mobilenet v1 bfp8_b +forge/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mb 256 -o forge-silicon-wh-b0-perf-all-perf.json + +# Mobilenet v2 bfp8_b +forge/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -mb 256 -o forge-silicon-wh-b0-perf-all-perf.json + +# Mobilenet v3 fp16_b +forge/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -df Fp16_b -mf HiFi2 -mb 64 -o forge-silicon-wh-b0-perf-all-perf.json + +# Vit bfp8_b +forge/test/benchmark/benchmark.py -m vit -c base -mb 256 -o forge-silicon-wh-b0-perf-all-perf.json + +# Deit bfp8_b +forge/test/benchmark/benchmark.py -m deit -c base -mb 256 -o forge-silicon-wh-b0-perf-all-perf.json + +# VoVNet v2 bfp8_b +forge/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -o forge-silicon-wh-b0-perf-all-perf.json + +# OpenPose Body fp16 +forge/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -df Fp16 -mf HiFi3 -o forge-silicon-wh-b0-perf-all-perf.json + +# OpenPose Hand bfp8_b +forge/test/benchmark/benchmark.py -m openpose_hand -c basic -o forge-silicon-wh-b0-perf-all-perf.json + +# HRNet bfp8_b +forge/test/benchmark/benchmark.py -m hrnet -c v2_w64 -o forge-silicon-wh-b0-perf-all-perf.json + +# YOLOv3 bfp8_b +# Issue to make it run with mb 64 tenstorrent/forge#1298 +forge/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -o forge-silicon-wh-b0-perf-all-perf.json + +# YOLOv5 fp16_b +forge/test/benchmark/benchmark.py -m yolo_v5 -c s -df Fp16_b -mf HiFi3 -o forge-silicon-wh-b0-perf-all-perf.json + +# Inception v4 bfp8_b +forge/test/benchmark/benchmark.py -m inception_v4 -c 224 -o forge-silicon-wh-b0-perf-all-perf.json + +# UNet bfp8_b +forge/test/benchmark/benchmark.py -m unet -c 256 -mb 64 -o forge-silicon-wh-b0-perf-all-perf.json + +# Bert large bfp8_b +forge/test/benchmark/benchmark.py -m bert -c large_tc -mb 64 -o forge-silicon-wh-b0-perf-all-perf.json + +# Whisper fp16_b +forge/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o forge-silicon-wh-b0-perf-all-perf.json + +# T5 fp16_b +forge/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o forge-silicon-wh-b0-perf-all-perf.json + +# Flan-T5 fp16_b +forge/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o forge-silicon-wh-b0-perf-all-perf.json diff --git a/pybuda/test/bert/__init__.py b/forge/test/bert/__init__.py similarity index 100% rename from pybuda/test/bert/__init__.py rename to forge/test/bert/__init__.py diff --git a/pybuda/test/bert/modules.py b/forge/test/bert/modules.py similarity index 94% rename from pybuda/test/bert/modules.py rename to forge/test/bert/modules.py index d9a795309..78236adec 100644 --- a/pybuda/test/bert/modules.py +++ 
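The benchmark scripts above are flat lists of forge/test/benchmark/benchmark.py invocations, sometimes preceded by a FORGE_FORCE_EMULATE_HARVESTED export. As a hedged illustration only, the same commands could be replayed from Python with subprocess; the wrapper below is hypothetical (it is not part of this patch), and the flag meanings (-m model, -c config, -mb microbatch, -df data format, -mf math fidelity, -o output JSON) are inferred from their usage in these scripts rather than from benchmark.py itself.

# Hypothetical replay helper for the benchmark commands listed above.
import os
import subprocess

def run_benchmark(model, config, out_json, mb=None, df=None, mf=None, extra=(), env=None):
    cmd = ["python", "forge/test/benchmark/benchmark.py", "-m", model, "-c", config]
    if mb is not None:
        cmd += ["-mb", str(mb)]
    if df is not None:
        cmd += ["-df", df]
    if mf is not None:
        cmd += ["-mf", mf]
    cmd += list(extra) + ["-o", out_json]
    # Merge any per-run environment (e.g. harvesting emulation) over the current one.
    subprocess.run(cmd, check=True, env=dict(os.environ, **(env or {})))

# Mirrors one line of run_benchmark_gs_e75_release: emulate a two-row harvested chip.
run_benchmark("resnet", "resnet50", "forge-silicon-gs-e75-perf-all-perf.json",
              mb=64, df="Fp16_b", mf="HiFi3",
              env={"FORGE_FORCE_EMULATE_HARVESTED": "2"})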
b/forge/test/bert/modules.py @@ -3,8 +3,8 @@ # SPDX-License-Identifier: Apache-2.0 from typing import Dict -from pybuda import PyBudaModule -from pybuda.op import ( +from forge import ForgeModule +from forge.op import ( Matmul, HSlice, Add, @@ -20,8 +20,8 @@ Subtract, Constant, ) -from pybuda.op import nn -from pybuda import Parameter +from forge.op import nn +from forge import Parameter def get_bert_parameters(module: str, hidden_dim=128, encoder_index=0, vocab_size=0) -> Dict[str, Parameter]: intermed_dim = 4 * hidden_dim @@ -75,7 +75,7 @@ def get_bert_parameters(module: str, hidden_dim=128, encoder_index=0, vocab_size raise RuntimeError("Unknown bert module type") -class PyBudaBertMHA(PyBudaModule): +class ForgeBertMHA(ForgeModule): def __init__(self, name, parameters, config): super().__init__(name) @@ -130,7 +130,7 @@ def param(name): return output -class PyBudaFeedForward(PyBudaModule): +class ForgeFeedForward(ForgeModule): def __init__(self, name, parameters, config): super().__init__(name) @@ -157,13 +157,13 @@ def param(name): return output -class PyBudaFFNorm(PyBudaModule): +class ForgeFFNorm(ForgeModule): def __init__(self, name, parameters, config): super().__init__(name) self.parameters = parameters self.config = config - self.ff = PyBudaFeedForward("ff", parameters, config) + self.ff = ForgeFeedForward("ff", parameters, config) def forward(self, input): @@ -179,14 +179,14 @@ def param(name): return result -class PyBudaBertEncoder(PyBudaModule): +class ForgeBertEncoder(ForgeModule): def __init__(self, name, parameters, config): super().__init__(name) self.parameters = parameters self.config = config - self.mha = PyBudaBertMHA("mha", parameters, config) - self.ff = PyBudaFeedForward("ff", parameters, config) + self.mha = ForgeBertMHA("mha", parameters, config) + self.ff = ForgeFeedForward("ff", parameters, config) def forward(self, encoder_input, attention_mask): @@ -208,7 +208,7 @@ def param(name): else: return result -class PyBudaPredictionHeadTransform(PyBudaModule): +class ForgePredictionHeadTransform(ForgeModule): def __init__(self, name, parameters, config): super().__init__(name) self.parameters = parameters @@ -225,7 +225,7 @@ def param(name): hidden_states = nn.Layernorm("pred_ln", hidden_states, param("LayerNorm.weight"), param("LayerNorm.bias")) return hidden_states -class PyBudaPredictionHeadDecoder(PyBudaModule): +class ForgePredictionHeadDecoder(ForgeModule): def __init__(self, name, parameters, config): super().__init__(name) self.parameters = parameters diff --git a/pybuda/test/common.py b/forge/test/common.py similarity index 90% rename from pybuda/test/common.py rename to forge/test/common.py index 61fdacce0..4fdadd772 100644 --- a/pybuda/test/common.py +++ b/forge/test/common.py @@ -3,10 +3,10 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np import torch -import pybuda -from pybuda import is_silicon -from pybuda import PyBudaModule, PyTorchModule, TTDevice, pybuda_compile, Parameter, BackendType -from pybuda.verify import verify_module +import forge +from forge import is_silicon +from forge import ForgeModule, PyTorchModule, TTDevice, forge_compile, Parameter, BackendType +from forge.verify import verify_module from .conftest import TestDevice from loguru import logger @@ -25,7 +25,7 @@ def create_microbatches(epochs, steps, accumulation_steps, microbatch_count, mic return torch.split(tensor, micro_batch_size, 1) -class ModuleBuilder(PyBudaModule): +class ModuleBuilder(ForgeModule): def __init__(self, forward_fn, **kwargs): 
super().__init__(self.__class__.__name__ + "." + forward_fn.__name__) self.forward_fn = forward_fn @@ -90,7 +90,7 @@ def compile( chip_ids: List of chips to run on (optional) - Everything else gets passed through to pybuda_compile + Everything else gets passed through to forge_compile """ def decorator(test_fn): @@ -111,7 +111,7 @@ def wrapper(*activations, **kwargs): optimizer = create_optimizer(module) device = create_device(optimizer) device.place_module(module) - return pybuda_compile( + return forge_compile( device, test_fn.__name__, *activations, **compile_kwargs ) @@ -133,7 +133,7 @@ def run(verify_cfg, module_cls=ModuleBuilder, num_inputs=1, **compile_kwargs): Everything gets passed through to verify_module """ if isinstance(verify_cfg, TestDevice): - verify_cfg = pybuda.VerifyConfig(devtype=verify_cfg.devtype, arch=verify_cfg.arch, devmode=verify_cfg.devmode, tti_path=verify_cfg.tti_path) + verify_cfg = forge.VerifyConfig(devtype=verify_cfg.devtype, arch=verify_cfg.arch, devmode=verify_cfg.devmode, tti_path=verify_cfg.tti_path) def decorator(test_fn): def wrapper(*activations, **kwargs): @@ -152,7 +152,7 @@ def run_torch(verify_cfg, **compile_kwargs): def create_sgd_optimizer(learning_rate): def create_fn(mod): - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=learning_rate, parameters=mod.get_parameters(), ) diff --git a/pybuda/test/conftest.py b/forge/test/conftest.py similarity index 87% rename from pybuda/test/conftest.py rename to forge/test/conftest.py index 4ef3670fb..cbe4d228e 100644 --- a/pybuda/test/conftest.py +++ b/forge/test/conftest.py @@ -17,16 +17,16 @@ # hence need to be set as GLOBAL. This is a requirement for ZeBu. import sys original_flags = sys.getdlopenflags() -if (os.environ.get("PYBUDA_ENABLE_EMULATION_DEVICE") == "1"): +if (os.environ.get("FORGE_ENABLE_EMULATION_DEVICE") == "1"): sys.setdlopenflags(os.RTLD_LAZY | os.RTLD_GLOBAL) # Import code that requires os.RTLD_GLOBAL goes here # Reset the flags to their original value -if (os.environ.get("PYBUDA_ENABLE_EMULATION_DEVICE") == "1"): +if (os.environ.get("FORGE_ENABLE_EMULATION_DEVICE") == "1"): sys.setdlopenflags(original_flags) -import pybuda -from pybuda.verify.config import TestKind -from pybuda.torch_compile import reset_state +import forge +from forge.verify.config import TestKind +from forge.torch_compile import reset_state collect_ignore = ["legacy_tests"] @@ -34,8 +34,8 @@ def pytest_sessionstart(session): # See: https://github.com/pytorch/pytorch/wiki/Autograd-and-Fork mp.set_start_method('spawn') num_threads = 8 - if 'PYBUDA_NUM_THREADS' in os.environ: - num_threads = int(os.environ['PYBUDA_NUM_THREADS']) + if 'FORGE_NUM_THREADS' in os.environ: + num_threads = int(os.environ['FORGE_NUM_THREADS']) torch.set_num_threads(num_threads) mp.set_sharing_strategy('file_system') os.environ["TVM_NUM_THREADS"] = f"{num_threads}" @@ -47,18 +47,18 @@ def pytest_sessionstart(session): # It can be useful in CI jobs to get the state of the enviroment variables before test session starts print_env_variables = bool(int(os.environ.get("PYTEST_PRINT_ENV_VARIABLES", "0"))) if print_env_variables: - pybuda_specific_vars = {} + forge_specific_vars = {} tt_backend_specific_vars = {} print(f"####### Environment variables - Count: {len(os.environ)} #######") for key, value in os.environ.items(): print(f"{key}={value}") - if key.startswith("PYBUDA_") or key.startswith("GOLDEN_"): - pybuda_specific_vars[key] = value + if key.startswith("FORGE_") or key.startswith("GOLDEN_"): + 
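The renames in modules.py, common.py and conftest.py above reduce to a mechanical mapping from pybuda names to forge names. The sketch below only summarizes renames that appear in this diff and adds no new behaviour; TinyMatmul simply mirrors the minimal Matmul-only modules used in the tests.

# Rename map implied by the diffs above (illustrative, not exhaustive):
#   import pybuda                    ->  import forge
#   from pybuda import PyBudaModule  ->  from forge import ForgeModule
#   pybuda_compile(...)              ->  forge_compile(...)
#   pybuda.op.Matmul(...)            ->  forge.op.Matmul(...)
#   pybuda.pybuda_reset()            ->  forge.forge_reset()
#   PYBUDA_* environment variables   ->  FORGE_* (e.g. PYBUDA_NUM_THREADS -> FORGE_NUM_THREADS)
import forge
import forge.op
from forge import ForgeModule

class TinyMatmul(ForgeModule):
    """Minimal module written against the renamed API."""
    def __init__(self, name):
        super().__init__(name)

    def forward(self, act1, act2):
        return forge.op.Matmul("matmul1", act1, act2)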
forge_specific_vars[key] = value elif key.startswith("TT_BACKEND_"): tt_backend_specific_vars[key] = value - print(f"####### PYBUDA specific enviroment variables - Count: {len(pybuda_specific_vars)} #######") - for key, value in pybuda_specific_vars.items(): + print(f"####### FORGE specific enviroment variables - Count: {len(forge_specific_vars)} #######") + for key, value in forge_specific_vars.items(): print(f"{key}={value}") print(f"####### TT_BACKEND specific enviroment variables - Count: {len(tt_backend_specific_vars)} #######") @@ -66,8 +66,8 @@ def pytest_sessionstart(session): print(f"{key}={value}") @pytest.fixture(autouse=True) -def clear_pybuda(): - if "PYBUDA_RESET_DEV_BEFORE_TEST" in os.environ: +def clear_forge(): + if "FORGE_RESET_DEV_BEFORE_TEST" in os.environ: # Reset device between tests # For this to work, pytest must be called with --forked subprocess.check_call(["device/bin/silicon/reset.sh"], cwd=os.environ["BUDA_HOME"]) @@ -86,7 +86,7 @@ def clear_pybuda(): yield # clean up after each test - pybuda.pybuda_reset() + forge.forge_reset() torch._dynamo.reset() reset_state() @@ -186,7 +186,7 @@ def no_skip(*args, **kwargs): # return TestDevice(devtype=BackendType.Golden, arch=DEVICE_CONFIG_TO_BACKEND_DEVICE_TYPE[device_config], devmode=devmode, tti_path=tti_path) # elif "GOLDEN_WORMHOLE_B0" in os.environ: # return TestDevice(devtype=BackendType.Golden, arch=BackendDevice.Wormhole_B0, devmode=devmode, tti_path=tti_path) -# elif "PYBUDA_GOLDEN_BLACKHOLE" in os.environ: +# elif "FORGE_GOLDEN_BLACKHOLE" in os.environ: # return TestDevice(devtype=BackendType.Golden, arch=BackendDevice.Blackhole, devmode=devmode, tti_path=tti_path) # return TestDevice(devtype=BackendType.Golden, arch=BackendDevice.Grayskull, devmode=devmode, tti_path=tti_path) # if name == "Model": @@ -194,16 +194,16 @@ def no_skip(*args, **kwargs): # if name == "Versim": # # Set default versim device arch to Grayskull # versim_backend_device = BackendDevice.Grayskull -# # If PYBUDA_VERSIM_DEVICE_ARCH is set, use that arch for Versim device -# versim_arch_name = os.environ.get("PYBUDA_VERSIM_DEVICE_ARCH", None) +# # If FORGE_VERSIM_DEVICE_ARCH is set, use that arch for Versim device +# versim_arch_name = os.environ.get("FORGE_VERSIM_DEVICE_ARCH", None) # if versim_arch_name != None: # versim_backend_device = BackendDevice.from_string(versim_arch_name) # return TestDevice(devtype=BackendType.Versim, arch=versim_backend_device, devmode=devmode, tti_path=tti_path) # if name == "Emulation": # # Set default emulation device arch to Grayskull # emulation_backend_device = BackendDevice.Grayskull -# # If PYBUDA_EMULATION_DEVICE_ARCH is set, use that arch for Emulation device -# emulation_arch_name = os.environ.get("PYBUDA_EMULATION_DEVICE_ARCH", None) +# # If FORGE_EMULATION_DEVICE_ARCH is set, use that arch for Emulation device +# emulation_arch_name = os.environ.get("FORGE_EMULATION_DEVICE_ARCH", None) # if emulation_arch_name != None: # emulation_backend_device = BackendDevice.from_string(emulation_arch_name) # return TestDevice(devtype=BackendType.Emulation, arch=emulation_backend_device, devmode=devmode, tti_path=tti_path) @@ -227,13 +227,13 @@ def no_skip(*args, **kwargs): # return not silicon_only # if self.devtype == BackendType.Model: -# return bool(int(os.environ.get("PYBUDA_ENABLE_MODEL_DEVICE", "0"))) +# return bool(int(os.environ.get("FORGE_ENABLE_MODEL_DEVICE", "0"))) # if self.devtype == BackendType.Versim: -# return bool(int(os.environ.get("PYBUDA_ENABLE_VERSIM_DEVICE", "0"))) +# return 
bool(int(os.environ.get("FORGE_ENABLE_VERSIM_DEVICE", "0"))) # if self.devtype == BackendType.Emulation: -# return bool(int(os.environ.get("PYBUDA_ENABLE_EMULATION_DEVICE", "0"))) +# return bool(int(os.environ.get("FORGE_ENABLE_EMULATION_DEVICE", "0"))) # if self.devtype == BackendType.Silicon: # compiled_arch_name = os.environ.get("BACKEND_ARCH_NAME", None) or os.environ.get("ARCH_NAME", None) @@ -316,14 +316,14 @@ def pytest_runtest_logreport(report): global device_cfg_global if device_cfg_global: - pybuda.set_configuration_options(device_config=device_cfg_global) + forge.set_configuration_options(device_config=device_cfg_global) - if "PYBUDA_OVERRIDES_VETO" in os.environ: - from pybuda.config import _set_pybuda_override_veto + if "FORGE_OVERRIDES_VETO" in os.environ: + from forge.config import _set_forge_override_veto # This functionality represents one way to control general and env based compiler configuration (enable us to # add/update/remove existing configs in each test with ease during runtime). In sum, it uses a dict of key-value pairs - # that control all PyBuda specific overrides set in test. Have in mind that this doesn't apply for everything set + # that control all Forge specific overrides set in test. Have in mind that this doesn't apply for everything set # outside of the test itself (e.g. env vars set before calling the specific pytest). # # Input to this function is represented as two dicts: @@ -341,12 +341,12 @@ def pytest_runtest_logreport(report): # - Level 2 - set by dev in test; we want to remove them (e.g. enable/disable by default, redefine as more user friendly, etc.) # - Level 3 - set by dev in test; we want to remove them entirely (purely for testing purposes) # - if "PYBUDA_OVERRIDES_VETO_CUSTOM_SETUP" in os.environ: - _set_pybuda_override_veto({ + if "FORGE_OVERRIDES_VETO_CUSTOM_SETUP" in os.environ: + _set_forge_override_veto({ "backend_output_dir": "", }, {}) else: - _set_pybuda_override_veto({ + _set_forge_override_veto({ "backend_output_dir": "", "backend_runtime_params_path": "", "harvesting_mask": "", @@ -359,10 +359,10 @@ def pytest_runtest_logreport(report): }, { # Level 2 overrides - "PYBUDA_RIBBON2": "", - "PYBUDA_DISABLE_STREAM_OUTPUT": "", - "PYBUDA_PAD_OUTPUT_BUFFER": "", - "PYBUDA_OVERRIDE_DEVICE_YAML": "" # Mostly used for 1x1 model overrides + "FORGE_RIBBON2": "", + "FORGE_DISABLE_STREAM_OUTPUT": "", + "FORGE_PAD_OUTPUT_BUFFER": "", + "FORGE_OVERRIDE_DEVICE_YAML": "" # Mostly used for 1x1 model overrides }) elif report.when == "teardown": diff --git a/pybuda/test/data_formats/test_df.py b/forge/test/data_formats/test_df.py similarity index 76% rename from pybuda/test/data_formats/test_df.py rename to forge/test/data_formats/test_df.py index dab898917..7ba6aa634 100644 --- a/pybuda/test/data_formats/test_df.py +++ b/forge/test/data_formats/test_df.py @@ -8,26 +8,26 @@ import os import torch -import pybuda -import pybuda.op -from pybuda import ( - PyBudaModule, +import forge +import forge.op +from forge import ( + ForgeModule, TTDevice, BackendType, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, DataFormat, ) -from pybuda._C.backend_api import BackendDevice -from pybuda._C import NodeEpochType -from pybuda.verify import verify_module, TestKind, config +from forge._C.backend_api import BackendDevice +from forge._C import NodeEpochType +from forge.verify import verify_module, TestKind, config from test.common import ModuleBuilder, run, run_torch verify_cfg = VerifyConfig(run_golden=True) # Run backend golden check on all 
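The pytest_runtest_logreport changes above describe the override-veto mechanism: _set_forge_override_veto takes two dicts, the first for general compiler overrides and the second for environment-based (Level 2) overrides, with empty-string values marking settings to neutralize. A minimal sketch of that call pattern follows; the exact semantics of each key are assumptions based only on the surrounding comments in conftest.py.

# Sketch of the override-veto call pattern used in conftest.py above.
import os
from forge.config import _set_forge_override_veto

if "FORGE_OVERRIDES_VETO" in os.environ:
    _set_forge_override_veto(
        {
            "backend_output_dir": "",   # general override to veto (empty value, as in conftest.py)
        },
        {
            "FORGE_RIBBON2": "",        # Level 2 env-based override to veto
        },
    )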
tests in here -class BudaTest(PyBudaModule): +class BudaTest(ForgeModule): """ Simple buda module for basic testing """ @@ -38,7 +38,7 @@ def __init__(self, name): super().__init__(name) def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, act2) + m1 = forge.op.Matmul("matmul1", act1, act2) return m1 @pytest.mark.parametrize("pt_format", (torch.float32, torch.bfloat16, torch.float16), ids=["float32", "bfloat16", "float16"]) @@ -53,7 +53,7 @@ def test_input(pt_format): act1 = Tensor.create_from_torch(torch.rand(*BudaTest.shape, dtype=pt_format)) act2 = Tensor.create_from_torch(torch.rand(*BudaTest.shape, requires_grad=True, dtype=pt_format)) - pybuda_compile(tt0, "sanity", act1, act2, compiler_cfg=CompilerConfig(enable_training=False), verify_cfg=verify_cfg) + forge_compile(tt0, "sanity", act1, act2, compiler_cfg=CompilerConfig(enable_training=False), verify_cfg=verify_cfg) @pytest.mark.parametrize("pt_format", (torch.float32, torch.bfloat16, torch.float16, torch.int8), ids=["float32", "bfloat16", "float16", "int8"]) @@ -81,7 +81,7 @@ def test_input_with_conversion(pt_format, target_format): act1 = Tensor.create_from_torch(torch.rand(*BudaTest.shape, dtype=pt_format), dev_data_format=target_format) act2 = Tensor.create_from_torch(torch.rand(*BudaTest.shape, requires_grad=True, dtype=pt_format), dev_data_format=target_format) - pybuda_compile(tt0, "sanity", act1, act2, compiler_cfg=CompilerConfig(enable_training=False), verify_cfg=verify_cfg) + forge_compile(tt0, "sanity", act1, act2, compiler_cfg=CompilerConfig(enable_training=False), verify_cfg=verify_cfg) @pytest.mark.skip(reason="Still working on this") @pytest.mark.parametrize("pt_format", (torch.float32, torch.bfloat16, torch.float16, torch.int8), ids=["float32", "bfloat16", "float16", "int8"]) @@ -100,7 +100,7 @@ def test_input_with_conversion_backend(pt_format, target_format): "requires_grad": pt_format != torch.int8} ], verify_cfg=VerifyConfig(test_kind=TestKind.INFERENCE)) -class BudaDFTest(pybuda.PyBudaModule): +class BudaDFTest(forge.ForgeModule): """ Simple buda module for basic testing """ @@ -109,14 +109,14 @@ class BudaDFTest(pybuda.PyBudaModule): def __init__(self, name: str): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = pybuda.op.Matmul("matmul2", act2, self.weights2) - m1e = pybuda.op.Sqrt("sqrt", m1) - add = pybuda.op.Add("add", m1e, m2) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act2, self.weights2) + m1e = forge.op.Sqrt("sqrt", m1) + add = forge.op.Add("add", m1e, m2) return add # Too many to run, ends up failing in pytest due to "too many open files" @@ -140,48 +140,48 @@ def test_data_formats(test_kind, test_device, param1_df, param2_df, input1_df, i def test_bwd_op_format_promotion(): def bwd_op_format_promotio(act, *, ff1_weights): - return pybuda.op.Matmul(f"ff1", act, ff1_weights) + return forge.op.Matmul(f"ff1", act, ff1_weights) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( epoch_type=NodeEpochType.Backward, output_df=DataFormat.Float32, accumulate_df=DataFormat.Float16_b ) - module = ModuleBuilder(bwd_op_format_promotio, ff1_weights=pybuda.Parameter(1,1,64,64)) + module = 
ModuleBuilder(bwd_op_format_promotio, ff1_weights=forge.Parameter(1,1,64,64)) verify_module(module, [(1, 1, 64, 64)], VerifyConfig(test_kind=config.TestKind.TRAINING)) def test_gradient_op_format_promotion(): def gradient_op_format_promotion(act, *, ff1_weights): - return pybuda.op.Matmul(f"ff1", act, ff1_weights) + return forge.op.Matmul(f"ff1", act, ff1_weights) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( is_gradient_op=True, output_df=DataFormat.Float32, accumulate_df=DataFormat.Float16_b ) - module = ModuleBuilder(gradient_op_format_promotion, ff1_weights=pybuda.Parameter(1,1,64,64)) + module = ModuleBuilder(gradient_op_format_promotion, ff1_weights=forge.Parameter(1,1,64,64)) verify_module(module, [(1, 1, 64, 64)], VerifyConfig(test_kind=config.TestKind.TRAINING)) def test_bwd_fail(): def bwd_op_format_promotio(act, *, ff1_weights): - return pybuda.op.Matmul(f"ff1", act, ff1_weights) + return forge.op.Matmul(f"ff1", act, ff1_weights) df = DataFormat.Float16 - pybuda.config.set_configuration_options( + forge.config.set_configuration_options( default_df_override=df, accumulate_df=df, enable_auto_transposing_placement=True, backend_opt_level=3 ) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="matmul", - accumulate_df=pybuda._C.DataFormat.Float32, - intermediate_df=pybuda._C.DataFormat.Float32, + accumulate_df=forge._C.DataFormat.Float32, + intermediate_df=forge._C.DataFormat.Float32, is_gradient_op=False ) - module = ModuleBuilder(bwd_op_format_promotio, ff1_weights=pybuda.Parameter(1,1,64,64)) + module = ModuleBuilder(bwd_op_format_promotio, ff1_weights=forge.Parameter(1,1,64,64)) verify_module(module, [(1, 1, 64, 64)], VerifyConfig(test_kind=config.TestKind.TRAINING)) def test_eltwise_binary_mixed_ab_inputs(test_device): @@ -191,7 +191,7 @@ def test_eltwise_binary_mixed_ab_inputs(test_device): VerifyConfig(test_kind=TestKind.INFERENCE, devtype=test_device.devtype, arch=test_device.arch), ) def mixed_ab_inputs(x, y): - return pybuda.op.Add("add", x, y) + return forge.op.Add("add", x, y) x = Tensor.create_from_torch(torch.randn(shape, dtype=torch.bfloat16)) y = Tensor.create_from_torch(torch.randn(shape, dtype=torch.float16)) @@ -208,15 +208,15 @@ def test_matmul_large_mk_decoupled_acc_intermediate_df(test_device): VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch), ) def simple_matmul_gradient_t(x, weight=None): - return pybuda.op.Matmul("mm0", x, weight) + return forge.op.Matmul("mm0", x, weight) x = Tensor.create_from_torch(torch.randn(shape, requires_grad=test_kind.is_training())) - w = pybuda.Parameter(*shape, requires_grad=test_kind.is_training()) + w = forge.Parameter(*shape, requires_grad=test_kind.is_training()) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="matmul", - intermediate_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float32, + intermediate_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float32, ) simple_matmul_gradient_t(x, weight=w) @@ -231,23 +231,23 @@ def test_gradient_matmul_decoupled_acc_intermediate_df(test_device): VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch), ) def simple_matmul_gradient_t(x, weight=None): - return pybuda.op.Matmul("mm0", x, weight) + return forge.op.Matmul("mm0", x, weight) x = Tensor.create_from_torch(torch.randn(shape, requires_grad=test_kind.is_training(), dtype=torch.bfloat16)) - w = 
pybuda.Parameter(torch.randn(shape, requires_grad=test_kind.is_training(), dtype=torch.bfloat16)) + w = forge.Parameter(torch.randn(shape, requires_grad=test_kind.is_training(), dtype=torch.bfloat16)) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="matmul", - intermediate_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float32, - output_df=pybuda._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float32, + output_df=forge._C.DataFormat.Float16_b, is_gradient_op=True ) simple_matmul_gradient_t(x, weight=w) def test_stochastic_rounding(test_device): - os.environ["PYBUDA_ENABLE_STOCHASTIC_ROUNDING"] = "1" + os.environ["FORGE_ENABLE_STOCHASTIC_ROUNDING"] = "1" if test_device.devtype == BackendType.Golden: os.environ["GOLDEN_WORMHOLE_B0"] = "1" @run( @@ -257,9 +257,9 @@ def test_stochastic_rounding(test_device): arch=test_device.arch), ) def operation(x, y, z): - matmul = pybuda.op.Matmul("matmul", x, y) - gelu = pybuda.op.Gelu("gelu", matmul) - add = pybuda.op.Add("add", gelu, z) + matmul = forge.op.Matmul("matmul", x, y) + gelu = forge.op.Gelu("gelu", matmul) + add = forge.op.Add("add", gelu, z) return add dims = (1, 1, 128, 128) @@ -271,11 +271,11 @@ def operation(x, y, z): def test_splice(test_device): - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="splice", input_df={ - 0: [pybuda.DataFormat.Float32, True], - 1: [pybuda.DataFormat.Bfp8, True], + 0: [forge.DataFormat.Float32, True], + 1: [forge.DataFormat.Bfp8, True], } ) @@ -296,13 +296,13 @@ def test_format_conversion(test_device): Grayskull, matmul, a-formats on inputs, b-format on output w/ bfp8_b """ - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="matmul", input_df={ - 0: [pybuda.DataFormat.Float16_b, True], - 1: [pybuda.DataFormat.Float16_b, True], + 0: [forge.DataFormat.Float16_b, True], + 1: [forge.DataFormat.Float16_b, True], }, - output_df=pybuda.DataFormat.Bfp8 + output_df=forge.DataFormat.Bfp8 ) @run_torch( diff --git a/pybuda/test/data_formats/test_int8.py b/forge/test/data_formats/test_int8.py similarity index 82% rename from pybuda/test/data_formats/test_int8.py rename to forge/test/data_formats/test_int8.py index 975d058ac..04e27986c 100644 --- a/pybuda/test/data_formats/test_int8.py +++ b/forge/test/data_formats/test_int8.py @@ -5,19 +5,19 @@ import os import torch -import pybuda -import pybuda.op -from pybuda import ( +import forge +import forge.op +from forge import ( Tensor, VerifyConfig, DataFormat, MathFidelity, ) -from pybuda.verify import TestKind +from forge.verify import TestKind from test.common import run_torch # Skip backend compilation pending bbe#1442 -os.environ["PYBUDA_ENABLE_OUTPUT_QUEUES_ON_HOST"] = "0" +os.environ["FORGE_ENABLE_OUTPUT_QUEUES_ON_HOST"] = "0" data_formats = { DataFormat.Int8 : "Int8", DataFormat.Float32: "Float32", @@ -35,15 +35,15 @@ ) def test_int8_math_fidelity(test_device, math_fidelity): # math-fidelity must be set to HiFi4 - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="matmul", input_df={ - 0: [pybuda.DataFormat.Int8, True], - 1: [pybuda.DataFormat.Int8, True], + 0: [forge.DataFormat.Int8, True], + 1: [forge.DataFormat.Int8, True], }, math_fidelity=math_fidelity, - accumulate_df=pybuda.DataFormat.Int8, - output_df=pybuda.DataFormat.Int8, + accumulate_df=forge.DataFormat.Int8, + output_df=forge.DataFormat.Int8, ) 
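The data-format tests above call forge.config.configure_mixed_precision with several keyword combinations. The consolidated sketch below uses only keywords that appear in those calls (op_type, name_regex, input_df, intermediate_df, accumulate_df, output_df, math_fidelity, is_gradient_op); whether every keyword is valid in a single call is not verified here and should be treated as an assumption.

# Consolidated mixed-precision override; keywords taken from test_df.py / test_int8.py above.
import forge
from forge import DataFormat, MathFidelity

forge.config.configure_mixed_precision(
    op_type="matmul",                     # ops can also be selected with name_regex="matmul_.*"
    input_df={
        0: [DataFormat.Float16_b, True],  # per-operand [format, flag], as written in the tests
        1: [DataFormat.Float16_b, True],
    },
    intermediate_df=DataFormat.Float16_b,
    accumulate_df=DataFormat.Float32,
    output_df=DataFormat.Float16_b,
    math_fidelity=MathFidelity.HiFi4,
    is_gradient_op=False,
)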
@run_torch( @@ -70,11 +70,11 @@ def matmul_int8_math_fidelity(x, y): @pytest.mark.parametrize("output_df", data_formats.keys(), ids=data_formats.values()) def test_int8_dfs(test_device, accumulate_df, intermediate_df, output_df): # acc_df must be set to int8 - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="matmul", input_df={ - 0: [pybuda.DataFormat.Int8, True], - 1: [pybuda.DataFormat.Int8, True], + 0: [forge.DataFormat.Int8, True], + 1: [forge.DataFormat.Int8, True], }, math_fidelity=MathFidelity.HiFi4, accumulate_df=accumulate_df, diff --git a/pybuda/test/emulation/test_emulation_basic_ops.py b/forge/test/emulation/test_emulation_basic_ops.py similarity index 80% rename from pybuda/test/emulation/test_emulation_basic_ops.py rename to forge/test/emulation/test_emulation_basic_ops.py index ce7a37f0e..aec345aec 100644 --- a/pybuda/test/emulation/test_emulation_basic_ops.py +++ b/forge/test/emulation/test_emulation_basic_ops.py @@ -4,13 +4,13 @@ # # Emulation-related tests for end-to-end emulation # -from pybuda import pybuda -from pybuda._C.backend_api import BackendType -from pybuda.module import PyTorchModule -from pybuda.verify.backend import verify_module -from pybuda.tensor import Tensor +from forge import forge +from forge._C.backend_api import BackendType +from forge.module import PyTorchModule +from forge.verify.backend import verify_module +from forge.tensor import Tensor from test.utils import download_model -from pybuda.verify.config import TestKind, VerifyConfig +from forge.verify.config import TestKind, VerifyConfig import pytest import torch from test.common import run @@ -29,10 +29,10 @@ def test_emulation_simple_matmul(test_device): pcc=0.99), ) def simple_matmul(a, b): - c = pybuda.op.Matmul("matmul0", a, b) + c = forge.op.Matmul("matmul0", a, b) return c - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.input_queues_on_host = False compiler_cfg.output_queues_on_host = False @@ -51,7 +51,7 @@ def test_bert_tiny(test_device): pt_module = PyTorchModule("bert", model) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.retain_tvm_python_files = True compiler_cfg.input_queues_on_host = False compiler_cfg.output_queues_on_host = False diff --git a/pybuda/test/falcon/__init__.py b/forge/test/falcon/__init__.py similarity index 100% rename from pybuda/test/falcon/__init__.py rename to forge/test/falcon/__init__.py diff --git a/pybuda/test/falcon/data/two_cities.json b/forge/test/falcon/data/two_cities.json similarity index 100% rename from pybuda/test/falcon/data/two_cities.json rename to forge/test/falcon/data/two_cities.json diff --git a/pybuda/test/falcon/finetune_configs/ci_basic.json b/forge/test/falcon/finetune_configs/ci_basic.json similarity index 93% rename from pybuda/test/falcon/finetune_configs/ci_basic.json rename to forge/test/falcon/finetune_configs/ci_basic.json index cc6f02f64..ddb48ac02 100644 --- a/pybuda/test/falcon/finetune_configs/ci_basic.json +++ b/forge/test/falcon/finetune_configs/ci_basic.json @@ -1,6 +1,6 @@ { "with_pytorch" : true, - "with_pybuda" : true, + "with_forge" : true, "load_dataset_from_disk" : true, "prefiltered_dataset_dir" : "/proj_sw/large-model-cache/falcon7b/datasets", "sequence_length" : 128, @@ -11,7 +11,7 @@ "explicit_pad_token" : true, "version" : "padded_split", "precision" : "high-mp", - "pybuda_device" : "silicon", + 
"forge_device" : "silicon", "batch_size" : 4, "num_accumulation_steps": 1, "num_lora_layers" : 1, diff --git a/pybuda/test/falcon/finetune_configs/ci_basic_lora.json b/forge/test/falcon/finetune_configs/ci_basic_lora.json similarity index 93% rename from pybuda/test/falcon/finetune_configs/ci_basic_lora.json rename to forge/test/falcon/finetune_configs/ci_basic_lora.json index 6a1667687..fd964c338 100644 --- a/pybuda/test/falcon/finetune_configs/ci_basic_lora.json +++ b/forge/test/falcon/finetune_configs/ci_basic_lora.json @@ -1,6 +1,6 @@ { "with_pytorch" : true, - "with_pybuda" : true, + "with_forge" : true, "load_dataset_from_disk" : true, "prefiltered_dataset_dir" : "/proj_sw/large-model-cache/falcon7b/datasets", "sequence_length" : 128, @@ -11,7 +11,7 @@ "explicit_pad_token" : true, "version" : "padded_split", "precision" : "low-mp", - "pybuda_device" : "silicon", + "forge_device" : "silicon", "batch_size" : 4, "num_accumulation_steps": 1, "num_lora_layers" : 1, diff --git a/pybuda/test/falcon/models/falcon7b/README.md b/forge/test/falcon/models/falcon7b/README.md similarity index 100% rename from pybuda/test/falcon/models/falcon7b/README.md rename to forge/test/falcon/models/falcon7b/README.md diff --git a/pybuda/test/falcon/models/falcon7b/config.json b/forge/test/falcon/models/falcon7b/config.json similarity index 100% rename from pybuda/test/falcon/models/falcon7b/config.json rename to forge/test/falcon/models/falcon7b/config.json diff --git a/pybuda/test/falcon/models/falcon7b/config_padded.json b/forge/test/falcon/models/falcon7b/config_padded.json similarity index 100% rename from pybuda/test/falcon/models/falcon7b/config_padded.json rename to forge/test/falcon/models/falcon7b/config_padded.json diff --git a/pybuda/test/falcon/models/falcon7b/configuration_RW.py b/forge/test/falcon/models/falcon7b/configuration_RW.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/configuration_RW.py rename to forge/test/falcon/models/falcon7b/configuration_RW.py diff --git a/pybuda/test/falcon/models/falcon7b/generation_config.json b/forge/test/falcon/models/falcon7b/generation_config.json similarity index 100% rename from pybuda/test/falcon/models/falcon7b/generation_config.json rename to forge/test/falcon/models/falcon7b/generation_config.json diff --git a/pybuda/test/falcon/models/falcon7b/modelling_RW.py b/forge/test/falcon/models/falcon7b/modelling_RW.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/modelling_RW.py rename to forge/test/falcon/models/falcon7b/modelling_RW.py diff --git a/pybuda/test/falcon/models/falcon7b/modelling_RW_original.py b/forge/test/falcon/models/falcon7b/modelling_RW_original.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/modelling_RW_original.py rename to forge/test/falcon/models/falcon7b/modelling_RW_original.py diff --git a/pybuda/test/falcon/models/falcon7b/pytorch_model.bin.index.json b/forge/test/falcon/models/falcon7b/pytorch_model.bin.index.json similarity index 100% rename from pybuda/test/falcon/models/falcon7b/pytorch_model.bin.index.json rename to forge/test/falcon/models/falcon7b/pytorch_model.bin.index.json diff --git a/pybuda/test/falcon/models/falcon7b/special_tokens_map.json b/forge/test/falcon/models/falcon7b/special_tokens_map.json similarity index 100% rename from pybuda/test/falcon/models/falcon7b/special_tokens_map.json rename to forge/test/falcon/models/falcon7b/special_tokens_map.json diff --git a/pybuda/test/falcon/models/falcon7b/tokenizer_config.json 
b/forge/test/falcon/models/falcon7b/tokenizer_config.json similarity index 100% rename from pybuda/test/falcon/models/falcon7b/tokenizer_config.json rename to forge/test/falcon/models/falcon7b/tokenizer_config.json diff --git a/pybuda/test/falcon/models/falcon7b/tt_modeling_RW.py b/forge/test/falcon/models/falcon7b/tt_modeling_RW.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/tt_modeling_RW.py rename to forge/test/falcon/models/falcon7b/tt_modeling_RW.py diff --git a/pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad.py b/forge/test/falcon/models/falcon7b/tt_modeling_RW_pad.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad.py rename to forge/test/falcon/models/falcon7b/tt_modeling_RW_pad.py diff --git a/pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_masked_odkv.py b/forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_masked_odkv.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_masked_odkv.py rename to forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_masked_odkv.py diff --git a/pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv.py b/forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv.py rename to forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv.py diff --git a/pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv_conc.py b/forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv_conc.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv_conc.py rename to forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_odkv_conc.py diff --git a/pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_split.py b/forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_split.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_split.py rename to forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_split.py diff --git a/pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_split_cache.py b/forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_split_cache.py similarity index 100% rename from pybuda/test/falcon/models/falcon7b/tt_modeling_RW_pad_split_cache.py rename to forge/test/falcon/models/falcon7b/tt_modeling_RW_pad_split_cache.py diff --git a/pybuda/test/falcon/pybudify.py b/forge/test/falcon/pybudify.py similarity index 63% rename from pybuda/test/falcon/pybudify.py rename to forge/test/falcon/pybudify.py index dcb62d90f..bee3d88cf 100644 --- a/pybuda/test/falcon/pybudify.py +++ b/forge/test/falcon/pybudify.py @@ -25,76 +25,76 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f os.environ["LOGGER_LEVEL"] = log_level os.environ["LOGURU_LEVEL"] = log_level - # pybuda workarounds + # forge workarounds os.environ["GOLDEN_WORMHOLE_B0"] = "1" # golden should always simulate a B0 as that's all we use now - os.environ["PYBUDA_CONVERT_PARAMS_TO_TVM"] = "0" # faster compile times... why would this ever be 1? + os.environ["FORGE_CONVERT_PARAMS_TO_TVM"] = "0" # faster compile times... why would this ever be 1? os.environ["TT_BACKEND_TIMEOUT"] = "0" # default is too aggressive for large models? 
# os.environ["ENABLE_ETH_SERIALIZATON"] = "1" - # os.environ["PYBUDA_ENABLE_BROADCAST_SPLITTING"] = "1" - # os.environ["PYBUDA_DISABLE_FORK_JOIN_BUF"] = "1" - # os.environ["PYBUDA_DRAM_PICK_CAPACITY"] = "1" + # os.environ["FORGE_ENABLE_BROADCAST_SPLITTING"] = "1" + # os.environ["FORGE_DISABLE_FORK_JOIN_BUF"] = "1" + # os.environ["FORGE_DRAM_PICK_CAPACITY"] = "1" # os.environ["WHA0_DISABLE_RELAY_BUFS"] = "1" - # os.environ["PYBUDA_FUSE_STOP_ON_RECIPROCAL"] = "1" - # os.environ["PYBUDA_PLACER_SNAKE"] = "1" Not what we want for dual chip placement - # os.environ["PYBUDA_DISABLE_INTERACTIVE_PLACER"] = "1" # Until interactive placer supports multi-chip placement overrides - # os.environ["PYBUDA_PLACER_SNAKE"] = "1" - # os.environ["PYBUDA_ETH_LINKS_NEBULA"] = "1" + # os.environ["FORGE_FUSE_STOP_ON_RECIPROCAL"] = "1" + # os.environ["FORGE_PLACER_SNAKE"] = "1" Not what we want for dual chip placement + # os.environ["FORGE_DISABLE_INTERACTIVE_PLACER"] = "1" # Until interactive placer supports multi-chip placement overrides + # os.environ["FORGE_PLACER_SNAKE"] = "1" + # os.environ["FORGE_ETH_LINKS_NEBULA"] = "1" - pybuda = self.pybuda = __import__('pybuda') # let us set log levels before importing pybuda + forge = self.forge = __import__('forge') # let us set log levels before importing forge if device == 'pytorch': pass else: - devtype = { 'golden' : pybuda.BackendType.Golden, - 'silicon': pybuda.BackendType.Silicon, + devtype = { 'golden' : forge.BackendType.Golden, + 'silicon': forge.BackendType.Silicon, }[device] - module = pybuda.PyTorchModule(netlist_name, self.bound_module) + module = forge.PyTorchModule(netlist_name, self.bound_module) if precision == 'fp32': - fallback = pybuda.DataFormat.Float32 + fallback = forge.DataFormat.Float32 elif precision == 'fp16': - fallback = pybuda.DataFormat.Float16 + fallback = forge.DataFormat.Float16 elif precision == 'bf16': - fallback = pybuda.DataFormat.Float16_b + fallback = forge.DataFormat.Float16_b elif precision == 'fp8': - fallback = pybuda.DataFormat.Bfp8 + fallback = forge.DataFormat.Bfp8 elif precision == 'fp8b': - fallback = pybuda.DataFormat.Bfp8_b + fallback = forge.DataFormat.Bfp8_b else: raise ValueError('Precision "%s" not implemented' % precision) if matmuls == 'bfp8': # Lower-precision: All matmuls inputs lowered to bfp8 - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex="matmul_.*", - input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp8_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp8_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) elif matmuls == '2nd_mlp_bfp8': # Higher-precision: 2nd matmul in MLP lower weights to bfp8 # Layer 0 has different offset mm_offset = 80 - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex=f"matmul_{23}", - input_df={0: [pybuda.DataFormat.Bfp8_b, False], 1: [pybuda.DataFormat.Bfp8_b, False], 2: [pybuda.DataFormat.Bfp8_b, False]}) + input_df={0: [forge.DataFormat.Bfp8_b, False], 1: [forge.DataFormat.Bfp8_b, False], 2: [forge.DataFormat.Bfp8_b, False]}) for i in range(31): - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex=f"matmul_{104+i*mm_offset}", - input_df={0: [pybuda.DataFormat.Bfp8_b, False], 1: [pybuda.DataFormat.Bfp8_b, False], 2: [pybuda.DataFormat.Bfp8_b, False]}) + input_df={0: [forge.DataFormat.Bfp8_b, False], 1: [forge.DataFormat.Bfp8_b, False], 2: 
[forge.DataFormat.Bfp8_b, False]}) elif matmuls == 'weight_bfp8_act_bf16': # Experiment 4 - Weights-bfp8 [Current best alpaca-eval score for finetune decode demo]: All matmul weights BPF8. All activations BFP16 - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex="matmul_.*", - input_df={0: [pybuda.DataFormat.Bfp8_b, False], 1: [pybuda.DataFormat.Bfp8_b, False], 2: [pybuda.DataFormat.Bfp8_b, False]}) + input_df={0: [forge.DataFormat.Bfp8_b, False], 1: [forge.DataFormat.Bfp8_b, False], 2: [forge.DataFormat.Bfp8_b, False]}) else: raise ValueError('Matmul precision "%s" not implemented' % matmuls) if decode_mode: # Required for decode or we get invalid DF error. Important: DO not set intermed, acc_df or we hang on prefill. - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="splice", - output_df=pybuda.DataFormat.Float16_b, - input_df={0: [pybuda.DataFormat.Float16_b, True], 1: [pybuda.DataFormat.Float16_b, True], 2: [pybuda.DataFormat.Float16_b, True]}) + output_df=forge.DataFormat.Float16_b, + input_df={0: [forge.DataFormat.Float16_b, True], 1: [forge.DataFormat.Float16_b, True], 2: [forge.DataFormat.Float16_b, True]}) if lora: os.environ['TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE'] = "147456" @@ -106,43 +106,43 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f q_offset = 70 h4h_offset = 70 for layer_num in range(len(self.bound_module.layers)): - pybuda.config.insert_fracture_group([(f"matmul_{26+layer_num*q_offset}", pybuda.k_dim, 2)]) - pybuda.config.insert_fracture_group([(f"matmul_{23+layer_num*h4h_offset}", pybuda.k_dim, 4)]) + forge.config.insert_fracture_group([(f"matmul_{26+layer_num*q_offset}", forge.k_dim, 2)]) + forge.config.insert_fracture_group([(f"matmul_{23+layer_num*h4h_offset}", forge.k_dim, 4)]) if padded_fracture: offset = 73 factor = 2 for layer_num in range(len(self.bound_module.layers)): - pybuda.config.insert_fracture_group([(f"matmul_{18+layer_num*offset}", -1, factor), (f"matmul_{23+layer_num*offset}", pybuda.k_dim, factor)]) + forge.config.insert_fracture_group([(f"matmul_{18+layer_num*offset}", -1, factor), (f"matmul_{23+layer_num*offset}", forge.k_dim, factor)]) if layer_num > 0 and layer_num < len(self.bound_module.layers)-1: - pybuda.set_epoch_break(f'multiply_{0+layer_num*offset}') + forge.set_epoch_break(f'multiply_{0+layer_num*offset}') # 4 bit precision for fracturing required otherwise DRAM error occurs for 32 layers - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex="fractured_1_matmul_.*", - input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp4_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) - pybuda.config.configure_mixed_precision( + input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp4_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) + forge.config.configure_mixed_precision( name_regex="fractured_0_matmul_.*", - input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp4_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp4_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) if padded_fracture_p: offset = 73 for layer_num in range(len(self.bound_module.layers)): if layer_num < len(self.bound_module.layers)//2: - pybuda.config.insert_fracture_group([(f"matmul_{18+layer_num*offset}", -1, 2), (f"matmul_{23+layer_num*offset}", pybuda.k_dim, 2)]) + 
forge.config.insert_fracture_group([(f"matmul_{18+layer_num*offset}", -1, 2), (f"matmul_{23+layer_num*offset}", forge.k_dim, 2)]) if layer_num > 0 and layer_num < len(self.bound_module.layers)-1: - pybuda.set_epoch_break(f'multiply_{0+layer_num*offset}') + forge.set_epoch_break(f'multiply_{0+layer_num*offset}') # Running padded fracture full (Dragon's exploration) if padded_fracture_full: # 4 bit precision for fracturing required otherwise DRAM error occurs for 32 layers # TODO change this to only affect MLP fractured matmuls and not attention - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex="fractured_*._matmul_.*", - input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp4_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp4_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) # Attn fracture if fracture_attn > 0: @@ -152,7 +152,7 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f for layer_num in range(len(self.bound_module.layers)): # Since we move around the users dimension, full attn fracturing won't be possible in a single group - pybuda.config.insert_fracture_group([ + forge.config.insert_fracture_group([ # Q # (f"matmul_{26+layer_num*q_offset}", -2, attn_factor), # K @@ -184,8 +184,8 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f ]] exits = [f'matmul_{68 + layer_num*q_offset}'] # exits = [f'add_{70 + layer_num*q_offset}'] - attn_constr = self.add_sched(pybuda, entries, exits, ops, attn_factor, attn_constr) - pybuda.config.add_schedule_constraint(attn_constr) + attn_constr = self.add_sched(forge, entries, exits, ops, attn_factor, attn_constr) + forge.config.add_schedule_constraint(attn_constr) # MLP fracture if fracture_mlp > 0: @@ -197,49 +197,49 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f print(f"MLP Fracture: Layer: {layer_num}, matmul offset = {18+mlp_offset} & {23+mlp_offset}") # Manual scheduling to support MLP fracture 2-chip on full size falcon-7B - pybuda.set_epoch_break(f'softmax_{55+mlp_offset}.dc.reduce_max.0') - pybuda.config.override_op_placement(f"concatenate_{48+mlp_offset}.dc.concatenate.2", chip_id=1, temporal_epoch_break=True) + forge.set_epoch_break(f'softmax_{55+mlp_offset}.dc.reduce_max.0') + forge.config.override_op_placement(f"concatenate_{48+mlp_offset}.dc.concatenate.2", chip_id=1, temporal_epoch_break=True) # MLP fracture - pybuda.config.insert_fracture_group([ + forge.config.insert_fracture_group([ # Can't do fracturing of weights due to transpose # mlp.dense_h_to_4h (f"matmul_{18+mlp_offset}", -1, mlp_factor), # mlp.dense_4h_to_h - (f"matmul_{23+mlp_offset}", pybuda.k_dim, mlp_factor), + (f"matmul_{23+mlp_offset}", forge.k_dim, mlp_factor), ]) entries = [f'multiply_{15 + mlp_offset}'] ops = [[f'matmul_{18 + mlp_offset}', f'matmul_{23 + mlp_offset}']] exits = [f'add_{70 + mlp_offset}'] - mlp_constr = self.add_sched(pybuda, entries, exits, ops, mlp_factor, mlp_constr) - pybuda.config.add_schedule_constraint(mlp_constr) + mlp_constr = self.add_sched(forge, entries, exits, ops, mlp_factor, mlp_constr) + forge.config.add_schedule_constraint(mlp_constr) perf_level = { None : None, 'none' : None, - 'light' : pybuda.PerfTraceLevel.LIGHT, - 'verbose': pybuda.PerfTraceLevel.VERBOSE }[perf] - pybuda.set_configuration_options(default_df_override=fallback, - accumulate_df=pybuda.DataFormat.Float32, + 'light' : 
forge.PerfTraceLevel.LIGHT, + 'verbose': forge.PerfTraceLevel.VERBOSE }[perf] + forge.set_configuration_options(default_df_override=fallback, + accumulate_df=forge.DataFormat.Float32, amp_level=amp_level, enable_auto_fusing=fuse, performance_trace=perf_level, backend_opt_level=4, enable_auto_transposing_placement=True ) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.input_queues_on_host = host_queues if self.masked_odkv: - # pybuda.config.override_t_stream_dir(f"concatenate_50.dc.sparse_matmul.4.lc2", "c") - # pybuda.config.override_t_stream_dir(f"concatenate_67.dc.sparse_matmul.4.lc2", "c") + # forge.config.override_t_stream_dir(f"concatenate_50.dc.sparse_matmul.4.lc2", "c") + # forge.config.override_t_stream_dir(f"concatenate_67.dc.sparse_matmul.4.lc2", "c") - # pybuda.config.set_epoch_break("transpose_58.dc.sparse_matmul.4.lc2") - # pybuda.config.set_epoch_break("matmul_64") + # forge.config.set_epoch_break("transpose_58.dc.sparse_matmul.4.lc2") + # forge.config.set_epoch_break("matmul_64") - # pybuda.config.add_schedule_constraint(['transpose_58.dc.sparse_matmul.4.lc2', 'add_59']) + # forge.config.add_schedule_constraint(['transpose_58.dc.sparse_matmul.4.lc2', 'add_59']) names = [] if num_layers == 1: names_start_idx = 56 @@ -262,22 +262,22 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f names_dict = { name: (i+1) for i, name in enumerate(names) } - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.loopback_outputs = names_dict - # pybuda.config.insert_fracture_group([(f"concatenate_50", 2, 2)]) - # pybuda.config.insert_fracture_group([(f"concatenate_67", 2, 2)]) + # forge.config.insert_fracture_group([(f"concatenate_50", 2, 2)]) + # forge.config.insert_fracture_group([(f"concatenate_67", 2, 2)]) - # pybuda.config.configure_mixed_precision( + # forge.config.configure_mixed_precision( # name_regex="concatenate_50.dc.sparse_matmul.4.lc2", - # input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp8_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + # input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp8_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) - # pybuda.config.configure_mixed_precision( + # forge.config.configure_mixed_precision( # name_regex="concatenate_50.dc.sparse_matmul.4.lc2", - # input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp8_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + # input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp8_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) elif self.odkv: - # pybuda.config.override_t_stream_dir(f"concatenate_50.dc.sparse_matmul.4.lc2", "c") - # pybuda.config.override_t_stream_dir(f"concatenate_67.dc.sparse_matmul.4.lc2", "c") + # forge.config.override_t_stream_dir(f"concatenate_50.dc.sparse_matmul.4.lc2", "c") + # forge.config.override_t_stream_dir(f"concatenate_67.dc.sparse_matmul.4.lc2", "c") names = [] if num_layers == 1: names_start_idx = 54 @@ -293,30 +293,30 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f print(f'names" {names}') names_dict = { name: (i+1) for i, name in enumerate(names) } - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() - # pybuda.config.insert_fracture_group([(f"concatenate_50", 2, 2)]) - # 
pybuda.config.insert_fracture_group([(f"concatenate_67", 2, 2)]) + # forge.config.insert_fracture_group([(f"concatenate_50", 2, 2)]) + # forge.config.insert_fracture_group([(f"concatenate_67", 2, 2)]) - # pybuda.config.configure_mixed_precision( + # forge.config.configure_mixed_precision( # name_regex="concatenate_50.dc.sparse_matmul.4.lc2", - # input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp8_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + # input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp8_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) - # pybuda.config.configure_mixed_precision( + # forge.config.configure_mixed_precision( # name_regex="concatenate_50.dc.sparse_matmul.4.lc2", - # input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp8_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + # input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp8_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) compiler_cfg.loopback_outputs = names_dict - pybuda_arch = { 'grayskull': pybuda.BackendDevice.Grayskull, - 'wormhole_b0': pybuda.BackendDevice.Wormhole_B0 }[arch] + forge_arch = { 'grayskull': forge.BackendDevice.Grayskull, + 'wormhole_b0': forge.BackendDevice.Wormhole_B0 }[arch] if tti_load is not None: - self.tt0 = pybuda.TTDevice.load_image(img_path=tti_load) + self.tt0 = forge.TTDevice.load_image(img_path=tti_load) else: - self.tt0 = pybuda.TTDevice('tt0', module=module, + self.tt0 = forge.TTDevice('tt0', module=module, fp32_fallback=fallback, - arch=pybuda_arch, + arch=forge_arch, devtype=devtype, chip_ids=list(range(num_chips))) @@ -324,10 +324,10 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f self.output_q = mp.Queue() if verify: - self.verify_cfg = pybuda.VerifyConfig(verify_all=True, + self.verify_cfg = forge.VerifyConfig(verify_all=True, verify_last=True, - devtype=pybuda.BackendType.Silicon, - arch=pybuda_arch,) + devtype=forge.BackendType.Silicon, + arch=forge_arch,) else: self.verify_cfg = None @@ -336,20 +336,20 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f def run_async(self, *args): - """ Send inputs to pybuda and run forward pass asynchronously. + """ Send inputs to forge and run forward pass asynchronously. Outputs can be read from self.output_q. 
""" - assert self.device != 'pytorch', "run_async() is only supported for pybuda devices" + assert self.device != 'pytorch', "run_async() is only supported for forge devices" if self.odkv or self.masked_odkv: self.ensure_initialized(*args) - # print(f'pybuda pushing data') - self.pybuda.sync() + # print(f'forge pushing data') + self.forge.sync() in_args = list(args[0]) + list(args[1]) + list(args[2]) + list(args[3]) self.tt0.push_to_inputs(in_args) # don't pass in kv over and over again - self.pybuda.run_generate(input_count=1, write_index=0) #, _sequential=True) + self.forge.run_generate(input_count=1, write_index=0) #, _sequential=True) else: self.ensure_initialized(*args) self.tt0.push_to_inputs(*args) - self.pybuda.run_forward(input_count=1) + self.forge.run_forward(input_count=1) def ensure_initialized(self, *args): @@ -363,7 +363,7 @@ def ensure_initialized(self, *args): ) print(f'Saved image to {self.tti_save}') sys.exit(0) - self.pybuda.initialize_pipeline(training=False, + self.forge.initialize_pipeline(training=False, sample_inputs=args, output_queue=self.output_q, microbatch_count=self.micro_batch_size, @@ -386,20 +386,20 @@ def __call__(self, *args, **kwargs): if self.masked_odkv: # print('run_generate1') - self.pybuda.sync() + self.forge.sync() in_args = list(args[0]) + list(args[1]) + list(args[2]) + list(args[3]) + list(args[4]) + list(args[5]) self.tt0.push_to_inputs(in_args) # don't pass in kv over and over again - self.pybuda.run_generate(input_count=1, write_index=0, _sequential=True) + self.forge.run_generate(input_count=1, write_index=0, _sequential=True) elif self.odkv: - self.pybuda.sync() + self.forge.sync() in_args = list(args[0]) + list(args[1]) + list(args[2]) + list(args[3]) self.tt0.push_to_inputs(in_args) # don't pass in kv over and over again - self.pybuda.run_generate(input_count=1, write_index=0, _sequential=True) + self.forge.run_generate(input_count=1, write_index=0, _sequential=True) else: self.tt0.push_to_inputs(*args) - self.pybuda.run_forward(input_count=1, _sequential=True) + self.forge.run_forward(input_count=1, _sequential=True) ys = self.output_q.get() - outputs = tuple([ y.value().float() for y in ys if isinstance(y, self.pybuda.tensor.TensorFromPytorch)]) + outputs = tuple([ y.value().float() for y in ys if isinstance(y, self.forge.tensor.TensorFromPytorch)]) if len(outputs) == 1: outputs = outputs[0] if self.verify_cfg: @@ -414,7 +414,7 @@ def __call__(self, *args, **kwargs): result = outputs return result - def add_sched(self, pybuda, entries, exits, ops, factor, constr): + def add_sched(self, forge, entries, exits, ops, factor, constr): for elem in entries: constr.append(elem) for lst in ops: @@ -424,14 +424,14 @@ def add_sched(self, pybuda, entries, exits, ops, factor, constr): if i == 0: if f == 0: print(f"[add_sched]: Override op temp. 
epoch: {fop}, chip {f}") - pybuda.config.override_op_placement(fop, chip_id=f, temporal_epoch_break=True) + forge.config.override_op_placement(fop, chip_id=f, temporal_epoch_break=True) else: print(f"[add_sched]: Override op spatial epoch: {fop}, chip {f}") - pybuda.config.override_op_placement(fop, chip_id=f, spatial_epoch_break=True) + forge.config.override_op_placement(fop, chip_id=f, spatial_epoch_break=True) constr.append(fop) # for elem in exits: # constr.append(elem) - # pybuda.config.override_op_placement(exits[0], temporal_epoch_break=True) + # forge.config.override_op_placement(exits[0], temporal_epoch_break=True) print(f"[add_sched] sched: {constr}") return constr diff --git a/pybuda/test/falcon/requirements.txt b/forge/test/falcon/requirements.txt similarity index 100% rename from pybuda/test/falcon/requirements.txt rename to forge/test/falcon/requirements.txt diff --git a/pybuda/test/falcon/tests/__init__.py b/forge/test/falcon/tests/__init__.py similarity index 100% rename from pybuda/test/falcon/tests/__init__.py rename to forge/test/falcon/tests/__init__.py diff --git a/pybuda/test/falcon/tests/falcon_modules/falcon.py b/forge/test/falcon/tests/falcon_modules/falcon.py similarity index 88% rename from pybuda/test/falcon/tests/falcon_modules/falcon.py rename to forge/test/falcon/tests/falcon_modules/falcon.py index f33c51144..a863cc4ba 100644 --- a/pybuda/test/falcon/tests/falcon_modules/falcon.py +++ b/forge/test/falcon/tests/falcon_modules/falcon.py @@ -94,7 +94,7 @@ def run_demo_sync_finetune(model, tokenizer, dataloader, num_layers, sequence_le temperature = 1.0 batch_idx = 0 # dataset batch index - generator_name = f"pybuda-greedy-alpaca_eval-ci" + generator_name = f"forge-greedy-alpaca_eval-ci" sequence_nlls = [] # list of negative log likelihood @@ -145,7 +145,7 @@ def run_demo_sync_finetune(model, tokenizer, dataloader, num_layers, sequence_le assert min(prompt_token_counts) > 0, "Empty prompts for unconditional generation not currently supported" assert batch_size == 1 - # tensor of right size and shape needed for pybuda to compile. initialise kv with zeros + # tensor of right size and shape needed for forge to compile. initialise kv with zeros # value in tensor doesn't matter. we're going to prefill this in anyways past_key_values = tuple([(torch.zeros((batch_size, 1, seqlen, 64 * user_rows)), # [batch, 1, seqlen, head_sim * num_users] torch.zeros((batch_size, 1, seqlen, 64 * user_rows))) # [batch, 1, seqlen, head_sim * num_users] @@ -310,7 +310,7 @@ def run_demo_sync_masked_odkv(model, tokenizer, tokenized, num_layers, sequence_ assert min(prompt_token_counts) > 0, "Empty prompts for unconditional generation not currently supported" assert batch_size == 1, "Pretty sure this code assumes batch size == 1, FIXME" - # tensor of right size and shape needed for pybuda to compile. initialise kv with zeros + # tensor of right size and shape needed for forge to compile. initialise kv with zeros # value in tensor doesn't matter. 
we're going to prefill this in anyways # TODO: replace constants 32 and 64 past_key_values = tuple([(torch.zeros((batch_size, 1, seqlen, 64 * user_rows)), @@ -410,7 +410,7 @@ def run_demo_sync_masked_odkv(model, tokenizer, tokenized, num_layers, sequence_ model_seed=42, perf='none', sequence_length=128, - pybuda_device='silicon', + forge_device='silicon', num_epochs=4, max_num_steps=0, num_layers=32, @@ -430,11 +430,11 @@ def run_demo_sync_masked_odkv(model, tokenizer, tokenized, num_layers, sequence_ checkpoint_at_steps=None, languages='en,es,fr', prefiltered_dataset_dir='/proj_sw/large-model-cache/falcon7b/datasets', - activation_cache_file='/proj_sw/large-model-cache/falcon7b/cached_activations/cached_activations_bfp8b_quantW_fp16b_compute_pybuda.pth', + activation_cache_file='/proj_sw/large-model-cache/falcon7b/cached_activations/cached_activations_bfp8b_quantW_fp16b_compute_forge.pth', num_cache_layers=16, wandb_log_steps=20, wandb_project='falcon-tuning', - pybuda_log_level='ERROR', + forge_log_level='ERROR', loss_on_device=False, optimizer_on_host=False, save_checkpoints_at_epoch_end=False, @@ -453,8 +453,8 @@ def run_finetune(cfg_file): parser.add_argument('--config', type=str, help='Path of a pre-defined training configuration') parser.add_argument('--with-pytorch', action='store_true', help='Train a model using pytorch') parser.add_argument('--with-reference', action='store_true', help='Compare trained models to saved checkpoint steps in the format golden_step_*.pt') - parser.add_argument('--with-pybuda', action='store_true', help='Train a model using pybuda (use --device to select golden or silicon)') - parser.add_argument('--pybuda-device', choices=['golden', 'silicon'], help='PyBuda argument only, run on golden/silicon') + parser.add_argument('--with-forge', action='store_true', help='Train a model using forge (use --device to select golden or silicon)') + parser.add_argument('--forge-device', choices=['golden', 'silicon'], help='Forge argument only, run on golden/silicon') parser.add_argument('--num-chips', type=int, help='Number of chips to run on') # Training configurations @@ -500,7 +500,7 @@ def run_finetune(cfg_file): parser.add_argument('--hf-cache', type=str, default='/proj_sw/user_dev/hf_data', help='Cache directory for huggingface') # Logging, checkpointing - parser.add_argument('--verify', action='store_true', help='Enable pybuda verification code to compare tensors of intermediate pybuda passes') + parser.add_argument('--verify', action='store_true', help='Enable forge verification code to compare tensors of intermediate forge passes') parser.add_argument('--save-final', action='store_true', help='Save final state_dicts for each trainer in ".pt"') parser.add_argument('--checkpoint-at-steps', nargs='*', type=int, help='Save state_dicts for each trainer in "_checkpoint_*.pt" for each step index contained in this comma-separated list. E.g. 
"100,200" for checkoints at step 100 and 200') parser.add_argument('--checkpoint-every-steps', type=int, help='Checkpoint every X steps') @@ -521,15 +521,15 @@ def run_finetune(cfg_file): parser.add_argument('--tti-save', type=str, help='Path to save a TTImage to') - # PyBuda and TTDevice specific options + # Forge and TTDevice specific options parser.add_argument('--optimizer-on-host', action='store_true', help='Run optimizer on host') parser.add_argument('--loss-on-device', action='store_true', help='Run loss on device') - parser.add_argument('--pybuda-log-level', choices=['TRACE', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='PyBuda log level') + parser.add_argument('--forge-log-level', choices=['TRACE', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Forge log level') parser.add_argument('--tt-build-dir', type=str, help='Use this custom TT build directory') - parser.add_argument('--perf', choices=['none', 'light', 'verbose'], help='PyBuda performance trace setting') + parser.add_argument('--perf', choices=['none', 'light', 'verbose'], help='Forge performance trace setting') parser.add_argument('--netlist-name', type=str, help='Netlist name') - # PyBuda specific placement overwrites + # Forge specific placement overwrites parser.add_argument('--placement-overwrites', action='store_true', help='Apply placement overwites for general settings') parser.add_argument('--placement-overwrites-seqlen', action='store_true', help='Apply placement overwrites specific to sequence lengths') @@ -542,7 +542,7 @@ def run_finetune(cfg_file): parser.add_argument('--ci', action='store_true', help='Run in CI mode') parser.add_argument('--ci-exit-zero', action='store_true', help='Run in CI mode but only warn if CI target metric is not met and exit with code 0. 
Used for debugging.') parser.add_argument('--ci-target', choices=['loss', 'grad_norm'], default='loss', help='CI target metric') - parser.add_argument('--ci-tolerance', type=float, help='Assert if CI target metric (loss/grad_norm) difference between pybuda and pytorch is not within this tolerance in CI mode') + parser.add_argument('--ci-tolerance', type=float, help='Assert if CI target metric (loss/grad_norm) difference between forge and pytorch is not within this tolerance in CI mode') parser.add_argument('--ci-abs-upper-limit', type=float, help='Assert if CI target metric (loss/grad_norm) not below this value in CI mode') # Debugging @@ -573,7 +573,7 @@ def run_finetune(cfg_file): args.data_seed=args.data_seed if args.data_seed is not None else default_args.data_seed args.model_seed=args.model_seed if args.model_seed is not None else default_args.model_seed args.sequence_length=args.sequence_length if args.sequence_length is not None else default_args.sequence_length - args.pybuda_device=args.pybuda_device if args.pybuda_device is not None else default_args.pybuda_device + args.forge_device=args.forge_device if args.forge_device is not None else default_args.forge_device args.num_epochs=args.num_epochs if args.num_epochs is not None else default_args.num_epochs args.max_num_steps=args.max_num_steps if args.max_num_steps is not None else default_args.max_num_steps args.num_layers=args.num_layers if args.num_layers is not None else default_args.num_layers @@ -597,7 +597,7 @@ def run_finetune(cfg_file): args.num_cache_layers=args.num_cache_layers if args.num_cache_layers is not None else default_args.num_cache_layers args.wandb_log_steps=args.wandb_log_steps if args.wandb_log_steps is not None else default_args.wandb_log_steps args.wandb_project=args.wandb_project if args.wandb_project is not None else default_args.wandb_project - args.pybuda_log_level=args.pybuda_log_level if args.pybuda_log_level is not None else default_args.pybuda_log_level + args.forge_log_level=args.forge_log_level if args.forge_log_level is not None else default_args.forge_log_level args.optimizer_on_host=args.optimizer_on_host if args.optimizer_on_host else default_args.optimizer_on_host args.loss_on_device=args.loss_on_device if args.loss_on_device else default_args.loss_on_device args.save_checkpoints_at_epoch_end=args.save_checkpoints_at_epoch_end if args.save_checkpoints_at_epoch_end else default_args.save_checkpoints_at_epoch_end @@ -606,9 +606,9 @@ def run_finetune(cfg_file): from getpass import getuser - # Ensure a non-weka PYBUDA_BUILD_DIR for tt_build files unless specified - pybuda_build_dir = os.environ.setdefault("PYBUDA_BUILD_DIR", f"/tmp/{getuser()}/tt_build") - print(f"PYBUDA_BUILD_DIR set to {pybuda_build_dir}") + # Ensure a non-weka FORGE_BUILD_DIR for tt_build files unless specified + forge_build_dir = os.environ.setdefault("FORGE_BUILD_DIR", f"/tmp/{getuser()}/tt_build") + print(f"FORGE_BUILD_DIR set to {forge_build_dir}") # Autogenerated arguments lora_config_name = get_lora_short_name_from_modules(args.lora_modules) @@ -620,11 +620,11 @@ def run_finetune(cfg_file): print(f"Output location: {experiment_path}") if not args.netlist_name: - assert os.getenv('PYBUDA_NETLIST_OVERRIDE') is None, 'PYBUDA_NETLIST_OVERRIDE is set, but --netlist-name is not specified, you probably wanted to use "transformer"' + assert os.getenv('FORGE_NETLIST_OVERRIDE') is None, 'FORGE_NETLIST_OVERRIDE is set, but --netlist-name is not specified, you probably wanted to use "transformer"' args.netlist_name = 
f'seqlen-{args.sequence_length}_lora-lay-{args.num_lora_layers}_lora-mod-{lora_config_name}_r-{args.rank}'.replace('-', '_') if args.wandb_log_name is None: - with_name = {(True, True): 'both', (True, False): 'pytorch', (False, True): 'pybuda'}[(args.with_pytorch, args.with_pybuda)] + with_name = {(True, True): 'both', (True, False): 'pytorch', (False, True): 'forge'}[(args.with_pytorch, args.with_forge)] args.wandb_log_name = f'{getuser()}_with-{with_name}_{args.experiment_name}' print('\n\n' + '_'*20) @@ -665,7 +665,7 @@ def run_finetune(cfg_file): assert not (args.ci_tolerance and args.ci_abs_upper_limit), 'Cannot specify both --ci-tolerance and --ci-abs-upper-limit' if args.ci_tolerance is not None: - assert args.with_pytorch and args.with_pybuda, 'CI mode with a target tolerance requires both --with-pytorch and --with-pybuda' + assert args.with_pytorch and args.with_forge, 'CI mode with a target tolerance requires both --with-pytorch and --with-forge' if args.load_in_8bit and args.version != 'padded_split': assert args.version == 'padded_split', "Argument load-in-8bit only supported for version=padded_split." @@ -675,7 +675,7 @@ def run_finetune(cfg_file): assert (args.num_lora_layers <= args.num_layers), "Number of LoRA layers cannot be larger than number of layers" - assert args.with_pytorch or args.with_pybuda, "Need to specify at least one trainer to run" + assert args.with_pytorch or args.with_forge, "Need to specify at least one trainer to run" if args.num_samples <= 0: # use full dataset if we specify <= 0 @@ -708,9 +708,9 @@ def set_model_seed(): if args.with_pytorch: set_model_seed() trainers.append(PyTorchTrainer(args)) - if args.with_pybuda: + if args.with_forge: set_model_seed() - trainers.append(PyBudaTrainer(args)) + trainers.append(ForgeTrainer(args)) if args.with_reference: set_model_seed() trainers.append(ReferenceTrainer('finetune_steps/1decoder/falcon_ref')) @@ -781,28 +781,28 @@ def set_model_seed(): trainer.save(checkpoint_name) if args.data_debug: - pytorch_loss, pybuda_loss = losses[0], losses[1] - is_loss_difference = abs(pytorch_loss - pybuda_loss) / pytorch_loss > 0.05 - pytorch_grad_norm, pybuda_grad_norm = trainers[0].grad_norm, trainers[1].grad_norm - is_grad_norm_difference = abs(pytorch_grad_norm - pybuda_grad_norm) / pytorch_grad_norm > 0.05 + pytorch_loss, forge_loss = losses[0], losses[1] + is_loss_difference = abs(pytorch_loss - forge_loss) / pytorch_loss > 0.05 + pytorch_grad_norm, forge_grad_norm = trainers[0].grad_norm, trainers[1].grad_norm + is_grad_norm_difference = abs(pytorch_grad_norm - forge_grad_norm) / pytorch_grad_norm > 0.05 if is_loss_difference: - print(f"WARNING: Losses more than 5% apart: pytorch_loss={pytorch_loss}, pybuda_loss={pybuda_loss}") + print(f"WARNING: Losses more than 5% apart: pytorch_loss={pytorch_loss}, forge_loss={forge_loss}") if is_grad_norm_difference: - print(f"WARNING: Gradient norms more than 5% apart: pytorch_grad={pytorch_grad_norm}, pybuda_grad={pybuda_grad_norm}") + print(f"WARNING: Gradient norms more than 5% apart: pytorch_grad={pytorch_grad_norm}, forge_grad={forge_grad_norm}") if is_loss_difference or is_grad_norm_difference: for sample_idx, sample in enumerate(inputs['input_ids']): input_text = tokenizer.decode(sample.tolist()) # , skip_special_tokens=True print(f"Input text sample {sample_idx}: {input_text}") if args.ci_tolerance: if args.ci_target == 'loss': - pytorch_val, pybuda_val = losses[0], losses[1] + pytorch_val, forge_val = losses[0], losses[1] elif args.ci_target == 'grad_norm': - 
pytorch_val, pybuda_val = trainers[0].grad_norm, trainers[1].grad_norm + pytorch_val, forge_val = trainers[0].grad_norm, trainers[1].grad_norm else: assert False, "Target metric not supported" - diff = abs(pytorch_val - pybuda_val) + diff = abs(pytorch_val - forge_val) if diff > args.ci_tolerance: - error_msg = f"CI FAIL: Metric {args.ci_target} more than ci_tolerance ({args.ci_tolerance}) apart: pytorch={pytorch_val}, pybuda={pybuda_val}" + error_msg = f"CI FAIL: Metric {args.ci_target} more than ci_tolerance ({args.ci_tolerance}) apart: pytorch={pytorch_val}, forge={forge_val}" if args.ci_exit_zero: print(error_msg) @@ -1122,38 +1122,38 @@ def evaluate(self, eval_dataloader, mmlu_eval_dataloader=None): self.model.train() #Turning on training mode return metrics -class PyBudaTrainer(): +class ForgeTrainer(): def __init__(self, args): - self.name = 'pybuda' + self.name = 'forge' self.args = args try: - netlist_override_file = os.environ['PYBUDA_NETLIST_OVERRIDE'] - print("WARNING: PYBUDA_NETLIST_OVERRIDE is set, overriding netlist with %s" % netlist_override_file) + netlist_override_file = os.environ['FORGE_NETLIST_OVERRIDE'] + print("WARNING: FORGE_NETLIST_OVERRIDE is set, overriding netlist with %s" % netlist_override_file) except KeyError: pass - os.environ["LOGGER_LEVEL"] = args.pybuda_log_level - os.environ["LOGURU_LEVEL"] = args.pybuda_log_level - # pybuda workarounds + os.environ["LOGGER_LEVEL"] = args.forge_log_level + os.environ["LOGURU_LEVEL"] = args.forge_log_level + # forge workarounds os.environ["GOLDEN_WORMHOLE_B0"] = "1" os.environ["WHA0_DISABLE_RELAY_BUFS"] = "1" - os.environ["PYBUDA_CONVERT_PARAMS_TO_TVM"] = "0" + os.environ["FORGE_CONVERT_PARAMS_TO_TVM"] = "0" os.environ["TT_BACKEND_TIMEOUT"] = "0" # lora specific os.environ['TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE'] = "147456" # for AdamW: "294912" invalid grid # "245760" oom # "196608" oom # "147456" oom - os.environ['PYBUDA_DRAM_FLIP_FLOP'] = '1' + os.environ['FORGE_DRAM_FLIP_FLOP'] = '1' - # os.environ['PYBUDA_DRAM_PICK_CAPACITY'] = '1' # Don't need this anymore and better for perf without + # os.environ['FORGE_DRAM_PICK_CAPACITY'] = '1' # Don't need this anymore and better for perf without # # Testing for better handling out of valid grid issues with seqlen=1024 - # os.environ["PYBUDA_PADDING_PASS"] = "1" - # os.environ["PYBUDA_PADDING_PASS_MATMUL"] = "1" + # os.environ["FORGE_PADDING_PASS"] = "1" + # os.environ["FORGE_PADDING_PASS_MATMUL"] = "1" - pybuda = self.pybuda = __import__('pybuda') # let us set log levels before importing pybuda + forge = self.forge = __import__('forge') # let us set log levels before importing forge # Model @@ -1174,7 +1174,7 @@ def __init__(self, args): if args.optimizer_on_host: self.optimizer = torch.optim.AdamW(self.model.transformer.parameters(), lr=args.learning_rate) else: - self.optimizer = pybuda.optimizers.AdamW(learning_rate=args.learning_rate, device_params=True) + self.optimizer = forge.optimizers.AdamW(learning_rate=args.learning_rate, device_params=True) if args.optimizer_load_state_dict: checkpoint = torch.load(args.optimizer_load_state_dict) @@ -1192,48 +1192,48 @@ def __init__(self, args): self.log_histograms = args.log_histograms self.log_opt = args.log_opt - device = args.pybuda_device + device = args.forge_device if args.tt_build_dir: - backend_output_dir = os.path.join(pybuda_build_dir, args.tt_build_dir) - os.environ["PYBUDA_BUILD_DIR"] = backend_output_dir + backend_output_dir = os.path.join(forge_build_dir, args.tt_build_dir) + os.environ["FORGE_BUILD_DIR"] = 
backend_output_dir print("Setting backend output dir to %s" % backend_output_dir) - pybuda.set_configuration_options(backend_output_dir=backend_output_dir) + forge.set_configuration_options(backend_output_dir=backend_output_dir) if device == 'golden': - devtype = pybuda.BackendType.Golden + devtype = forge.BackendType.Golden elif device == 'silicon': - devtype = pybuda.BackendType.Silicon + devtype = forge.BackendType.Silicon else: raise NotImplementedError('Unknown device "%s" is not supported' % device) - self.embeddings_module = pybuda.PyTorchModule("embeddings", self.model.before_decoders) - self.transformer_module = pybuda.PyTorchModule(args.netlist_name, self.model.transformer) - self.lm_head_module = pybuda.PyTorchModule("lm_head", self.model.after_decoders) - self.loss_module = pybuda.PyTorchModule("loss", self.model.loss_fct) + self.embeddings_module = forge.PyTorchModule("embeddings", self.model.before_decoders) + self.transformer_module = forge.PyTorchModule(args.netlist_name, self.model.transformer) + self.lm_head_module = forge.PyTorchModule("lm_head", self.model.after_decoders) + self.loss_module = forge.PyTorchModule("loss", self.model.loss_fct) if args.precision == 'fp32': - default_df_override = pybuda.DataFormat.Float32 + default_df_override = forge.DataFormat.Float32 elif args.precision == 'fp16': - default_df_override = pybuda.DataFormat.Float16 + default_df_override = forge.DataFormat.Float16 elif args.precision == 'bf16': - default_df_override = pybuda.DataFormat.Float16_b + default_df_override = forge.DataFormat.Float16_b elif args.precision == 'very-low-mp': - default_df_override = pybuda.DataFormat.Float16_b + default_df_override = forge.DataFormat.Float16_b elif args.precision == 'almost-low-mp': - default_df_override = pybuda.DataFormat.Float16_b + default_df_override = forge.DataFormat.Float16_b elif args.precision == 'low-mp': - default_df_override = pybuda.DataFormat.Float16_b + default_df_override = forge.DataFormat.Float16_b elif args.precision == 'high-mp': - default_df_override = pybuda.DataFormat.Float16_b + default_df_override = forge.DataFormat.Float16_b elif args.precision == 'debug': - default_df_override = pybuda.DataFormat.Float16_b + default_df_override = forge.DataFormat.Float16_b else: default_df_override = None num_layers = args.num_layers - args.num_cache_layers if args.load_from_activation_cache else args.num_layers - self.apply_data_formats(lora_data_format=pybuda.DataFormat.Float32, num_layers=num_layers, num_lora_layers=args.num_lora_layers, precision=args.precision) + self.apply_data_formats(lora_data_format=forge.DataFormat.Float32, num_layers=num_layers, num_lora_layers=args.num_lora_layers, precision=args.precision) self.apply_custom_overrides() if args.placement_overwrites: self.placement_overwrites(num_layers=num_layers, num_lora_layers=args.num_lora_layers) @@ -1244,30 +1244,30 @@ def __init__(self, args): perf_level = { None : None, 'none' : None, - 'light' : pybuda.PerfTraceLevel.LIGHT, - 'verbose': pybuda.PerfTraceLevel.VERBOSE }[args.perf] + 'light' : forge.PerfTraceLevel.LIGHT, + 'verbose': forge.PerfTraceLevel.VERBOSE }[args.perf] - pybuda.set_configuration_options( + forge.set_configuration_options( default_df_override=default_df_override, - accumulate_df=pybuda.DataFormat.Float32, + accumulate_df=forge.DataFormat.Float32, amp_level=0, enable_auto_fusing=False, performance_trace=perf_level, backend_opt_level=4, enable_auto_transposing_placement=True, - # 
backend_cluster_descriptor_path="/proj_sw/user_dev/jrock/pybuda-falcon-stable-avx/pybuda/third_party/budabackend/wormhole_2chip_cluster.yaml" if args.num_chips > 1 else None, + # backend_cluster_descriptor_path="/proj_sw/user_dev/jrock/forge-falcon-stable-avx/forge/third_party/budabackend/wormhole_2chip_cluster.yaml" if args.num_chips > 1 else None, ) - pybuda.config._get_global_compiler_config().use_interactive_placer = True + forge.config._get_global_compiler_config().use_interactive_placer = True - self.cpu0 = pybuda.CPUDevice("cpu0", module=self.embeddings_module) + self.cpu0 = forge.CPUDevice("cpu0", module=self.embeddings_module) start_time = time() if args.tti_load is not None: print(f"Load TTImage from : {args.tti_load}") - self.tt0 = pybuda.TTDevice.load_image(img_path=args.tti_load) + self.tt0 = forge.TTDevice.load_image(img_path=args.tti_load) else: - self.tt0 = pybuda.TTDevice('tt0', module=self.transformer_module, - arch=pybuda.BackendDevice.Wormhole_B0, + self.tt0 = forge.TTDevice('tt0', module=self.transformer_module, + arch=forge.BackendDevice.Wormhole_B0, devtype=devtype, chip_ids=list(range(args.num_chips)), optimizer=self.optimizer if not args.optimizer_on_host else None) @@ -1278,20 +1278,20 @@ def __init__(self, args): self.tt0.place_module(self.lm_head_module) self.tt0.place_loss_module(self.loss_module) else: - self.cpu1 = pybuda.CPUDevice("cpu1", module=self.lm_head_module) + self.cpu1 = forge.CPUDevice("cpu1", module=self.lm_head_module) self.cpu1.place_loss_module(self.loss_module) mp = torch.multiprocessing.get_context('spawn') self.output_q = mp.Queue() if args.verify: - self.verify_cfg=pybuda.VerifyConfig(enabled=True, + self.verify_cfg=forge.VerifyConfig(enabled=True, # waive_gradient_errors=small_bias_keys, # errors are invalid for small values scale_loss=1.0, # defaults to 50! golden_ignore_df_precision=True, - test_kind=self.pybuda.verify.config.TestKind.TRAINING, - devtype=pybuda.BackendType.Silicon, - arch=pybuda.BackendDevice.Wormhole_B0, + test_kind=self.forge.verify.config.TestKind.TRAINING, + devtype=forge.BackendType.Silicon, + arch=forge.BackendDevice.Wormhole_B0, pcc=99.999, verify_all=True, verify_last=True @@ -1312,8 +1312,8 @@ def apply_custom_overrides(self): def placement_overwrites(self, num_layers, num_lora_layers): for i in range(10000): # Overwrite all gelu op placements - self.pybuda.config.override_op_size(f'bw_in0_gelu_{i}_multiply_1', (1, 8)) - self.pybuda.config.override_op_size('input_1_multiply_247_splt_brcst_1_0_splt_brcst_3_0', (1, 4)) + self.forge.config.override_op_size(f'bw_in0_gelu_{i}_multiply_1', (1, 8)) + self.forge.config.override_op_size('input_1_multiply_247_splt_brcst_1_0_splt_brcst_3_0', (1, 4)) def placement_overwrites_seqlen(self, num_layers, num_lora_layers): @@ -1330,21 +1330,21 @@ def placement_overwrites_seqlen(self, num_layers, num_lora_layers): # bw_in0_matmul_87_matmul_1 # bw_in0_matmul_271_matmul_1 op_name = f'bw_in0_matmul_{87 + lora_loop_offset*i}_matmul_1' - self.pybuda.config.override_op_size(op_name, (4, 8)) + self.forge.config.override_op_size(op_name, (4, 8)) print("Setting op size ", op_name, " to (4, 8)") # TODO: check if we need this! 
op_name = f'bw_in1_matmul_{83 + lora_loop_offset*i}_matmul_1' - self.pybuda.config.override_op_size(op_name, (8, 2)) + self.forge.config.override_op_size(op_name, (8, 2)) print("Setting op size ", op_name, " to (8, 2)") # For > 1 layers op_name = f'bw_in0_gelu_{22 + lora_loop_offset*i}_gelu_derivative_0' - self.pybuda.config.override_op_size(op_name, (1, 8)) + self.forge.config.override_op_size(op_name, (1, 8)) print("Setting op size ", op_name, " to (1, 8)") op_name = f'bw_in0_matmul_{29 + lora_loop_offset*i}_matmul_1' - self.pybuda.config.override_op_size(op_name, (2, 8)) + self.forge.config.override_op_size(op_name, (2, 8)) print("Setting op size ", op_name, " to (1, 8)") @@ -1366,7 +1366,7 @@ def multichip_placement(self, num_layers, num_lora_layers, precision): # non-lora fwd for i in range(num_non_lora_layers): op_name = f"multiply_{0 + non_lora_loop_offset_multiply*i}" - self.pybuda.config.set_epoch_break(op_name) + self.forge.config.set_epoch_break(op_name) print("Setting epoch break at ", op_name) initial_offset_multiply = num_non_lora_layers*non_lora_loop_offset_multiply @@ -1375,12 +1375,12 @@ def multichip_placement(self, num_layers, num_lora_layers, precision): for i in range(num_lora_layers): # fwd op_name = f"multiply_{0 + initial_offset_multiply + lora_loop_offset_multiply*i}" - self.pybuda.config.set_epoch_break(op_name) + self.forge.config.set_epoch_break(op_name) print("Setting epoch break at ", op_name) # bwd op_name = f"bw_in0_matmul_{87 + initial_offset_matmul + lora_loop_offset_matmul*i}_matmul_1" - self.pybuda.config.set_epoch_break(op_name) + self.forge.config.set_epoch_break(op_name) print("Setting epoch break at ", op_name) @@ -1395,15 +1395,15 @@ def multichip_placement(self, num_layers, num_lora_layers, precision): for i in range(num_lora_layers): chip_id = chip_id_1 if i >= start_layer_idx_chip_1 else chip_id_0 op_name = f"multiply_{0 + lora_loop_offset_multiply*i}" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id) + self.forge.config.override_op_placement(op_name, chip_id=chip_id) print("Setting chip_id for ", op_name, " to ", chip_id) op_name = f"matmul_{18 + lora_loop_offset_matmul*i}" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id) + self.forge.config.override_op_placement(op_name, chip_id=chip_id) print("Setting chip_id for ", op_name, " to ", chip_id) op_name = f"matmul_{61 + lora_loop_offset_matmul*i}" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id) + self.forge.config.override_op_placement(op_name, chip_id=chip_id) print("Setting chip_id for ", op_name, " to ", chip_id) @@ -1412,12 +1412,12 @@ def multichip_placement(self, num_layers, num_lora_layers, precision): chip_id = chip_id_1 if i >= start_layer_idx_chip_1 else chip_id_0 # bw_in0_matmul_157_matmul_1 op_name = f"bw_in0_matmul_{87 + lora_loop_offset_matmul*i}_matmul_1" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id) + self.forge.config.override_op_placement(op_name, chip_id=chip_id) print("Setting chip_id for ", op_name, " to ", chip_id) # bw_in0_multiply_108_multiply_0 op_name = f"bw_in0_multiply_{38 + lora_loop_offset_multiply*i}_multiply_0" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id) + self.forge.config.override_op_placement(op_name, chip_id=chip_id) print("Setting chip_id for ", op_name, " to ", chip_id) else: # set chip ids: non-lora on chip 0 and lora on chip 1 @@ -1425,30 +1425,30 @@ def multichip_placement(self, num_layers, num_lora_layers, precision): # chip ids for fwd for i in 
range(num_non_lora_layers): op_name = f"multiply_{0 + non_lora_loop_offset_multiply*i}" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id_0) + self.forge.config.override_op_placement(op_name, chip_id=chip_id_0) print("Setting chip_id for ", op_name, " to ", chip_id_0) op_name = f"matmul_{18 + non_lora_loop_offset_matmul*i}" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id_0) + self.forge.config.override_op_placement(op_name, chip_id=chip_id_0) print("Setting chip_id for ", op_name, " to ", chip_id_0) op_name = f"matmul_{61 + non_lora_loop_offset_matmul*i}" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id_0) + self.forge.config.override_op_placement(op_name, chip_id=chip_id_0) print("Setting chip_id for ", op_name, " to ", chip_id_0) initial_offset_multiply = num_non_lora_layers*non_lora_loop_offset_multiply initial_offset_matmul = num_non_lora_layers*non_lora_loop_offset_matmul for i in range(num_lora_layers): op_name = f"multiply_{0 + initial_offset_multiply + lora_loop_offset_multiply*i}" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id_1) + self.forge.config.override_op_placement(op_name, chip_id=chip_id_1) print("Setting chip_id for ", op_name, " to ", chip_id_1) op_name = f"matmul_{18 + initial_offset_matmul + lora_loop_offset_matmul*i}" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id_1) + self.forge.config.override_op_placement(op_name, chip_id=chip_id_1) print("Setting chip_id for ", op_name, " to ", chip_id_1) op_name = f"matmul_{61 + initial_offset_matmul + lora_loop_offset_matmul*i}" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id_1) + self.forge.config.override_op_placement(op_name, chip_id=chip_id_1) print("Setting chip_id for ", op_name, " to ", chip_id_1) @@ -1458,12 +1458,12 @@ def multichip_placement(self, num_layers, num_lora_layers, precision): for i in range(num_lora_layers): # bw_in0_matmul_157_matmul_1 op_name = f"bw_in0_matmul_{87 + initial_offset_matmul + lora_loop_offset_matmul*i}_matmul_1" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id_1) + self.forge.config.override_op_placement(op_name, chip_id=chip_id_1) print("Setting chip_id for ", op_name, " to ", chip_id_1) # bw_in0_multiply_108_multiply_0 op_name = f"bw_in0_multiply_{38 + initial_offset_multiply + lora_loop_offset_multiply*i}_multiply_0" - self.pybuda.config.override_op_placement(op_name, chip_id=chip_id_1) + self.forge.config.override_op_placement(op_name, chip_id=chip_id_1) print("Setting chip_id for ", op_name, " to ", chip_id_1) @@ -1473,7 +1473,7 @@ def multichip_placement(self, num_layers, num_lora_layers, precision): # Core (c=0,y=1,x=1) [routing] (c=0,y=0,x=0) [worker] [op_name=add_159] exceeded resource constraints: # active dram queues used: 44 limit: 40 add_name = f"add_{89 + num_non_lora_layers*non_lora_loop_offset_matmul + (num_lora_layers-1)*lora_loop_offset_matmul}" - self.pybuda.config.override_op_size(add_name, (1, 2)) + self.forge.config.override_op_size(add_name, (1, 2)) @@ -1487,52 +1487,52 @@ def apply_data_formats(self, lora_data_format, num_layers, num_lora_layers, prec num_non_lora_layers = num_layers - num_lora_layers if precision == 'very-low-mp': # all matmul inputs are bfp8_b; keep bwd/loss/lora bf16b - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="matmul_.*", - input_df={0: [self.pybuda.DataFormat.Bfp8_b, True], 1: [self.pybuda.DataFormat.Bfp8_b, True], 2: [self.pybuda.DataFormat.Bfp8_b, 
True]}) + input_df={0: [self.forge.DataFormat.Bfp8_b, True], 1: [self.forge.DataFormat.Bfp8_b, True], 2: [self.forge.DataFormat.Bfp8_b, True]}) if not self.args.optimizer_on_host: # Overwrites needed only for opt on device - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex=".*lora_.*", - output_df=self.pybuda.DataFormat.Float16_b) + output_df=self.forge.DataFormat.Float16_b) if precision == 'almost-low-mp': # all matmul inputs are bfp8_b; keep bwd/loss/lora bf16b - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="matmul_.*", - input_df={0: [self.pybuda.DataFormat.Bfp8_b, True], 1: [self.pybuda.DataFormat.Bfp8_b, True], 2: [self.pybuda.DataFormat.Bfp8_b, True]}) + input_df={0: [self.forge.DataFormat.Bfp8_b, True], 1: [self.forge.DataFormat.Bfp8_b, True], 2: [self.forge.DataFormat.Bfp8_b, True]}) - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex=".*lora_.*", - output_df=self.pybuda.DataFormat.Float16_b, - input_df={0: [self.pybuda.DataFormat.Float16_b, True], 1: [self.pybuda.DataFormat.Float16_b, True], 2: [self.pybuda.DataFormat.Float16_b, True]}) + output_df=self.forge.DataFormat.Float16_b, + input_df={0: [self.forge.DataFormat.Float16_b, True], 1: [self.forge.DataFormat.Float16_b, True], 2: [self.forge.DataFormat.Float16_b, True]}) - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="loss_.*", accumulate_df=lora_data_format, output_df=lora_data_format) - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="bw_.*", accumulate_df=lora_data_format, output_df=lora_data_format) elif precision == 'low-mp': # all matmul inputs are bfp8_b except for matmul_33 (compile issue); increase bw/loss/lora to fp32 - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="matmul_.*", - input_df={0: [self.pybuda.DataFormat.Bfp8_b, True], 1: [self.pybuda.DataFormat.Bfp8_b, True], 2: [self.pybuda.DataFormat.Bfp8_b, True]}) + input_df={0: [self.forge.DataFormat.Bfp8_b, True], 1: [self.forge.DataFormat.Bfp8_b, True], 2: [self.forge.DataFormat.Bfp8_b, True]}) # except all lora ops + params - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex=".*lora_.*", output_df=lora_data_format, input_df={0: [lora_data_format, True], 1: [lora_data_format, True], 2: [lora_data_format, True]}) - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="loss_.*", accumulate_df=lora_data_format, output_df=lora_data_format) - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="bw_.*", accumulate_df=lora_data_format, output_df=lora_data_format) @@ -1540,40 +1540,40 @@ def apply_data_formats(self, lora_data_format, num_layers, num_lora_layers, prec elif precision == 'high-mp': # all falcon frozen weights are bfp8_b, everything else is bfp16_b # all frozen weights - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="input_0_transpose_.*", - output_df=self.pybuda.DataFormat.Bfp8_b, + output_df=self.forge.DataFormat.Bfp8_b, ) # except all lora ops + params - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex=".*lora_.*", output_df=lora_data_format, input_df={0: 
[lora_data_format, True], 1: [lora_data_format, True], 2: [lora_data_format, True]}) - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="loss_.*", accumulate_df=lora_data_format, output_df=lora_data_format) - self.pybuda.config.configure_mixed_precision( + self.forge.config.configure_mixed_precision( name_regex="bw_.*", accumulate_df=lora_data_format, output_df=lora_data_format) - # required for falcon-stable pybuda (08/2023) + # required for falcon-stable forge (08/2023) # OP matmul_664 packer : Chip = 0, Core x = 18, y = 19(logical x = 0, y = 1): out of memory. Total bytes alloced = 1295424 B, used by bufs = 977920 B, used by pipegen = 317504 B Limit: 1290240 # non-lora for i in range(num_non_lora_layers): op_name = f"matmul_{20 + non_lora_loop_offset*i}" - self.pybuda.config.override_op_size(op_name, (4, 8)) + self.forge.config.override_op_size(op_name, (4, 8)) print("Overwriting op size for ", op_name, "to (4, 8)") initial_offset = num_non_lora_layers*non_lora_loop_offset # lora for i in range(num_lora_layers): op_name = f"matmul_{20 + initial_offset + lora_loop_offset*i}" - self.pybuda.config.override_op_size(op_name, (4, 8)) + self.forge.config.override_op_size(op_name, (4, 8)) print("Overwriting op size for ", op_name, "to (4, 8)") elif precision == 'debug': # use this for debugging wihout changing existing options @@ -1593,7 +1593,7 @@ def ensure_initialized(self, batch): ) print(f'Saved TTImage to {self.args.tti_save}') sys.exit(0) - self.pybuda.initialize_pipeline(training=True, + self.forge.initialize_pipeline(training=True, sample_inputs=batch, sample_targets=batch, output_queue=self.output_q, @@ -1625,7 +1625,7 @@ def forward_step(self, batch): else: self.cpu1.push_to_target_inputs(targets) - self.pybuda.run_forward(input_count=1, _sequential=True) + self.forge.run_forward(input_count=1, _sequential=True) self.time_per_fwd_step = (time() - step_start_time) self.accumulated_batch_time_per_step += self.time_per_fwd_step @@ -1633,19 +1633,19 @@ def forward_step(self, batch): self.loss = self.scaled_loss / self.effective_loss_scale self.accumulated_batch_loss += self.loss - assert not self.args.save_to_activation_cache, "Saving activation cache for pybuda model not implemented yet!" + assert not self.args.save_to_activation_cache, "Saving activation cache for forge model not implemented yet!" return self.loss def backward_step(self, zero_grad): step_start_time = time() - self.pybuda.run_backward(input_count=1, zero_grad=zero_grad, _sequential=True) + self.forge.run_backward(input_count=1, zero_grad=zero_grad, _sequential=True) self.time_per_bwd_step = (time() - step_start_time) self.accumulated_batch_time_per_step += self.time_per_bwd_step def optimizer_step(self, do_update): if not do_update and self.args.optimizer_on_host: - self.pybuda.sync() + self.forge.sync() if not do_update: self.grad_norm = 0.0 return @@ -1657,7 +1657,7 @@ def optimizer_step(self, do_update): module = self.model.transformer # Get gradients from chip - gradients = self.pybuda.get_parameter_gradients(tt_device, _sequential=True)[0] + gradients = self.forge.get_parameter_gradients(tt_device, _sequential=True)[0] self.gradients = {} self.params = {} @@ -1697,15 +1697,15 @@ def optimizer_step(self, do_update): #FIXME: add gradient scaling for optimizer on device! 
# if self.args.extensive_logging: - # self.gradients = self.pybuda.get_parameter_gradients(self.tt0, _sequential=True)[0] + # self.gradients = self.forge.get_parameter_gradients(self.tt0, _sequential=True)[0] # self.grad_norm = get_gradient_norm(self.model.transformer.named_parameters(), self.gradients) - # self.params = self.pybuda.get_parameter_checkpoint(self.tt0, _sequential=True)[0] + # self.params = self.forge.get_parameter_checkpoint(self.tt0, _sequential=True)[0] - self.pybuda.run_optimizer(_sequential=True) + self.forge.run_optimizer(_sequential=True) if self.log_histograms: # FIXME: not tested. get gradients and params from device for logging - self.gradients = self.pybuda.get_parameter_gradients(tt_device, _sequential=True)[0] - self.params = self.pybuda.get_parameters(tt_device, _sequential=True)[0] + self.gradients = self.forge.get_parameter_gradients(tt_device, _sequential=True)[0] + self.params = self.forge.get_parameters(tt_device, _sequential=True)[0] self.time_per_opt_step = (time() - step_start_time) self.accumulated_batch_time_per_step += self.time_per_opt_step @@ -1756,7 +1756,7 @@ def log(self, step): print(f"WARNING: Could not log netlist at path: {netlist_path}") try: - netlist_override_file = os.environ['PYBUDA_NETLIST_OVERRIDE'] + netlist_override_file = os.environ['FORGE_NETLIST_OVERRIDE'] artifact = wandb.Artifact("netlist_override", type="yaml") artifact.add_file(netlist_override_file) wandb.log_artifact(artifact) @@ -1779,7 +1779,7 @@ def save(self, filename): module = self.model.transformer # Get parameters from device - device_params = self.pybuda.get_parameter_checkpoint(self.tt0, _sequential=True)[0] + device_params = self.forge.get_parameter_checkpoint(self.tt0, _sequential=True)[0] # Assign gradients to module parameters for name, parameter in module.named_parameters(): diff --git a/pybuda/test/falcon/tests/test_falcon7b_decode.py b/forge/test/falcon/tests/test_falcon7b_decode.py similarity index 96% rename from pybuda/test/falcon/tests/test_falcon7b_decode.py rename to forge/test/falcon/tests/test_falcon7b_decode.py index 5b2e36a1d..9ce5e64d1 100644 --- a/pybuda/test/falcon/tests/test_falcon7b_decode.py +++ b/forge/test/falcon/tests/test_falcon7b_decode.py @@ -34,7 +34,7 @@ def test_decode_padded(num_layers, sequence_length): seqlen = sequence_length # using padded model - with open("pybuda/test/falcon/models/falcon7b/config_padded.json", "r") as f: + with open("forge/test/falcon/models/falcon7b/config_padded.json", "r") as f: params = json.loads(f.read()) config = RWConfig(**params, user_rows=1) config.n_layer = n_layers @@ -89,12 +89,12 @@ def test_decode_padded(num_layers, sequence_length): # python -u decode_demo.py --max-tokens 120 --user-rows 32 --seqlen 512 # --device silicon --precision bf16 --prompts-file data/two_cities.json # --output-at-end --matmul-precision weight_bfp8_act_bf16 -@pytest.mark.parametrize("num_layers,sequence_length,prompts_file", [(32,512,'pybuda/test/falcon/data/two_cities.json')]) +@pytest.mark.parametrize("num_layers,sequence_length,prompts_file", [(32,512,'forge/test/falcon/data/two_cities.json')]) def test_decode_demo_masked_odkv(num_layers, sequence_length,prompts_file): user_rows = 32 # using padded model - with open("pybuda/test/falcon/models/falcon7b/config_padded.json", "r") as f: + with open("forge/test/falcon/models/falcon7b/config_padded.json", "r") as f: params = json.loads(f.read()) config = RWConfig(**params, user_rows=user_rows) config.n_layer = num_layers @@ -144,7 +144,7 @@ def 
test_finetune_decode_ppl(num_layers, sequence_length): tokenizer = get_tokenizer('tiiuae/falcon-7b', explicit_pad_token=False) #using padded model - with open("pybuda/test/falcon/models/falcon7b/config_padded.json", "r") as f: + with open("forge/test/falcon/models/falcon7b/config_padded.json", "r") as f: params = json.loads(f.read()) config = RWConfig(**params, user_rows=user_rows) config.n_layer = num_layers diff --git a/pybuda/test/falcon/tests/test_falcon7b_finetune.py b/forge/test/falcon/tests/test_falcon7b_finetune.py similarity index 80% rename from pybuda/test/falcon/tests/test_falcon7b_finetune.py rename to forge/test/falcon/tests/test_falcon7b_finetune.py index 0114d9404..a74955ccf 100644 --- a/pybuda/test/falcon/tests/test_falcon7b_finetune.py +++ b/forge/test/falcon/tests/test_falcon7b_finetune.py @@ -14,6 +14,6 @@ # Tests Falcon-7B finetune basic demo # Config 1 (ci_basic.json): basic test with wq/wv lora modules # Config 2 (ci_basic_lora.json): basic test with all lora modules, rank 2, precision low-mp -@pytest.mark.parametrize("config_file", [('pybuda/test/falcon/finetune_configs/ci_basic.json'), ('pybuda/test/falcon/finetune_configs/ci_basic_lora.json')]) +@pytest.mark.parametrize("config_file", [('forge/test/falcon/finetune_configs/ci_basic.json'), ('forge/test/falcon/finetune_configs/ci_basic_lora.json')]) def test_finetune_basic(config_file): run_finetune(config_file) diff --git a/pybuda/test/falcon/tests/utils.py b/forge/test/falcon/tests/utils.py similarity index 99% rename from pybuda/test/falcon/tests/utils.py rename to forge/test/falcon/tests/utils.py index 4e7abf18c..7cf295cbb 100644 --- a/pybuda/test/falcon/tests/utils.py +++ b/forge/test/falcon/tests/utils.py @@ -293,7 +293,7 @@ def get_falcon_model_with_version(version, model_name, tokenizer, training, num_ def lorify_model(model, target_modules, rank, num_lora_layers=None): - # prepare_model_for_kbit_training is not needed at the moment, sets everything to FP32 and we overwrite data formats in pybuda anyways + # prepare_model_for_kbit_training is not needed at the moment, sets everything to FP32 and we overwrite data formats in forge anyways # from peft import prepare_model_for_kbit_training # model = prepare_model_for_kbit_training(model) model.config.use_cache = False @@ -579,8 +579,8 @@ def create_experiment_dir(parent_dir='falcon_lora_experiments', experiment_name= os.makedirs(parent_dir, exist_ok=True) experiment_path = os.path.join(parent_dir, experiment_name) os.makedirs(experiment_path, exist_ok=True) - pybuda_exp_path = os.path.join(experiment_path, 'pybuda') - os.makedirs(pybuda_exp_path, exist_ok=True) + forge_exp_path = os.path.join(experiment_path, 'forge') + os.makedirs(forge_exp_path, exist_ok=True) pytorch_exp_path = os.path.join(experiment_path, 'pytorch') os.makedirs(pytorch_exp_path, exist_ok=True) return experiment_path diff --git a/pybuda/test/fx/__init__.py b/forge/test/fx/__init__.py similarity index 100% rename from pybuda/test/fx/__init__.py rename to forge/test/fx/__init__.py diff --git a/pybuda/test/fx/conftest.py b/forge/test/fx/conftest.py similarity index 92% rename from pybuda/test/fx/conftest.py rename to forge/test/fx/conftest.py index 28352b9ff..88f1bd8a0 100644 --- a/pybuda/test/fx/conftest.py +++ b/forge/test/fx/conftest.py @@ -4,8 +4,8 @@ import torch import pytest -from pybuda.torch_compile import compile_torch -from pybuda.config import remove_cpu_fallback_ops +from forge.torch_compile import compile_torch +from forge.config import remove_cpu_fallback_ops 
@pytest.fixture(autouse=True) def disable_embedding_fallback(): diff --git a/pybuda/test/fx/test_basics.py b/forge/test/fx/test_basics.py similarity index 97% rename from pybuda/test/fx/test_basics.py rename to forge/test/fx/test_basics.py index 6b3597a6c..3367c9b06 100644 --- a/pybuda/test/fx/test_basics.py +++ b/forge/test/fx/test_basics.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import pytest import torch -from pybuda.torch_compile import compile_torch +from forge.torch_compile import compile_torch from .conftest import generic_model_test diff --git a/pybuda/test/fx/test_features.py b/forge/test/fx/test_features.py similarity index 89% rename from pybuda/test/fx/test_features.py rename to forge/test/fx/test_features.py index f5cb4bc15..1c02ba3a2 100644 --- a/pybuda/test/fx/test_features.py +++ b/forge/test/fx/test_features.py @@ -8,9 +8,9 @@ import torch import torch.nn as nn -import pybuda -from pybuda.torch_compile import compile_torch -from pybuda.config import _get_global_compiler_config +import forge +from forge.torch_compile import compile_torch +from forge.config import _get_global_compiler_config from .conftest import generic_model_test @@ -30,26 +30,26 @@ def forward(self, x1): return m1 _get_global_compiler_config().enable_pt2_fx_graph_link = True - os.environ["PYBUDA_DEVMODE"] = "1" + os.environ["FORGE_DEVMODE"] = "1" input = torch.rand(1, 32, 32) input2 = torch.rand(1, 32, 32) input3 = torch.rand(1, 32, 32) input = input.to("tt") - pybuda_mod = torch.compile(Linear().to("tt"), backend=compile_torch) - result_c = pybuda_mod(input) - pybuda_mod_2 = torch.compile(Linear().to("tt"), backend=compile_torch) - result__ = pybuda_mod_2(result_c) + forge_mod = torch.compile(Linear().to("tt"), backend=compile_torch) + result_c = forge_mod(input) + forge_mod_2 = torch.compile(Linear().to("tt"), backend=compile_torch) + result__ = forge_mod_2(result_c) - result_c = pybuda_mod(input) - result = pybuda_mod_2(result_c) + result_c = forge_mod(input) + result = forge_mod_2(result_c) result = result.to("cpu") def test_decomp(): pytest.skip() #TODO fix: FATAL | Always - Unsupported (for now) _copy_from TTDevice[0] to TTDevice[0] - os.environ["PYBUDA_DEVMODE"] = "1" + os.environ["FORGE_DEVMODE"] = "1" class BasicModule(nn.Module): def forward(self, x): x = x * 2 @@ -58,8 +58,8 @@ def forward(self, x): mod, input = BasicModule(), torch.randn(2, 9).to(dtype=torch.float16) - pybuda_mod = torch.compile(mod, backend=compile_torch, dynamic=False) - out = pybuda_mod(input) + forge_mod = torch.compile(mod, backend=compile_torch, dynamic=False) + out = forge_mod(input) @pytest.mark.parametrize("shape", [(1024, 1024)]) @pytest.mark.parametrize("mb", [1, 8, 16]) @@ -68,11 +68,11 @@ def forward(self, x): def test_push(shape, mb, loop, native): if mb != 1: pytest.skip() #TODO - os.environ["PYBUDA_DEVMODE"] = "1" + os.environ["FORGE_DEVMODE"] = "1" import time - pybuda.config.set_configuration_options( - default_df_override=pybuda.config.DataFormat.Float32 + forge.config.set_configuration_options( + default_df_override=forge.config.DataFormat.Float32 ) class Add(nn.Module): @@ -88,26 +88,26 @@ def forward(self, x1, x2): if native: model = model.to("tt") - pybuda_mod = pybuda_mod = torch.compile(model, backend=compile_torch, dynamic=False) + forge_mod = forge_mod = torch.compile(model, backend=compile_torch, dynamic=False) comp_inputs = [i.to("tt") for i in inputs[0]] - result = pybuda_mod(*comp_inputs) # compile + result = forge_mod(*comp_inputs) # compile start = time.perf_counter() for args in 
inputs: args = [a.to("tt") for a in args] - result = pybuda_mod(*args) + result = forge_mod(*args) result.to("cpu") elapsed = time.perf_counter() - start else: - tt0 = pybuda.TTDevice("tt0") - tt0.place_module(pybuda.module.PyTorchModule("add", model)) - output_q = pybuda.initialize_pipeline( + tt0 = forge.TTDevice("tt0") + tt0.place_module(forge.module.PyTorchModule("add", model)) + output_q = forge.initialize_pipeline( training=False, sample_inputs=sample_inputs ) start = time.perf_counter() for i in range(loop): tt0.push_to_inputs(inputs[i]) - pybuda.run_forward(input_count=loop) + forge.run_forward(input_count=loop) for i in range(loop): result = output_q.get(timeout=30) elapsed = time.perf_counter() - start diff --git a/pybuda/test/fx/test_models.py b/forge/test/fx/test_models.py similarity index 72% rename from pybuda/test/fx/test_models.py rename to forge/test/fx/test_models.py index ed01a332d..fdf174de3 100644 --- a/pybuda/test/fx/test_models.py +++ b/forge/test/fx/test_models.py @@ -2,32 +2,32 @@ # SPDX-License-Identifier: Apache-2.0 import pytest -import pybuda +import forge import torch import torch.nn as nn import os from pytorchcv.model_provider import get_model as ptcv_get_model from transformers import BertModel, GPT2LMHeadModel, GPT2Config, GPT2Model, AutoFeatureExtractor, ResNetForImageClassification -from pybuda.torch_compile import compile_torch +from forge.torch_compile import compile_torch def test_unet_osmr_cityscape_pytorch(): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.cpu_fallback_ops = set() compiler_cfg.enable_t_streaming = True compiler_cfg.enable_auto_fusing = False compiler_cfg.enable_enumerate_u_kt = False compiler_cfg.default_dram_parameters = False - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_FORCE_RESIZE_DENSE_MM"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_FORCE_RESIZE_DENSE_MM"] = "1" + os.environ["FORGE_RIBBON2"] = "1" #if test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_BALANCER_PREPASS_DISABLED"] = "1" + os.environ["FORGE_BALANCER_PREPASS_DISABLED"] = "1" #elif test_device.arch == BackendDevice.Grayskull: # compiler_cfg.balancer_policy = "CNN" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model unet_osmr = ptcv_get_model("unet_cityscapes", pretrained=False) unet_osmr.eval() @@ -40,22 +40,22 @@ def test_unet_osmr_cityscape_pytorch(): # Run the model on TT device unet_osmr.to("tt") img_tensor = img_tensor.to("tt") - pybuda_mod = torch.compile(unet_osmr, backend=compile_torch, dynamic=False) - result = pybuda_mod(img_tensor) + forge_mod = torch.compile(unet_osmr, backend=compile_torch, dynamic=False) + result = forge_mod(img_tensor) output = result[0].to("cpu") # Compare the result - assert pybuda.op.eval.compare_tensor_to_golden(f"pt_unet_osmr_cityscape", golden[0], output, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"pt_unet_osmr_cityscape", golden[0], output, is_buda=True, pcc=0.99) def test_resnet(): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() 
compiler_cfg.cpu_fallback_ops = set() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_t_streaming = True compiler_cfg.enable_training = False - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" # Load ResNet feature extractor and model checkpoint from HuggingFace feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-50", torchscript=True) @@ -78,35 +78,35 @@ def test_resnet(): # Run the model on TT device resnet.to("tt") pixel_values = pixel_values.to("tt") - pybuda_mod = torch.compile(resnet, backend=compile_torch, dynamic=False) - result = pybuda_mod(pixel_values) + forge_mod = torch.compile(resnet, backend=compile_torch, dynamic=False) + result = forge_mod(pixel_values) output = result[0].to("cpu") # Compare the result - assert pybuda.op.eval.compare_tensor_to_golden(f"pt_resnet50", golden[0], output, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"pt_resnet50", golden[0], output, is_buda=True, pcc=0.99) def test_gpt2(): config = GPT2Config.from_pretrained("gpt2") config.num_hidden_layers = 2 - os.environ["PYBUDA_DEVMODE"] = "1" - compile_cfg = pybuda.config._get_global_compiler_config() + os.environ["FORGE_DEVMODE"] = "1" + compile_cfg = forge.config._get_global_compiler_config() compile_cfg.enable_link_past_cache_ios = True compile_cfg.cpu_fallback_ops = set() - compile_cfg.default_df_override = pybuda._C.Float16_b + compile_cfg.default_df_override = forge._C.Float16_b gpt2 = GPT2LMHeadModel(config).eval() input_ids = torch.randint(0, 10000, (1, 32)).int() golden = gpt2(input_ids) - pybuda_mod = torch.compile(gpt2, backend=compile_torch, dynamic=False) - result = pybuda_mod(input_ids) + forge_mod = torch.compile(gpt2, backend=compile_torch, dynamic=False) + result = forge_mod(input_ids) next_token_logits = result[0] next_token_logits = next_token_logits.to("cpu") res = result[0].to("cpu") - assert pybuda.op.eval.compare_tensor_to_golden(f"gpt2", golden[0], res, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"gpt2", golden[0], res, is_buda=True, pcc=0.99) def test_gen(): torch.set_num_threads(1) @@ -115,29 +115,29 @@ def test_gen(): config.num_hidden_layers = 1 config.return_dict = False - os.environ["PYBUDA_DEVMODE"] = "1" - compile_cfg = pybuda.config._get_global_compiler_config() + os.environ["FORGE_DEVMODE"] = "1" + compile_cfg = forge.config._get_global_compiler_config() compile_cfg.enable_link_past_cache_ios = True compile_cfg.cpu_fallback_ops = set() - compile_cfg.default_df_override = pybuda._C.Float16_b + compile_cfg.default_df_override = forge._C.Float16_b gpt2 = GPT2Model(config).eval() gpt2.to("tt") input_ids = torch.randint(0, 10000, (1, 32)).int().to("tt") - pybuda_mod = torch.compile(gpt2, backend=compile_torch, dynamic=False) - result = pybuda_mod(input_ids) + forge_mod = torch.compile(gpt2, backend=compile_torch, dynamic=False) + result = forge_mod(input_ids) res = result[0].to("cpu") inp2 = torch.randint(0, 10000, (1, 32)).int() inp2 = inp2.to("tt") - result = pybuda_mod(inp2, result[1]) + result = forge_mod(inp2, result[1]) rs2 = result[0].to("cpu") def test_bert(): - os.environ["PYBUDA_DEVMODE"] = "1" - compile_cfg = pybuda.config._get_global_compiler_config() + os.environ["FORGE_DEVMODE"] = "1" + compile_cfg = forge.config._get_global_compiler_config() compile_cfg.cpu_fallback_ops = set() bert = 
BertModel.from_pretrained("prajjwal1/bert-tiny", torchscript=True) @@ -154,46 +154,46 @@ def test_bert(): input_ids = input_ids.to("tt") print("Compiling Model") - pybuda_mod = torch.compile(bert, backend=compile_torch, dynamic=False) - result = pybuda_mod(input_ids) + forge_mod = torch.compile(bert, backend=compile_torch, dynamic=False) + result = forge_mod(input_ids) print("Copying outputs") result = [r.to("cpu") for r in result] for i, (g, r) in enumerate(zip(golden, result)): - assert pybuda.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) inp2 = torch.randint(0, 10000, (1, 128)).int() golden = bert_cpu(inp2) inp2 = inp2.to("tt") - result = pybuda_mod(inp2) + result = forge_mod(inp2) result = [r.to("cpu") for r in result] for i, (g, r) in enumerate(zip(golden, result)): - assert pybuda.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) inp3 = torch.randint(0, 10000, (1, 64)).int() golden = bert_cpu(inp3) inp3 = inp3.to("tt") - result = pybuda_mod(inp3) + result = forge_mod(inp3) result = [r.to("cpu") for r in result] for i, (g, r) in enumerate(zip(golden, result)): - assert pybuda.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) inp4 = torch.randint(0, 10000, (1, 128)).int() golden = bert_cpu(inp4) inp4 = inp4.to("tt") - result = pybuda_mod(inp4) + result = forge_mod(inp4) result = [r.to("cpu") for r in result] for i, (g, r) in enumerate(zip(golden, result)): - assert pybuda.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) inp5 = torch.randint(0, 10000, (1, 64)).int() golden = bert_cpu(inp5) inp5 = inp5.to("tt") - result = pybuda_mod(inp5) + result = forge_mod(inp5) result = [r.to("cpu") for r in result] for i, (g, r) in enumerate(zip(golden, result)): - assert pybuda.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"bert_{i}", g, r, is_buda=True, pcc=0.99) from diffusers import StableDiffusionPipeline @@ -201,8 +201,8 @@ def test_sd(): model = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") prompt = "a photo of an astronaut riding a horse on mars" model = model.to("tt") - pybuda_mod = torch.compile(model, backend=compile_torch) - image = pybuda_mod(prompt=prompt, num_images_per_prompt=1, output_type="pil").images[0] + forge_mod = torch.compile(model, backend=compile_torch) + image = forge_mod(prompt=prompt, num_images_per_prompt=1, output_type="pil").images[0] from transformers import MobileNetV2FeatureExtractor, MobileNetV2ForImageClassification from PIL import Image @@ -224,10 +224,10 @@ def test_mobilenet_v2(): predicted_class_idx_cpu = logits.argmax(-1).item() #print("Predicted class:", model.config.id2label[predicted_class_idx]) - pybuda_mod = torch.compile(model.to('tt'), backend=compile_torch) + forge_mod = torch.compile(model.to('tt'), backend=compile_torch) for k, v in inputs.items(): inputs[k] = v.to("tt") - outputs = pybuda_mod(**inputs) + outputs = forge_mod(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes diff --git a/pybuda/test/fx/test_ops.py b/forge/test/fx/test_ops.py 
similarity index 60% rename from pybuda/test/fx/test_ops.py rename to forge/test/fx/test_ops.py index 8ff77edd3..18205eb46 100644 --- a/pybuda/test/fx/test_ops.py +++ b/forge/test/fx/test_ops.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import pytest -import pybuda +import forge import torch import torch.nn as nn import os -from pybuda.torch_compile import compile_torch +from forge.torch_compile import compile_torch def test_add(): class Add(nn.Module): @@ -16,13 +16,13 @@ def __init__(self): def forward(self, x1, x2): return x1 + x2, x2 + x1 + 2 - os.environ["PYBUDA_DEVMODE"] = "1" + os.environ["FORGE_DEVMODE"] = "1" model = Add() inputs = [torch.rand(1, 32, 32), torch.rand(1, 32, 32)] golden = model(*inputs) - pybuda_mod = torch.compile(model, backend="tt") + forge_mod = torch.compile(model, backend="tt") # inputs = [i.to("tt") for i in inputs] - result = pybuda_mod(*inputs) + result = forge_mod(*inputs) result = [r.to("cpu") for r in result] assert [torch.allclose(g, r) for g, r in zip(golden, result)] @@ -37,26 +37,26 @@ def forward(self, x): x = self.conv(x) return x - os.environ["PYBUDA_DEVMODE"] = "1" + os.environ["FORGE_DEVMODE"] = "1" model = Conv2d() inputs = torch.rand(1, 3, 32, 32) golden = model(inputs) if True: - pybuda_mod = torch.compile(model, backend=compile_torch, dynamic=False) - result = pybuda_mod(inputs) + forge_mod = torch.compile(model, backend=compile_torch, dynamic=False) + result = forge_mod(inputs) result = result.to("cpu") - assert pybuda.op.eval.compare_tensor_to_golden(f"conv2d", golden, result, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"conv2d", golden, result, is_buda=True, pcc=0.99) else: - from pybuda.verify.backend import verify_module - mod = pybuda.PyTorchModule("conv", model) + from forge.verify.backend import verify_module + mod = forge.PyTorchModule("conv", model) verify_module( mod, ([1,3,32,32],), - verify_cfg=pybuda.VerifyConfig( - arch=pybuda.BackendDevice.Wormhole_B0, - devtype=pybuda.BackendType.Golden, - test_kind=pybuda.verify.TestKind.INFERENCE, + verify_cfg=forge.VerifyConfig( + arch=forge.BackendDevice.Wormhole_B0, + devtype=forge.BackendType.Golden, + test_kind=forge.verify.TestKind.INFERENCE, pcc=0.99 ), ) @@ -71,18 +71,18 @@ def forward(self, x): x = self.bn(x) return x - os.environ["PYBUDA_DEVMODE"] = "1" + os.environ["FORGE_DEVMODE"] = "1" model = BN() model.eval() inputs = torch.rand(1, 64, 32, 32) golden = model(inputs) # inputs = [i.to("tt") for i in inputs] - pybuda_mod = torch.compile(model, backend=compile_torch) - result = pybuda_mod(inputs) + forge_mod = torch.compile(model, backend=compile_torch) + result = forge_mod(inputs) result = result.to("cpu") - assert pybuda.op.eval.compare_tensor_to_golden(f"linear", golden, result, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"linear", golden, result, is_buda=True, pcc=0.99) def test_linear(): class Linear(nn.Module): @@ -94,13 +94,13 @@ def forward(self, x1, x2): m1 = self.linear(x1) return m1 + x2 - os.environ["PYBUDA_DEVMODE"] = "1" + os.environ["FORGE_DEVMODE"] = "1" model = Linear() inputs = [torch.rand(1, 32, 32), torch.rand(1, 32, 64)] golden = model(*inputs) # inputs = [i.to("tt") for i in inputs] - pybuda_mod = torch.compile(model.to("tt"), backend=compile_torch) - result = pybuda_mod(*[i.to("tt") for i in inputs]) + forge_mod = torch.compile(model.to("tt"), backend=compile_torch) + result = forge_mod(*[i.to("tt") for i in inputs]) result = result.to("cpu") - assert 
pybuda.op.eval.compare_tensor_to_golden(f"linear", golden, result, is_buda=True, pcc=0.99) + assert forge.op.eval.compare_tensor_to_golden(f"linear", golden, result, is_buda=True, pcc=0.99) diff --git a/pybuda/test/galaxy/bert/run_squad_wh.py b/forge/test/galaxy/bert/run_squad_wh.py similarity index 82% rename from pybuda/test/galaxy/bert/run_squad_wh.py rename to forge/test/galaxy/bert/run_squad_wh.py index 7448ffcfb..d6f130a0c 100644 --- a/pybuda/test/galaxy/bert/run_squad_wh.py +++ b/forge/test/galaxy/bert/run_squad_wh.py @@ -20,8 +20,8 @@ from torch.utils.data import DataLoader from transformers import BertForQuestionAnswering -from pybuda.config import _get_global_compiler_config -import pybuda +from forge.config import _get_global_compiler_config +import forge import pytest sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "squad_preprocessing")) @@ -32,7 +32,7 @@ "raw_file": "/proj_sw/large-model-cache/bert_squad_data/data/squad/dev-v1.1.json", "examples_file": "/proj_sw/large-model-cache/bert_squad_data/preprocessed_data/squad_tokenized/eval_examples.pt", "features_file": "/proj_sw/large-model-cache/bert_squad_data/preprocessed_data/squad_tokenized/eval_features.pt", - "eval_script": "pybuda/test/galaxy/bert/squad_preprocessing/evaluate-v1.1.py", + "eval_script": "forge/test/galaxy/bert/squad_preprocessing/evaluate-v1.1.py", "results_file": "results.pt", "predictions_file": "predictions.json", "out_file": "results.json", @@ -76,7 +76,7 @@ def get_netlist_total_ops(netlist): def encoder_output_buffering_single_chip(): compiler_cfg = _get_global_compiler_config() - config = pybuda.config + config = forge.config # input_1 -> matmul_2, matmul_8, matmul_22 config.insert_nop("input_1", ["matmul_2", "matmul_8", "matmul_22"], hoist_tms=False) @@ -155,7 +155,7 @@ def encoder_output_buffering_single_chip(): config.override_op_size("buffer_0__fused_op_206_matmul_1221", [1, 1]) def chip_breaks_single_chip(): - config = pybuda.config + config = forge.config compiler_cfg = _get_global_compiler_config() config.set_chip_break("buffer_0__fused_op_8_matmul_55") @@ -186,7 +186,7 @@ def chip_breaks_single_chip(): def encoder_output_buffering_galaxy(): compiler_cfg = _get_global_compiler_config() - config = pybuda.config + config = forge.config # input_1 -> matmul_2, matmul_8, matmul_22, add_37 config.insert_nop("input_1", ["matmul_2", "matmul_8", "matmul_22", "add_37"], hoist_tms=False) @@ -268,7 +268,7 @@ def encoder_output_buffering_galaxy(): def chip_breaks_galaxy(): compiler_cfg = _get_global_compiler_config() - config = pybuda.config + config = forge.config config.set_chip_break("buffer_0__fused_op_8_matmul_55") config.set_chip_break("buffer_0__fused_op_8_matmul_55") @@ -298,7 +298,7 @@ def chip_breaks_galaxy(): def attention_mask_buffering_galaxy(): compiler_cfg = _get_global_compiler_config() - config = pybuda.config + config = forge.config config.insert_nop("attention_mask", ["_fused_op_0", "_fused_op_9", "_fused_op_18", "_fused_op_27", "_fused_op_36", "_fused_op_45", "_fused_op_54", "_fused_op_63", "_fused_op_72", "_fused_op_81", "_fused_op_90", "_fused_op_99", "_fused_op_108", "_fused_op_117", "_fused_op_126", "_fused_op_135", "_fused_op_144", "_fused_op_153", "_fused_op_162", "_fused_op_171", "_fused_op_180", "_fused_op_189", "_fused_op_198", "_fused_op_207"], hoist_tms=False) config.insert_nop("buffer_0_attention_mask__fused_op_0", ["_fused_op_9", "_fused_op_18", "_fused_op_27", "_fused_op_36", "_fused_op_45", "_fused_op_54", "_fused_op_63", "_fused_op_72", 
"_fused_op_81", "_fused_op_90", "_fused_op_99", "_fused_op_108", "_fused_op_117", "_fused_op_126", "_fused_op_135", "_fused_op_144", "_fused_op_153", "_fused_op_162", "_fused_op_171", "_fused_op_180", "_fused_op_189", "_fused_op_198", "_fused_op_207"], hoist_tms=False) @@ -427,353 +427,353 @@ def intermediate_dram_queues_galaxy(): "buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_buffer_0_attention_mask__fused_op_0__fused_op_9__fused_op_18__fused_op_27__fused_op_36__fused_op_45__fused_op_54__fused_op_63__fused_op_72__fused_op_81__fused_op_90__fused_op_99__fused_op_108__fused_op_117__fused_op_126__fused_op_135__fused_op_144__fused_op_153__fused_op_162__fused_op_171__fused_op_180__fused_op_189__fused_op_198__fused_op_207" ] - pybuda.config._get_global_compiler_config().insert_queues = [] + forge.config._get_global_compiler_config().insert_queues = [] for i in range(len(buffer_list)-1): - pybuda.config._get_global_compiler_config().insert_queues.append((buffer_list[i], buffer_list[i+1], 0)) - - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0_input_1_matmul_2", "add_37", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_8_matmul_55", "add_90", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_17_matmul_108", "add_143", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_26_matmul_161", "add_196", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_35_matmul_214", "add_249", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_44_matmul_267", "add_302", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_53_matmul_320", "add_355", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_62_matmul_373", "add_408", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_71_matmul_426", "add_461", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_80_matmul_479", "add_514", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_89_matmul_532", "add_567", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_98_matmul_585", "add_620", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_107_matmul_638", "add_673", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_116_matmul_691", "add_726", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_125_matmul_744", "add_779", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_134_matmul_797", "add_832", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_143_matmul_850", "add_885", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_152_matmul_903", "add_938", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_161_matmul_956", "add_991", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_170_matmul_1009", "add_1044", 1)) - 
pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_179_matmul_1062", "add_1097", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_188_matmul_1115", "add_1150", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_197_matmul_1168", "add_1203", 1)) - pybuda.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_206_matmul_1221", "add_1256", 1)) + forge.config._get_global_compiler_config().insert_queues.append((buffer_list[i], buffer_list[i+1], 0)) + + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0_input_1_matmul_2", "add_37", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_8_matmul_55", "add_90", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_17_matmul_108", "add_143", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_26_matmul_161", "add_196", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_35_matmul_214", "add_249", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_44_matmul_267", "add_302", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_53_matmul_320", "add_355", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_62_matmul_373", "add_408", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_71_matmul_426", "add_461", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_80_matmul_479", "add_514", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_89_matmul_532", "add_567", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_98_matmul_585", "add_620", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_107_matmul_638", "add_673", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_116_matmul_691", "add_726", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_125_matmul_744", "add_779", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_134_matmul_797", "add_832", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_143_matmul_850", "add_885", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_152_matmul_903", "add_938", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_161_matmul_956", "add_991", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_170_matmul_1009", "add_1044", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_179_matmul_1062", "add_1097", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_188_matmul_1115", "add_1150", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_197_matmul_1168", "add_1203", 1)) + forge.config._get_global_compiler_config().insert_queues.append(("buffer_0__fused_op_206_matmul_1221", "add_1256", 1)) def df_overrides(): compiler_cfg = _get_global_compiler_config() - 
compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match="matmul_2", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match="matmul_8", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match="matmul_22", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="add", name_regex_match="add_37", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True]} )) for i in range(24): # START - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"matmul_{55+(53*i)}", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: 
[pybuda._C.DataFormat.Float16_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Float16_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"matmul_{61+(53*i)}", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"matmul_{14+(53*i)}", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"matmul_{75+(53*i)}", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"matmul_{29+(53*i)}", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True]} )) - 
compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"matmul_{33+(53*i)}", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"matmul_{41+(53*i)}", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"matmul_{47+(53*i)}", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="add", name_regex_match=f"add_{90+(53*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Float16_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="add", name_regex_match=f"add_{51+(53*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: 
[pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="fused_op", name_regex_match=f"_fused_op_{0+(9*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Float16_b, True], 2: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Float16_b, True], 2: [forge._C.DataFormat.Float16_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="fused_op", name_regex_match=f"_fused_op_{1+(9*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="fused_op", name_regex_match=f"_fused_op_{2+(9*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Float16_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="fused_op", name_regex_match=f"_fused_op_{3+(9*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Float16_b, True], 1: [pybuda._C.DataFormat.Float16_b, True], 2: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Float16_b, True], 1: [forge._C.DataFormat.Float16_b, True], 2: [forge._C.DataFormat.Float16_b, True]} )) - 
compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="fused_op", name_regex_match=f"_fused_op_{4+(9*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="fused_op", name_regex_match=f"_fused_op_{5+(9*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Float16_b, True], 2: [pybuda._C.DataFormat.Float16_b, True], 3: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Float16_b, True], 2: [forge._C.DataFormat.Float16_b, True], 3: [forge._C.DataFormat.Float16_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="fused_op", name_regex_match=f"_fused_op_{6+(9*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Float16_b, True], 1: [pybuda._C.DataFormat.Float16_b, True], 2: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Float16_b, True], 1: [forge._C.DataFormat.Float16_b, True], 2: [forge._C.DataFormat.Float16_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="fused_op", name_regex_match=f"_fused_op_{7+(9*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="fused_op", 
name_regex_match=f"_fused_op_{8+(9*i)}", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Float16_b, True], 2: [pybuda._C.DataFormat.Float16_b, True], 3: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Float16_b, True], 2: [forge._C.DataFormat.Float16_b, True], 3: [forge._C.DataFormat.Float16_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"softmax_{18+(53*i)}.dc.reduce_sum.1.lc1", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"layernorm_{38+(53*i)}.dc.reduce_sum.0.lc1", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Float16_b, True], 1: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Float16_b, True], 1: [forge._C.DataFormat.Float16_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"layernorm_{38+(53*i)}.dc.multiply.4", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"layernorm_{38+(53*i)}.dc.reduce_sum.5.lc1", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Float16_b, True], 1: [pybuda._C.DataFormat.Float16_b, True]} + 
math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Float16_b, True], 1: [forge._C.DataFormat.Float16_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"layernorm_{52+(53*i)}.dc.reduce_sum.0.lc1", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Float16_b, True], 1: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Float16_b, True], 1: [forge._C.DataFormat.Float16_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"layernorm_{52+(53*i)}.dc.multiply.4", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True]} )) - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match=f"layernorm_{52+(53*i)}.dc.reduce_sum.5.lc1", - math_fidelity=pybuda.MathFidelity.HiFi3, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Float16_b, True], 1: [pybuda._C.DataFormat.Float16_b, True]} + math_fidelity=forge.MathFidelity.HiFi3, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Float16_b, True], 1: [forge._C.DataFormat.Float16_b, True]} )) # Set data format for LM head - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="matmul", name_regex_match="matmul_1274", - math_fidelity=pybuda.MathFidelity.LoFi, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True], 1: [pybuda._C.DataFormat.Bfp8_b, True], 2: [pybuda._C.DataFormat.Bfp8_b, True]} + math_fidelity=forge.MathFidelity.LoFi, + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True], 1: [forge._C.DataFormat.Bfp8_b, True], 2: [forge._C.DataFormat.Bfp8_b, True]} )) # Set data format for final output - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + 
compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="nop", name_regex_match="matmul_1274_output_nop_0", - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True]} + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True]} )) # Set data format for additional buffers for i in range(24): - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="nop", name_regex_match=f"buffer_0__fused_op_{0+(9*i)}__fused_op_{2+(9*i)}", - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True]} + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True]} )) for i in range(24): - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="nop", name_regex_match=f"buffer_0__fused_op_{5+(9*i)}_add_{51+(53*i)}", - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Bfp8_b, - intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True]} + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Bfp8_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True]} )) # Mixed precision settings below @@ -805,13 +805,13 @@ def df_overrides(): ] for buffer in encoder_output_list: - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="nop", name_regex_match=buffer, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Float16_b, True]} + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Float16_b, True]} )) # Set data format for attention mask buffer @@ -843,13 +843,13 @@ def df_overrides(): ] for buffer in attention_buffer_list: - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="nop", name_regex_match=buffer, - output_df=pybuda._C.DataFormat.Float16_b, - accumulate_df=pybuda._C.DataFormat.Float16_b, - intermediate_df=pybuda._C.DataFormat.Float16_b, - input_df= {0: [pybuda._C.DataFormat.Float16_b, True]} + output_df=forge._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, + intermediate_df=forge._C.DataFormat.Float16_b, + input_df= {0: [forge._C.DataFormat.Float16_b, True]} )) # Set data format for input buffer @@ -858,17 +858,17 @@ def df_overrides(): ] for buffer in input_buffer_list: - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="nop", name_regex_match=buffer, - output_df=pybuda._C.DataFormat.Bfp8_b, - accumulate_df=pybuda._C.DataFormat.Bfp8_b, - 
intermediate_df=pybuda._C.DataFormat.Bfp8_b, - input_df= {0: [pybuda._C.DataFormat.Bfp8_b, True]} + output_df=forge._C.DataFormat.Bfp8_b, + accumulate_df=forge._C.DataFormat.Bfp8_b, + intermediate_df=forge._C.DataFormat.Bfp8_b, + input_df= {0: [forge._C.DataFormat.Bfp8_b, True]} )) def op_overrides(parameters): - config = pybuda.config + config = forge.config compiler_cfg = _get_global_compiler_config() for i in range(parameters["num_encoders"]): @@ -966,13 +966,13 @@ def op_overrides(parameters): config.override_op_placement(op_name=f"buffer_0__fused_op_{5+9*i}_add_{51+53*i}", start=[8, 6]) def apply_overrides(parameters): - pybuda.set_configuration_options( - math_fidelity=pybuda.MathFidelity.HiFi3, + forge.set_configuration_options( + math_fidelity=forge.MathFidelity.HiFi3, backend_opt_level=4, enable_auto_fusing=True, enable_auto_transposing_placement=False, - accumulate_df=pybuda._C.DataFormat.Float16_b, - #performance_trace=pybuda.PerfTraceLevel.LIGHT + accumulate_df=forge._C.DataFormat.Float16_b, + #performance_trace=forge.PerfTraceLevel.LIGHT ) if parameters["num_chips"] >= 32: @@ -995,33 +995,33 @@ def apply_overrides(parameters): def main(parameters): # Apply environment variables - os.environ['PYBUDA_EXP_APPROX'] = '1' - os.environ['PYBUDA_FUSE_OPS'] = '1' - os.environ['PYBUDA_NLP_MANUAL_TARGET'] = '185000' - os.environ['PYBUDA_DISABLE_DRAM0'] = '1' - os.environ['PYBUDA_FORCE_INTERMED_TO_OUTPUT_DF'] = '1' - os.environ['PYBUDA_EXTRA_L1_MARGIN'] = '131072' - os.environ['PYBUDA_DISABLE_FORK_JOIN_NOPS'] = '1' + os.environ['FORGE_EXP_APPROX'] = '1' + os.environ['FORGE_FUSE_OPS'] = '1' + os.environ['FORGE_NLP_MANUAL_TARGET'] = '185000' + os.environ['FORGE_DISABLE_DRAM0'] = '1' + os.environ['FORGE_FORCE_INTERMED_TO_OUTPUT_DF'] = '1' + os.environ['FORGE_EXTRA_L1_MARGIN'] = '131072' + os.environ['FORGE_DISABLE_FORK_JOIN_NOPS'] = '1' os.environ['ENABLE_ETH_SERIALIZATON'] = '1' os.environ['TT_BACKEND_PUSH_TIMEOUT'] = '500' os.environ['TT_BACKEND_TIMEOUT'] = '500' os.environ['TT_BACKEND_GET_TIMEOUT'] = '500' os.environ['TT_BACKEND_POP_TIMEOUT'] = '500' - os.environ['PYBUDA_MIN_MATMUL_BUFFER_ALLOW_IN1'] = '1' - os.environ['PYBUDA_FUSE_STOP_ON_RECIPROCAL'] = '1' - os.environ['PYBUDA_FUSE_MATMUL_GELU'] = '1' - os.environ['PYBUDA_DISABLE_STABLE_SOFTMAX'] = '1' - os.environ['PYBUDA_DISABLE_DYNAMIC_DRAM'] = '1' + os.environ['FORGE_MIN_MATMUL_BUFFER_ALLOW_IN1'] = '1' + os.environ['FORGE_FUSE_STOP_ON_RECIPROCAL'] = '1' + os.environ['FORGE_FUSE_MATMUL_GELU'] = '1' + os.environ['FORGE_DISABLE_STABLE_SOFTMAX'] = '1' + os.environ['FORGE_DISABLE_DYNAMIC_DRAM'] = '1' input_sleep_timer = 0 if parameters["num_chips"] == 1: input_sleep_timer = 1 if parameters["num_chips"] >= 32: - os.environ['PYBUDA_MICROBATCH_LOOPING'] = '1' + os.environ['FORGE_MICROBATCH_LOOPING'] = '1' if parameters["num_chips"] > 1 and parameters["num_chips"] % 2 != 0: - os.environ['PYBUDA_NEBULA_GALAXY_PLACER'] = '1' + os.environ['FORGE_NEBULA_GALAXY_PLACER'] = '1' # Update parameters object with cached file locations parameters["raw_file"] = ci_files['raw_file'] @@ -1094,21 +1094,21 @@ def main(parameters): else: galaxy_chips = [0] - tt0 = pybuda.TTDevice('tt0', - module=pybuda.PyTorchModule("bert_squad", model), + tt0 = forge.TTDevice('tt0', + module=forge.PyTorchModule("bert_squad", model), chip_ids=galaxy_chips, - fp32_fallback=pybuda.DataFormat.Float16, - devtype=pybuda.BackendType.Silicon, - arch=pybuda.BackendDevice.Wormhole_B0) + fp32_fallback=forge.DataFormat.Float16, + devtype=forge.BackendType.Silicon, + 
arch=forge.BackendDevice.Wormhole_B0) compiler_cfg = _get_global_compiler_config() - pybuda.config.set_configuration_options(default_df_override=pybuda.DataFormat.Float16_b) + forge.config.set_configuration_options(default_df_override=forge.DataFormat.Float16_b) # Apply overrides apply_overrides(parameters) - output_queue = pybuda.initialize_pipeline(training=False, + output_queue = forge.initialize_pipeline(training=False, sample_inputs=(embeddings[0].expand(batch_size, *embeddings[0].size()[1:]).to(dtype=torch.float32), attention_masks[0].expand(batch_size, *attention_masks[0].size()[1:]).to(dtype=torch.float32)), microbatch_count=batch_size) @@ -1134,7 +1134,7 @@ def main(parameters): print('*' * len(start_str)) print(start_str) print('*' * len(start_str)) - pybuda.sync() + forge.sync() # Run inference # ----------------------------------------------------------------------- @@ -1197,7 +1197,7 @@ def pop_outputs_thread(): # Update a rudimentary progress bar with the current rate/s and then '=' for each input for the current loop pre = '\r' if parameters["quiet"] else '' end = '' if parameters["quiet"] else '\n' - latency_str = f"{mean_latency_ms:3.0f} ms " if parameters["pybuda_latency"] else " " + latency_str = f"{mean_latency_ms:3.0f} ms " if parameters["forge_latency"] else " " print(f"{pre}{loop_name} @ {inputs_per_second:4.0f} seq/s, {latency_str}|{'=' * (i + 1)}{' ' * (num_batches - i - 1)}|", end=end, flush=True) if loop == 0: # only record outputs for one loop to calculate EM + F1 @@ -1205,7 +1205,7 @@ def pop_outputs_thread(): end_logits.extend(output[:, :, 1].detach().cpu().tolist()) break except queue.Empty as _: - if pybuda.error_raised(): + if forge.error_raised(): print(" * Aborting output thread due to error") return if parameters["quiet"]: @@ -1214,7 +1214,7 @@ def pop_outputs_thread(): output_thread = threading.Thread(target=pop_outputs_thread) output_thread.start() - pybuda.sync() + forge.sync() input_thread = threading.Thread(target=push_inputs_thread) input_thread.start() @@ -1224,13 +1224,13 @@ def pop_outputs_thread(): #for _ in range(parameters["loops"] + 1): # add a warmup loop # for _ in range(num_batches): - # pybuda.run_forward(input_count=1) - pybuda.run_forward(input_count=(num_batches * (parameters["loops"] + 1))) + # forge.run_forward(input_count=1) + forge.run_forward(input_count=(num_batches * (parameters["loops"] + 1))) input_thread.join() output_thread.join() - pybuda.sync() + forge.sync() end_time = time.time() @@ -1327,7 +1327,7 @@ def test_bert_squad(data_type, seq_max_length, batch_size, n_best_size, max_answ parameters["loops"] = int(loops.replace("loops", "")) - 1 # Reduce loop count by one since a warmup loop is added by default parameters["wait_for_user"] = False parameters["quiet"] = False - parameters["pybuda_latency"] = False + parameters["forge_latency"] = False parameters["num_chips"] = int(num_chips.replace("chip", "")) parameters["num_encoders"] = int(encoders.replace("encoder", "")) @@ -1388,8 +1388,8 @@ def test_bert_squad(data_type, seq_max_length, batch_size, n_best_size, max_answ action="store_true" ) parser.add_argument( - "--pybuda_latency", - help="Show latency including time through pybuda queues (higher than hw latency).", + "--forge_latency", + help="Show latency including time through forge queues (higher than hw latency).", action="store_true" ) parser.add_argument( diff --git a/pybuda/test/galaxy/bert/squad_preprocessing/evaluate-v1.1.py b/forge/test/galaxy/bert/squad_preprocessing/evaluate-v1.1.py similarity index 100% 
rename from pybuda/test/galaxy/bert/squad_preprocessing/evaluate-v1.1.py rename to forge/test/galaxy/bert/squad_preprocessing/evaluate-v1.1.py diff --git a/pybuda/test/galaxy/bert/squad_preprocessing/helpers/__init__.py b/forge/test/galaxy/bert/squad_preprocessing/helpers/__init__.py similarity index 100% rename from pybuda/test/galaxy/bert/squad_preprocessing/helpers/__init__.py rename to forge/test/galaxy/bert/squad_preprocessing/helpers/__init__.py diff --git a/pybuda/test/galaxy/bert/squad_preprocessing/helpers/data_processing.py b/forge/test/galaxy/bert/squad_preprocessing/helpers/data_processing.py similarity index 100% rename from pybuda/test/galaxy/bert/squad_preprocessing/helpers/data_processing.py rename to forge/test/galaxy/bert/squad_preprocessing/helpers/data_processing.py diff --git a/pybuda/test/galaxy/bert/squad_preprocessing/helpers/tokenization.py b/forge/test/galaxy/bert/squad_preprocessing/helpers/tokenization.py similarity index 100% rename from pybuda/test/galaxy/bert/squad_preprocessing/helpers/tokenization.py rename to forge/test/galaxy/bert/squad_preprocessing/helpers/tokenization.py diff --git a/pybuda/test/galaxy/conftest.py b/forge/test/galaxy/conftest.py similarity index 100% rename from pybuda/test/galaxy/conftest.py rename to forge/test/galaxy/conftest.py diff --git a/pybuda/test/galaxy/one_shelf_eth_connections.yaml b/forge/test/galaxy/one_shelf_eth_connections.yaml similarity index 100% rename from pybuda/test/galaxy/one_shelf_eth_connections.yaml rename to forge/test/galaxy/one_shelf_eth_connections.yaml diff --git a/pybuda/test/galaxy/one_shelf_runtime_params.yaml b/forge/test/galaxy/one_shelf_runtime_params.yaml similarity index 99% rename from pybuda/test/galaxy/one_shelf_runtime_params.yaml rename to forge/test/galaxy/one_shelf_runtime_params.yaml index 14276454d..efe5edb17 100644 --- a/pybuda/test/galaxy/one_shelf_runtime_params.yaml +++ b/forge/test/galaxy/one_shelf_runtime_params.yaml @@ -87,7 +87,7 @@ arch_level_params: system_level_params: system-device-chip_locations: 28,2,7,0,0,-27,1,7,0,0,-26,0,7,0,0,-6,0,6,0,0,-7,0,5,0,0,-8,0,4,0,0,-9,0,3,0,0,-10,0,2,0,0,-11,0,1,0,0,-12,0,0,0,0,-3,2,5,0,0,-29,2,6,0,0,-0,3,5,0,0,-4,1,5,0,0,-30,3,6,0,0,-1,3,4,0,0,-5,1,6,0,0,-31,3,7,0,0,-2,2,4,0,0,-13,1,0,0,0,-14,1,1,0,0,-15,1,2,0,0,-16,1,3,0,0,-17,1,4,0,0,-18,2,3,0,0,-19,2,2,0,0,-20,2,1,0,0,-21,2,0,0,0,-22,3,0,0,0,-23,3,1,0,0,-24,3,2,0,0,-25,3,3,0,0,- system-device-chips_with_mmio: 0- - system-device-cluster_descriptor: pybuda/test/galaxy/one_shelf_eth_connections.yaml + system-device-cluster_descriptor: forge/test/galaxy/one_shelf_eth_connections.yaml system-device-ethernet_connections: 
23,4,20,12,-23,5,20,13,-23,6,20,14,-23,7,20,15,-23,0,22,0,-23,1,22,1,-23,2,22,2,-23,3,22,3,-23,8,24,8,-23,9,24,9,-23,10,24,10,-23,11,24,11,-24,4,19,12,-24,5,19,13,-24,6,19,14,-24,7,19,15,-24,8,23,8,-24,9,23,9,-24,10,23,10,-24,11,23,11,-24,0,25,0,-24,1,25,1,-24,2,25,2,-24,3,25,3,-19,12,24,4,-19,11,20,11,-19,10,20,10,-19,15,24,7,-19,2,18,2,-19,14,24,6,-19,1,18,1,-19,13,24,5,-19,0,18,0,-19,4,15,12,-19,5,15,13,-19,6,15,14,-19,7,15,15,-19,3,18,3,-19,8,20,8,-19,9,20,9,-1,0,0,0,-1,1,0,1,-1,2,0,2,-1,3,0,3,-1,4,2,12,-1,5,2,13,-1,6,2,14,-1,7,2,15,-1,8,25,8,-1,9,25,9,-1,10,25,10,-1,11,25,11,-30,8,0,8,-30,9,0,9,-30,10,0,10,-30,11,0,11,-30,4,29,12,-30,5,29,13,-30,6,29,14,-30,7,29,15,-30,0,31,0,-30,1,31,1,-30,2,31,2,-30,3,31,3,-17,7,8,15,-17,6,8,14,-17,5,8,13,-17,3,16,3,-17,2,16,2,-17,15,2,7,-17,1,16,1,-17,14,2,6,-17,12,2,4,-17,13,2,5,-17,0,16,0,-17,8,4,8,-17,9,4,9,-17,10,4,10,-17,11,4,11,-17,4,8,12,-4,8,17,8,-4,7,7,15,-4,6,7,14,-4,11,17,11,-4,10,17,10,-4,9,17,9,-4,12,3,4,-4,13,3,5,-4,0,5,0,-4,14,3,6,-4,1,5,1,-4,15,3,7,-4,2,5,2,-4,3,5,3,-4,4,7,12,-4,5,7,13,-25,8,1,8,-25,9,1,9,-25,10,1,10,-25,11,1,11,-25,4,18,12,-25,5,18,13,-25,6,18,14,-25,7,18,15,-25,0,24,0,-25,1,24,1,-25,2,24,2,-25,3,24,3,-7,12,4,4,-7,13,4,5,-7,0,6,0,-7,14,4,6,-7,1,6,1,-7,15,4,7,-7,2,6,2,-7,3,6,3,-7,8,8,8,-7,9,8,9,-7,10,8,10,-7,11,8,11,-6,12,5,4,-6,13,5,5,-6,0,7,0,-6,14,5,6,-6,1,7,1,-6,15,5,7,-6,2,7,2,-6,3,7,3,-6,8,26,8,-6,9,26,9,-6,10,26,10,-6,11,26,11,-22,4,21,12,-22,5,21,13,-22,6,21,14,-22,7,21,15,-22,0,23,0,-22,1,23,1,-22,2,23,2,-22,3,23,3,-2,8,18,8,-2,7,17,15,-2,6,17,14,-2,11,18,11,-2,10,18,10,-2,9,18,9,-2,12,1,4,-2,13,1,5,-2,0,3,0,-2,14,1,6,-2,1,3,1,-2,15,1,7,-2,2,3,2,-2,3,3,3,-2,4,17,12,-2,5,17,13,-31,4,28,12,-31,5,28,13,-31,6,28,14,-31,7,28,15,-31,0,30,0,-31,1,30,1,-31,2,30,2,-31,3,30,3,-28,4,27,12,-28,5,27,13,-28,6,27,14,-28,7,27,15,-28,0,29,0,-28,13,31,5,-28,1,29,1,-28,14,31,6,-28,2,29,2,-28,15,31,7,-28,3,29,3,-28,12,31,4,-18,12,25,4,-18,3,19,3,-18,2,19,2,-18,15,25,7,-18,14,25,6,-18,1,19,1,-18,13,25,5,-18,0,19,0,-18,8,2,8,-18,9,2,9,-18,10,2,10,-18,11,2,11,-18,4,16,12,-18,5,16,13,-18,6,16,14,-18,7,16,15,-5,12,29,4,-5,11,27,11,-5,10,27,10,-5,15,29,7,-5,2,4,2,-5,14,29,6,-5,1,4,1,-5,13,29,5,-5,0,4,0,-5,3,4,3,-5,4,6,12,-5,5,6,13,-5,6,6,14,-5,7,6,15,-5,8,27,8,-5,9,27,9,-29,12,30,4,-29,3,28,3,-29,2,28,2,-29,15,30,7,-29,14,30,6,-29,1,28,1,-29,13,30,5,-29,0,28,0,-29,8,3,8,-29,9,3,9,-29,10,3,10,-29,11,3,11,-29,4,5,12,-29,5,5,13,-29,6,5,14,-29,7,5,15,-0,0,1,0,-0,1,1,1,-0,2,1,2,-0,3,1,3,-0,4,3,12,-0,5,3,13,-0,6,3,14,-0,7,3,15,-0,8,30,8,-0,9,30,9,-0,10,30,10,-0,11,30,11,-3,8,29,8,-3,7,4,15,-3,6,4,14,-3,11,29,11,-3,10,29,10,-3,9,29,9,-3,12,0,4,-3,13,0,5,-3,0,2,0,-3,14,0,6,-3,1,2,1,-3,15,0,7,-3,2,2,2,-3,3,2,3,-3,4,4,12,-3,5,4,13,-27,8,5,8,-27,9,5,9,-27,10,5,10,-27,11,5,11,-27,4,26,12,-27,5,26,13,-27,6,26,14,-27,7,26,15,-27,12,28,4,-27,13,28,5,-27,14,28,6,-27,15,28,7,-26,8,6,8,-26,9,6,9,-26,10,6,10,-26,11,6,11,-26,12,27,4,-26,13,27,5,-26,14,27,6,-26,15,27,7,-8,8,7,8,-8,9,7,9,-8,10,7,10,-8,11,7,11,-8,0,9,0,-8,13,17,5,-8,1,9,1,-8,14,17,6,-8,2,9,2,-8,15,17,7,-8,3,9,3,-8,12,17,4,-9,0,8,0,-9,13,16,5,-9,1,8,1,-9,14,16,6,-9,2,8,2,-9,15,16,7,-9,3,8,3,-9,8,10,8,-9,9,10,9,-9,10,10,10,-9,11,10,11,-9,12,16,4,-10,8,9,8,-10,9,9,9,-10,10,9,10,-10,11,9,11,-10,0,11,0,-10,13,15,5,-10,1,11,1,-10,14,15,6,-10,2,11,2,-10,15,15,7,-10,3,11,3,-10,12,15,4,-16,12,18,4,-16,3,17,3,-16,2,17,2,-16,15,18,7,-16,14,18,6,-16,1,17,1,-16,13,18,5,-16,0,17,0,-16,4,9,12,-16,5,9,13,-16,6,9,14,-16,7,9,15,-16,8,15,8,-16,9,15,9,-16,10,15,10,-16,11,15,11,-11,0,10,0,-11,13,14,5,-11,1,10,
1,-11,14,14,6,-11,2,10,2,-11,15,14,7,-11,3,10,3,-11,8,12,8,-11,9,12,9,-11,10,12,10,-11,11,12,11,-11,12,14,4,-15,12,19,4,-15,11,16,11,-15,10,16,10,-15,15,19,7,-15,2,14,2,-15,14,19,6,-15,1,14,1,-15,13,19,5,-15,0,14,0,-15,4,10,12,-15,5,10,13,-15,6,10,14,-15,7,10,15,-15,3,14,3,-15,8,16,8,-15,9,16,9,-12,8,11,8,-12,9,11,9,-12,10,11,10,-12,11,11,11,-12,12,13,4,-12,13,13,5,-12,14,13,6,-12,15,13,7,-14,12,20,4,-14,3,15,3,-14,2,15,2,-14,15,20,7,-14,14,20,6,-14,1,15,1,-14,13,20,5,-14,0,15,0,-14,4,11,12,-14,5,11,13,-14,6,11,14,-14,7,11,15,-14,8,13,8,-14,9,13,9,-14,10,13,10,-14,11,13,11,-13,4,12,12,-13,5,12,13,-13,6,12,14,-13,7,12,15,-13,8,14,8,-13,9,14,9,-13,10,14,10,-13,11,14,11,-13,12,21,4,-13,13,21,5,-13,14,21,6,-13,15,21,7,-21,4,13,12,-21,5,13,13,-21,6,13,14,-21,7,13,15,-21,0,20,0,-21,13,22,5,-21,1,20,1,-21,14,22,6,-21,2,20,2,-21,15,22,7,-21,3,20,3,-21,12,22,4,-20,12,23,4,-20,3,21,3,-20,2,21,2,-20,15,23,7,-20,14,23,6,-20,1,21,1,-20,13,23,5,-20,0,21,0,-20,4,14,12,-20,5,14,13,-20,6,14,14,-20,7,14,15,-20,8,19,8,-20,9,19,9,-20,10,19,10,-20,11,19,11,- system-device-num_mmio_devices: 32 system-device-number_of_chips: 32 diff --git a/pybuda/test/galaxy/test_galaxy_bert_demo.py b/forge/test/galaxy/test_galaxy_bert_demo.py similarity index 77% rename from pybuda/test/galaxy/test_galaxy_bert_demo.py rename to forge/test/galaxy/test_galaxy_bert_demo.py index b047ad882..00b7e978a 100644 --- a/pybuda/test/galaxy/test_galaxy_bert_demo.py +++ b/forge/test/galaxy/test_galaxy_bert_demo.py @@ -12,24 +12,24 @@ from transformers import BertForQuestionAnswering -import pybuda -import pybuda.op -from pybuda import ( +import forge +import forge.op +from forge import ( Tensor, CompilerConfig, DataFormat, PyTorchModule, ) -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind -from pybuda._C.backend_api import BackendDevice, BackendType +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind +from forge._C.backend_api import BackendDevice, BackendType from test.common import compile, device, ModuleBuilder -from pybuda.config import _get_global_compiler_config, _clear_global_compiler_config -from pybuda.ttdevice import get_device_config +from forge.config import _get_global_compiler_config, _clear_global_compiler_config +from forge.ttdevice import get_device_config from test_galaxy_unit_tests import ( get_two_chip_op_tests, get_galaxy_chip_adjacency_list, get_chip_ids_for_galaxy, - reset_pybuda_between_tests, + reset_forge_between_tests, ) from loguru import logger @@ -62,14 +62,14 @@ def forward(self, hidden_state, attention_mask): ) def set_bert_demo_env_settings(): - os.environ['PYBUDA_EXP_APPROX'] = '1' - os.environ['PYBUDA_FUSE_OPS'] = '1' - #os.environ['PYBUDA_NO_FUSE_MATMUL_BIAS'] = '1' - #os.environ['PYBUDA_NO_FUSE_MATMUL_GELU'] = '1' - os.environ['PYBUDA_NLP_MANUAL_TARGET'] = '185000' - os.environ['PYBUDA_DISABLE_DRAM0'] = '1' - os.environ['PYBUDA_EXTRA_L1_MARGIN'] = '131072' - os.environ['PYBUDA_DISABLE_FORK_JOIN_NOPS'] = '1' + os.environ['FORGE_EXP_APPROX'] = '1' + os.environ['FORGE_FUSE_OPS'] = '1' + #os.environ['FORGE_NO_FUSE_MATMUL_BIAS'] = '1' + #os.environ['FORGE_NO_FUSE_MATMUL_GELU'] = '1' + os.environ['FORGE_NLP_MANUAL_TARGET'] = '185000' + os.environ['FORGE_DISABLE_DRAM0'] = '1' + os.environ['FORGE_EXTRA_L1_MARGIN'] = '131072' + os.environ['FORGE_DISABLE_FORK_JOIN_NOPS'] = '1' os.environ['ENABLE_ETH_SERIALIZATON'] = '1' @@ -89,14 +89,14 @@ def apply_config_overrides(config): for i in range(num_enc): 
config.set_chip_break(f"matmul_{55+53*i}") - pybuda.config.set_configuration_options( - default_df_override=pybuda.DataFormat.Float16_b + forge.config.set_configuration_options( + default_df_override=forge.DataFormat.Float16_b ) - pybuda.set_configuration_options( - math_fidelity=pybuda.MathFidelity.HiFi3, + forge.set_configuration_options( + math_fidelity=forge.MathFidelity.HiFi3, backend_opt_level=3, enable_auto_transposing_placement=True, - accumulate_df=pybuda._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, enable_consteval = False, ) @@ -104,13 +104,13 @@ def apply_config_overrides(config): devtype = test_device.devtype arch = test_device.arch - pybuda.pybuda_reset() + forge.forge_reset() set_bert_demo_env_settings() - apply_config_overrides(pybuda.config) - apply_galaxy_am_buffering(pybuda.config) + apply_config_overrides(forge.config) + apply_galaxy_am_buffering(forge.config) if devtype == BackendType.Golden: - pybuda.set_configuration_options( + forge.set_configuration_options( backend_runtime_params_path = ONE_SHELF_RUNTIME_PARAMS, backend_cluster_descriptor_path = ONE_SHELF_ETH_CONNECTIONS ) @@ -118,7 +118,7 @@ def apply_config_overrides(config): model = BertForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad").to(dtype=torch.float32) model.eval() model = BertEncoderLMHeadWrapper(model) - module = pybuda.PyTorchModule("bert_squad", model) + module = forge.PyTorchModule("bert_squad", model) microbatch = 1 @@ -165,14 +165,14 @@ def apply_config_overrides(config): config.set_chip_break(f"matmul_{41+53*i}") config.set_chip_break(f"matmul_{55+53*i}") - pybuda.config.set_configuration_options( - default_df_override=pybuda.DataFormat.Float16_b + forge.config.set_configuration_options( + default_df_override=forge.DataFormat.Float16_b ) - pybuda.set_configuration_options( - math_fidelity=pybuda.MathFidelity.HiFi3, + forge.set_configuration_options( + math_fidelity=forge.MathFidelity.HiFi3, backend_opt_level=3, enable_auto_transposing_placement=True, - accumulate_df=pybuda._C.DataFormat.Float16_b, + accumulate_df=forge._C.DataFormat.Float16_b, enable_consteval = False, ) @@ -180,13 +180,13 @@ def apply_config_overrides(config): devtype = test_device.devtype arch = test_device.arch - pybuda.pybuda_reset() + forge.forge_reset() set_bert_demo_env_settings() - apply_config_overrides(pybuda.config) - apply_galaxy_am_buffering(pybuda.config) + apply_config_overrides(forge.config) + apply_galaxy_am_buffering(forge.config) if devtype == BackendType.Golden: - pybuda.set_configuration_options( + forge.set_configuration_options( backend_runtime_params_path = TWO_SHELF_RUNTIME_PARAMS, backend_cluster_descriptor_path = TWO_SHELF_ETH_CONNECTIONS ) @@ -194,7 +194,7 @@ def apply_config_overrides(config): model = BertForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad").to(dtype=torch.float32) model.eval() model = BertEncoderLMHeadWrapper(model) - module = pybuda.PyTorchModule("bert_squad", model) + module = forge.PyTorchModule("bert_squad", model) microbatch = 1 diff --git a/pybuda/test/galaxy/test_galaxy_inputs.py b/forge/test/galaxy/test_galaxy_inputs.py similarity index 84% rename from pybuda/test/galaxy/test_galaxy_inputs.py rename to forge/test/galaxy/test_galaxy_inputs.py index c58a25cbb..3c005f5b0 100644 --- a/pybuda/test/galaxy/test_galaxy_inputs.py +++ b/forge/test/galaxy/test_galaxy_inputs.py @@ -9,18 +9,18 @@ import torch import math -import pybuda -from pybuda import ( +import forge +from 
forge import ( Tensor, CompilerConfig, DataFormat, PyTorchModule, ) -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind -from pybuda._C.backend_api import BackendDevice, BackendType +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind +from forge._C.backend_api import BackendDevice, BackendType from test.common import compile, run, device, ModuleBuilder -from pybuda.config import _get_global_compiler_config, _clear_global_compiler_config -from pybuda.ttdevice import get_device_config +from forge.config import _get_global_compiler_config, _clear_global_compiler_config +from forge.ttdevice import get_device_config from test_galaxy_unit_tests import get_galaxy_chip_adjacency_list from loguru import logger @@ -44,9 +44,9 @@ def test_galaxy_large_inputs(): # This test crashes intel CPUs as of Feb 10 def test_large_mb_bert_size_host_input(act): mmio_chip = 0 - pybuda.override_dram_queue_placement("input_0_unary0", chip_id=mmio_chip, channel=0) + forge.override_dram_queue_placement("input_0_unary0", chip_id=mmio_chip, channel=0) - unary0 = pybuda.op.Exp("unary0", act) + unary0 = forge.op.Exp("unary0", act) return unary0 compiler_cfg = _get_global_compiler_config() @@ -72,9 +72,9 @@ def test_large_mb_bert_size_host_input(act): chip_ids = list(galaxy_adjacent_chips.keys()) chip_ids.sort() - pybuda.pybuda_reset() + forge.forge_reset() compiler_cfg.enable_consteval = False - pybuda.set_configuration_options(output_queues_on_host=False) + forge.set_configuration_options(output_queues_on_host=False) module = ModuleBuilder(test_large_mb_bert_size_host_input) inputs_shape = [(256, 1, 1028, 384)] # bert large input shape, 256 microbatch @@ -103,10 +103,10 @@ def get_bandwidth_test(input_chip, dram_channel, verify_cfg): num_inputs=2, ) def test_unary_op_input_on_host(act): - pybuda.override_dram_queue_placement("inputs", chip_id=input_chip, channel=dram_channel) - #pybuda.override_op_size("unary0", (1,1)) + forge.override_dram_queue_placement("inputs", chip_id=input_chip, channel=dram_channel) + #forge.override_op_size("unary0", (1,1)) - unary0 = pybuda.op.Buffer("unary0", act) + unary0 = forge.op.Buffer("unary0", act) return unary0 return test_unary_op_input_on_host @@ -119,9 +119,9 @@ def test_chips(request): @pytest.mark.parametrize("input_shape", [(32, 1, 128, 128), (256, 1, 1028, 384)], ids=["shape1", "shape2"]) @pytest.mark.parametrize("input_df", [torch.float32, torch.float16, torch.bfloat16], ids=["Float32", "Float16", "BFloat16"]) def test_galaxy_bandwidth_sweep(test_chips, input_shape, output_on_host, input_df): - def reset_pybuda_between_tests(): - pybuda.pybuda_reset() - pybuda.set_configuration_options( + def reset_forge_between_tests(): + forge.forge_reset() + forge.set_configuration_options( backend_cluster_descriptor_path=BACKEND_CLUSTER_DESC, output_queues_on_host=output_on_host, enable_consteval=False, @@ -151,7 +151,7 @@ def reset_pybuda_between_tests(): for input_chip in chip_ids: dram_channel = 1 logger.info(f"Running {input_df} bandwidth test with input on chip {input_chip} dram chan {dram_channel}"); - reset_pybuda_between_tests() + reset_forge_between_tests() test = get_bandwidth_test(input_chip, dram_channel, VerifyConfig( test_kind=TestKind.INFERENCE, diff --git a/pybuda/test/galaxy/test_galaxy_multichip.py b/forge/test/galaxy/test_galaxy_multichip.py similarity index 68% rename from pybuda/test/galaxy/test_galaxy_multichip.py rename to forge/test/galaxy/test_galaxy_multichip.py index 1d28f9ccd..ebce3e15c 
100644 --- a/pybuda/test/galaxy/test_galaxy_multichip.py +++ b/forge/test/galaxy/test_galaxy_multichip.py @@ -17,27 +17,27 @@ BertForQuestionAnswering, ) -import pybuda -import pybuda.op -from pybuda import ( +import forge +import forge.op +from forge import ( Tensor, CompilerConfig, DataFormat, PyTorchModule, ) from test.bert.modules import ( - PyBudaBertMHA, - PyBudaBertEncoder, - PyBudaFeedForward, - PyBudaPredictionHeadDecoder, - PyBudaPredictionHeadTransform, + ForgeBertMHA, + ForgeBertEncoder, + ForgeFeedForward, + ForgePredictionHeadDecoder, + ForgePredictionHeadTransform, get_bert_parameters, ) -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind -from pybuda._C.backend_api import BackendDevice, BackendType +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind +from forge._C.backend_api import BackendDevice, BackendType from test.common import compile, device, ModuleBuilder -from pybuda.config import _get_global_compiler_config, _clear_global_compiler_config -from pybuda.ttdevice import get_device_config +from forge.config import _get_global_compiler_config, _clear_global_compiler_config +from forge.ttdevice import get_device_config from loguru import logger @@ -96,7 +96,7 @@ def test_pt_encoder(test_kind, test_device, size, encoder_count, num_chips): _get_global_compiler_config().enable_broadcast_splitting = ( True # fork error workaround ) - pybuda.config.override_op_size( + forge.config.override_op_size( "bw_in0_matmul_128_matmul_1", (1, 2) ) # tenstorrent/budabackend#667 pytest.skip( @@ -128,16 +128,16 @@ def test_pt_encoder(test_kind, test_device, size, encoder_count, num_chips): pcc = 0.9 if test_device.is_silicon() and test_kind.is_training() and size == "large": - # Revert when issue is closed: tenstorrent/pybuda#207 + # Revert when issue is closed: tenstorrent/forge#207 import os - os.environ["PYBUDA_NO_FUSE_MATMUL_BIAS"] = "1" - os.environ["PYBUDA_ENABLE_BROADCAST_SPLITTING"] = "1" + os.environ["FORGE_NO_FUSE_MATMUL_BIAS"] = "1" + os.environ["FORGE_ENABLE_BROADCAST_SPLITTING"] = "1" waive_gradient_errors = {"attention.self.key.bias"} compiler_cfg = _get_global_compiler_config() - pybuda.set_configuration_options( + forge.set_configuration_options( backend_cluster_descriptor_path=eth_connections_file ) @@ -189,7 +189,7 @@ def test_multichip_wormhole_b0_multi_encoder_split_concurrent( compiler_cfg = _get_global_compiler_config() - pybuda.set_configuration_options( + forge.set_configuration_options( backend_cluster_descriptor_path=ONE_SHELF_ETH_CONNECTIONS ) relative_atol, pcc = get_relaxed_atol_pcc( @@ -205,7 +205,7 @@ def test_multichip_wormhole_b0_multi_encoder_split_concurrent( config["encoder_index"] = encoder_index config["passthrough_attn_mask"] = bool(encoder_index != (encoder_count - 1)) - mod = PyBudaBertEncoder(f"encoder{encoder_index}", enc_params, config) + mod = ForgeBertEncoder(f"encoder{encoder_index}", enc_params, config) enc_params[f"reciprocal_of_sqrt_of_head_size_{encoder_index}"].set_value( torch.full((1, 1, 1, 1), 1 / math.sqrt(num_heads)) @@ -246,30 +246,30 @@ def test_galaxy_linked_unary_ops(test_kind, test_device, num_chips): def linked_list_32_chips(act): unary_op_list = [ - pybuda.op.Gelu, - pybuda.op.Log, - pybuda.op.Buffer, - pybuda.op.Exp, - pybuda.op.Sqrt, + forge.op.Gelu, + forge.op.Log, + forge.op.Buffer, + forge.op.Exp, + forge.op.Sqrt, ] - op = pybuda.op.Gelu(op_names[0], act) + op = forge.op.Gelu(op_names[0], act) for i in range(1, num_chips): - pybuda_op = 
unary_op_list[i % len(unary_op_list)] - op = pybuda_op(op_names[i], op) + forge_op = unary_op_list[i % len(unary_op_list)] + op = forge_op(op_names[i], op) return op compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_consteval = False for op in op_names: - pybuda.set_chip_break(op) + forge.set_chip_break(op) if test_device.devtype == BackendType.Golden: - pybuda.set_configuration_options( + forge.set_configuration_options( backend_cluster_descriptor_path=ONE_SHELF_ETH_CONNECTIONS, backend_runtime_params_path=ONE_SHELF_RUNTIME_PARAMS, ) - pybuda.set_configuration_options(accumulate_df=DataFormat.Float32) + forge.set_configuration_options(accumulate_df=DataFormat.Float32) module = ModuleBuilder(linked_list_32_chips) verify_module( @@ -296,38 +296,38 @@ def test_galaxy_linked_8_unaries_per_chip(test_kind, test_device, num_chips): def linked_list_8_unaries_per_chip(act0, act1, act2, act3, act4, act5, act6, act7): unary_op_list = [ - pybuda.op.Gelu, - pybuda.op.Log, - pybuda.op.Buffer, - pybuda.op.Exp, - pybuda.op.Sqrt, + forge.op.Gelu, + forge.op.Log, + forge.op.Buffer, + forge.op.Exp, + forge.op.Sqrt, ] - op0 = pybuda.op.Buffer(op_names[0], act0) - op1 = pybuda.op.Buffer(op_names[1], act1) - op2 = pybuda.op.Buffer(op_names[2], act2) - op3 = pybuda.op.Buffer(op_names[3], act3) - op4 = pybuda.op.Buffer(op_names[4], act4) - op5 = pybuda.op.Buffer(op_names[5], act5) - op6 = pybuda.op.Buffer(op_names[6], act6) - op7 = pybuda.op.Buffer(op_names[7], act7) + op0 = forge.op.Buffer(op_names[0], act0) + op1 = forge.op.Buffer(op_names[1], act1) + op2 = forge.op.Buffer(op_names[2], act2) + op3 = forge.op.Buffer(op_names[3], act3) + op4 = forge.op.Buffer(op_names[4], act4) + op5 = forge.op.Buffer(op_names[5], act5) + op6 = forge.op.Buffer(op_names[6], act6) + op7 = forge.op.Buffer(op_names[7], act7) for i in range(1, num_chips): - pybuda_op = unary_op_list[i % len(unary_op_list)] - op0 = pybuda_op(op_names[MAX_STREAMS_PER_CHAN * i], op0) - op1 = pybuda_op(op_names[MAX_STREAMS_PER_CHAN * i + 1], op1) - op2 = pybuda_op(op_names[MAX_STREAMS_PER_CHAN * i + 2], op2) - op3 = pybuda_op(op_names[MAX_STREAMS_PER_CHAN * i + 3], op3) - op4 = pybuda_op(op_names[MAX_STREAMS_PER_CHAN * i + 4], op4) - op5 = pybuda_op(op_names[MAX_STREAMS_PER_CHAN * i + 5], op5) - op6 = pybuda_op(op_names[MAX_STREAMS_PER_CHAN * i + 6], op6) - op7 = pybuda_op(op_names[MAX_STREAMS_PER_CHAN * i + 7], op7) + forge_op = unary_op_list[i % len(unary_op_list)] + op0 = forge_op(op_names[MAX_STREAMS_PER_CHAN * i], op0) + op1 = forge_op(op_names[MAX_STREAMS_PER_CHAN * i + 1], op1) + op2 = forge_op(op_names[MAX_STREAMS_PER_CHAN * i + 2], op2) + op3 = forge_op(op_names[MAX_STREAMS_PER_CHAN * i + 3], op3) + op4 = forge_op(op_names[MAX_STREAMS_PER_CHAN * i + 4], op4) + op5 = forge_op(op_names[MAX_STREAMS_PER_CHAN * i + 5], op5) + op6 = forge_op(op_names[MAX_STREAMS_PER_CHAN * i + 6], op6) + op7 = forge_op(op_names[MAX_STREAMS_PER_CHAN * i + 7], op7) return op0, op1, op2, op3, op4, op5, op6, op7 compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_consteval = False for i in range(MAX_STREAMS_PER_CHAN, len(op_names), MAX_STREAMS_PER_CHAN): - pybuda.set_chip_break(op_names[i]) + forge.set_chip_break(op_names[i]) - pybuda.set_configuration_options( + forge.set_configuration_options( # backend_cluster_descriptor_path=eth_connections_file, accumulate_df=DataFormat.Float32 ) @@ -362,47 +362,47 @@ def test_galaxy_bert_large_simple_graph_test(test_kind, test_device): # ops match dimensions of cross chip ops in bert large, for debugging 
# essentially, a larger 32 chip test def bert_ops(hidden, attention_q_weights, attention_q_bias, input_5): - pybuda.override_op_size("matmul1", (2, 4)) - pybuda.override_op_size("op1_a", (2, 1)) - matmul1 = pybuda.op.Matmul( + forge.override_op_size("matmul1", (2, 4)) + forge.override_op_size("op1_a", (2, 1)) + matmul1 = forge.op.Matmul( "matmul1", hidden, attention_q_weights, attention_q_bias ) - op1_a = pybuda.op.Gelu("op1_a", matmul1) - op1_b = pybuda.op.Gelu("op1_b", matmul1) - - pybuda.set_chip_break("multiply2") - pybuda.override_op_size("multiply2", (2, 1)) - pybuda.override_op_size("op2_a", (2, 1)) - multiply2 = pybuda.op.Multiply("multiply2", op1_a, op1_b) - op2_a = pybuda.op.Gelu("op2_a", multiply2) - op2_b = pybuda.op.Sqrt("op2_b", multiply2) - - pybuda.set_chip_break("multiply3") - pybuda.override_op_size("multiply3", (2, 1)) - pybuda.override_op_size("op3_a", (2, 1)) - multiply3 = pybuda.op.Multiply("multiply3", op2_a, op2_b) - op3_a = pybuda.op.Gelu("op3_a", multiply3) - op3_b = pybuda.op.Sqrt("op3_b", multiply3) - - pybuda.set_chip_break("multiply4") - pybuda.override_op_size("multiply4", (2, 1)) - pybuda.override_op_size("op4_a", (2, 4)) - multiply4 = pybuda.op.Multiply("multiply4", op3_a, op3_b) - op4_a = pybuda.op.Gelu("op4_a", multiply4) - - pybuda.set_chip_break("multiply4") - pybuda.override_op_size("matmul5", (2, 8)) - pybuda.override_op_size("op5_a", (2, 1)) - matmul5 = pybuda.op.Matmul("matmul5", op4_a, input_5) - op5_a = pybuda.op.Gelu("op5_a", matmul1) - op5_b = pybuda.op.Gelu("op5_b", matmul1) + op1_a = forge.op.Gelu("op1_a", matmul1) + op1_b = forge.op.Gelu("op1_b", matmul1) + + forge.set_chip_break("multiply2") + forge.override_op_size("multiply2", (2, 1)) + forge.override_op_size("op2_a", (2, 1)) + multiply2 = forge.op.Multiply("multiply2", op1_a, op1_b) + op2_a = forge.op.Gelu("op2_a", multiply2) + op2_b = forge.op.Sqrt("op2_b", multiply2) + + forge.set_chip_break("multiply3") + forge.override_op_size("multiply3", (2, 1)) + forge.override_op_size("op3_a", (2, 1)) + multiply3 = forge.op.Multiply("multiply3", op2_a, op2_b) + op3_a = forge.op.Gelu("op3_a", multiply3) + op3_b = forge.op.Sqrt("op3_b", multiply3) + + forge.set_chip_break("multiply4") + forge.override_op_size("multiply4", (2, 1)) + forge.override_op_size("op4_a", (2, 4)) + multiply4 = forge.op.Multiply("multiply4", op3_a, op3_b) + op4_a = forge.op.Gelu("op4_a", multiply4) + + forge.set_chip_break("multiply4") + forge.override_op_size("matmul5", (2, 8)) + forge.override_op_size("op5_a", (2, 1)) + matmul5 = forge.op.Matmul("matmul5", op4_a, input_5) + op5_a = forge.op.Gelu("op5_a", matmul1) + op5_b = forge.op.Gelu("op5_b", matmul1) return matmul5 compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_consteval = False - pybuda.set_configuration_options( + forge.set_configuration_options( backend_cluster_descriptor_path=ONE_SHELF_ETH_CONNECTIONS ) @@ -429,98 +429,98 @@ def bert_ops(hidden, attention_q_weights, attention_q_bias, input_5): def test_galaxy_scan_chip_pairs(scan_chip): def two_chip_simple_unary_to_unary(act): - pybuda.set_chip_break("unary1") + forge.set_chip_break("unary1") - unary0 = pybuda.op.Exp("unary0", act) - unary1 = pybuda.op.Gelu("unary1", unary0) + unary0 = forge.op.Exp("unary0", act) + unary1 = forge.op.Gelu("unary1", unary0) return unary1 def two_chip_eth_gather(act): - pybuda.set_chip_break("unary1") - pybuda.override_op_size("unary0", (2, 2)) - pybuda.override_op_size("unary1", (1, 1)) + forge.set_chip_break("unary1") + forge.override_op_size("unary0", (2, 2)) + 
forge.override_op_size("unary1", (1, 1)) - unary0 = pybuda.op.Exp("unary0", act) - unary1 = pybuda.op.Gelu("unary1", unary0) + unary0 = forge.op.Exp("unary0", act) + unary1 = forge.op.Gelu("unary1", unary0) return unary1 def two_chip_eth_multicast(act0, act1): - pybuda.set_chip_break("unary1") - pybuda.override_op_size("unary0", (2, 1)) - pybuda.override_op_size("unary1", (1, 2)) - pybuda.override_op_size("matmul0", (2, 2)) - - unary0 = pybuda.op.Sqrt("unary0", act0) - unary1 = pybuda.op.Gelu("unary1", act1) - matmul0 = pybuda.op.Matmul("matmul0", unary0, unary1) + forge.set_chip_break("unary1") + forge.override_op_size("unary0", (2, 1)) + forge.override_op_size("unary1", (1, 2)) + forge.override_op_size("matmul0", (2, 2)) + + unary0 = forge.op.Sqrt("unary0", act0) + unary1 = forge.op.Gelu("unary1", act1) + matmul0 = forge.op.Matmul("matmul0", unary0, unary1) return matmul0 def two_chip_eth_gather_multicast(act0, act1): - pybuda.set_chip_break("unary1") - pybuda.override_op_size("unary0", (2, 1)) - pybuda.override_op_size("unary1", (2, 2)) - pybuda.override_op_size("matmul0", (2, 2)) - - unary0 = pybuda.op.Exp("unary0", act0) - unary1 = pybuda.op.Exp("unary1", act1) - matmul0 = pybuda.op.Matmul("matmul0", unary0, unary1) + forge.set_chip_break("unary1") + forge.override_op_size("unary0", (2, 1)) + forge.override_op_size("unary1", (2, 2)) + forge.override_op_size("matmul0", (2, 2)) + + unary0 = forge.op.Exp("unary0", act0) + unary1 = forge.op.Exp("unary1", act1) + matmul0 = forge.op.Matmul("matmul0", unary0, unary1) return matmul0 def two_chip_dram_buf_fork_c0_to_c0c1(act): - pybuda.set_chip_break("unary1") + forge.set_chip_break("unary1") - unary0 = pybuda.op.Gelu("unary0", act) - unary1 = pybuda.op.Exp("unary1", unary0) - return pybuda.op.Add("add", unary1, act) + unary0 = forge.op.Gelu("unary0", act) + unary1 = forge.op.Exp("unary1", unary0) + return forge.op.Add("add", unary1, act) def two_chip_l1_buf_fork_c0_to_c1c1_same_consumer(act): - pybuda.set_chip_break("matmul0") + forge.set_chip_break("matmul0") - unary0 = pybuda.op.Exp("unary0", act) - matmul0 = pybuda.op.Matmul("matmul0", unary0, unary0) + unary0 = forge.op.Exp("unary0", act) + matmul0 = forge.op.Matmul("matmul0", unary0, unary0) return matmul0 def two_chip_binary_inputs_c1_tensix_c1_dram(act0, act1): - pybuda.set_chip_break("unary0") + forge.set_chip_break("unary0") - nop0 = pybuda.op.Buffer("nop0", act0) - unary0 = pybuda.op.Exp("unary0", nop0) - add0 = pybuda.op.Add("add0", unary0, act1) + nop0 = forge.op.Buffer("nop0", act0) + unary0 = forge.op.Exp("unary0", nop0) + add0 = forge.op.Add("add0", unary0, act1) return add0 def two_chip_matmul_inputs_c0_tensix_c1_dram(act0, act1): - pybuda.set_chip_break("multiply0") + forge.set_chip_break("multiply0") - nop0 = pybuda.op.Buffer("nop0", act0) - multiply0 = pybuda.op.Multiply("multiply0", nop0, act1) + nop0 = forge.op.Buffer("nop0", act0) + multiply0 = forge.op.Multiply("multiply0", nop0, act1) return multiply0 def two_chip_binary_inputs_c0_tensix_c1_tensix(act0, act1, act2): - pybuda.set_chip_break("nop0") + forge.set_chip_break("nop0") - add0 = pybuda.op.Add("add0", act0, act1) - nop0 = pybuda.op.Buffer("nop0", act2) - add1 = pybuda.op.Add("add1", add0, nop0) + add0 = forge.op.Add("add0", act0, act1) + nop0 = forge.op.Buffer("nop0", act2) + add1 = forge.op.Add("add1", add0, nop0) return add1 def two_chip_matmul_inputs_c0_tensix_c0_tensix(act0, act1, act2): - pybuda.set_chip_break("multiply0") + forge.set_chip_break("multiply0") - add0 = pybuda.op.Add("add0", act0, act1) - nop0 
= pybuda.op.Buffer("nop0", act2) - multiply0 = pybuda.op.Multiply("multiply0", add0, nop0) + add0 = forge.op.Add("add0", act0, act1) + nop0 = forge.op.Buffer("nop0", act2) + multiply0 = forge.op.Multiply("multiply0", add0, nop0) return multiply0 def two_chip_multi_temporal_unary_to_unary(act): # TODO: placement doesn't work for two non-mmio chips - pybuda.set_chip_break("unary1") - pybuda.set_epoch_break("unary2") - pybuda.set_chip_break("unary3") - - unary0 = pybuda.op.Sqrt("unary0", act) - unary1 = pybuda.op.Gelu("unary1", unary0) - unary2 = pybuda.op.Exp("unary2", unary1) - unary3 = pybuda.op.Log("unary3", unary2) + forge.set_chip_break("unary1") + forge.set_epoch_break("unary2") + forge.set_chip_break("unary3") + + unary0 = forge.op.Sqrt("unary0", act) + unary1 = forge.op.Gelu("unary1", unary0) + unary2 = forge.op.Exp("unary2", unary1) + unary3 = forge.op.Log("unary3", unary2) return unary3 test_list = [ @@ -543,7 +543,7 @@ def two_chip_multi_temporal_unary_to_unary(act): devtype = BackendType.Silicon arch = BackendDevice.Wormhole_B0 compiler_cfg = _get_global_compiler_config() - # pybuda.set_configuration_options( + # forge.set_configuration_options( # backend_cluster_descriptor_path=eth_connections_file # ) @@ -560,7 +560,7 @@ def two_chip_multi_temporal_unary_to_unary(act): device_cfg.get_ethernet_connections() ) # {chip_a: {chan_a: (chip_b, chan_b) ... }... } compiler_cfg.enable_consteval = False - pybuda.set_configuration_options(output_queues_on_host=False) + forge.set_configuration_options(output_queues_on_host=False) galaxy_adjacent_chips = {} for chip_a, channels_a in eth_connections.items(): @@ -585,9 +585,9 @@ def two_chip_multi_temporal_unary_to_unary(act): chip_ids.append(0) chip_ids.sort() - pybuda.pybuda_reset() + forge.forge_reset() compiler_cfg.enable_consteval = False - pybuda.set_configuration_options(output_queues_on_host=False) + forge.set_configuration_options(output_queues_on_host=False) module = ModuleBuilder(test) num_inputs = len(signature(test).parameters) diff --git a/pybuda/test/galaxy/test_galaxy_shelf_setup.py b/forge/test/galaxy/test_galaxy_shelf_setup.py similarity index 86% rename from pybuda/test/galaxy/test_galaxy_shelf_setup.py rename to forge/test/galaxy/test_galaxy_shelf_setup.py index baa8889c2..1e98ede7a 100644 --- a/pybuda/test/galaxy/test_galaxy_shelf_setup.py +++ b/forge/test/galaxy/test_galaxy_shelf_setup.py @@ -10,24 +10,24 @@ import math -import pybuda -import pybuda.op -from pybuda import ( +import forge +import forge.op +from forge import ( Tensor, CompilerConfig, DataFormat, PyTorchModule, ) -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind -from pybuda._C.backend_api import BackendDevice, BackendType +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind +from forge._C.backend_api import BackendDevice, BackendType from test.common import compile, device, ModuleBuilder -from pybuda.config import _get_global_compiler_config, _clear_global_compiler_config -from pybuda.ttdevice import get_device_config +from forge.config import _get_global_compiler_config, _clear_global_compiler_config +from forge.ttdevice import get_device_config from test_galaxy_unit_tests import ( get_two_chip_op_tests, get_galaxy_chip_adjacency_list, get_chip_ids_for_galaxy, - reset_pybuda_between_tests, + reset_forge_between_tests, ) from loguru import logger @@ -86,7 +86,7 @@ def test_galaxy_shelf_connection_modules(test_level, microbatch): test_list = get_two_chip_op_tests(test_level, chip_a, 
chip_b) for test in test_list: chip_ids = get_chip_ids_for_galaxy([chip_a, chip_b]) - reset_pybuda_between_tests() + reset_forge_between_tests() module = ModuleBuilder(test) num_inputs = len(signature(test).parameters) @@ -110,14 +110,14 @@ def test_two_shelves_full_grid_unary_ops(microbatch, test_device): compiler_cfg = _get_global_compiler_config() devtype = test_device.devtype arch = test_device.arch - pybuda.pybuda_reset() + forge.forge_reset() if devtype == BackendType.Golden: - pybuda.set_configuration_options( + forge.set_configuration_options( backend_runtime_params_path=TWO_SHELF_RUNTIME_PARAMS, backend_cluster_descriptor_path=TWO_SHELF_ETH_CONNECTIONS, ) - pybuda.set_configuration_options( + forge.set_configuration_options( output_queues_on_host=False, enable_consteval=False ) @@ -138,15 +138,15 @@ def full_grid_unary_nop(act): f"nop{chip}_{op}" for chip in range(num_chips) for op in range(num_rows) ] for op in op_names: - pybuda.override_op_size(op, (1, num_cols)) + forge.override_op_size(op, (1, num_cols)) - nop = pybuda.op.Buffer(op_names[0], act) + nop = forge.op.Buffer(op_names[0], act) for i in range(1, len(op_names)): # set chip break after one chip is full of ops if i % num_rows == 0: - pybuda.set_chip_break(op_names[i]) - nop = pybuda.op.Buffer(op_names[i], nop) + forge.set_chip_break(op_names[i]) + nop = forge.op.Buffer(op_names[i], nop) return nop chip_ids = TWO_SHELF_GALAXY_IDS @@ -174,14 +174,14 @@ def test_two_shelves_full_eth_link_usage(microbatch, test_device): arch = test_device.arch num_chips = 64 - pybuda.pybuda_reset() + forge.forge_reset() if devtype == BackendType.Golden: - pybuda.set_configuration_options( + forge.set_configuration_options( backend_runtime_params_path=TWO_SHELF_RUNTIME_PARAMS, backend_cluster_descriptor_path=TWO_SHELF_ETH_CONNECTIONS, ) - pybuda.set_configuration_options( + forge.set_configuration_options( output_queues_on_host=False, enable_consteval=False ) @@ -202,15 +202,15 @@ def full_eth_links_unary_nop(act): f"nop{chip}_{op}" for chip in range(num_chips) for op in range(num_rows) ] for op in op_names: - pybuda.override_op_size(op, (1, num_cols)) + forge.override_op_size(op, (1, num_cols)) - nop = pybuda.op.Buffer(op_names[0], act) + nop = forge.op.Buffer(op_names[0], act) for i in range(1, len(op_names)): # set chip break after one chip is full of ops if i % num_rows == 0: - pybuda.set_chip_break(op_names[i]) - nop = pybuda.op.Buffer(op_names[i], nop) + forge.set_chip_break(op_names[i]) + nop = forge.op.Buffer(op_names[i], nop) return nop chip_ids = TWO_SHELF_GALAXY_IDS diff --git a/pybuda/test/galaxy/test_galaxy_unit_tests.py b/forge/test/galaxy/test_galaxy_unit_tests.py similarity index 77% rename from pybuda/test/galaxy/test_galaxy_unit_tests.py rename to forge/test/galaxy/test_galaxy_unit_tests.py index 21b0ff63c..961a5b832 100644 --- a/pybuda/test/galaxy/test_galaxy_unit_tests.py +++ b/forge/test/galaxy/test_galaxy_unit_tests.py @@ -17,27 +17,27 @@ BertForQuestionAnswering, ) -import pybuda -import pybuda.op -from pybuda import ( +import forge +import forge.op +from forge import ( Tensor, CompilerConfig, DataFormat, PyTorchModule, ) from test.bert.modules import ( - PyBudaBertMHA, - PyBudaBertEncoder, - PyBudaFeedForward, - PyBudaPredictionHeadDecoder, - PyBudaPredictionHeadTransform, + ForgeBertMHA, + ForgeBertEncoder, + ForgeFeedForward, + ForgePredictionHeadDecoder, + ForgePredictionHeadTransform, get_bert_parameters, ) -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind -from 
pybuda._C.backend_api import BackendDevice, BackendType +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind +from forge._C.backend_api import BackendDevice, BackendType from test.common import compile, device, ModuleBuilder -from pybuda.config import _get_global_compiler_config, _clear_global_compiler_config -from pybuda.ttdevice import get_device_config +from forge.config import _get_global_compiler_config, _clear_global_compiler_config +from forge.ttdevice import get_device_config from loguru import logger @@ -58,119 +58,119 @@ def get_two_chip_op_tests(subset, c0, c1): # Keep names unique and return same number of outputs as inputs to enable pipelining unit tests def two_chip_simple_unary_to_unary(act): - pybuda.set_chip_break("unary1_A") - pybuda.override_dram_queue_placement("inputs", chip_id=c0, channel=0) + forge.set_chip_break("unary1_A") + forge.override_dram_queue_placement("inputs", chip_id=c0, channel=0) - unary0 = pybuda.op.Exp("unary0_A", act) - unary1 = pybuda.op.Gelu("unary1_A", unary0) + unary0 = forge.op.Exp("unary0_A", act) + unary1 = forge.op.Gelu("unary1_A", unary0) return unary1 def two_chip_eth_gather(act): - pybuda.set_chip_break("unary1_B") - pybuda.override_dram_queue_placement("inputs", chip_id=c0, channel=0) - pybuda.override_op_size("unary0_B", (2, 2)) - pybuda.override_op_size("unary1_B", (1, 1)) + forge.set_chip_break("unary1_B") + forge.override_dram_queue_placement("inputs", chip_id=c0, channel=0) + forge.override_op_size("unary0_B", (2, 2)) + forge.override_op_size("unary1_B", (1, 1)) - unary0 = pybuda.op.Buffer("unary0_B", act) - unary1 = pybuda.op.Gelu("unary1_B", unary0) + unary0 = forge.op.Buffer("unary0_B", act) + unary1 = forge.op.Gelu("unary1_B", unary0) return unary1 def two_chip_eth_multicast(act0, act1): - pybuda.set_chip_break("unary1_C") - pybuda.override_dram_queue_placement("input_0_unary0_C", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement("input_0_unary1_C", chip_id=c1, channel=0) - pybuda.override_op_size("unary0_C", (2, 1)) - pybuda.override_op_size("unary1_C", (1, 2)) - pybuda.override_op_size("matmul0_C", (2, 2)) - - unary0 = pybuda.op.Sqrt("unary0_C", act0) - unary1 = pybuda.op.Gelu("unary1_C", act1) - matmul0 = pybuda.op.Matmul("matmul0_C", unary0, unary1) + forge.set_chip_break("unary1_C") + forge.override_dram_queue_placement("input_0_unary0_C", chip_id=c0, channel=0) + forge.override_dram_queue_placement("input_0_unary1_C", chip_id=c1, channel=0) + forge.override_op_size("unary0_C", (2, 1)) + forge.override_op_size("unary1_C", (1, 2)) + forge.override_op_size("matmul0_C", (2, 2)) + + unary0 = forge.op.Sqrt("unary0_C", act0) + unary1 = forge.op.Gelu("unary1_C", act1) + matmul0 = forge.op.Matmul("matmul0_C", unary0, unary1) return matmul0, unary0 def two_chip_eth_gather_multicast(act0, act1): - pybuda.set_chip_break("unary1_D") - pybuda.override_dram_queue_placement("input_0_unary0_D", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement("input_0_unary1_D", chip_id=c1, channel=0) - pybuda.override_op_size("unary0_D", (2, 1)) - pybuda.override_op_size("unary1_D", (2, 2)) - pybuda.override_op_size("matmul0_D", (2, 2)) - - unary0 = pybuda.op.Gelu("unary0_D", act0) - unary1 = pybuda.op.Buffer("unary1_D", act1) - matmul0 = pybuda.op.Matmul("matmul0_D", unary0, unary1) + forge.set_chip_break("unary1_D") + forge.override_dram_queue_placement("input_0_unary0_D", chip_id=c0, channel=0) + forge.override_dram_queue_placement("input_0_unary1_D", chip_id=c1, channel=0) + 
forge.override_op_size("unary0_D", (2, 1)) + forge.override_op_size("unary1_D", (2, 2)) + forge.override_op_size("matmul0_D", (2, 2)) + + unary0 = forge.op.Gelu("unary0_D", act0) + unary1 = forge.op.Buffer("unary1_D", act1) + matmul0 = forge.op.Matmul("matmul0_D", unary0, unary1) return matmul0, unary1 def two_chip_dram_buf_fork_c0_to_c0c1(act): - pybuda.set_chip_break("unary1_E") - pybuda.override_dram_queue_placement("inputs", chip_id=c0, channel=0) + forge.set_chip_break("unary1_E") + forge.override_dram_queue_placement("inputs", chip_id=c0, channel=0) - unary0 = pybuda.op.Gelu("unary0_E", act) - unary1 = pybuda.op.Buffer("unary1_E", unary0) - add0 = pybuda.op.Add("add_E", unary1, act) + unary0 = forge.op.Gelu("unary0_E", act) + unary1 = forge.op.Buffer("unary1_E", unary0) + add0 = forge.op.Add("add_E", unary1, act) return add0 def two_chip_l1_buf_fork_c0_to_c1c1_same_consumer(act): - pybuda.set_chip_break("add0_F") - pybuda.override_dram_queue_placement("inputs", chip_id=c0, channel=0) + forge.set_chip_break("add0_F") + forge.override_dram_queue_placement("inputs", chip_id=c0, channel=0) - unary0 = pybuda.op.Buffer("unary0_F", act) - add0 = pybuda.op.Add("add0_F", unary0, unary0) + unary0 = forge.op.Buffer("unary0_F", act) + add0 = forge.op.Add("add0_F", unary0, unary0) return add0 def two_chip_binary_inputs_c1_tensix_c1_dram(act0, act1): - pybuda.set_chip_break("unary0_G") - pybuda.override_dram_queue_placement("input_0_nop0_G", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement("input_1_add0_G", chip_id=c1, channel=0) + forge.set_chip_break("unary0_G") + forge.override_dram_queue_placement("input_0_nop0_G", chip_id=c0, channel=0) + forge.override_dram_queue_placement("input_1_add0_G", chip_id=c1, channel=0) - nop0 = pybuda.op.Buffer("nop0_G", act0) - unary0 = pybuda.op.Buffer("unary0_G", nop0) - add0 = pybuda.op.Add("add0_G", unary0, act1) + nop0 = forge.op.Buffer("nop0_G", act0) + unary0 = forge.op.Buffer("unary0_G", nop0) + add0 = forge.op.Add("add0_G", unary0, act1) return add0, nop0 def two_chip_multiply_inputs_c0_tensix_c1_dram(act0, act1): - pybuda.set_chip_break("multiply0_H") - pybuda.override_dram_queue_placement("input_0_nop0_H", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement( + forge.set_chip_break("multiply0_H") + forge.override_dram_queue_placement("input_0_nop0_H", chip_id=c0, channel=0) + forge.override_dram_queue_placement( "input_1_multiply0_H", chip_id=c1, channel=0 ) - nop0 = pybuda.op.Buffer("nop0_H", act0) - multiply0 = pybuda.op.Multiply("multiply0_H", nop0, act1) + nop0 = forge.op.Buffer("nop0_H", act0) + multiply0 = forge.op.Multiply("multiply0_H", nop0, act1) return multiply0, nop0 def two_chip_binary_inputs_c0_tensix_c1_tensix(act0, act1, act2): - pybuda.set_chip_break("nop0_I") - pybuda.override_dram_queue_placement("input_0_add0_I", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement("input_1_add0_I", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement("input_0_nop0_I", chip_id=c1, channel=0) - - add0 = pybuda.op.Add("add0_I", act0, act1) - nop0 = pybuda.op.Buffer("nop0_I", act2) - add1 = pybuda.op.Add("add1_I", add0, nop0) + forge.set_chip_break("nop0_I") + forge.override_dram_queue_placement("input_0_add0_I", chip_id=c0, channel=0) + forge.override_dram_queue_placement("input_1_add0_I", chip_id=c0, channel=0) + forge.override_dram_queue_placement("input_0_nop0_I", chip_id=c1, channel=0) + + add0 = forge.op.Add("add0_I", act0, act1) + nop0 = forge.op.Buffer("nop0_I", act2) + add1 = forge.op.Add("add1_I", add0, 
nop0) return add1, add0, nop0 def two_chip_multiply_inputs_c0_tensix_c0_tensix(act0, act1, act2): - pybuda.set_chip_break("multiply0_J") - pybuda.override_dram_queue_placement("input_0_add0_J", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement("input_1_add0_J", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement("input_0_nop0_J", chip_id=c0, channel=0) - - add0 = pybuda.op.Add("add0_J", act0, act1) - nop0 = pybuda.op.Buffer("nop0_J", act2) - multiply0 = pybuda.op.Multiply("multiply0_J", add0, nop0) + forge.set_chip_break("multiply0_J") + forge.override_dram_queue_placement("input_0_add0_J", chip_id=c0, channel=0) + forge.override_dram_queue_placement("input_1_add0_J", chip_id=c0, channel=0) + forge.override_dram_queue_placement("input_0_nop0_J", chip_id=c0, channel=0) + + add0 = forge.op.Add("add0_J", act0, act1) + nop0 = forge.op.Buffer("nop0_J", act2) + multiply0 = forge.op.Multiply("multiply0_J", add0, nop0) return multiply0, add0, nop0 def two_chip_matmul_inputs_c0_dram_c1_tensix(act0, act1, act2): - pybuda.set_chip_break("add0_K") - pybuda.override_dram_queue_placement("input_0_nop0_K", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement("input_0_nop1_K", chip_id=c0, channel=0) - pybuda.override_dram_queue_placement("input_0_matmul0_K", chip_id=c0, channel=0) - - nop0 = pybuda.op.Buffer("nop0_K", act0) - nop1 = pybuda.op.Buffer("nop1_K", act1) - add0 = pybuda.op.Add("add0_K", nop0, nop1) - matmul0 = pybuda.op.Matmul("matmul0_K", act2, add0) + forge.set_chip_break("add0_K") + forge.override_dram_queue_placement("input_0_nop0_K", chip_id=c0, channel=0) + forge.override_dram_queue_placement("input_0_nop1_K", chip_id=c0, channel=0) + forge.override_dram_queue_placement("input_0_matmul0_K", chip_id=c0, channel=0) + + nop0 = forge.op.Buffer("nop0_K", act0) + nop1 = forge.op.Buffer("nop1_K", act1) + add0 = forge.op.Add("add0_K", nop0, nop1) + matmul0 = forge.op.Matmul("matmul0_K", act2, add0) return matmul0, add0, nop1 if subset == "sanity": @@ -274,9 +274,9 @@ def get_chip_ids_for_galaxy(chip_ids, full_galaxy=False, galaxy_adjacent_chips=N return chip_ids -def reset_pybuda_between_tests(): - pybuda.pybuda_reset() - pybuda.set_configuration_options( +def reset_forge_between_tests(): + forge.forge_reset() + forge.set_configuration_options( backend_cluster_descriptor_path=BACKEND_CLUSTER_DESC, output_queues_on_host=False, enable_consteval=False, @@ -324,7 +324,7 @@ def test_galaxy_scan_chip_pairs(scan_chip, test_level, microbatch): test_list = get_two_chip_op_tests(test_level, chip_a, chip_b) for test in test_list: chip_ids = get_chip_ids_for_galaxy([chip_a, chip_b]) - reset_pybuda_between_tests() + reset_forge_between_tests() module = ModuleBuilder(test) num_inputs = len(signature(test).parameters) @@ -378,7 +378,7 @@ def test_galaxy_two_hop_two_chip_tests(test_level, microbatch): chip_ids = get_chip_ids_for_galaxy( [chip_a, chip_b], True, galaxy_adjacent_chips ) - reset_pybuda_between_tests() + reset_forge_between_tests() module = ModuleBuilder(test) num_inputs = len(signature(test).parameters) @@ -427,7 +427,7 @@ def test_galaxy_four_chip_layouts(microbatch): ) chip_ids = get_chip_ids_for_galaxy(chips, True, galaxy_adjacent_chips) - reset_pybuda_between_tests() + reset_forge_between_tests() tests = create_four_chip_test_modules(chips[0], chips[1], chips[2], chips[3]) modules = [ModuleBuilder(test) for test in tests] @@ -452,11 +452,11 @@ def test_galaxy_dram_buf_fork(microbatch): MAX_BUF_FORK = 8 def dram_buf_fork(act): - 
pybuda.override_dram_queue_placement("input_unary0", chip_id=0, channel=0) + forge.override_dram_queue_placement("input_unary0", chip_id=0, channel=0) for i in range(MAX_BUF_FORK): - pybuda.set_chip_break(f"unary{i}") + forge.set_chip_break(f"unary{i}") - op_list = [pybuda.op.Buffer(f"unary{i}", act) for i in range(MAX_BUF_FORK)] + op_list = [forge.op.Buffer(f"unary{i}", act) for i in range(MAX_BUF_FORK)] return op_list compiler_cfg = _get_global_compiler_config() @@ -484,7 +484,7 @@ def dram_buf_fork(act): for chips in eight_chip_layouts: chip_ids = get_chip_ids_for_galaxy(chips, True, galaxy_adjacent_chips) - reset_pybuda_between_tests() + reset_forge_between_tests() module = ModuleBuilder(dram_buf_fork) inputs_shape = [(microbatch, 1, 64, 64)] @@ -507,13 +507,13 @@ def test_galaxy_l1_buf_fork(microbatch): MAX_BUF_FORK = 8 def l1_buf_fork(act): - pybuda.override_dram_queue_placement("input_unary0", chip_id=0, channel=0) - unary0 = pybuda.op.Gelu("unary0", act) + forge.override_dram_queue_placement("input_unary0", chip_id=0, channel=0) + unary0 = forge.op.Gelu("unary0", act) for i in range(MAX_BUF_FORK): - pybuda.set_chip_break(f"unary{i}") + forge.set_chip_break(f"unary{i}") op_list = [ - pybuda.op.Buffer(f"unary{i}", unary0) for i in range(1, MAX_BUF_FORK) + forge.op.Buffer(f"unary{i}", unary0) for i in range(1, MAX_BUF_FORK) ] return op_list @@ -542,7 +542,7 @@ def l1_buf_fork(act): for chips in eight_chip_layouts: chip_ids = get_chip_ids_for_galaxy(chips, True, galaxy_adjacent_chips) - reset_pybuda_between_tests() + reset_forge_between_tests() module = ModuleBuilder(l1_buf_fork) inputs_shape = [(microbatch, 1, 64, 64)] @@ -588,15 +588,15 @@ def full_grid_unary_nop(act): f"nop{chip}_{op}" for chip in range(num_chips) for op in range(num_rows) ] for op in op_names: - pybuda.override_op_size(op, (1, num_cols)) + forge.override_op_size(op, (1, num_cols)) - nop = pybuda.op.Buffer(op_names[0], act) + nop = forge.op.Buffer(op_names[0], act) for i in range(1, len(op_names)): # set chip break after one chip is full of ops if i % num_rows == 0: - pybuda.set_chip_break(op_names[i]) - nop = pybuda.op.Buffer(op_names[i], nop) + forge.set_chip_break(op_names[i]) + nop = forge.op.Buffer(op_names[i], nop) return nop galaxy_adjacent_chips = get_galaxy_chip_adjacency_list( @@ -606,7 +606,7 @@ def full_grid_unary_nop(act): list(range(num_chips)), True, galaxy_adjacent_chips ) - reset_pybuda_between_tests() + reset_forge_between_tests() module = ModuleBuilder(full_grid_unary_nop) inputs_shape = [(microbatch, 4, 64, num_cols * 32)] @@ -642,8 +642,8 @@ def test_vf_chip(microbatch, scan_chip): num_cols = 8 def large_matmul(act0, act1): - matmul0 = pybuda.op.Matmul("matmul", act0, act1) - pybuda.override_op_size("matmul", (num_rows, num_cols)) + matmul0 = forge.op.Matmul("matmul", act0, act1) + forge.override_op_size("matmul", (num_rows, num_cols)) return matmul0 galaxy_adjacent_chips = get_galaxy_chip_adjacency_list( @@ -661,7 +661,7 @@ def large_matmul(act0, act1): [int(chip)], False, galaxy_adjacent_chips ) - reset_pybuda_between_tests() + reset_forge_between_tests() module = ModuleBuilder(large_matmul) inputs_shape = [(microbatch, 4, num_rows * 32, num_cols * 32), (microbatch, 4, num_rows * 32, num_cols * 32)] diff --git a/pybuda/test/galaxy/test_multichip_golden.py b/forge/test/galaxy/test_multichip_golden.py similarity index 83% rename from pybuda/test/galaxy/test_multichip_golden.py rename to forge/test/galaxy/test_multichip_golden.py index e8648b3f9..42a8ab3ac 100644 --- 
a/pybuda/test/galaxy/test_multichip_golden.py +++ b/forge/test/galaxy/test_multichip_golden.py @@ -17,27 +17,27 @@ BertForQuestionAnswering, ) -import pybuda -import pybuda.op -from pybuda import ( +import forge +import forge.op +from forge import ( Tensor, CompilerConfig, DataFormat, PyTorchModule, ) from test.bert.modules import ( - PyBudaBertMHA, - PyBudaBertEncoder, - PyBudaFeedForward, - PyBudaPredictionHeadDecoder, - PyBudaPredictionHeadTransform, + ForgeBertMHA, + ForgeBertEncoder, + ForgeFeedForward, + ForgePredictionHeadDecoder, + ForgePredictionHeadTransform, get_bert_parameters, ) -from pybuda.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind -from pybuda._C.backend_api import BackendDevice, BackendType +from forge.verify import verify_module, verify_module_pipeline, VerifyConfig, TestKind +from forge._C.backend_api import BackendDevice, BackendType from test.common import compile, device, ModuleBuilder -from pybuda.config import _get_global_compiler_config, _clear_global_compiler_config -from pybuda.ttdevice import get_device_config +from forge.config import _get_global_compiler_config, _clear_global_compiler_config +from forge.ttdevice import get_device_config from loguru import logger @@ -93,14 +93,14 @@ def test_galaxy_two_chip_module(microbatch): chip_a = 31 def two_chip_simple_unary_to_unary(act): - pybuda.set_chip_break("unary1_A") - pybuda.override_dram_queue_placement("inputs", chip_id=chip_a, channel=0) + forge.set_chip_break("unary1_A") + forge.override_dram_queue_placement("inputs", chip_id=chip_a, channel=0) - unary0 = pybuda.op.Exp("unary0_A", act) - unary1 = pybuda.op.Gelu("unary1_A", unary0) + unary0 = forge.op.Exp("unary0_A", act) + unary1 = forge.op.Gelu("unary1_A", unary0) return unary1 - pybuda.set_configuration_options( + forge.set_configuration_options( backend_cluster_descriptor_path=ONE_SHELF_ETH_CONNECTIONS, backend_runtime_params_path=ONE_SHELF_RUNTIME_PARAMS, ) @@ -119,8 +119,8 @@ def two_chip_simple_unary_to_unary(act): for chip_b in galaxy_adjacent_chips[chip_a]: test = two_chip_simple_unary_to_unary chip_ids = get_chip_ids_for_galaxy([chip_a, chip_b]) - pybuda.pybuda_reset() - pybuda.set_configuration_options( + forge.forge_reset() + forge.set_configuration_options( backend_cluster_descriptor_path=ONE_SHELF_ETH_CONNECTIONS, backend_runtime_params_path=ONE_SHELF_RUNTIME_PARAMS, output_queues_on_host=False, diff --git a/pybuda/test/galaxy/two_shelf_eth_connections.yaml b/forge/test/galaxy/two_shelf_eth_connections.yaml similarity index 100% rename from pybuda/test/galaxy/two_shelf_eth_connections.yaml rename to forge/test/galaxy/two_shelf_eth_connections.yaml diff --git a/pybuda/test/galaxy/two_shelf_runtime_params.yaml b/forge/test/galaxy/two_shelf_runtime_params.yaml similarity index 99% rename from pybuda/test/galaxy/two_shelf_runtime_params.yaml rename to forge/test/galaxy/two_shelf_runtime_params.yaml index 9018943cf..e1cf059b2 100644 --- a/pybuda/test/galaxy/two_shelf_runtime_params.yaml +++ b/forge/test/galaxy/two_shelf_runtime_params.yaml @@ -212,7 +212,7 @@ params_to_ignore: system_level_params: system-device-chip_locations: 
58,3,5,0,1,-57,3,6,0,1,-56,3,7,0,1,-55,2,7,0,1,-54,2,6,0,1,-7,0,5,0,0,-6,0,6,0,0,-0,3,5,0,0,-59,3,4,0,1,-13,2,7,0,0,-14,2,6,0,0,-15,3,6,0,0,-16,3,7,0,0,-17,1,7,0,1,-18,1,6,0,1,-19,1,5,0,1,-20,1,4,0,1,-5,1,6,0,0,-63,3,0,0,1,-4,1,5,0,0,-24,0,4,0,0,-62,3,1,0,1,-3,2,5,0,0,-23,0,4,0,1,-61,3,2,0,1,-2,2,4,0,0,-22,0,3,0,1,-60,3,3,0,1,-1,3,4,0,0,-21,1,3,0,1,-28,0,1,0,1,-27,0,1,0,0,-26,0,2,0,0,-25,0,3,0,0,-8,0,5,0,1,-9,0,6,0,1,-10,0,7,0,1,-11,0,7,0,0,-12,1,7,0,0,-29,0,2,0,1,-30,1,2,0,1,-31,1,1,0,1,-32,1,0,0,1,-33,0,0,0,1,-34,0,0,0,0,-35,1,0,0,0,-36,1,1,0,0,-37,1,2,0,0,-38,1,3,0,0,-39,1,4,0,0,-40,2,3,0,0,-41,2,2,0,0,-42,2,1,0,0,-43,2,0,0,0,-44,3,0,0,0,-45,3,1,0,0,-46,3,2,0,0,-47,3,3,0,0,-48,2,0,0,1,-49,2,1,0,1,-50,2,2,0,1,-51,2,3,0,1,-52,2,4,0,1,-53,2,5,0,1,- system-device-chips_with_mmio: 0- - system-device-cluster_descriptor: pybuda/test/galaxy/two_shelf_eth_connections.yaml + system-device-cluster_descriptor: forge/test/galaxy/two_shelf_eth_connections.yaml system-device-ethernet_connections: 13,12,16,4,-13,13,16,5,-13,0,14,0,-13,14,16,6,-13,1,14,1,-13,15,16,7,-13,2,14,2,-13,4,12,12,-13,5,12,13,-13,6,12,14,-13,7,12,15,-13,3,14,3,-16,4,13,12,-16,5,13,13,-16,6,13,14,-16,7,13,15,-16,0,15,0,-16,1,15,1,-16,2,15,2,-16,3,15,3,-22,12,21,4,-22,15,21,7,-22,2,23,2,-22,4,25,4,-22,5,25,5,-22,0,23,0,-22,13,21,5,-22,1,23,1,-22,14,21,6,-22,3,23,3,-22,8,29,8,-22,9,29,9,-22,10,29,10,-22,11,29,11,-33,4,34,4,-33,5,34,5,-33,8,28,8,-33,9,28,9,-33,10,28,10,-33,11,28,11,-33,12,32,4,-33,13,32,5,-33,14,32,6,-33,15,32,7,-0,0,1,0,-0,1,1,1,-0,2,1,2,-0,3,1,3,-0,8,15,8,-0,9,15,9,-0,10,15,10,-0,11,15,11,-0,4,3,12,-0,5,3,13,-0,6,3,14,-0,7,3,15,-17,4,10,12,-17,5,10,13,-17,6,10,14,-17,7,10,15,-17,12,55,4,-17,13,55,5,-17,14,55,6,-17,15,55,7,-17,8,18,8,-17,9,18,9,-17,10,18,10,-17,11,18,11,-49,12,62,4,-49,7,31,15,-49,15,62,7,-49,2,48,2,-49,14,62,6,-49,1,48,1,-49,8,50,8,-49,9,50,9,-49,10,50,10,-49,11,50,11,-49,0,48,0,-49,13,62,5,-49,3,48,3,-49,4,31,12,-49,5,31,13,-55,0,54,0,-55,13,56,5,-55,1,54,1,-55,14,56,6,-55,2,54,2,-55,15,56,7,-55,3,54,3,-55,4,17,12,-55,5,17,13,-55,6,17,14,-55,7,17,15,-55,12,56,4,-41,12,46,4,-41,7,37,15,-41,6,37,14,-41,15,46,7,-41,2,40,2,-41,14,46,6,-41,1,40,1,-41,13,46,5,-41,0,40,0,-41,3,40,3,-41,8,42,8,-41,9,42,9,-41,10,42,10,-41,11,42,11,-41,4,37,12,-41,5,37,13,-54,8,53,8,-54,7,18,15,-54,6,18,14,-54,11,53,11,-54,10,53,10,-54,9,53,9,-54,0,55,0,-54,13,57,5,-54,1,55,1,-54,14,57,6,-54,2,55,2,-54,15,57,7,-54,3,55,3,-54,12,57,4,-54,4,18,12,-54,5,18,13,-9,10,10,10,-9,11,10,11,-9,4,6,4,-9,5,6,5,-9,12,18,4,-9,13,18,5,-9,0,8,0,-9,14,18,6,-9,1,8,1,-9,15,18,7,-9,2,8,2,-9,3,8,3,-9,8,10,8,-9,9,10,9,-38,7,25,15,-38,6,25,14,-38,5,25,13,-38,3,39,3,-38,2,39,2,-38,15,40,7,-38,1,39,1,-38,14,40,6,-38,8,37,8,-38,9,37,9,-38,10,37,10,-38,11,37,11,-38,12,40,4,-38,13,40,5,-38,0,39,0,-38,4,25,12,-6,12,5,4,-6,15,5,7,-6,2,7,2,-6,4,9,4,-6,5,9,5,-6,0,7,0,-6,13,5,5,-6,1,7,1,-6,14,5,6,-6,3,7,3,-6,8,11,8,-6,9,11,9,-6,10,11,10,-6,11,11,11,-35,4,34,12,-35,5,34,13,-35,6,34,14,-35,7,34,15,-35,8,36,8,-35,9,36,9,-35,10,36,10,-35,11,36,11,-35,12,43,4,-35,13,43,5,-35,14,43,6,-35,15,43,7,-19,12,53,4,-19,7,8,15,-19,6,8,14,-19,15,53,7,-19,2,18,2,-19,14,53,6,-19,1,18,1,-19,13,53,5,-19,0,18,0,-19,3,18,3,-19,8,20,8,-19,9,20,9,-19,10,20,10,-19,11,20,11,-19,4,8,12,-19,5,8,13,-34,12,35,4,-34,13,35,5,-34,14,35,6,-34,15,35,7,-34,4,33,4,-34,5,33,5,-34,8,27,8,-34,9,27,9,-34,10,27,10,-34,11,27,11,-32,8,31,8,-32,9,31,9,-32,10,31,10,-32,11,31,11,-32,12,48,4,-32,13,48,5,-32,15,48,7,-32,4,33,12,-32,5,33,13,-32,6,33,14,-32,7,33,15,-31,1,30,1,-31,15,49,7,-31,3,30,3,-31,2,30
,2,-31,8,32,8,-31,9,32,9,-31,10,32,10,-31,11,32,11,-31,4,28,12,-31,5,28,13,-31,6,28,14,-31,7,28,15,-31,12,49,4,-31,13,49,5,-31,0,30,0,-43,0,42,0,-43,13,44,5,-43,1,42,1,-43,14,44,6,-43,2,42,2,-43,15,44,7,-43,3,42,3,-43,4,35,12,-43,5,35,13,-43,6,35,14,-43,7,35,15,-43,12,44,4,-4,12,3,4,-4,7,7,15,-4,6,7,14,-4,15,3,7,-4,2,5,2,-4,14,3,6,-4,1,5,1,-4,13,3,5,-4,0,5,0,-4,8,39,8,-4,9,39,9,-4,10,39,10,-4,11,39,11,-4,3,5,3,-4,4,7,12,-4,5,7,13,-63,4,48,12,-63,5,48,13,-63,6,48,14,-63,7,48,15,-63,0,62,0,-63,1,62,1,-63,2,62,2,-63,3,62,3,-62,4,49,12,-62,5,49,13,-62,6,49,14,-62,7,49,15,-62,8,61,8,-62,9,61,9,-62,10,61,10,-62,11,61,11,-62,0,63,0,-62,1,63,1,-62,2,63,2,-62,3,63,3,-3,4,4,12,-3,12,0,4,-3,11,14,11,-3,7,4,15,-3,6,4,14,-3,5,4,13,-3,0,2,0,-3,13,0,5,-3,1,2,1,-3,14,0,6,-3,2,2,2,-3,15,0,7,-3,3,2,3,-3,8,14,8,-3,9,14,9,-3,10,14,10,-59,0,58,0,-59,1,58,1,-59,2,58,2,-59,3,58,3,-59,4,52,12,-59,5,52,13,-59,6,52,14,-59,7,52,15,-59,8,60,8,-59,9,60,9,-59,10,60,10,-59,11,60,11,-7,12,4,4,-7,15,4,7,-7,2,6,2,-7,0,6,0,-7,13,4,5,-7,1,6,1,-7,14,4,6,-7,3,6,3,-7,4,8,4,-7,5,8,5,-7,8,24,8,-7,9,24,9,-7,10,24,10,-7,11,24,11,-58,4,53,12,-58,5,53,13,-58,6,53,14,-58,7,53,15,-58,0,59,0,-58,1,59,1,-58,2,59,2,-58,3,59,3,-58,8,57,8,-58,9,57,9,-58,10,57,10,-58,11,57,11,-45,0,44,0,-45,1,44,1,-45,2,44,2,-45,3,44,3,-45,8,46,8,-45,9,46,9,-45,10,46,10,-45,11,46,11,-45,4,42,12,-45,5,42,13,-45,6,42,14,-45,7,42,15,-37,15,41,7,-37,14,41,6,-37,0,36,0,-37,3,36,3,-37,2,36,2,-37,1,36,1,-37,8,38,8,-37,9,38,9,-37,10,38,10,-37,11,38,11,-37,4,26,12,-37,5,26,13,-37,6,26,14,-37,7,26,15,-37,12,41,4,-37,13,41,5,-8,10,23,10,-8,11,23,11,-8,4,7,4,-8,5,7,5,-8,12,19,4,-8,13,19,5,-8,0,9,0,-8,14,19,6,-8,1,9,1,-8,15,19,7,-8,2,9,2,-8,3,9,3,-8,8,23,8,-8,9,23,9,-57,0,56,0,-57,1,56,1,-57,2,56,2,-57,3,56,3,-57,4,54,12,-57,5,54,13,-57,6,54,14,-57,7,54,15,-57,8,58,8,-57,9,58,9,-57,10,58,10,-57,11,58,11,-44,0,45,0,-44,1,45,1,-44,2,45,2,-44,3,45,3,-44,4,43,12,-44,5,43,13,-44,6,43,14,-44,7,43,15,-53,9,54,9,-53,8,54,8,-53,11,54,11,-53,10,54,10,-53,1,52,1,-53,14,58,6,-53,2,52,2,-53,15,58,7,-53,3,52,3,-53,12,58,4,-53,13,58,5,-53,4,19,12,-53,5,19,13,-53,6,19,14,-53,7,19,15,-23,12,20,4,-23,15,20,7,-23,2,22,2,-23,0,22,0,-23,13,20,5,-23,1,22,1,-23,14,20,6,-23,3,22,3,-23,4,24,4,-23,5,24,5,-23,8,8,8,-23,9,8,9,-23,10,8,10,-23,11,8,11,-52,9,51,9,-52,8,51,8,-52,11,51,11,-52,10,51,10,-52,1,53,1,-52,14,59,6,-52,2,53,2,-52,15,59,7,-52,3,53,3,-52,12,59,4,-52,13,59,5,-52,4,20,12,-52,5,20,13,-52,6,20,14,-52,7,20,15,-51,8,52,8,-51,7,21,15,-51,6,21,14,-51,11,52,11,-51,10,52,10,-51,9,52,9,-51,0,50,0,-51,13,60,5,-51,1,50,1,-51,14,60,6,-51,2,50,2,-51,15,60,7,-51,3,50,3,-51,12,60,4,-51,4,21,12,-51,5,21,13,-42,12,45,4,-42,7,36,15,-42,6,36,14,-42,15,45,7,-42,2,43,2,-42,14,45,6,-42,1,43,1,-42,13,45,5,-42,0,43,0,-42,3,43,3,-42,8,41,8,-42,9,41,9,-42,10,41,10,-42,11,41,11,-42,4,36,12,-42,5,36,13,-50,4,30,12,-50,12,61,4,-50,3,51,3,-50,7,30,15,-50,6,30,14,-50,5,30,13,-50,8,49,8,-50,9,49,9,-50,10,49,10,-50,11,49,11,-50,0,51,0,-50,13,61,5,-50,1,51,1,-50,14,61,6,-50,2,51,2,-50,15,61,7,-40,8,2,8,-40,12,47,4,-40,7,38,15,-40,11,2,11,-40,10,2,10,-40,9,2,9,-40,0,41,0,-40,13,47,5,-40,1,41,1,-40,14,47,6,-40,2,41,2,-40,15,47,7,-40,3,41,3,-40,4,38,12,-40,5,38,13,-40,6,38,14,-10,12,17,4,-10,13,17,5,-10,14,17,6,-10,15,17,7,-10,4,11,4,-10,5,11,5,-10,8,9,8,-10,9,9,9,-10,10,9,10,-10,11,9,11,-39,12,2,4,-39,3,38,3,-39,2,38,2,-39,15,2,7,-39,14,2,6,-39,1,38,1,-39,13,2,5,-39,0,38,0,-39,8,4,8,-39,9,4,9,-39,10,4,10,-39,11,4,11,-39,4,24,12,-39,5,24,13,-39,6,24,14,-39,7,24,15,-18,12,54,4,-18,11,17,11,-18,10,17,10,-18,15,54,7,-18,2
,19,2,-18,14,54,6,-18,1,19,1,-18,13,54,5,-18,0,19,0,-18,3,19,3,-18,4,9,12,-18,5,9,13,-18,6,9,14,-18,7,9,15,-18,8,17,8,-18,9,17,9,-56,0,57,0,-56,1,57,1,-56,2,57,2,-56,3,57,3,-56,4,55,12,-56,5,55,13,-56,6,55,14,-56,7,55,15,-48,12,63,4,-48,13,63,5,-48,0,49,0,-48,14,63,6,-48,1,49,1,-48,15,63,7,-48,2,49,2,-48,4,32,12,-48,5,32,13,-48,7,32,15,-48,3,49,3,-36,15,42,7,-36,14,42,6,-36,0,37,0,-36,3,37,3,-36,2,37,2,-36,1,37,1,-36,4,27,12,-36,5,27,13,-36,6,27,14,-36,7,27,15,-36,8,35,8,-36,9,35,9,-36,10,35,10,-36,11,35,11,-36,12,42,4,-36,13,42,5,-27,11,34,11,-27,3,26,3,-27,12,36,4,-27,13,36,5,-27,0,26,0,-27,14,36,6,-27,1,26,1,-27,15,36,7,-27,2,26,2,-27,4,28,4,-27,5,28,5,-27,8,34,8,-27,9,34,9,-27,10,34,10,-25,5,22,5,-25,3,24,3,-25,8,26,8,-25,9,26,9,-25,10,26,10,-25,11,26,11,-25,12,38,4,-25,13,38,5,-25,0,24,0,-25,14,38,6,-25,1,24,1,-25,15,38,7,-25,2,24,2,-25,4,22,4,-26,5,29,5,-26,3,27,3,-26,8,25,8,-26,9,25,9,-26,10,25,10,-26,11,25,11,-26,12,37,4,-26,13,37,5,-26,0,27,0,-26,14,37,6,-26,1,27,1,-26,15,37,7,-26,2,27,2,-26,4,29,4,-28,11,33,11,-28,3,29,3,-28,4,27,4,-28,5,27,5,-28,12,31,4,-28,13,31,5,-28,0,29,0,-28,14,31,6,-28,1,29,1,-28,15,31,7,-28,2,29,2,-28,8,33,8,-28,9,33,9,-28,10,33,10,-2,8,40,8,-2,7,39,15,-2,6,39,14,-2,11,40,11,-2,10,40,10,-2,9,40,9,-2,0,3,0,-2,13,1,5,-2,1,3,1,-2,14,1,6,-2,2,3,2,-2,15,1,7,-2,3,3,3,-2,12,1,4,-2,4,39,12,-2,5,39,13,-61,0,60,0,-61,1,60,1,-61,2,60,2,-61,3,60,3,-61,4,50,12,-61,5,50,13,-61,6,50,14,-61,7,50,15,-61,8,62,8,-61,9,62,9,-61,10,62,10,-61,11,62,11,-60,0,61,0,-60,1,61,1,-60,2,61,2,-60,3,61,3,-60,4,51,12,-60,5,51,13,-60,6,51,14,-60,7,51,15,-60,8,59,8,-60,9,59,9,-60,10,59,10,-60,11,59,11,-1,0,0,0,-1,1,0,1,-1,2,0,2,-1,3,0,3,-1,4,2,12,-1,5,2,13,-1,6,2,14,-1,7,2,15,-1,8,47,8,-1,9,47,9,-1,10,47,10,-1,11,47,11,-24,5,23,5,-24,3,25,3,-24,12,39,4,-24,13,39,5,-24,0,25,0,-24,14,39,6,-24,1,25,1,-24,15,39,7,-24,2,25,2,-24,8,7,8,-24,9,7,9,-24,10,7,10,-24,11,7,11,-24,4,23,4,-5,8,12,8,-5,7,6,15,-5,6,6,14,-5,11,12,11,-5,10,12,10,-5,9,12,9,-5,0,4,0,-5,13,14,5,-5,1,4,1,-5,14,14,6,-5,2,4,2,-5,15,14,7,-5,3,4,3,-5,12,14,4,-5,4,6,12,-5,5,6,13,-29,11,22,11,-29,3,28,3,-29,12,30,4,-29,13,30,5,-29,0,28,0,-29,14,30,6,-29,1,28,1,-29,15,30,7,-29,2,28,2,-29,4,26,4,-29,5,26,5,-29,8,22,8,-29,9,22,9,-29,10,22,10,-30,12,50,4,-30,3,31,3,-30,2,31,2,-30,15,50,7,-30,14,50,6,-30,1,31,1,-30,13,50,5,-30,0,31,0,-30,4,29,12,-30,5,29,13,-30,6,29,14,-30,7,29,15,-30,8,21,8,-30,9,21,9,-30,10,21,10,-30,11,21,11,-46,8,45,8,-46,9,45,9,-46,10,45,10,-46,11,45,11,-46,0,47,0,-46,1,47,1,-46,2,47,2,-46,3,47,3,-46,4,41,12,-46,5,41,13,-46,6,41,14,-46,7,41,15,-14,11,3,11,-14,10,3,10,-14,9,3,9,-14,3,13,3,-14,2,13,2,-14,15,15,7,-14,1,13,1,-14,14,15,6,-14,12,15,4,-14,13,15,5,-14,0,13,0,-14,4,5,12,-14,5,5,13,-14,6,5,14,-14,7,5,15,-14,8,3,8,-15,4,14,12,-15,5,14,13,-15,6,14,14,-15,7,14,15,-15,0,16,0,-15,1,16,1,-15,2,16,2,-15,3,16,3,-15,8,0,8,-15,9,0,9,-15,10,0,10,-15,11,0,11,-47,4,40,12,-47,5,40,13,-47,6,40,14,-47,7,40,15,-47,8,1,8,-47,9,1,9,-47,10,1,10,-47,11,1,11,-47,0,46,0,-47,1,46,1,-47,2,46,2,-47,3,46,3,-12,4,11,12,-12,5,11,13,-12,6,11,14,-12,7,11,15,-12,12,13,4,-12,13,13,5,-12,14,13,6,-12,15,13,7,-12,8,5,8,-12,9,5,9,-12,10,5,10,-12,11,5,11,-11,12,12,4,-11,13,12,5,-11,14,12,6,-11,15,12,7,-11,4,10,4,-11,5,10,5,-11,8,6,8,-11,9,6,9,-11,10,6,10,-11,11,6,11,-20,4,23,12,-20,12,52,4,-20,3,21,3,-20,7,23,15,-20,6,23,14,-20,5,23,13,-20,8,19,8,-20,9,19,9,-20,10,19,10,-20,11,19,11,-20,0,21,0,-20,13,52,5,-20,1,21,1,-20,14,52,6,-20,2,21,2,-20,15,52,7,-21,4,22,12,-21,11,30,11,-21,10,30,10,-21,7,22,15,-21,6,22,14,-21,5,22,13,-21,12,51,4,-21,13,51,5,-2
1,0,20,0,-21,14,51,6,-21,1,20,1,-21,15,51,7,-21,2,20,2,-21,3,20,3,-21,8,30,8,-21,9,30,9,- system-device-num_mmio_devices: 1 system-device-number_of_chips: 64 diff --git a/pybuda/test/galaxy/utils/generate_system_params.py b/forge/test/galaxy/utils/generate_system_params.py similarity index 79% rename from pybuda/test/galaxy/utils/generate_system_params.py rename to forge/test/galaxy/utils/generate_system_params.py index 9233120b5..721ab3254 100644 --- a/pybuda/test/galaxy/utils/generate_system_params.py +++ b/forge/test/galaxy/utils/generate_system_params.py @@ -2,10 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 import argparse -import pybuda -from pybuda.config import _get_global_compiler_config -from pybuda.ttdevice import get_device_config -from pybuda._C.backend_api import BackendDevice, BackendType +import forge +from forge.config import _get_global_compiler_config +from forge.ttdevice import get_device_config +from forge._C.backend_api import BackendDevice, BackendType if __name__ == "__main__": @@ -14,7 +14,7 @@ args = parser.parse_args() compiler_cfg = _get_global_compiler_config() - pybuda.set_configuration_options( + forge.set_configuration_options( backend_runtime_params_path = args.save_params_file, store_backend_db_to_yaml = True, ) diff --git a/pybuda/test/galaxy/utils/verify_push_bandwidth.py b/forge/test/galaxy/utils/verify_push_bandwidth.py similarity index 100% rename from pybuda/test/galaxy/utils/verify_push_bandwidth.py rename to forge/test/galaxy/utils/verify_push_bandwidth.py diff --git a/pybuda/test/gpt2/gpt2.py b/forge/test/gpt2/gpt2.py similarity index 96% rename from pybuda/test/gpt2/gpt2.py rename to forge/test/gpt2/gpt2.py index e588abfa1..0386e01b8 100644 --- a/pybuda/test/gpt2/gpt2.py +++ b/forge/test/gpt2/gpt2.py @@ -5,10 +5,10 @@ import torch import numpy as np -from pybuda import ( - PyBudaModule, +from forge import ( + ForgeModule, ) -import pybuda.op as nn +import forge.op as nn from transformers.models.gpt2 import GPT2Config @@ -134,7 +134,7 @@ def functional_gpt2_block(hidden_states, config, parameters, is_qkv_weight_split return hidden_states -class PyBudaGPT2MLP(PyBudaModule): +class ForgeGPT2MLP(ForgeModule): def __init__(self, name, parameters, config, prefix=None): super().__init__(name) self.parameters = parameters @@ -167,7 +167,7 @@ def parameter(name): return output -class PyBudaGPT2MHA(PyBudaModule): +class ForgeGPT2MHA(ForgeModule): def __init__(self, name, parameters, config, prefix=None): super().__init__(name) self.parameters = parameters @@ -238,14 +238,14 @@ def param(name): return output -class PyBudaGPT2Block(PyBudaModule): +class ForgeGPT2Block(ForgeModule): def __init__(self, name, parameters, config): super().__init__(name) self.parameters = parameters self.config = config - self.attn = PyBudaGPT2MHA("attn", parameters, config, prefix="attn") - self.mlp = PyBudaGPT2MLP("mlp", parameters, config, prefix="mlp") + self.attn = ForgeGPT2MHA("attn", parameters, config, prefix="attn") + self.mlp = ForgeGPT2MLP("mlp", parameters, config, prefix="mlp") def forward(self, hidden_states): def parameter(key): @@ -280,7 +280,7 @@ def parameter(key): return hidden_states -class PyBudaGPT2LayerNorm(PyBudaModule): +class ForgeGPT2LayerNorm(ForgeModule): def __init__(self, name, parameters, config): super().__init__(name) self.parameters = parameters diff --git a/pybuda/test/gpt2/test_gpt2.py b/forge/test/gpt2/test_gpt2.py similarity index 81% rename from pybuda/test/gpt2/test_gpt2.py rename to forge/test/gpt2/test_gpt2.py index 5a648bfb9..66bc58aa7 
100644 --- a/pybuda/test/gpt2/test_gpt2.py +++ b/forge/test/gpt2/test_gpt2.py @@ -5,20 +5,20 @@ import torch import numpy as np -from pybuda import ( +from forge import ( TTDevice, Tensor, Parameter, - PyBudaModule, - pybuda_compile, + ForgeModule, + forge_compile, CompilerConfig, VerifyConfig, SGD, ) -from pybuda.utils import get_pybuda_parameters_from_state_dict -from pybuda._C.backend_api import BackendType +from forge.utils import get_forge_parameters_from_state_dict +from forge._C.backend_api import BackendType -import pybuda.op as nn +import forge.op as nn from transformers.models.gpt2 import GPT2Config from transformers.models.gpt2.modeling_gpt2 import GPT2Block, GPT2MLP, GPT2Attention @@ -27,10 +27,10 @@ functional_gpt2_feedforward, functional_gpt2_mha, functional_gpt2_block, - PyBudaGPT2MLP, - PyBudaGPT2MHA, - PyBudaGPT2Block, - PyBudaGPT2LayerNorm, + ForgeGPT2MLP, + ForgeGPT2MHA, + ForgeGPT2Block, + ForgeGPT2LayerNorm, ) verify_cfg=VerifyConfig(atol=1e-02, rtol=1e-02, run_golden=False) # don't run backend golden on each test yet @@ -135,22 +135,22 @@ def test_functional_gpt2_block_vs_transformers(): assert torch.allclose(golden[0], calculated) -def test_pybuda_gpt2_feedforward(): +def test_forge_gpt2_feedforward(): config = get_default_gpt2_config() mlp = GPT2MLP(config.hidden_size, config) - pybuda_module = PyBudaGPT2MLP( - "ff", get_pybuda_parameters_from_state_dict(mlp.state_dict()), config + forge_module = ForgeGPT2MLP( + "ff", get_forge_parameters_from_state_dict(mlp.state_dict()), config ) tt0 = TTDevice("tt0", devtype=BackendType.Golden) - tt0.place_module(pybuda_module) + tt0.place_module(forge_module) activations = Tensor.create_from_torch( torch.rand(1, 1, DEFAULT_SEQUENCE_LENGTH, config.hidden_size) ) # Adjust atol/rtol due to differences in gelu backwards implementation - ret = pybuda_compile( + ret = forge_compile( tt0, "gpt2_ff", activations, @@ -164,19 +164,19 @@ def test_pybuda_gpt2_feedforward(): assert torch.allclose(ret.golden_outputs[0], calculated) -def test_pybuda_gpt2_mha(): +def test_forge_gpt2_mha(): config = get_default_gpt2_config() gpt2_mha = GPT2Attention(config) parameters = split_qkv_weights_and_bias_aot(config, gpt2_mha.state_dict()) parameters = add_causal_mask_constants_to_parameters(config, parameters) - pybuda_module = PyBudaGPT2MHA( - "mha", get_pybuda_parameters_from_state_dict(parameters), config + forge_module = ForgeGPT2MHA( + "mha", get_forge_parameters_from_state_dict(parameters), config ) tt0 = TTDevice("tt0", devtype=BackendType.Golden) - tt0.place_module(pybuda_module) + tt0.place_module(forge_module) activations = Tensor.create_from_torch( @@ -184,7 +184,7 @@ def test_pybuda_gpt2_mha(): ) # Adjust atol/rtol due to differences in gelu backwards implementation - ret = pybuda_compile( + ret = forge_compile( tt0, "gpt2_mha", activations, @@ -194,31 +194,31 @@ def test_pybuda_gpt2_mha(): ), verify_cfg=verify_cfg, ) - pybuda_output = ret.golden_outputs[0].squeeze(dim=0) # squeeze to get back original tensor dims + forge_output = ret.golden_outputs[0].squeeze(dim=0) # squeeze to get back original tensor dims golden = functional_gpt2_mha(activations.value(), config, parameters) - assert torch.allclose(pybuda_output, golden, atol=1e-02, rtol=1e-02) + assert torch.allclose(forge_output, golden, atol=1e-02, rtol=1e-02) -def test_pybuda_gpt2_block(): +def test_forge_gpt2_block(): config = get_default_gpt2_config() gpt2_block = GPT2Block(config) parameters = split_qkv_weights_and_bias_aot(config, gpt2_block.state_dict(), prefix="attn") parameters 
= add_causal_mask_constants_to_parameters(config, parameters, prefix="attn") - pybuda_module = PyBudaGPT2Block( - "block", get_pybuda_parameters_from_state_dict(parameters), config + forge_module = ForgeGPT2Block( + "block", get_forge_parameters_from_state_dict(parameters), config ) tt0 = TTDevice("tt0", devtype=BackendType.Golden) - tt0.place_module(pybuda_module) + tt0.place_module(forge_module) activations = Tensor.create_from_torch( torch.rand(1, 1, DEFAULT_SEQUENCE_LENGTH, config.hidden_size) ) # Adjust atol/rtol due to differences in gelu backwards implementation - ret = pybuda_compile( + ret = forge_compile( tt0, "gpt2_block", activations, @@ -228,32 +228,32 @@ def test_pybuda_gpt2_block(): ), verify_cfg=verify_cfg, ) - pybuda_output = ret.golden_outputs[0].squeeze(dim=0) # squeeze to get back original tensor dims + forge_output = ret.golden_outputs[0].squeeze(dim=0) # squeeze to get back original tensor dims golden = functional_gpt2_block(activations.value(), config, parameters) - assert torch.allclose(pybuda_output, golden, atol=1e-02, rtol=1e-02) + assert torch.allclose(forge_output, golden, atol=1e-02, rtol=1e-02) -def test_pybuda_gpt2_layernorm(): +def test_forge_gpt2_layernorm(): config = get_default_gpt2_config() gpt2_block = GPT2Block(config) parameters = gpt2_block.state_dict() - pybuda_parameters = get_pybuda_parameters_from_state_dict(parameters) + forge_parameters = get_forge_parameters_from_state_dict(parameters) - pybuda_module = PyBudaGPT2LayerNorm( - "ln", pybuda_parameters, config + forge_module = ForgeGPT2LayerNorm( + "ln", forge_parameters, config ) tt0 = TTDevice("tt0", devtype=BackendType.Golden) - tt0.place_module(pybuda_module) + tt0.place_module(forge_module) activations = Tensor.create_from_torch( torch.rand(1, 1, DEFAULT_SEQUENCE_LENGTH, config.hidden_size) ) # Adjust atol/rtol due to differences in gelu backwards implementation - ret = pybuda_compile( + ret = forge_compile( tt0, "gpt2_layernorm", activations, @@ -265,7 +265,7 @@ def test_pybuda_gpt2_layernorm(): verify_cfg=verify_cfg, ) - pybuda_output = ret.golden_outputs[0].squeeze(dim=0) # squeeze to get back original tensor dims + forge_output = ret.golden_outputs[0].squeeze(dim=0) # squeeze to get back original tensor dims golden = torch.nn.functional.layer_norm( activations.value(), @@ -274,4 +274,4 @@ def test_pybuda_gpt2_layernorm(): bias=parameters["ln_1.bias"], ) - assert torch.allclose(pybuda_output, golden, atol=1e-02, rtol=1e-02) + assert torch.allclose(forge_output, golden, atol=1e-02, rtol=1e-02) diff --git a/pybuda/test/llama/amp_configs/amp_config.py b/forge/test/llama/amp_configs/amp_config.py similarity index 63% rename from pybuda/test/llama/amp_configs/amp_config.py rename to forge/test/llama/amp_configs/amp_config.py index c763705b4..156655b92 100644 --- a/pybuda/test/llama/amp_configs/amp_config.py +++ b/forge/test/llama/amp_configs/amp_config.py @@ -4,79 +4,79 @@ import json import os -def apply_mlp(pybuda, config): +def apply_mlp(forge, config): # Config could be df string or a dict of df strings if isinstance(config, str): - df = str_to_dataformat(pybuda, config) + df = str_to_dataformat(forge, config) gate_df = up_df = down_df = df else: - gate_df = str_to_dataformat(pybuda, config['gate']) - up_df = str_to_dataformat(pybuda, config['up']) - down_df = str_to_dataformat(pybuda, config['down']) + gate_df = str_to_dataformat(forge, config['gate']) + up_df = str_to_dataformat(forge, config['up']) + down_df = str_to_dataformat(forge, config['down']) # MLP dataformat is applied to MLP 
weights with this regex - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=gate_df, name_regex=".*mlp.gate_proj.weight.*", input_df={0: [gate_df, True]}) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=up_df, name_regex=".*mlp.up_proj.weight.*", input_df={0: [up_df, True]}) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=down_df, name_regex=".*mlp.down_proj.weight.*", input_df={0: [down_df, True]}) -def apply_attn(pybuda, config): +def apply_attn(forge, config): # Config could be df string or a dict of df strings if isinstance(config, str): - df = str_to_dataformat(pybuda, config) + df = str_to_dataformat(forge, config) q_df = k_df = v_df = o_df = df else: - q_df = str_to_dataformat(pybuda, config['q']) - k_df = str_to_dataformat(pybuda, config['k']) - v_df = str_to_dataformat(pybuda, config['v']) - o_df = str_to_dataformat(pybuda, config['o']) + q_df = str_to_dataformat(forge, config['q']) + k_df = str_to_dataformat(forge, config['k']) + v_df = str_to_dataformat(forge, config['v']) + o_df = str_to_dataformat(forge, config['o']) # Attention dataformat is applied to attention weights with this regex - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=q_df, name_regex=".*self_attn.q_proj.weight.*", input_df={0: [q_df, True]}) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=k_df, name_regex=".*self_attn.k_proj.weight.*", input_df={0: [k_df, True]}) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=v_df, name_regex=".*self_attn.v_proj.weight.*", input_df={0: [v_df, True]}) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=o_df, name_regex=".*self_attn.o_proj.weight.*", input_df={0: [o_df, True]}) -def apply_cache(pybuda, config, num_layers): +def apply_cache(forge, config, num_layers): # Config could be df string or a dict of df strings if isinstance(config, str): - df = str_to_dataformat(pybuda, config) + df = str_to_dataformat(forge, config) key_df = df value_df = df else: - key_df = str_to_dataformat(pybuda, config['key']) - value_df = str_to_dataformat(pybuda, config['value']) + key_df = str_to_dataformat(forge, config['key']) + value_df = str_to_dataformat(forge, config['value']) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=key_df, name_regex="k_past_.*", input_df={0: [key_df, True]}) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=value_df, name_regex="v_past_.*", input_df={0: [value_df, True]}) @@ -92,71 +92,71 @@ def apply_cache(pybuda, config, num_layers): k = OP_OFFSET * i j = HSTACK_OFFSET * i # special-case key ops - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=key_df, name_regex=f'concatenate_{30+k}.dc.concatenate.0', input_df={0: [key_df, True], 1: [key_df, True]}) # Write-view also needs overriding - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=key_df, name_regex=f".*output_hstack_{INDEX_START + 1 +j}.*", input_df={0: [key_df, True]}) # special-case value ops - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=value_df, name_regex=f'concatenate_{44+k}.dc.concatenate.0', input_df={0: [value_df, True], 1: [value_df, True]}) # Write-view 
also needs overriding - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=value_df, name_regex=f".*output_hstack_{INDEX_START + 3 +j}.*", input_df={0: [value_df, True]}) -def apply_matmul_acc(pybuda, df): - pybuda.config.configure_mixed_precision( +def apply_matmul_acc(forge, df): + forge.config.configure_mixed_precision( op_type="matmul", intermediate_df=df, accumulate_df=df, ) -def apply_default(pybuda, df): +def apply_default(forge, df): # Default dataformat is applied to all other weights with this regex - pybuda.set_configuration_options(default_df_override=df, accumulate_df=df) + forge.set_configuration_options(default_df_override=df, accumulate_df=df) -def apply_attn_mask(pybuda, df): +def apply_attn_mask(forge, df): # MLP dataformat is applied to MLP weights with this regex - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( output_df=df, name_regex="attention_mask", input_df={0: [df, True]}) -def str_to_dataformat(pybuda, df_str): +def str_to_dataformat(forge, df_str): if df_str == 'fp32': - df = pybuda.DataFormat.Float32 + df = forge.DataFormat.Float32 elif df_str == 'fp16': - df = pybuda.DataFormat.Float16 + df = forge.DataFormat.Float16 elif df_str == 'bf16': - df = pybuda.DataFormat.Float16_b + df = forge.DataFormat.Float16_b elif df_str == 'fp8': - df = pybuda.DataFormat.Bfp8 + df = forge.DataFormat.Bfp8 elif df_str == 'fp8b': - df = pybuda.DataFormat.Bfp8_b + df = forge.DataFormat.Bfp8_b elif df_str == 'fp4b': - df = pybuda.DataFormat.Bfp4_b + df = forge.DataFormat.Bfp4_b elif df_str == 'fp2b': - df = pybuda.DataFormat.Bfp2_b + df = forge.DataFormat.Bfp2_b else: raise ValueError('Precision "%s" not implemented' % precision) return df -def apply_amp_settings(pybuda, config_file, num_layers): +def apply_amp_settings(forge, config_file, num_layers): print('Applying AMP from file ', config_file, flush=True) # Open config json with open(config_file) as f: @@ -168,17 +168,17 @@ def apply_amp_settings(pybuda, config_file, num_layers): ''' for k, v in config.items(): if k == "mm_acc_df": - apply_matmul_acc(pybuda, str_to_dataformat(pybuda, v)) + apply_matmul_acc(forge, str_to_dataformat(forge, v)) elif k == "mlp_df": - apply_mlp(pybuda, v) + apply_mlp(forge, v) elif k == "attn_df": - apply_attn(pybuda, v) + apply_attn(forge, v) elif k == "cache_df": - apply_cache(pybuda, v, num_layers) + apply_cache(forge, v, num_layers) elif k == "default_df": - apply_default(pybuda, str_to_dataformat(pybuda, v)) + apply_default(forge, str_to_dataformat(forge, v)) elif k == "attn_mask_df": - apply_attn_mask(pybuda, str_to_dataformat(pybuda, v)) + apply_attn_mask(forge, str_to_dataformat(forge, v)) else: raise ValueError('Config "%s" not implemented' % k) diff --git a/pybuda/test/llama/amp_configs/w6.json b/forge/test/llama/amp_configs/w6.json similarity index 100% rename from pybuda/test/llama/amp_configs/w6.json rename to forge/test/llama/amp_configs/w6.json diff --git a/pybuda/test/llama/decode.py b/forge/test/llama/decode.py similarity index 96% rename from pybuda/test/llama/decode.py rename to forge/test/llama/decode.py index cd212df6b..631a4b81f 100644 --- a/pybuda/test/llama/decode.py +++ b/forge/test/llama/decode.py @@ -8,7 +8,7 @@ import time from pybudify_caching import PyBudify -import pybuda +import forge from prettytable import PrettyTable @@ -22,7 +22,7 @@ def main(): parser.add_argument('-n', '--num-tokens', type=int, default=10, help='Maximum number of tokens to generate') 
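[Editor's note — illustrative sketch, not part of the patch. The amp_config.py helpers renamed above all take the `forge` module itself as their first argument and route every override through `forge.config.configure_mixed_precision`, mirroring the call `amp_config.apply_amp_settings(forge, amp_config_file, num_layers)` that appears later in pybudify_caching.py. A minimal, hedged usage sketch under that reading follows; the sys.path assumption is the editor's, everything else is taken from this diff.]

# --- illustrative sketch (not part of this patch) ---
# Assumes forge/test/llama/amp_configs is on sys.path so amp_config.py is importable.
import forge
from amp_config import apply_amp_settings, str_to_dataformat

# Drive the whole AMP setup from a JSON config, as pybudify_caching.py does further down
# in this patch; w6.json is the config file renamed in this diff.
apply_amp_settings(forge, "forge/test/llama/amp_configs/w6.json", num_layers=32)

# The individual helpers follow the same convention, e.g. matmul accumulation overrides:
df = str_to_dataformat(forge, "bf16")  # maps to forge.DataFormat.Float16_b per amp_config.py
forge.config.configure_mixed_precision(op_type="matmul", intermediate_df=df, accumulate_df=df)
# --- end sketch ---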
parser.add_argument('--output-at-end', action='store_true', help='Output at the end of generation instead of token by token') - parser.add_argument('-d', '--device', choices=['huggingface', 'pytorch', 'golden', 'silicon'], default='huggingface', help='huggingface: run using HF code only, pytorch: use our shim but run in PyTorch, golden/silicon: run via pybuda') + parser.add_argument('-d', '--device', choices=['huggingface', 'pytorch', 'golden', 'silicon'], default='huggingface', help='huggingface: run using HF code only, pytorch: use our shim but run in PyTorch, golden/silicon: run via forge') parser.add_argument('--no-kv-cache', action='store_true', help='Do not use a kv-cache and only generate the first 32 tokens') parser.add_argument('--arch', choices=['greyskull', 'wormhole_b0'], default='wormhole_b0', help='Architecture to use for silicon') parser.add_argument('--num-chips', type=int, default=1, help='Number of chips to use') @@ -112,32 +112,32 @@ def main(): # frac_factor = 8 - # pybuda.config.insert_fracture_group([ + # forge.config.insert_fracture_group([ # (f"matmul_{86}",-1, frac_factor), # ] # ) # for f in range(frac_factor): - # pybuda.config.configure_mixed_precision( - # output_df=pybuda.DataFormat.Bfp8_b, + # forge.config.configure_mixed_precision( + # output_df=forge.DataFormat.Bfp8_b, # name_regex=f"fractured_{f}_matmul_86", - # input_df={0: [pybuda.DataFormat.Bfp8_b, True]}, + # input_df={0: [forge.DataFormat.Bfp8_b, True]}, # ) - # pybuda.config.configure_mixed_precision( - # output_df=pybuda.DataFormat.Bfp2_b, - # accumulate_df=pybuda.DataFormat.Bfp2_b, + # forge.config.configure_mixed_precision( + # output_df=forge.DataFormat.Bfp2_b, + # accumulate_df=forge.DataFormat.Bfp2_b, # name_regex="fractured_gather_n0_matmul_86.dc.concatenate.0", - # intermediate_df=pybuda.DataFormat.Bfp2_b, - # input_df={0: [pybuda.DataFormat.Bfp2_b, True]}, + # intermediate_df=forge.DataFormat.Bfp2_b, + # input_df={0: [forge.DataFormat.Bfp2_b, True]}, # ) - # pybuda.config.configure_mixed_precision( - # output_df=pybuda.DataFormat.Bfp2_b, - # accumulate_df=pybuda.DataFormat.Bfp2_b, + # forge.config.configure_mixed_precision( + # output_df=forge.DataFormat.Bfp2_b, + # accumulate_df=forge.DataFormat.Bfp2_b, # name_regex="llama_nonkv_1nc_1nl_128cl_1bsz.output_reshape_87", - # intermediate_df=pybuda.DataFormat.Bfp2_b, - # input_df={0: [pybuda.DataFormat.Bfp2_b, True]}, + # intermediate_df=forge.DataFormat.Bfp2_b, + # input_df={0: [forge.DataFormat.Bfp2_b, True]}, # ) num_tokens = input_ids.shape[-1] @@ -162,7 +162,7 @@ def main(): pad_k[:, :, :k.shape[-2], :] = k pad_v[:, :, :v.shape[-2], :] = v - # merge heads for pybuda loopback + # merge heads for forge loopback def merge(tensor): num_attention_heads = tensor.shape[-3] attn_head_size = head_dim diff --git a/pybuda/test/llama/eval_data/episode_iv.txt b/forge/test/llama/eval_data/episode_iv.txt similarity index 100% rename from pybuda/test/llama/eval_data/episode_iv.txt rename to forge/test/llama/eval_data/episode_iv.txt diff --git a/pybuda/test/llama/generate_eval.py b/forge/test/llama/generate_eval.py similarity index 100% rename from pybuda/test/llama/generate_eval.py rename to forge/test/llama/generate_eval.py diff --git a/pybuda/test/llama/hang.py b/forge/test/llama/hang.py similarity index 98% rename from pybuda/test/llama/hang.py rename to forge/test/llama/hang.py index 541a2f632..f43072e72 100644 --- a/pybuda/test/llama/hang.py +++ b/forge/test/llama/hang.py @@ -18,7 +18,7 @@ def main(): parser = ArgumentParser('Generate text 
token-by-token starting with a pre-filled KV cache') parser.add_argument('-m', '--model', type=str, default='decapoda-research/llama-7b-hf', help='Model name') - parser.add_argument('-d', '--device', choices=['huggingface', 'pytorch', 'golden', 'silicon'], default='huggingface', help='huggingface: run using HF code only, pytorch: use our shim but run in PyTorch, golden/silicon: run via pybuda') + parser.add_argument('-d', '--device', choices=['huggingface', 'pytorch', 'golden', 'silicon'], default='huggingface', help='huggingface: run using HF code only, pytorch: use our shim but run in PyTorch, golden/silicon: run via forge') parser.add_argument('--arch', choices=['greyskull', 'wormhole_b0'], default='wormhole_b0', help='Architecture to use for silicon') parser.add_argument('--precision', choices=['fp32', 'fp16', 'bf16', 'fp8', 'fp8b'], default='fp32', help='Precision to use for all silicon tensors') parser.add_argument('--amp-level', type=int, default=0, choices=[0, 1, 2], help='Automatic mixed precision level (0=off, 1=mixed b-formats, 2=mixed a-formats)') diff --git a/pybuda/test/llama/llama_test.py b/forge/test/llama/llama_test.py similarity index 90% rename from pybuda/test/llama/llama_test.py rename to forge/test/llama/llama_test.py index 18f55d4ac..51b27477c 100644 --- a/pybuda/test/llama/llama_test.py +++ b/forge/test/llama/llama_test.py @@ -17,7 +17,7 @@ def test_llama(device, arch, chips_to_use): ''' see large-lm/investigations/Llama-7B-odkv/README.md ''' - data_folder = 'pybuda/test/llama/eval_data/pt_gt_128' + data_folder = 'forge/test/llama/eval_data/pt_gt_128' if chips_to_use == "chip1": num_chips = 1 elif chips_to_use == "chip2": @@ -29,7 +29,7 @@ def test_llama(device, arch, chips_to_use): # python generate_eval.py --input eval_data/episode_iv.txt --output eval_data/pt_gt_128 --context-length 128 --num-samples 100 args_for_generate = { 'model': 'decapoda-research/llama-7b-hf', - 'input': 'pybuda/test/llama/eval_data/episode_iv.txt', + 'input': 'forge/test/llama/eval_data/episode_iv.txt', 'output': data_folder, 'context_length': 128, 'num_samples': 100, @@ -53,7 +53,7 @@ def test_llama(device, arch, chips_to_use): 'num_layers': 32, 'opt_level': 1, 'verify': False, - 'amp_config': 'pybuda/test/llama/amp_configs/w6.json', + 'amp_config': 'forge/test/llama/amp_configs/w6.json', 'input_count': None, 'nlp_target_cycles': -1 } diff --git a/pybuda/test/llama/modeling_alpaca_caching.py b/forge/test/llama/modeling_alpaca_caching.py similarity index 100% rename from pybuda/test/llama/modeling_alpaca_caching.py rename to forge/test/llama/modeling_alpaca_caching.py diff --git a/pybuda/test/llama/placement.py b/forge/test/llama/placement.py similarity index 99% rename from pybuda/test/llama/placement.py rename to forge/test/llama/placement.py index b445ba4f4..efc1ffcb6 100644 --- a/pybuda/test/llama/placement.py +++ b/forge/test/llama/placement.py @@ -6,7 +6,7 @@ from collections import OrderedDict import yaml import os -from pybuda._C import DataFormat +from forge._C import DataFormat def convert_data_format(str_format): if str_format is None: diff --git a/pybuda/test/llama/pybudify_caching.py b/forge/test/llama/pybudify_caching.py similarity index 74% rename from pybuda/test/llama/pybudify_caching.py rename to forge/test/llama/pybudify_caching.py index b046da988..d8625bcd5 100644 --- a/pybuda/test/llama/pybudify_caching.py +++ b/forge/test/llama/pybudify_caching.py @@ -21,44 +21,44 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f self.write_index = 
write_index if device != 'pytorch': - # pybuda workarounds + # forge workarounds os.environ["GOLDEN_WORMHOLE_B0"] = "1" - # os.environ["PYBUDA_ENABLE_BROADCAST_SPLITTING"] = "1" - #os.environ["PYBUDA_DISABLE_FORK_JOIN_BUF"] = "1" - # os.environ["PYBUDA_DRAM_PICK_CAPACITY"] = "1" + # os.environ["FORGE_ENABLE_BROADCAST_SPLITTING"] = "1" + #os.environ["FORGE_DISABLE_FORK_JOIN_BUF"] = "1" + # os.environ["FORGE_DRAM_PICK_CAPACITY"] = "1" os.environ["WHA0_DISABLE_RELAY_BUFS"] = "1" - os.environ["PYBUDA_FUSE_STOP_ON_RECIPROCAL"] = "1" - os.environ["PYBUDA_PLACER_SNAKE"] = "1" + os.environ["FORGE_FUSE_STOP_ON_RECIPROCAL"] = "1" + os.environ["FORGE_PLACER_SNAKE"] = "1" os.environ["LOGGER_LEVEL"] = log_level os.environ["LOGURU_LEVEL"] = log_level - # os.environ["PYBUDA_DISABLE_FORK_JOIN_BUF"] = "1" - os.environ["PYBUDA_ENABLE_OUTPUT_QUEUES_ON_HOST"] = "0" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" + # os.environ["FORGE_DISABLE_FORK_JOIN_BUF"] = "1" + os.environ["FORGE_ENABLE_OUTPUT_QUEUES_ON_HOST"] = "0" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" # os.environ["TT_BACKEND_FORCE_SW_TILIZE"] = "1" if nlp_target_cycles > 0: - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = str(nlp_target_cycles) + os.environ["FORGE_NLP_MANUAL_TARGET"] = str(nlp_target_cycles) - pybuda = self.pybuda = __import__('pybuda') # let us set log levels before importing pybuda + forge = self.forge = __import__('forge') # let us set log levels before importing forge if device == 'pytorch': pass else: - devtype = { 'golden' : pybuda.BackendType.Golden, - 'silicon': pybuda.BackendType.Silicon, + devtype = { 'golden' : forge.BackendType.Golden, + 'silicon': forge.BackendType.Silicon, }[device] - module = pybuda.PyTorchModule(netlist_name, self.bound_module) + module = forge.PyTorchModule(netlist_name, self.bound_module) assert amp_config_file is not None, "amp_config_file must be specified for PyBudify" assert num_layers is not None, "num_layers must be specified for PyBudify" # apply_amp_settings manages all dataformats - amp_config.apply_amp_settings(pybuda, amp_config_file, num_layers) + amp_config.apply_amp_settings(forge, amp_config_file, num_layers) num_layers = 32 if num_layers is None else num_layers if placement_config_file is not None: - manual_placer(pybuda.config, placement_config_file, loops=num_layers) + manual_placer(forge.config, placement_config_file, loops=num_layers) if self.prefill_kvs: @@ -72,35 +72,35 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f for layer_num in range(num_layers): k = OP_OFFSET * layer_num j = INDEX_OFFSET * layer_num - pybuda.config.add_schedule_constraint([f'concatenate_{30+k}.dc.concatenate.0', f'index_{INDEX_START+j}.dc.select.0', f'index_{INDEX_START+j}.dc.buffer.1', f'{netlist_name}.output_hstack_{INDEX_START+1+j}_tm_nop', f'matmul_{33+k}', + forge.config.add_schedule_constraint([f'concatenate_{30+k}.dc.concatenate.0', f'index_{INDEX_START+j}.dc.select.0', f'index_{INDEX_START+j}.dc.buffer.1', f'{netlist_name}.output_hstack_{INDEX_START+1+j}_tm_nop', f'matmul_{33+k}', f'concatenate_{44+k}.dc.concatenate.0', f'index_{INDEX_START+2+j}.dc.select.0', f'index_{INDEX_START+2+j}.dc.buffer.1', f'{netlist_name}.output_hstack_{INDEX_START+3+j}_tm_nop', f'matmul_{48+k}']) - pybuda.config.override_op_size(f'{netlist_name}.output_hstack_{INDEX_START + 1 +j}_tm_nop', (1,4)) + forge.config.override_op_size(f'{netlist_name}.output_hstack_{INDEX_START + 1 +j}_tm_nop', (1,4)) # Removed these since they don't play nice with fusion # if layer_num < (num_layers - 1): - # 
pybuda.config.add_schedule_constraint([f'add_{76+k}', f'layers.{layer_num+1}.input_layernorm.weight_s_brcst_m2_0_0.lc1']) + # forge.config.add_schedule_constraint([f'add_{76+k}', f'layers.{layer_num+1}.input_layernorm.weight_s_brcst_m2_0_0.lc1']) # Ensure all decoders start at same op - # pybuda.config.set_epoch_break(f'layers.{layer_num}.input_layernorm.weight_s_brcst_m2_0_0.lc1') + # forge.config.set_epoch_break(f'layers.{layer_num}.input_layernorm.weight_s_brcst_m2_0_0.lc1') - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.loopback_outputs = generate_loopback_dict(num_layers) # IM SORRY JON perf_level = { None : None, 'none' : None, - 'light' : pybuda.PerfTraceLevel.LIGHT, - 'verbose': pybuda.PerfTraceLevel.VERBOSE }[perf] - pybuda.set_configuration_options(enable_auto_fusing=fuse, performance_trace=perf_level, backend_opt_level=opt_level, input_queues_on_host=False) + 'light' : forge.PerfTraceLevel.LIGHT, + 'verbose': forge.PerfTraceLevel.VERBOSE }[perf] + forge.set_configuration_options(enable_auto_fusing=fuse, performance_trace=perf_level, backend_opt_level=opt_level, input_queues_on_host=False) - pybuda_arch = { 'grayskull': pybuda.BackendDevice.Grayskull, - 'wormhole_b0': pybuda.BackendDevice.Wormhole_B0 }[arch] + forge_arch = { 'grayskull': forge.BackendDevice.Grayskull, + 'wormhole_b0': forge.BackendDevice.Wormhole_B0 }[arch] if tti_load is not None: - self.tt0 = pybuda.TTDevice.load_image(img_path=tti_load) + self.tt0 = forge.TTDevice.load_image(img_path=tti_load) else: - self.tt0 = pybuda.TTDevice('tt0', module=module, + self.tt0 = forge.TTDevice('tt0', module=module, # fp32_fallback=fallback, - arch=pybuda_arch, + arch=forge_arch, devtype=devtype, chip_ids=list(range(num_chips))) @@ -127,10 +127,10 @@ def debug_tensors(t1, t2): return max_err > 1 # return True - self.verify_cfg = pybuda.VerifyConfig(verify_all=True, + self.verify_cfg = forge.VerifyConfig(verify_all=True, verify_last=True, devtype=device, - arch=pybuda_arch, + arch=forge_arch, # golden_compare_callback=debug_tensors, intermediates=False, golden_ignore_df_precision=False) @@ -167,7 +167,7 @@ def __call__(self, *args, **kwargs): ) print(f'Saved image to {self.tti_save}') sys.exit(0) - self.pybuda.initialize_pipeline(training=False, + self.forge.initialize_pipeline(training=False, sample_inputs=init_args, output_queue=self.output_q, microbatch_count=self.micro_batch_size, @@ -175,15 +175,15 @@ def __call__(self, *args, **kwargs): _verify_cfg=self.verify_cfg, ) self.initialized = True - self.pybuda.sync() + self.forge.sync() self.tt0.push_to_inputs(*args) if self.prefill_kvs: - self.pybuda.run_generate(input_count=1, write_index=self.write_index, _sequential=True) + self.forge.run_generate(input_count=1, write_index=self.write_index, _sequential=True) else: - self.pybuda.run_forward(input_count=1, _sequential=True) + self.forge.run_forward(input_count=1, _sequential=True) ys = self.output_q.get() - outputs = tuple([ y.value().float() for y in ys if isinstance(y, self.pybuda.tensor.TensorFromPytorch)]) + outputs = tuple([ y.value().float() for y in ys if isinstance(y, self.forge.tensor.TensorFromPytorch)]) if self.verify_cfg: baseline = self.bound_module(*args, **kwargs) if len(outputs) != len(baseline): diff --git a/pybuda/test/llama/tt_eval.py b/forge/test/llama/tt_eval.py similarity index 99% rename from pybuda/test/llama/tt_eval.py rename to forge/test/llama/tt_eval.py index 4938b4131..75595207f 100644 --- 
a/pybuda/test/llama/tt_eval.py +++ b/forge/test/llama/tt_eval.py @@ -18,7 +18,7 @@ def main(): parser = ArgumentParser('Generate text token-by-token starting with a pre-filled KV cache') parser.add_argument('-m', '--model', type=str, default='decapoda-research/llama-7b-hf', help='Model name') - parser.add_argument('-d', '--device', choices=['pytorch', 'golden', 'silicon'], default='huggingface', help='huggingface: run using HF code only, pytorch: use our shim but run in PyTorch, golden/silicon: run via pybuda') + parser.add_argument('-d', '--device', choices=['pytorch', 'golden', 'silicon'], default='huggingface', help='huggingface: run using HF code only, pytorch: use our shim but run in PyTorch, golden/silicon: run via forge') parser.add_argument('--arch', choices=['greyskull', 'wormhole_b0'], default='wormhole_b0', help='Architecture to use for silicon') parser.add_argument('--num-chips', type=int, default=1, help='Number of chips to use') parser.add_argument('--fuse', action='store_true', help='Fuse layers') diff --git a/pybuda/test/mlir/llama/test_llama_inference.py b/forge/test/mlir/llama/test_llama_inference.py similarity index 98% rename from pybuda/test/mlir/llama/test_llama_inference.py rename to forge/test/mlir/llama/test_llama_inference.py index 692a8b8fe..00048d9a8 100644 --- a/pybuda/test/mlir/llama/test_llama_inference.py +++ b/forge/test/mlir/llama/test_llama_inference.py @@ -6,7 +6,7 @@ import pytest from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer -import pybuda +import forge from test.mlir.llama.utils.utils import load_model @@ -25,7 +25,7 @@ def test_llama_inference(): print(tokenizer.decode(generation_output[0])) # Compile the model - compiled_model = pybuda.compile(framework_model, input_ids) + compiled_model = forge.compile(framework_model, input_ids) @pytest.mark.skip(reason="No need to run in CI, this is PoC that should be mapped to work on device.") def test_llama_inference_no_cache_cpu(): diff --git a/pybuda/test/mlir/llama/tests/test_llama_embedding.py b/forge/test/mlir/llama/tests/test_llama_embedding.py similarity index 85% rename from pybuda/test/mlir/llama/tests/test_llama_embedding.py rename to forge/test/mlir/llama/tests/test_llama_embedding.py index e1bfb85ff..a53b07822 100644 --- a/pybuda/test/mlir/llama/tests/test_llama_embedding.py +++ b/forge/test/mlir/llama/tests/test_llama_embedding.py @@ -4,9 +4,9 @@ import torch import pytest -import pybuda +import forge from test.mlir.llama.utils.utils import load_model -from pybuda.op.eval.common import compare_with_golden_pcc +from forge.op.eval.common import compare_with_golden_pcc @pytest.mark.xfail(reason="L1 allocation issue on Metal") @@ -25,7 +25,7 @@ def test_llama_embedding(): golden_output = framework_model(*inputs) # Compile the model - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) # Run on TT device tt_out = compiled_model(*inputs) diff --git a/pybuda/test/mlir/llama/tests/test_llama_lm_head.py b/forge/test/mlir/llama/tests/test_llama_lm_head.py similarity index 84% rename from pybuda/test/mlir/llama/tests/test_llama_lm_head.py rename to forge/test/mlir/llama/tests/test_llama_lm_head.py index 1f08801fb..7bc5b54f1 100644 --- a/pybuda/test/mlir/llama/tests/test_llama_lm_head.py +++ b/forge/test/mlir/llama/tests/test_llama_lm_head.py @@ -4,9 +4,9 @@ import torch import pytest -import pybuda +import forge from test.mlir.llama.utils.utils import load_model -from pybuda.op.eval.common 
import compare_with_golden_pcc +from forge.op.eval.common import compare_with_golden_pcc @pytest.mark.xfail(reason="Squeeze op is not supported on MLIR.") @@ -24,7 +24,7 @@ def test_llama_lm_head(): golden_output = framework_model(*inputs) # Compile the model - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) # Run on TT device tt_out = compiled_model(*inputs) diff --git a/pybuda/test/mlir/llama/tests/test_llama_mlp.py b/forge/test/mlir/llama/tests/test_llama_mlp.py similarity index 84% rename from pybuda/test/mlir/llama/tests/test_llama_mlp.py rename to forge/test/mlir/llama/tests/test_llama_mlp.py index c40cb6d8b..33fbeba78 100644 --- a/pybuda/test/mlir/llama/tests/test_llama_mlp.py +++ b/forge/test/mlir/llama/tests/test_llama_mlp.py @@ -4,9 +4,9 @@ import torch import pytest -import pybuda +import forge from test.mlir.llama.utils.utils import load_model -from pybuda.op.eval.common import compare_with_golden_pcc +from forge.op.eval.common import compare_with_golden_pcc @pytest.mark.xfail(reason="Squeeze op is not supported on MLIR.") @@ -24,7 +24,7 @@ def test_llama_mlp(): golden_output = framework_model(*inputs) # Compile the model - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) # Run on TT device tt_out = compiled_model(*inputs) diff --git a/pybuda/test/mlir/llama/tests/test_llama_rms_norm.py b/forge/test/mlir/llama/tests/test_llama_rms_norm.py similarity index 84% rename from pybuda/test/mlir/llama/tests/test_llama_rms_norm.py rename to forge/test/mlir/llama/tests/test_llama_rms_norm.py index dab328f8e..e58c59ee0 100644 --- a/pybuda/test/mlir/llama/tests/test_llama_rms_norm.py +++ b/forge/test/mlir/llama/tests/test_llama_rms_norm.py @@ -4,9 +4,9 @@ import torch import pytest -import pybuda +import forge from test.mlir.llama.utils.utils import load_model -from pybuda.op.eval.common import compare_with_golden_pcc +from forge.op.eval.common import compare_with_golden_pcc @pytest.mark.xfail(reason="Tile broadcast op is not supported on MLIR.") @@ -24,7 +24,7 @@ def test_llama_lm_head(): golden_output = framework_model(*inputs) # Compile the model - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) # Run on TT device tt_out = compiled_model(*inputs) diff --git a/pybuda/test/mlir/llama/tests/test_llama_rotary_emb.py b/forge/test/mlir/llama/tests/test_llama_rotary_emb.py similarity index 86% rename from pybuda/test/mlir/llama/tests/test_llama_rotary_emb.py rename to forge/test/mlir/llama/tests/test_llama_rotary_emb.py index fca3c8346..87d64d385 100644 --- a/pybuda/test/mlir/llama/tests/test_llama_rotary_emb.py +++ b/forge/test/mlir/llama/tests/test_llama_rotary_emb.py @@ -4,9 +4,9 @@ import torch import pytest -import pybuda +import forge from test.mlir.llama.utils.utils import load_model -from pybuda.op.eval.common import compare_with_golden_pcc +from forge.op.eval.common import compare_with_golden_pcc @pytest.mark.xfail(reason="Dynamic shapes..") @@ -26,7 +26,7 @@ def test_llama_rotary_emb(): golden_output = framework_model(*inputs) # Compile the model - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) # Run on TT device tt_out = compiled_model(*inputs) diff --git a/pybuda/test/mlir/llama/tests/test_llama_self_attn.py 
b/forge/test/mlir/llama/tests/test_llama_self_attn.py similarity index 89% rename from pybuda/test/mlir/llama/tests/test_llama_self_attn.py rename to forge/test/mlir/llama/tests/test_llama_self_attn.py index 2772c0483..04cef291a 100644 --- a/pybuda/test/mlir/llama/tests/test_llama_self_attn.py +++ b/forge/test/mlir/llama/tests/test_llama_self_attn.py @@ -4,9 +4,9 @@ import torch import pytest -import pybuda +import forge from test.mlir.llama.utils.utils import load_model -from pybuda.op.eval.common import compare_with_golden_pcc +from forge.op.eval.common import compare_with_golden_pcc @pytest.mark.xfail(reason="Squeeze op is not supported on MLIR.") @@ -37,7 +37,7 @@ def forward(self, *inputs): golden_output = framework_model(*inputs) # Compile the model - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) # Run on TT device tt_out = compiled_model(*inputs) diff --git a/pybuda/test/mlir/llama/utils/utils.py b/forge/test/mlir/llama/utils/utils.py similarity index 90% rename from pybuda/test/mlir/llama/utils/utils.py rename to forge/test/mlir/llama/utils/utils.py index 2846f30f7..50da99103 100644 --- a/pybuda/test/mlir/llama/utils/utils.py +++ b/forge/test/mlir/llama/utils/utils.py @@ -4,11 +4,11 @@ from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer -import pybuda +import forge def load_model(model_path="openlm-research/open_llama_3b", use_cache=False): # Compiler configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False # Load Llama 3B model diff --git a/pybuda/test/mlir/mnist/__init__.py b/forge/test/mlir/mnist/__init__.py similarity index 100% rename from pybuda/test/mlir/mnist/__init__.py rename to forge/test/mlir/mnist/__init__.py diff --git a/pybuda/test/mlir/mnist/test_inference.py b/forge/test/mlir/mnist/test_inference.py similarity index 76% rename from pybuda/test/mlir/mnist/test_inference.py rename to forge/test/mlir/mnist/test_inference.py index e0b99560f..5b0f59f26 100644 --- a/pybuda/test/mlir/mnist/test_inference.py +++ b/forge/test/mlir/mnist/test_inference.py @@ -4,8 +4,8 @@ import torch from .utils import * -import pybuda -from pybuda.op.eval.common import compare_with_golden_pcc +import forge +from forge.op.eval.common import compare_with_golden_pcc def test_mnist_inference(): inputs = [torch.rand(1, 784)] @@ -13,7 +13,7 @@ def test_mnist_inference(): framework_model = MNISTLinear() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*[i.to("tt") for i in inputs]) co_out = [co.to("cpu") for co in co_out] diff --git a/pybuda/test/mlir/mnist/training/mnist_linear_pybuda.py b/forge/test/mlir/mnist/training/mnist_linear_forge.py similarity index 76% rename from pybuda/test/mlir/mnist/training/mnist_linear_pybuda.py rename to forge/test/mlir/mnist/training/mnist_linear_forge.py index dba714365..309587d8f 100644 --- a/pybuda/test/mlir/mnist/training/mnist_linear_pybuda.py +++ b/forge/test/mlir/mnist/training/mnist_linear_forge.py @@ -6,8 +6,8 @@ from torchvision import datasets, transforms from torch.utils.tensorboard import SummaryWriter -import pybuda -from pybuda import ( +import forge +from forge import ( CPUDevice, PyTorchModule, ) @@ -17,7 +17,7 @@ load_tb_writer, load_dataset, ) -from 
pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config class FeedForward(torch.nn.Module): def __init__(self, input_size, hidden_size, output_size): @@ -53,11 +53,11 @@ def train(loss_on_cpu=True): sequential = True framework_model = FeedForward(input_size, hidden_size, output_size) - tt_model = pybuda.PyTorchModule(f"mnist_linear_{batch_size}", framework_model) - tt_optimizer = pybuda.optimizers.SGD( + tt_model = forge.PyTorchModule(f"mnist_linear_{batch_size}", framework_model) + tt_optimizer = forge.optimizers.SGD( learning_rate=learning_rate, device_params=True ) - tt0 = pybuda.TTDevice("tt0", module=tt_model, optimizer=tt_optimizer) + tt0 = forge.TTDevice("tt0", module=tt_model, optimizer=tt_optimizer) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) @@ -72,9 +72,9 @@ def train(loss_on_cpu=True): if loss_on_cpu: cpu0 = CPUDevice("cpu0", module=PyTorchModule("identity", Identity())) - cpu0.place_loss_module(pybuda.PyTorchModule(f"loss_{batch_size}", torch.nn.CrossEntropyLoss())) + cpu0.place_loss_module(forge.PyTorchModule(f"loss_{batch_size}", torch.nn.CrossEntropyLoss())) else: - tt_loss = pybuda.PyTorchModule(f"loss_{batch_size}", torch.nn.CrossEntropyLoss()) + tt_loss = forge.PyTorchModule(f"loss_{batch_size}", torch.nn.CrossEntropyLoss()) tt0.place_loss_module(tt_loss) compiler_cfg = _get_global_compiler_config() @@ -83,7 +83,7 @@ def train(loss_on_cpu=True): if not loss_on_cpu: sample_target = (sample_target,) - checkpoint_queue = pybuda.initialize_pipeline( + checkpoint_queue = forge.initialize_pipeline( training=True, sample_inputs=sample_input, sample_targets=sample_target, @@ -109,20 +109,20 @@ def train(loss_on_cpu=True): else: tt0.push_to_target_inputs(targets) - pybuda.run_forward(input_count=1, _sequential=sequential) - pybuda.run_backward(input_count=1, zero_grad=True, _sequential=sequential) - pybuda.run_optimizer(checkpoint=True, _sequential=sequential) + forge.run_forward(input_count=1, _sequential=sequential) + forge.run_backward(input_count=1, zero_grad=True, _sequential=sequential) + forge.run_optimizer(checkpoint=True, _sequential=sequential) - loss_q = pybuda.run.get_loss_queue() + loss_q = forge.run.get_loss_queue() step = 0 loss = loss_q.get()[0] print(loss) # while not loss_q.empty(): # if loss_on_cpu: - # writer.add_scalar("Loss/PyBuda/overfit", loss_q.get()[0], step) + # writer.add_scalar("Loss/Forge/overfit", loss_q.get()[0], step) # else: - # writer.add_scalar("Loss/PyBuda/overfit", loss_q.get()[0].value()[0], step) + # writer.add_scalar("Loss/Forge/overfit", loss_q.get()[0].value()[0], step) # step += 1 writer.close() diff --git a/pybuda/test/mlir/mnist/training/mnist_linear_pytorch.py b/forge/test/mlir/mnist/training/mnist_linear_pytorch.py similarity index 100% rename from pybuda/test/mlir/mnist/training/mnist_linear_pytorch.py rename to forge/test/mlir/mnist/training/mnist_linear_pytorch.py diff --git a/pybuda/test/mlir/mnist/training/test_training.py b/forge/test/mlir/mnist/training/test_training.py similarity index 94% rename from pybuda/test/mlir/mnist/training/test_training.py rename to forge/test/mlir/mnist/training/test_training.py index c07e18806..177679fbf 100644 --- a/pybuda/test/mlir/mnist/training/test_training.py +++ b/forge/test/mlir/mnist/training/test_training.py @@ -5,7 +5,7 @@ import torch from torch import nn -import pybuda +import forge from .utils 
import * def test_mnist_training(): @@ -24,7 +24,7 @@ def test_mnist_training(): # Define model and instruct it to compile and run on TT device framework_model = MNISTLinear() - tt_model = pybuda.compile(framework_model) + tt_model = forge.compile(framework_model) tt_model.to("tt") # Create a torch loss and leave on CPU @@ -32,7 +32,7 @@ def test_mnist_training(): # Define optimizer and instruct it to compile and run on TT device framework_optimizer = torch.optim.SGD(framework_model.parameters(), lr=learning_rate) - tt_optimizer = pybuda.compile(framework_optimizer) + tt_optimizer = forge.compile(framework_optimizer) tt_optimizer.to("tt") for epoch_idx in range(num_epochs): diff --git a/pybuda/test/mlir/mnist/utils.py b/forge/test/mlir/mnist/utils.py similarity index 100% rename from pybuda/test/mlir/mnist/utils.py rename to forge/test/mlir/mnist/utils.py diff --git a/pybuda/test/mlir/resnet/test_resnet_inference.py b/forge/test/mlir/resnet/test_resnet_inference.py similarity index 78% rename from pybuda/test/mlir/resnet/test_resnet_inference.py rename to forge/test/mlir/resnet/test_resnet_inference.py index 5b2be5133..3ad6890b5 100644 --- a/pybuda/test/mlir/resnet/test_resnet_inference.py +++ b/forge/test/mlir/resnet/test_resnet_inference.py @@ -5,12 +5,12 @@ import torch from torchvision.models.resnet import resnet50 -import pybuda +import forge def test_resnet_inference(): # Compiler configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False # Load ResNet50 model @@ -24,4 +24,4 @@ def test_resnet_inference(): print(generation_output) # Compile the model - compiled_model = pybuda.compile(framework_model, input_image) + compiled_model = forge.compile(framework_model, input_image) diff --git a/pybuda/test/mlir/test_ops.py b/forge/test/mlir/test_ops.py similarity index 90% rename from pybuda/test/mlir/test_ops.py rename to forge/test/mlir/test_ops.py index 00891c33c..5d1e786e3 100644 --- a/pybuda/test/mlir/test_ops.py +++ b/forge/test/mlir/test_ops.py @@ -9,8 +9,8 @@ import torch from torch import nn -import pybuda -from pybuda.op.eval.common import compare_with_golden_pcc +import forge +from forge.op.eval.common import compare_with_golden_pcc def test_add(): class Add(nn.Module): @@ -25,7 +25,7 @@ def forward(self, a, b): framework_model = Add() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -56,7 +56,7 @@ def forward(self, a): framework_model = Transpose(dims) fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -76,7 +76,7 @@ def forward(self, a, b): framework_model = Subtract() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -97,7 +97,7 @@ def forward(self, a, b): framework_model = Multiply() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, 
sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -119,7 +119,7 @@ def forward(self, a): framework_model = ReLU() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -141,7 +141,7 @@ def forward(self, a): framework_model = Linear() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -163,7 +163,7 @@ def forward(self, a): framework_model = Softmax() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -186,7 +186,7 @@ def forward(self, a): framework_model = ReduceSum() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -208,7 +208,7 @@ def forward(self, a): framework_model = ReduceMean() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -235,7 +235,7 @@ def forward(self, x, y): framework_model = Matmul() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -266,7 +266,7 @@ def forward(self, x): framework_model = Mean() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] @@ -290,7 +290,7 @@ def forward(self, x): framework_model = Sqrt() fw_out = framework_model(*inputs) - compiled_model = pybuda.compile(framework_model, sample_inputs=inputs) + compiled_model = forge.compile(framework_model, sample_inputs=inputs) co_out = compiled_model(*inputs) co_out = [co.to("cpu") for co in co_out] diff --git a/pybuda/test/mlir/test_training.py b/forge/test/mlir/test_training.py similarity index 89% rename from pybuda/test/mlir/test_training.py rename to forge/test/mlir/test_training.py index 8fe6b5223..3676de851 100644 --- a/pybuda/test/mlir/test_training.py +++ b/forge/test/mlir/test_training.py @@ -5,9 +5,9 @@ import torch import torch.nn as nn -import pybuda -import pybuda.config -from pybuda.op.eval.common import compare_with_golden_pcc +import forge +import forge.config +from forge.op.eval.common import compare_with_golden_pcc def test_torch_training(): class MatmulParam(nn.Module): @@ -28,7 +28,7 @@ def forward(self, x): loss_fn = torch.nn.MSELoss() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) - tt_model = pybuda.compile(model, sample_inputs=[torch.rand(shape)], loss=loss_fn, optimizer=optimizer) + tt_model = forge.compile(model, 
sample_inputs=[torch.rand(shape)], loss=loss_fn, optimizer=optimizer) num_epochs = 20 diff --git a/pybuda/test/model_demos/__init__.py b/forge/test/model_demos/__init__.py similarity index 100% rename from pybuda/test/model_demos/__init__.py rename to forge/test/model_demos/__init__.py diff --git a/pybuda/test/model_demos/high_prio/cnn/__init__.py b/forge/test/model_demos/high_prio/cnn/__init__.py similarity index 100% rename from pybuda/test/model_demos/high_prio/cnn/__init__.py rename to forge/test/model_demos/high_prio/cnn/__init__.py diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/__init__.py b/forge/test/model_demos/high_prio/cnn/onnx/__init__.py similarity index 100% rename from pybuda/test/model_demos/high_prio/cnn/onnx/__init__.py rename to forge/test/model_demos/high_prio/cnn/onnx/__init__.py diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_ddrnet.py b/forge/test/model_demos/high_prio/cnn/onnx/test_ddrnet.py similarity index 83% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_ddrnet.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_ddrnet.py index 3e41e0887..ea80558dd 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_ddrnet.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_ddrnet.py @@ -1,16 +1,16 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda, os +import forge, os import pytest from torchvision import transforms import requests from PIL import Image import onnx -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendDevice +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind +from forge._C.backend_api import BackendDevice variants = ["ddrnet23s", "ddrnet23", "ddrnet39"] @@ -18,24 +18,24 @@ @pytest.mark.parametrize("variant", variants) def test_ddrnet(variant, test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: # These overrides are planned to be ON by default - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" if test_device.arch == BackendDevice.Grayskull: # Temp mitigations for net2pipe errors, should be removed. 
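The ddrnet hunk above shows the per-test setup that recurs throughout these model demos once the rename lands: fetch the global compiler config, choose the balancer policy and default data format, then set the renamed FORGE_* environment flags. A minimal sketch of that setup, assuming the renamed forge APIs exactly as they appear in the diff (the helper name itself is illustrative, not part of the patch):

import os
import forge

def configure_ribbon(calculate_target_cycles: bool = False):
    # Recurring model-demo setup, expressed with the renamed APIs and flags.
    compiler_cfg = forge.config._get_global_compiler_config()
    compiler_cfg.balancer_policy = "Ribbon"
    compiler_cfg.default_df_override = forge.DataFormat.Float16_b
    os.environ["FORGE_RIBBON2"] = "1"
    if calculate_target_cycles:
        # Enabled for Wormhole B0 in the hunk above; planned to be on by default.
        os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1"
    return compiler_cfg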
# - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" - # STEP 2: # Create PyBuda module from onnx weights + # STEP 2: # Create Forge module from onnx weights model_name = f"{variant}_onnx" load_path = ( @@ -43,7 +43,7 @@ def test_ddrnet(variant, test_device): ) model = onnx.load(load_path) - tt_model = pybuda.OnnxModule(model_name, model, load_path) + tt_model = forge.OnnxModule(model_name, model, load_path) # STEP 3: Prepare input url = "https://raw.githubusercontent.com/pytorch/hub/master/images/dog.jpg" @@ -85,11 +85,11 @@ def test_ddrnet(variant, test_device): @pytest.mark.parametrize("variant", variants) def test_ddrnet_semantic_segmentation_onnx(variant, test_device): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "36864" @@ -152,7 +152,7 @@ def test_ddrnet_semantic_segmentation_onnx(variant, test_device): model = onnx.load(load_path) onnx.checker.check_model(model) model_name = f"onnx_{variant}" - tt_model = pybuda.OnnxModule(model_name, model, load_path) + tt_model = forge.OnnxModule(model_name, model, load_path) # Prepare input image_path = "third_party/confidential_customer_models/cv_demos/ddrnet/semantic_segmentation/image/road_scenes.png" diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_dla.py b/forge/test/model_demos/high_prio/cnn/onnx/test_dla.py similarity index 82% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_dla.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_dla.py index c8f84725c..86de75749 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_dla.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_dla.py @@ -1,15 +1,15 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import onnx import os -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig +from forge.verify.backend import verify_module +from forge import VerifyConfig import requests import pytest -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendDevice +from forge.verify.config import TestKind +from forge._C.backend_api import BackendDevice import torchvision.transforms as transforms from PIL import Image @@ -30,11 +30,11 @@ @pytest.mark.parametrize("variant", variants) def test_dla_onnx(test_device, variant): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # Load data sample url = 
"https://images.rawpixel.com/image_1300/cHJpdmF0ZS9sci9pbWFnZXMvd2Vic2l0ZS8yMDIyLTA1L3BkMTA2LTA0Ny1jaGltXzEuanBn.jpg" @@ -64,7 +64,7 @@ def test_dla_onnx(test_device, variant): # Load DLA model model_name = f"dla_{variant}_onnx" onnx_model = onnx.load(onnx_model_path) - tt_model = pybuda.OnnxModule(model_name, onnx_model, onnx_model_path) + tt_model = forge.OnnxModule(model_name, onnx_model, onnx_model_path) pcc = 0.99 if test_device.arch == BackendDevice.Wormhole_B0: @@ -76,7 +76,7 @@ def test_dla_onnx(test_device, variant): if variant == "dla46_c": pcc = 0.97 if variant == "dla102x2": - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" verify_module( tt_model, diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_fpn.py b/forge/test/model_demos/high_prio/cnn/onnx/test_fpn.py similarity index 68% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_fpn.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_fpn.py index e6947ca20..98cbac273 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_fpn.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_fpn.py @@ -2,25 +2,25 @@ # SPDX-License-Identifier: Apache-2.0 import torch -import pybuda +import forge import onnx import os -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig +from forge.verify.backend import verify_module +from forge import VerifyConfig def test_fpn_onnx(test_device, test_kind): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_FORCE_EMULATE_HARVESTED"] = "1" + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_FORCE_EMULATE_HARVESTED"] = "1" # Load FPN model onnx_model_path = "third_party/confidential_customer_models/generated/files/fpn.onnx" model = onnx.load(onnx_model_path) - tt_model = pybuda.OnnxModule("onnx_fpn", model, onnx_model_path) + tt_model = forge.OnnxModule("onnx_fpn", model, onnx_model_path) feat0 = torch.rand(1, 10, 64, 64) feat1 = torch.rand(1, 20, 16, 16) diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_hardnet.py b/forge/test/model_demos/high_prio/cnn/onnx/test_hardnet.py similarity index 73% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_hardnet.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_hardnet.py index 5667a054c..e5b1bfb10 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_hardnet.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_hardnet.py @@ -1,16 +1,16 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda, os +import forge, os import onnx from PIL import Image from torchvision import transforms import urllib -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module import pytest -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendDevice +from forge import VerifyConfig +from forge.verify.config import TestKind +from forge._C.backend_api import BackendDevice variants = ["hardnet68", "hardnet85", "hardnet68ds", "hardnet39ds"] @@ -18,17 +18,17 @@ @pytest.mark.parametrize("variant", variants) def test_hardnet_onnx(variant, test_device): - # Set PyBuda configuration parameters - 
compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" if variant == "hardnet68ds": - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if variant == "hardnet85" and test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" # Download an example image url, filename = ( @@ -58,9 +58,9 @@ def test_hardnet_onnx(variant, test_device): ) model_name = f"{variant}_onnx" - # Create PyBuda module from onnx weights + # Create Forge module from onnx weights model = onnx.load(load_path) - tt_model = pybuda.OnnxModule(model_name, model, load_path) + tt_model = forge.OnnxModule(model_name, model, load_path) # Run inference on Tenstorrent device verify_module( diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_lstm_genom.py b/forge/test/model_demos/high_prio/cnn/onnx/test_lstm_genom.py similarity index 75% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_lstm_genom.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_lstm_genom.py index e6b5f8121..aa6001562 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_lstm_genom.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_lstm_genom.py @@ -6,11 +6,11 @@ import os import onnx import tensorflow as tf -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind from test.utils import download_model def test_lstm_genom_onnx(test_device): @@ -20,7 +20,7 @@ def test_lstm_genom_onnx(test_device): # Run inference on Tenstorrent device inputs = tf.random.uniform(shape=[1, 10, 4]) verify_module( - pybuda.OnnxModule("onnx_lstm", model, load_path), + forge.OnnxModule("onnx_lstm", model, load_path), input_shapes=(inputs.shape,), inputs=[(inputs,)], verify_cfg=VerifyConfig( diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_lstm_valence.py b/forge/test/model_demos/high_prio/cnn/onnx/test_lstm_valence.py similarity index 61% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_lstm_valence.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_lstm_valence.py index 8256da206..03097919c 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_lstm_valence.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_lstm_valence.py @@ -6,32 +6,32 @@ import os import onnx import tensorflow as tf -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind def test_lstm_valence_onnx(test_device): # Load model checkpoint from HuggingFace load_path = 
"third_party/confidential_customer_models/model_2/onnx/lstm_valence/lstm-valence-model.onnx" model = onnx.load(load_path) - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" # Required to patch data-mismatch. Here is followup issue # to check this out in more details: - # tenstorrent/pybuda#1828 - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + # tenstorrent/forge#1828 + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" # Run inference on Tenstorrent device inputs = tf.random.uniform(shape=[1, 1, 282]) verify_module( - pybuda.OnnxModule("onnx_lstm", model, load_path), + forge.OnnxModule("onnx_lstm", model, load_path), input_shapes=(inputs.shape,), inputs=[(inputs,)], verify_cfg=VerifyConfig( diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_perceiverio.py b/forge/test/model_demos/high_prio/cnn/onnx/test_perceiverio.py similarity index 83% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_perceiverio.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_perceiverio.py index 4077b68e8..0b68225b8 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_perceiverio.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_perceiverio.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import onnx import os @@ -11,9 +11,9 @@ from transformers import AutoImageProcessor -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind def get_sample_data(model_name): @@ -35,15 +35,15 @@ def get_sample_data(model_name): ) def test_perceiver_for_image_classification_onnx(test_device, model_name): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" verify_enabled = True - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: if model_name == "deepmind/vision-perceiver-learned": os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{105*1024}" @@ -60,16 +60,16 @@ def test_perceiver_for_image_classification_onnx(test_device, model_name): elif model_name == "deepmind/vision-perceiver-fourier": os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{101*1024}" - elif test_device.arch == pybuda.BackendDevice.Grayskull: + elif test_device.arch == forge.BackendDevice.Grayskull: - if test_device.devtype == pybuda.BackendType.Silicon: + if test_device.devtype == forge.BackendType.Silicon: verify_enabled = False if model_name == "deepmind/vision-perceiver-learned": 
os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{101*1024}" elif model_name == "deepmind/vision-perceiver-fourier": - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" compiler_cfg.place_on_new_epoch("hslice_50.dc.sparse_matmul.2.lc2") compiler_cfg.place_on_new_epoch("matmul_47") os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{101*1024}" @@ -90,8 +90,8 @@ def test_perceiver_for_image_classification_onnx(test_device, model_name): onnx_model = onnx.load(onnx_model_path) onnx.checker.check_model(onnx_model) - # Create PyBuda module from Onnx model - tt_model = pybuda.OnnxModule( + # Create Forge module from Onnx model + tt_model = forge.OnnxModule( str(model_name.split("/")[-1].replace("-", "_")) + "_onnx", onnx_model, onnx_model_path, diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_retinanet.py b/forge/test/model_demos/high_prio/cnn/onnx/test_retinanet.py similarity index 78% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_retinanet.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_retinanet.py index c30c23a99..36ac66888 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_retinanet.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_retinanet.py @@ -2,14 +2,14 @@ # SPDX-License-Identifier: Apache-2.0 -# STEP 0: import PyBuda library +# STEP 0: import Forge library import pytest -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig, PyTorchModule -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind -import pybuda +from forge.verify.backend import verify_module +from forge import VerifyConfig, PyTorchModule +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind +import forge import os import onnx @@ -26,7 +26,7 @@ ######## def img_preprocess(scal_val=1): - pil_img = Image.open("pybuda/test/model_demos/utils/cnn/onnx/images/carvana.jpg") + pil_img = Image.open("forge/test/model_demos/utils/cnn/onnx/images/carvana.jpg") scale=scal_val w, h = pil_img.size print("----", w, h) @@ -48,30 +48,30 @@ def img_preprocess(scal_val=1): ######### def test_retinanet_r101_640x480_onnx(test_device): - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" - os.environ["PYBUDA_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{76*1024}" - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_BALANCER_PREPASS_DISABLED"] = "1" - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_BALANCER_PREPASS_DISABLED"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" # Temp mitigations for net2pipe errors, should be removed. 
# - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.graph_solver_self_cut_type = "ConsumerOperandDataEdgesFirst" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_356"] = 3 - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model load_path = "third_party/confidential_customer_models/model_2/onnx/retinanet/retinanet-9.onnx" model = onnx.load(load_path) - tt_model = pybuda.OnnxModule("onnx_retinanet", model, load_path) + tt_model = forge.OnnxModule("onnx_retinanet", model, load_path) # Image preprocessing img_tensor = img_preprocess() @@ -117,12 +117,12 @@ def img_preprocessing(): @pytest.mark.parametrize("variant", variants) def test_retinanet_onnx(variant, test_device): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_RIBBON2"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "73728" @@ -151,9 +151,9 @@ def test_retinanet_onnx(variant, test_device): os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "69632" # Temp mitigations for net2pipe errors, should be removed. 
# - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" if variant == "retinanet_rn18fpn": compiler_cfg.balancer_op_override("conv2d_82.dc.matmul.11", "t_stream_shape", (1,1)) @@ -177,7 +177,7 @@ def test_retinanet_onnx(variant, test_device): ) model_name = f"onnx_{variant}" model = onnx.load(load_path) - tt_model = pybuda.OnnxModule(model_name, model, load_path) + tt_model = forge.OnnxModule(model_name, model, load_path) # Prepare input input_batch = img_preprocessing() diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_segformer_imgcls.py b/forge/test/model_demos/high_prio/cnn/onnx/test_segformer_imgcls.py similarity index 72% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_segformer_imgcls.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_segformer_imgcls.py index fe7a20e76..cd56fcf9d 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_segformer_imgcls.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_segformer_imgcls.py @@ -2,10 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind from transformers import AutoImageProcessor import os import pytest @@ -36,15 +36,15 @@ def get_sample_data(model_name): @pytest.mark.parametrize("variant", variants_img_classification) def test_segformer_image_classification_onnx(test_device, variant): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" pcc_value = 0.99 - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: if variant in [ "nvidia/mit-b1", @@ -53,9 +53,9 @@ def test_segformer_image_classification_onnx(test_device, variant): "nvidia/mit-b4", "nvidia/mit-b5", ]: - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - if variant == "nvidia/mit-b0" and test_device.devtype == pybuda.BackendType.Silicon: + if variant == "nvidia/mit-b0" and test_device.devtype == forge.BackendType.Silicon: pcc_value = 0.97 # Load the sample image @@ -65,7 +65,7 @@ def test_segformer_image_classification_onnx(test_device, variant): model = onnx.load(onnx_model_path) onnx.checker.check_model(model) - tt_model = pybuda.OnnxModule(str(variant).split("/")[-1].replace("-", "_"), model, onnx_model_path) + tt_model = forge.OnnxModule(str(variant).split("/")[-1].replace("-", "_"), model, onnx_model_path) # Run inference on Tenstorrent device verify_module( @@ -77,7 +77,7 @@ def test_segformer_image_classification_onnx(test_device, variant): 
devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, verify_tvm_compile=True, pcc=pcc_value, ), diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_segformer_semseg.py b/forge/test/model_demos/high_prio/cnn/onnx/test_segformer_semseg.py similarity index 78% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_segformer_semseg.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_segformer_semseg.py index cda2ad81b..5146b4a52 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_segformer_semseg.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_segformer_semseg.py @@ -2,10 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind from transformers import AutoImageProcessor import os import pytest @@ -35,15 +35,15 @@ def get_sample_data(model_name): @pytest.mark.parametrize("variant", variants_semseg) def test_segformer_semantic_segmentation_onnx(test_device, variant): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" pcc_value = 0.99 - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: if variant in [ "nvidia/segformer-b1-finetuned-ade-512-512", "nvidia/segformer-b2-finetuned-ade-512-512", @@ -51,12 +51,12 @@ def test_segformer_semantic_segmentation_onnx(test_device, variant): "nvidia/segformer-b4-finetuned-ade-512-512", ]: - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - if variant == "nvidia/segformer-b2-finetuned-ade-512-512" and test_device.devtype == pybuda.BackendType.Silicon: + if variant == "nvidia/segformer-b2-finetuned-ade-512-512" and test_device.devtype == forge.BackendType.Silicon: pcc_value = 0.98 - elif test_device.arch == pybuda.BackendDevice.Grayskull: + elif test_device.arch == forge.BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False if variant == "nvidia/segformer-b2-finetuned-ade-512-512": @@ -71,7 +71,7 @@ def test_segformer_semantic_segmentation_onnx(test_device, variant): compiler_cfg.place_on_new_epoch("add_3523") compiler_cfg.place_on_new_epoch("concatenate_3527.dc.concatenate.0") - if test_device.devtype == pybuda.BackendType.Silicon: + if test_device.devtype == forge.BackendType.Silicon: if variant in [ "nvidia/segformer-b0-finetuned-ade-512-512", @@ -90,7 +90,7 @@ def test_segformer_semantic_segmentation_onnx(test_device, variant): model = onnx.load(onnx_model_path) onnx.checker.check_model(model) - tt_model = pybuda.OnnxModule(str(variant).split("/")[-1].replace("-", "_"), model, onnx_model_path) + tt_model = forge.OnnxModule(str(variant).split("/")[-1].replace("-", "_"), model, onnx_model_path) # Run inference on Tenstorrent 
device verify_module( @@ -102,7 +102,7 @@ def test_segformer_semantic_segmentation_onnx(test_device, variant): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, verify_tvm_compile=True, pcc=pcc_value, ), diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_yolo_v3.py b/forge/test/model_demos/high_prio/cnn/onnx/test_yolo_v3.py similarity index 80% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_yolo_v3.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_yolo_v3.py index b2ca57804..8dc8d01f5 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_yolo_v3.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_yolo_v3.py @@ -9,11 +9,11 @@ import onnx import torch -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind ######## @@ -43,14 +43,14 @@ def preprocess(img): @pytest.mark.skip(reason="While loop in model, not supported yet") def test_yolov3_tiny_onnx(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model load_path = "third_party/confidential_customer_models/model_2/onnx/saved/yolo_v3/tiny-yolov3-11.onnx" model = onnx.load(load_path) - tt_model = pybuda.OnnxModule("onnx_yolov3_tiny", model, load_path) + tt_model = forge.OnnxModule("onnx_yolov3_tiny", model, load_path) # Image preprocessing pil_img = Image.open("third_party/confidential_customer_models/model_2/onnx/saved/yolo_v3/carvana.jpg") @@ -74,14 +74,14 @@ def test_yolov3_tiny_onnx(test_device): @pytest.mark.skip(reason="While loop in model, not supported yet") def test_yolov3_onnx(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model load_path = "third_party/confidential_customer_models/model_2/onnx/saved/yolo_v3/yolov3-10.onnx" model = onnx.load(load_path) - tt_model = pybuda.OnnxModule("onnx_yolov3_tiny", model, load_path) + tt_model = forge.OnnxModule("onnx_yolov3_tiny", model, load_path) # Image preprocessing pil_img = Image.open("third_party/confidential_customer_models/model_2/onnx/saved/yolo_v3/carvana.jpg") diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_yolo_v5.py b/forge/test/model_demos/high_prio/cnn/onnx/test_yolo_v5.py similarity index 82% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_yolo_v5.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_yolo_v5.py index 96deb8905..c3b6d9b2e 100644 --- 
a/pybuda/test/model_demos/high_prio/cnn/onnx/test_yolo_v5.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_yolo_v5.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda, os +import forge, os import requests import torch from PIL import Image @@ -10,10 +10,10 @@ import numpy as np from yolov5.utils.dataloaders import exif_transpose, letterbox import onnx, pytest -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendDevice +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind +from forge._C.backend_api import BackendDevice def data_preprocessing(ims: Image.Image, size: tuple) -> tuple: @@ -66,11 +66,11 @@ def data_preprocessing(ims: Image.Image, size: tuple) -> tuple: @pytest.mark.parametrize("variant", variants) def test_yolo_v5_320x320_onnx(test_device, variant): - # pybuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" input_size = 320 @@ -93,7 +93,7 @@ def test_yolo_v5_320x320_onnx(test_device, variant): # Run inference on Tenstorrent device verify_module( - pybuda.OnnxModule(model_name, onnx_model, onnx_model_path), + forge.OnnxModule(model_name, onnx_model, onnx_model_path), input_shapes=([pixel_values.shape]), inputs=([pixel_values]), verify_cfg=VerifyConfig( @@ -111,22 +111,22 @@ def test_yolo_v5_320x320_onnx(test_device, variant): @pytest.mark.parametrize("variant", variants) def test_yolo_v5_480x480_onnx(test_device, variant): - # pybuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" compiler_cfg.enable_tm_cpu_fallback = True # Temp mitigations for net2pipe errors, should be removed. 
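These yolo_v5 hunks rename a long list of PYBUDA_* overrides to FORGE_* names. For local environments or CI scripts that still export the old names, a hypothetical shim (not part of this patch) could mirror them onto the renamed flags:

import os

def mirror_legacy_env() -> None:
    # Copy any legacy PYBUDA_* variable onto its renamed FORGE_* counterpart,
    # without overwriting a FORGE_* value that is already set.
    for key, value in list(os.environ.items()):
        if key.startswith("PYBUDA_"):
            os.environ.setdefault("FORGE_" + key[len("PYBUDA_"):], value)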
# - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{112*1024}" if variant == "yolov5m": compiler_cfg.balancer_op_override( @@ -137,11 +137,11 @@ def test_yolo_v5_480x480_onnx(test_device, variant): elif test_device.arch == BackendDevice.Grayskull: if variant in ["yolov5n", "yolov5s"]: - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if variant in ["yolov5m", "yolov5x"]: - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" if variant == "yolov5m": compiler_cfg.balancer_op_override( "concatenate_26.dc.concatenate.30.dc.concatenate.0.dc.concatenate.12", @@ -149,7 +149,7 @@ def test_yolo_v5_480x480_onnx(test_device, variant): (1, 1), ) if variant == "yolov5x": - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{112*1024}" compiler_cfg.balancer_op_override( "concatenate_40.dc.concatenate.30.dc.concatenate.0.dc.concatenate.12", @@ -173,7 +173,7 @@ def test_yolo_v5_480x480_onnx(test_device, variant): # Run inference on Tenstorrent device verify_module( - pybuda.OnnxModule(model_name, onnx_model, onnx_model_path), + forge.OnnxModule(model_name, onnx_model, onnx_model_path), input_shapes=([pixel_values.shape]), inputs=([pixel_values]), verify_cfg=VerifyConfig( @@ -191,16 +191,16 @@ def test_yolo_v5_480x480_onnx(test_device, variant): @pytest.mark.parametrize("variant", variants) def test_yolo_v5_640x640_onnx(test_device, variant): - # pybuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" if variant in ["yolov5n", "yolov5s"]: if variant == "yolov5s": @@ -220,7 +220,7 @@ def test_yolo_v5_640x640_onnx(test_device, variant): if variant == "yolov5l": os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{112*1024}" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg.balancer_op_override( "concatenate_405.dc.concatenate.7", "grid_shape", (1, 1) ) @@ -242,10 +242,10 @@ def test_yolo_v5_640x640_onnx(test_device, variant): os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{112*1024}" if 
variant in ["yolov5m", "yolov5x"]: - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if variant == "yolov5m": compiler_cfg.balancer_op_override( @@ -279,7 +279,7 @@ def test_yolo_v5_640x640_onnx(test_device, variant): # Run inference on Tenstorrent device verify_module( - pybuda.OnnxModule(model_name, onnx_model, onnx_model_path), + forge.OnnxModule(model_name, onnx_model, onnx_model_path), input_shapes=([pixel_values.shape]), inputs=([pixel_values]), verify_cfg=VerifyConfig( diff --git a/pybuda/test/model_demos/high_prio/cnn/onnx/test_yolo_x.py b/forge/test/model_demos/high_prio/cnn/onnx/test_yolo_x.py similarity index 89% rename from pybuda/test/model_demos/high_prio/cnn/onnx/test_yolo_x.py rename to forge/test/model_demos/high_prio/cnn/onnx/test_yolo_x.py index 13c87161d..c72f1c67e 100644 --- a/pybuda/test/model_demos/high_prio/cnn/onnx/test_yolo_x.py +++ b/forge/test/model_demos/high_prio/cnn/onnx/test_yolo_x.py @@ -2,16 +2,16 @@ # SPDX-License-Identifier: Apache-2.0 -import pybuda, os +import forge, os import pytest import cv2, torch import numpy as np import onnx -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind import requests -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice def preprocess(img, input_size, swap=(2, 0, 1)): @@ -40,12 +40,12 @@ def preprocess(img, input_size, swap=(2, 0, 1)): @pytest.mark.parametrize("variant", variants) def test_yolox_onnx(variant, test_device): - # pybuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: @@ -65,9 +65,9 @@ def test_yolox_onnx(variant, test_device): elif variant == "yolox_m": - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" compiler_cfg.place_on_new_epoch("conv2d_187.dc.matmul.8") compiler_cfg.balancer_op_override("conv2d_7.dc.conv2d.3.dc.reshape.0.dc.sparse_matmul.4.lc2", "t_stream_shape", (1, 4)) @@ -78,13 +78,13 @@ def test_yolox_onnx(variant, test_device): elif variant in ["yolox_l", "yolox_darknet", "yolox_x"]: - os.environ["PYBUDA_FORK_JOIN_BUF_QUEUES"] = "1" - os.environ["PYBUDA_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_BUF_QUEUES"] = "1" + os.environ["FORGE_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" + 
os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" if variant == "yolox_l": - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg.balancer_op_override("conv2d_7.dc.conv2d.5.dc.reshape.0.dc.sparse_matmul.4.lc2", "t_stream_shape", (1, 4)) compiler_cfg.balancer_op_override("conv2d_7.dc.conv2d.3.dc.reshape.0.dc.sparse_matmul.4.lc2", "t_stream_shape", (1, 4)) compiler_cfg.balancer_op_override("conv2d_7.dc.conv2d.1.dc.reshape.0.dc.sparse_matmul.10.lc2", "t_stream_shape", (1, 4)) @@ -146,9 +146,9 @@ def test_yolox_onnx(variant, test_device): elif variant in ["yolox_l", "yolox_darknet", "yolox_x"]: - os.environ["PYBUDA_FORK_JOIN_BUF_QUEUES"] = "1" - os.environ["PYBUDA_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_BUF_QUEUES"] = "1" + os.environ["FORGE_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" if variant == "yolox_l": @@ -179,11 +179,11 @@ def test_yolox_onnx(variant, test_device): elif variant == "yolox_x": - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg.place_on_new_epoch("conv2d_385.dc.conv2d.5.dc.matmul.11") compiler_cfg.place_on_new_epoch("conv2d_385.dc.conv2d.1.dc.matmul.11") compiler_cfg.place_on_new_epoch("conv2d_385.dc.conv2d.3.dc.matmul.11") @@ -214,7 +214,7 @@ def test_yolox_onnx(variant, test_device): onnx_model = onnx.load(onnx_model_path) onnx.checker.check_model(onnx_model) model_name = f"onnx_{variant}" - tt_model = pybuda.OnnxModule(model_name, onnx_model, onnx_model_path) + tt_model = forge.OnnxModule(model_name, onnx_model, onnx_model_path) # PCC if test_device.arch == BackendDevice.Wormhole_B0: diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/__init__.py b/forge/test/model_demos/high_prio/cnn/pytorch/__init__.py similarity index 100% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/__init__.py rename to forge/test/model_demos/high_prio/cnn/pytorch/__init__.py diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_alexnet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_alexnet.py similarity index 87% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_alexnet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_alexnet.py index e1218db39..e111c39b8 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_alexnet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_alexnet.py @@ -9,28 +9,28 @@ from torchvision import transforms from loguru import logger -import pybuda -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendType -from pybuda.verify.backend import verify_module +import forge +from forge import VerifyConfig +from forge.verify.config import TestKind +from forge._C.backend_api import BackendType +from forge.verify.backend import verify_module from pytorchcv.model_provider import get_model as ptcv_get_model @pytest.mark.skip(reason="Not supported") def 
test_alexnet_torchhub(test_device): # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" - os.environ["PYBUDA_CONV2D_SPARSE_SECOND"] = "1" + os.environ["FORGE_CONV2D_SPARSE_SECOND"] = "1" # Load model framework_model = download_model( torch.hub.load, "pytorch/vision:v0.10.0", "alexnet", pretrained=True ) framework_model.eval() - pybuda_model = pybuda.PyTorchModule("pt_alexnet_torchhub", framework_model) + forge_model = forge.PyTorchModule("pt_alexnet_torchhub", framework_model) # Load and pre-process image try: @@ -65,7 +65,7 @@ def test_alexnet_torchhub(test_device): # Verify verify_module( - pybuda_model, + forge_model, input_shapes=[(img_tensor.shape,)], inputs=[(img_tensor,)], verify_cfg=VerifyConfig( @@ -81,15 +81,15 @@ def test_alexnet_torchhub(test_device): @pytest.mark.skip(reason="Not supported") def test_alexnet_osmr(test_device): # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" - os.environ["PYBUDA_CONV2D_SPARSE_SECOND"] = "1" + os.environ["FORGE_CONV2D_SPARSE_SECOND"] = "1" # Load model framework_model = download_model(ptcv_get_model, "alexnet", pretrained=True) framework_model.eval() - pybuda_model = pybuda.PyTorchModule("pt_alexnet_osmr", framework_model) + forge_model = forge.PyTorchModule("pt_alexnet_osmr", framework_model) # Load and pre-process image try: @@ -124,7 +124,7 @@ def test_alexnet_osmr(test_device): # Verify verify_module( - pybuda_model, + forge_model, input_shapes=[(img_tensor.shape,)], inputs=[(img_tensor,)], verify_cfg=VerifyConfig( diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_autoencoder.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_autoencoder.py similarity index 82% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_autoencoder.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_autoencoder.py index 1689c7150..f71515906 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_autoencoder.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_autoencoder.py @@ -4,15 +4,15 @@ import pytest import os -import pybuda +import forge import torch import torchvision.transforms as transforms from datasets import load_dataset -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy # SPDX-FileCopyrightText: Copyright (c) 2018 Udacity # @@ -92,10 +92,10 @@ def forward(self, x): def test_conv_ae_pytorch(test_device): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Instantiate model # NOTE: The model has not been pre-trained or fine-tuned. 
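The PyTorch model demos mirror the ONNX flow, but wrap the eager torch module in the renamed forge.PyTorchModule before verification. A condensed sketch of the AlexNet loading step from the hunk above (the calls are taken from that hunk; the condensed form itself is illustrative):

import torch
import forge
from test.utils import download_model

# Load the torchvision AlexNet checkpoint and wrap it in the renamed
# forge.PyTorchModule; the wrapped module is then handed to verify_module
# exactly as in the ONNX sketch earlier.
framework_model = download_model(
    torch.hub.load, "pytorch/vision:v0.10.0", "alexnet", pretrained=True
)
framework_model.eval()
forge_model = forge.PyTorchModule("pt_alexnet_torchhub", framework_model)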
@@ -116,7 +116,7 @@ def test_conv_ae_pytorch(test_device): sample_tensor = transform(sample).unsqueeze(0) verify_module( - pybuda.PyTorchModule("pt_conv_ae", model), + forge.PyTorchModule("pt_conv_ae", model), input_shapes=[t.shape for t in sample_tensor], inputs=[sample_tensor], verify_cfg=VerifyConfig( @@ -124,15 +124,15 @@ def test_conv_ae_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) def test_linear_ae_pytorch(test_device): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Instantiate model # NOTE: The model has not been pre-trained or fine-tuned. @@ -154,7 +154,7 @@ def test_linear_ae_pytorch(test_device): sample_tensor = transform(sample).squeeze(0) verify_module( - pybuda.PyTorchModule("pt_linear_ae", model), + forge.PyTorchModule("pt_linear_ae", model), input_shapes=[t.shape for t in sample_tensor], inputs=[sample_tensor], verify_cfg=VerifyConfig( @@ -162,6 +162,6 @@ def test_linear_ae_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_blazepose.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_blazepose.py similarity index 76% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_blazepose.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_blazepose.py index b8a2e5560..e1f811404 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_blazepose.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_blazepose.py @@ -2,13 +2,13 @@ # SPDX-License-Identifier: Apache-2.0 # BlazePose Demo Script - PyTorch -from pybuda.verify.config import TestKind, BackendDevice, BackendType +from forge.verify.config import TestKind, BackendDevice, BackendType import os -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module import pytest import cv2 -import pybuda +import forge import torch import sys @@ -26,8 +26,8 @@ def test_blazepose_detector_pytorch(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Grayskull test failing with TM ERROR (producer = conv2d_163.dc.add.11_fused_tm_op_0.dc.matmul.7, consumer = conv2d_163.dc.add.11_fused_tm_op_0.dc.matmul.12): TM order does't satisfy constraints for stacking with phased pipes, buf_size_mb must be a multiple of the total stack factor or producer t") - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" # Load BlazePose Detector @@ -36,7 
+36,7 @@ def test_blazepose_detector_pytorch(test_device): pose_detector.load_anchors("third_party/confidential_customer_models/model_2/pytorch/mediapipepytorch/anchors_pose.npy") # Load data sample - orig_image = cv2.imread("pybuda/test/model_demos/utils/cnn/pytorch/images/girl.png") + orig_image = cv2.imread("forge/test/model_demos/utils/cnn/pytorch/images/girl.png") # Preprocess for BlazePose Detector _, img2, scale, pad = resize_pad(orig_image) @@ -44,10 +44,10 @@ def test_blazepose_detector_pytorch(test_device): img2 = img2.float() / 255.0 verify_module( - pybuda.PyTorchModule("pt_blazepose_detector", pose_detector), + forge.PyTorchModule("pt_blazepose_detector", pose_detector), input_shapes=[(img2.shape,)], inputs=[(img2,)], - verify_cfg=pybuda.VerifyConfig( + verify_cfg=forge.VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, @@ -62,10 +62,10 @@ def test_blazepose_regressor_pytorch(test_device): pytest.skip("Grayskull test failing with data mismatch") # Set PyBDUA environment variable - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" @@ -74,9 +74,9 @@ def test_blazepose_regressor_pytorch(test_device): pose_regressor.load_weights("third_party/confidential_customer_models/model_2/pytorch/mediapipepytorch/blazepose_landmark.pth") verify_module( - pybuda.PyTorchModule("pt_blazepose_regressor", pose_regressor), + forge.PyTorchModule("pt_blazepose_regressor", pose_regressor), input_shapes=[(1, 3, 256, 256,)], - verify_cfg=pybuda.VerifyConfig( + verify_cfg=forge.VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, @@ -92,10 +92,10 @@ def test_blazepose_detector_pytorch_1x1(test_device): pytest.skip("Grayskull test failing with TM ERROR (producer = conv2d_163.dc.add.11_fused_tm_op_0.dc.matmul.7, consumer = conv2d_163.dc.add.11_fused_tm_op_0.dc.matmul.12): TM order does't satisfy constraints for stacking with phased pipes, buf_size_mb must be a multiple of the total stack factor or producer t") # Set PyBDUA environment variable - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" # Load BlazePose Detector @@ -104,7 +104,7 @@ def test_blazepose_detector_pytorch_1x1(test_device): pose_detector.load_anchors("third_party/confidential_customer_models/model_2/pytorch/mediapipepytorch/anchors_pose.npy") # Load data sample - orig_image = cv2.imread("pybuda/test/model_demos/utils/cnn/pytorch/images/girl.png") + orig_image = cv2.imread("forge/test/model_demos/utils/cnn/pytorch/images/girl.png") # Preprocess for BlazePose Detector _, img2, scale, pad = resize_pad(orig_image) @@ -112,10 +112,10 @@ def test_blazepose_detector_pytorch_1x1(test_device): img2 = img2.float() / 255.0 verify_module( - pybuda.PyTorchModule("pt_blazepose_detector", pose_detector), + forge.PyTorchModule("pt_blazepose_detector", pose_detector), input_shapes=[(img2.shape,)], inputs=[(img2,)], - verify_cfg=pybuda.VerifyConfig( + 
verify_cfg=forge.VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, @@ -130,11 +130,11 @@ def test_blazepose_regressor_pytorch_1x1(test_device): pytest.skip("Grayskull test failing with data mismatch") # Set PyBDUA environment variable - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" # Load BlazePose Landmark Regressor @@ -142,9 +142,9 @@ def test_blazepose_regressor_pytorch_1x1(test_device): pose_regressor.load_weights("third_party/confidential_customer_models/model_2/pytorch/mediapipepytorch/blazepose_landmark.pth") verify_module( - pybuda.PyTorchModule("pt_blazepose_regressor", pose_regressor), + forge.PyTorchModule("pt_blazepose_regressor", pose_regressor), input_shapes=[(1, 3, 256, 256,)], - verify_cfg=pybuda.VerifyConfig( + verify_cfg=forge.VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, @@ -160,11 +160,11 @@ def test_blaze_palm_pytorch_1x1(test_device): pytest.skip() # Set PyBDUA environment variable - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.cpu_fallback_ops = set(["concatenate"]) @@ -179,7 +179,7 @@ def test_blaze_palm_pytorch_1x1(test_device): palm_detector.min_score_thresh = 0.75 # Load data sample - orig_image = cv2.imread("pybuda/test/model_demos/utils/cnn/pytorch/images/girl.png") + orig_image = cv2.imread("forge/test/model_demos/utils/cnn/pytorch/images/girl.png") # Preprocess for BlazePose Detector img1, img2, scale, pad = resize_pad(orig_image) @@ -187,10 +187,10 @@ def test_blaze_palm_pytorch_1x1(test_device): img2 = img2.float() / 255.0 verify_module( - pybuda.PyTorchModule("pt_palm_detector", palm_detector), + forge.PyTorchModule("pt_palm_detector", palm_detector), input_shapes=[(img2.shape,)], inputs=[(img2,)], - verify_cfg=pybuda.VerifyConfig( + verify_cfg=forge.VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, @@ -206,10 +206,10 @@ def test_blaze_hand_pytorch_1x1(test_device): pytest.skip() # Set PyBDUA environment variable - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_33"] = -1 compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_112"] = -1 @@ -221,9 +221,9 @@ def test_blaze_hand_pytorch_1x1(test_device): ) verify_module( - pybuda.PyTorchModule("pt_hand_regressor", 
hand_regressor), + forge.PyTorchModule("pt_hand_regressor", hand_regressor), input_shapes=[(1, 3, 256, 256,)], - verify_cfg=pybuda.VerifyConfig( + verify_cfg=forge.VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_clip.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_clip.py similarity index 89% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_clip.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_clip.py index 2b539f642..d8732e2c8 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_clip.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_clip.py @@ -3,13 +3,13 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind -from pybuda.op.eval import compare_tensor_to_golden +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind +from forge.op.eval import compare_tensor_to_golden -import pybuda +import forge import os import requests import torch @@ -103,16 +103,16 @@ def forward(self, input_ids, vision_outputs, last_hidden_state, *encoder_outputs def test_clip_pytorch(test_device): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" # Required to patch data-mismatch. 
Here is followup issue # to check this out in more details: - # tenstorrent/pybuda#1828 - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + # tenstorrent/forge#1828 + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" # Load processor and model from HuggingFace model_ckpt = "openai/clip-vit-base-patch32" @@ -137,9 +137,9 @@ def test_clip_pytorch(test_device): vision_outputs = vision_model(inputs[1]) - tt0 = pybuda.TTDevice("tt0", module=pybuda.PyTorchModule("pt_clip_text_model", text_model)) + tt0 = forge.TTDevice("tt0", module=forge.PyTorchModule("pt_clip_text_model", text_model)) tt0.push_to_inputs(inputs[0], inputs[2]) - output_q = pybuda.run_inference(_sequential=True) + output_q = forge.run_inference(_sequential=True) text_outputs = output_q.get() text_outputs = [o.value().float() for o in text_outputs] diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_ddrnet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_ddrnet.py similarity index 84% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_ddrnet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_ddrnet.py index 647c0bc45..ef4f50233 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_ddrnet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_ddrnet.py @@ -1,17 +1,17 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda, os +import forge, os import torch from torchvision import transforms import requests from PIL import Image import pytest -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda import VerifyConfig +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge import VerifyConfig import sys -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice sys.path.append("third_party/confidential_customer_models/generated/scripts/") from model_ddrnet import DualResNet_23, DualResNet_39, BasicBlock @@ -27,13 +27,13 @@ @pytest.mark.parametrize("variant", variants) def test_ddrnet_pytorch(variant, test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model if variant == "ddrnet23s": model = DualResNet_23( @@ -64,7 +64,7 @@ def test_ddrnet_pytorch(variant, test_device): model_name = f"pt_{variant}" - tt_model = pybuda.PyTorchModule(model_name, model) + tt_model = forge.PyTorchModule(model_name, model) # STEP 3: Prepare input url = "https://raw.githubusercontent.com/pytorch/hub/master/images/dog.jpg" @@ -106,11 +106,11 @@ def test_ddrnet_pytorch(variant, test_device): @pytest.mark.parametrize("variant", variants) def test_ddrnet_semantic_segmentation_pytorch(variant, test_device): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - 
os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" if ( variant == "ddrnet23s_cityscapes" @@ -147,7 +147,7 @@ def test_ddrnet_semantic_segmentation_pytorch(variant, test_device): model.load_state_dict(state_dict, strict=False) model.eval() model_name = f"pt_{variant}" - tt_model = pybuda.PyTorchModule(model_name, model) + tt_model = forge.PyTorchModule(model_name, model) # prepare input image_path = "third_party/confidential_customer_models/cv_demos/ddrnet/semantic_segmentation/image/road_scenes.png" diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_deit.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_deit.py similarity index 76% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_deit.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_deit.py index 3f058fdb3..ded846f12 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_deit.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_deit.py @@ -3,9 +3,9 @@ # SPDX-License-Identifier: Apache-2.0 import pytest import os -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind, NebulaGalaxy from test.model_demos.models.deit import generate_model_deit_imgcls_hf_pytorch @@ -25,7 +25,7 @@ def test_vit_base_classify_224_hf_pytorch(variant, test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], pcc=0.78 ) ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_densenet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_densenet.py similarity index 73% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_densenet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_densenet.py index 3869d60ab..4997dac38 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_densenet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_densenet.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -import pybuda +import forge import os import urllib.request from loguru import logger @@ -23,10 +23,10 @@ import urllib from torchvision.transforms import Compose, ConvertImageDtype, Normalize, PILToTensor, Resize, CenterCrop -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy ############ def get_input_img(): @@ -83,13 +83,13 @@ def test_densenet_121_pytorch(variant, test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Grayskull test has data mismatch") - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = 
forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model if variant == "densenet121": model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet121", pretrained=True) img_tensor = get_input_img() @@ -104,7 +104,7 @@ def test_densenet_121_pytorch(variant, test_device): img_tensor = get_input_img_hf_xray() - tt_model = pybuda.PyTorchModule(variant, model) + tt_model = forge.PyTorchModule(variant, model) # STEP 3: Run inference on Tenstorrent device model(img_tensor) @@ -118,21 +118,21 @@ def test_densenet_121_pytorch(variant, test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) def test_densenet_161_pytorch(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet161", pretrained=True) - tt_model = pybuda.PyTorchModule("densnet161_pt", model) + tt_model = forge.PyTorchModule("densnet161_pt", model) # STEP 3: Run inference on Tenstorrent device img_tensor = get_input_img() @@ -156,17 +156,17 @@ def test_densenet_169_pytorch(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Grayskull test has data mismatch") - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet169", pretrained=True) - tt_model = pybuda.PyTorchModule("densnet169_pt", model) + 
tt_model = forge.PyTorchModule("densnet169_pt", model) # STEP 3: Run inference on Tenstorrent device img_tensor = get_input_img() @@ -189,17 +189,17 @@ def test_densenet_201_pytorch(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Grayskull test has data mismatch") - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet201", pretrained=True) - tt_model = pybuda.PyTorchModule("densnet201_pt", model) + tt_model = forge.PyTorchModule("densnet201_pt", model) # STEP 3: Run inference on Tenstorrent device img_tensor = get_input_img() diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_dla.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_dla.py similarity index 79% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_dla.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_dla.py index 41c12f03e..291246437 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_dla.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_dla.py @@ -4,11 +4,11 @@ import os -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify import TestKind -from pybuda._C.backend_api import BackendDevice +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify import TestKind +from forge._C.backend_api import BackendDevice import requests import pytest import torchvision.transforms as transforms @@ -45,11 +45,11 @@ @pytest.mark.parametrize("variant", variants, ids=variants) def test_dla_pytorch(variant, test_device): - # PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.Float16_b + os.environ["FORGE_RIBBON2"] = "1" func = variants_func[variant] model_name = f"dla_{variant}_pytorch" @@ -60,7 +60,7 @@ def test_dla_pytorch(variant, test_device): compiler_cfg.place_on_new_epoch("concatenate_776.dc.concatenate.0") elif test_device.arch == BackendDevice.Grayskull: if func.__name__ in ("dla102x2", "dla169"): - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if func.__name__ == "dla46_c": pcc = 0.97 @@ -82,8 +82,8 @@ def test_dla_pytorch(variant, test_device): pytorch_model = func(pretrained="imagenet") pytorch_model.eval() - # Create 
pybuda.PyTorchModule using the loaded Pytorch model - tt_model = pybuda.PyTorchModule(model_name, pytorch_model) + # Create forge.PyTorchModule using the loaded Pytorch model + tt_model = forge.PyTorchModule(model_name, pytorch_model) verify_module( tt_model, diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_efficientnet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_efficientnet.py similarity index 87% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_efficientnet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_efficientnet.py index 23ad86cb6..3aeba3ae8 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_efficientnet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_efficientnet.py @@ -12,12 +12,12 @@ from timm.data.transforms_factory import create_transform from loguru import logger -import pybuda -from pybuda import VerifyConfig -from pybuda import CompileDepth -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.backend import verify_module +import forge +from forge import VerifyConfig +from forge import CompileDepth +from forge.verify.config import TestKind +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.backend import verify_module ## https://huggingface.co/docs/timm/models/efficientnet @@ -39,7 +39,7 @@ def test_efficientnet_timm(variant, test_device): pytest.skip("Grayskull test failing with piepgen and blobgen errors") # Configuration - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_auto_fusing = False @@ -51,21 +51,21 @@ def test_efficientnet_timm(variant, test_device): compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_488"] = 5 compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_541"] = 5 compiler_cfg.balancer_op_override("conv2d_68.dc.matmul.12", "t_stream_shape", (7,1)) - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" elif variant == "efficientnet_b4": if test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.amp_level = 1 - compiler_cfg.default_df_override=pybuda.DataFormat.Float16_b - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16}" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + compiler_cfg.default_df_override=forge.DataFormat.Float16_b + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16}" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" # Load model framework_model = download_model(timm.create_model, variant, pretrained=True) framework_model.eval() - pybuda_model = pybuda.PyTorchModule("pt_effnet_timm", framework_model) + forge_model = forge.PyTorchModule("pt_effnet_timm", framework_model) # Load and pre-process image try: @@ -91,7 +91,7 @@ def test_efficientnet_timm(variant, test_device): # Verify verify_module( - pybuda_model, + forge_model, input_shapes=[(img_tensor.shape,)], inputs=[(img_tensor,)], verify_cfg=VerifyConfig( @@ -125,10 +125,10 @@ def test_efficientnet_torchvision(variant, test_device): pytest.skip("Error! 
The overlay blob for chip_0__y_7__x_1 does not fit, the max size is 73600, however we tried to allocate 345716.") # Configuration - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_auto_fusing = False # Until #844 is resolved - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b if variant == models.efficientnet_b0: # Solves issue for bigger conv layers in the middle of the graph @@ -139,7 +139,7 @@ def test_efficientnet_torchvision(variant, test_device): compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_428"] = 5 compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_479"] = 5 compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_531"] = 5 - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" elif variant == models.efficientnet_b4: # Solves issue for bigger conv layers in the middle of the graph @@ -165,13 +165,13 @@ def test_efficientnet_torchvision(variant, test_device): compiler_cfg.balancer_op_override("conv2d_1625.dc.matmul.8", "t_stream_shape", (1,1)) # PIPEGEN-ERROR compiler_cfg.balancer_op_override("conv2d_104.dc.matmul.12", "t_stream_shape", (7,1)) # blobgen error - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" # Load model framework_model = download_model(variant, pretrained=True) framework_model.eval() - pybuda_model = pybuda.PyTorchModule("pt_effnet_torchvis", framework_model) + forge_model = forge.PyTorchModule("pt_effnet_torchvis", framework_model) # Load and pre-process image try: @@ -197,7 +197,7 @@ def test_efficientnet_torchvision(variant, test_device): # Verify verify_module( - pybuda_model, + forge_model, input_shapes=[(img_tensor.shape,)], inputs=[(img_tensor,)], verify_cfg=VerifyConfig( diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_efficientnet_lite.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_efficientnet_lite.py similarity index 66% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_efficientnet_lite.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_efficientnet_lite.py index c5cf80d01..6a5ef6bed 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_efficientnet_lite.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_efficientnet_lite.py @@ -4,8 +4,8 @@ ## EfficientNet V1 demo import pytest -# STEP 0: import PyBuda library -import pybuda +# STEP 0: import Forge library +import forge import os import math @@ -18,11 +18,11 @@ from torchvision import transforms import json -from pybuda.op.eval import compare_tensor_to_golden -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +from forge.op.eval import compare_tensor_to_golden +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind ## https://github.com/RangiLyu/EfficientNet-Lite/ from test.model_demos.utils.cnn.pytorch.saved.efficientnet_lite import src_efficientnet_lite as efflite @@ -35,7 +35,7 @@ def get_image_tensor(wh): transforms.CenterCrop(wh), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),]) - img_tensor = 
tfms(Image.open('pybuda/test/model_demos/utils/cnn/pytorch/images/img.jpeg')).unsqueeze(0) + img_tensor = tfms(Image.open('forge/test/model_demos/utils/cnn/pytorch/images/img.jpeg')).unsqueeze(0) return img_tensor ###### @@ -44,22 +44,22 @@ def test_efficientnet_lite_0_pytorch(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Compiles but gives: \"Forward error: Failed while running fwd program\" when running") - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" elif test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - # STEP 2: Model load in PyBuda + # STEP 2: Model load in Forge model_name = 'efficientnet_lite0' model = efflite.build_efficientnet_lite(model_name, 1000) model.load_pretrain("third_party/confidential_customer_models/model_2/pytorch/efficientnet_lite/weights/efficientnet_lite0.pth") model.eval() - tt_model = pybuda.PyTorchModule("pt_effnet_lite0", model) + tt_model = forge.PyTorchModule("pt_effnet_lite0", model) # Image preprocessing wh = efflite.efficientnet_lite_params[model_name][2] @@ -83,21 +83,21 @@ def test_efficientnet_lite_1_pytorch(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Backend compile failed") - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.amp_level = 2 - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" - # STEP 2: Model load in PyBuda + # STEP 2: Model load in Forge model_name = 'efficientnet_lite1' model = efflite.build_efficientnet_lite(model_name, 1000) model.load_pretrain("third_party/confidential_customer_models/model_2/pytorch/efficientnet_lite/weights/efficientnet_lite1.pth") model.eval() - tt_model = pybuda.PyTorchModule("pt_effnet_lite1", model) + tt_model = forge.PyTorchModule("pt_effnet_lite1", model) # Image preprocessing wh = efflite.efficientnet_lite_params[model_name][2] @@ -123,28 +123,28 @@ def test_efficientnet_lite_2_pytorch(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Backend compile failed") - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - 
compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.amp_level = 2 compiler_cfg.balancer_op_override("conv2d_99.dc.conv2d.1.dc.matmul.12", "grid_shape", (7,5)) compiler_cfg.balancer_op_override("conv2d_142.dc.conv2d.1.dc.matmul.12", "grid_shape", (7,5)) - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{529:544}" - os.environ["PYBUDA_MANUAL_SPLICE_DECOMP_TH"] = "529" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_PAD_SPARSE_MM"] = "{529:544}" + os.environ["FORGE_MANUAL_SPLICE_DECOMP_TH"] = "529" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" - # STEP 2: Model load in PyBuda + # STEP 2: Model load in Forge model_name = 'efficientnet_lite2' model = efflite.build_efficientnet_lite(model_name, 1000) model.load_pretrain("third_party/confidential_customer_models/model_2/pytorch/efficientnet_lite/weights/efficientnet_lite2.pth") model.eval() - tt_model = pybuda.PyTorchModule("pt_effnet_lite2", model) + tt_model = forge.PyTorchModule("pt_effnet_lite2", model) # Image preprocessing wh = efflite.efficientnet_lite_params[model_name][2] @@ -168,26 +168,26 @@ def test_efficientnet_lite_3_pytorch(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Fails with: Error! fork_stream_ids exceeds max fork allowed for chip_0__y_3__x_2, stream_id=24") - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{613:640, 39:48, 11:12}" - os.environ["PYBUDA_MANUAL_SPLICE_DECOMP_TH"] = "613" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_PAD_SPARSE_MM"] = "{613:640, 39:48, 11:12}" + os.environ["FORGE_MANUAL_SPLICE_DECOMP_TH"] = "613" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_RIBBON2"] = "1" if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_MM"] = "{26:27}" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_MM"] = "{26:27}" elif test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "FastCut" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "FastCut" - # STEP 2: Model load in PyBuda + # STEP 2: Model load in Forge model_name = 'efficientnet_lite3' model = efflite.build_efficientnet_lite(model_name, 1000) model.load_pretrain("third_party/confidential_customer_models/model_2/pytorch/efficientnet_lite/weights/efficientnet_lite3.pth") model.eval() - tt_model = pybuda.PyTorchModule("pt_effnet_lite3", model) + tt_model = forge.PyTorchModule("pt_effnet_lite3", model) # Image preprocessing wh = efflite.efficientnet_lite_params[model_name][2] @@ -210,26 +210,26 @@ def 
test_efficientnet_lite_4_pytorch(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Fails with: Error! The overlay blob for chip_0__y_7__x_1 does not fit, the max size is 130944, however we tried to allocate 133168.") - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{46:48}" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_PAD_SPARSE_MM"] = "{46:48}" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.amp_level = 1 - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" elif test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{51:54, 11:16, 6:8, 5:8}" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{51:54, 11:16, 6:8, 5:8}" - # STEP 2: Model load in PyBuda + # STEP 2: Model load in Forge model_name = 'efficientnet_lite4' model = efflite.build_efficientnet_lite(model_name, 1000) model.load_pretrain("third_party/confidential_customer_models/model_2/pytorch/efficientnet_lite/weights/efficientnet_lite4.pth") model.eval() - tt_model = pybuda.PyTorchModule("pt_effnet_lite4", model) + tt_model = forge.PyTorchModule("pt_effnet_lite4", model) # Image preprocessing wh = efflite.efficientnet_lite_params[model_name][2] diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_fpn.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_fpn.py similarity index 77% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_fpn.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_fpn.py index 1c0b0d740..85cec01b2 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_fpn.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_fpn.py @@ -3,10 +3,10 @@ # SPDX-License-Identifier: Apache-2.0 import torch import torch.nn as nn -import pybuda +import forge import os -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig +from forge.verify.backend import verify_module +from forge import VerifyConfig from torchvision.ops import FeaturePyramidNetwork from collections import OrderedDict @@ -29,16 +29,16 @@ def forward(self, feat0, feat1, feat2): def test_fpn_pytorch(test_device, test_kind): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_FORCE_EMULATE_HARVESTED"] = "1" + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_FORCE_EMULATE_HARVESTED"] = "1" # Load FPN model model = FPNWrapper([10, 20, 30], 5) - tt_model = pybuda.PyTorchModule("pytorch_fpn", model) + tt_model = forge.PyTorchModule("pytorch_fpn", model) feat0 = torch.rand(1, 10, 64, 64) feat1 = torch.rand(1, 20, 16, 16) 
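The file-by-file changes above and below all apply the same mechanical substitution: the `pybuda` Python package becomes `forge`, `PYBUDA_*` environment variables become `FORGE_*`, and test asset paths move from `pybuda/test/...` to `forge/test/...`. A minimal sketch of that rewrite in Python, for illustration only; the helper name and regex rules below are assumptions and are not part of this patch, which edits each file directly:

import re

# Illustrative rename rules mirroring the substitutions this patch applies by hand.
RENAME_RULES = [
    (re.compile(r"\bPYBUDA_([A-Z0-9_]+)\b"), r"FORGE_\1"),  # env vars, e.g. PYBUDA_RIBBON2 -> FORGE_RIBBON2
    (re.compile(r"\bpybuda/test/"), "forge/test/"),          # data and image paths used by the tests
    (re.compile(r"\bpybuda\b"), "forge"),                    # imports and attribute access
    (re.compile(r"\bPyBuda\b"), "Forge"),                    # comments such as "Set PyBuda configuration parameters"
]

def rename_pybuda_to_forge(text: str) -> str:
    """Apply the pybuda -> forge renames to a source snippet (illustrative only)."""
    for pattern, replacement in RENAME_RULES:
        text = pattern.sub(replacement, text)
    return text

# Example: the env-var and compiler-config lines seen throughout these tests.
print(rename_pybuda_to_forge('os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1"'))
print(rename_pybuda_to_forge("compiler_cfg = pybuda.config._get_global_compiler_config()"))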
diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_ghostnet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_ghostnet.py similarity index 85% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_ghostnet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_ghostnet.py index a5b905a01..5c114a0da 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_ghostnet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_ghostnet.py @@ -12,11 +12,11 @@ from timm.data.transforms_factory import create_transform from loguru import logger -import pybuda -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.backend import verify_module +import forge +from forge import VerifyConfig +from forge.verify.config import TestKind +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.backend import verify_module variants = [ "ghostnet_050", @@ -34,14 +34,14 @@ def test_ghostnet_timm(variant, test_device): if variant == "ghostnet_130": pytest.skip("Skip ghostnet_130 due to hang on device") # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load model framework_model = download_model(timm.create_model, variant, pretrained=True) framework_model.eval() - pybuda_model = pybuda.PyTorchModule("pt_ghostnet_timm", framework_model) + forge_model = forge.PyTorchModule("pt_ghostnet_timm", framework_model) # Load and pre-process image try: @@ -72,7 +72,7 @@ def test_ghostnet_timm(variant, test_device): # Verify verify_module( - pybuda_model, + forge_model, input_shapes=[(img_tensor.shape,)], inputs=[(img_tensor,)], verify_cfg=VerifyConfig( diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_ghostnet_100.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_ghostnet_100.py similarity index 83% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_ghostnet_100.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_ghostnet_100.py index 2af3f53d9..a4aa5057f 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_ghostnet_100.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_ghostnet_100.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import pytest -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind from test.model_demos.models.ghostnet import generate_model_ghostnet_imgcls_timm -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice variants = ["ghostnet_100"] diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_googlenet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_googlenet.py similarity index 74% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_googlenet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_googlenet.py index 2e0ea97ea..f6e206306 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_googlenet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_googlenet.py @@ -3,33 +3,33 @@ # SPDX-License-Identifier: Apache-2.0 import 
pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy import os -import pybuda +import forge import torch from PIL import Image from torchvision import models, transforms from loguru import logger def test_googlenet_pytorch(test_device): - # Set PyBuda configuration parameters + # Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model # Two ways to load the same model # model = torch.hub.load('pytorch/vision:v0.10.0', 'googlenet', pretrained=True) model = download_model(models.googlenet, pretrained=True) model.eval() - tt_model = pybuda.PyTorchModule("pt_googlenet", model) + tt_model = forge.PyTorchModule("pt_googlenet", model) # Image preprocessing try: @@ -63,6 +63,6 @@ def test_googlenet_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_hardnet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_hardnet.py similarity index 80% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_hardnet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_hardnet.py index 3da1ee88f..245a92dd4 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_hardnet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_hardnet.py @@ -1,16 +1,16 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda, os +import forge, os import torch import pytest import urllib from PIL import Image from torchvision import transforms -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendDevice +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge import VerifyConfig +from forge._C.backend_api import BackendDevice variants = ["hardnet68", "hardnet85", "hardnet68ds", "hardnet39ds"] @@ -18,14 +18,14 @@ @pytest.mark.parametrize("variant", variants) def test_hardnet_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = 
"1" if variant == "hardnet85" and test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" # load only the model architecture without pre-trained weights. model = torch.hub.load("PingoLH/Pytorch-HarDNet", variant, pretrained=False) @@ -42,9 +42,9 @@ def test_hardnet_pytorch(test_device, variant): model.load_state_dict(state_dict) model.eval() - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model_name = f"pt_{variant}" - tt_model = pybuda.PyTorchModule(model_name, model) + tt_model = forge.PyTorchModule(model_name, model) # STEP 3: Prepare input url, filename = ( diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_hrnet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_hrnet.py similarity index 84% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_hrnet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_hrnet.py index b4f7fc582..f2a2c8213 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_hrnet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_hrnet.py @@ -4,15 +4,15 @@ import pytest from test.utils import download_model import torch -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind import os from loguru import logger -import pybuda +import forge import torch import torch.multiprocessing from PIL import Image @@ -27,16 +27,16 @@ ############# def generate_model_hrnet_imgcls_osmr_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters + # STEP 1: Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object - # tenstorrent/pybuda#950 + # tenstorrent/forge#950 compiler_cfg.balancer_policy = "CNN" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model """ models = [ hrnet_w18_small_v1, @@ -52,7 +52,7 @@ def generate_model_hrnet_imgcls_osmr_pytorch(test_device, variant): """ model = download_model(ptcv_get_model, variant, pretrained=True) model.eval() - tt_model = pybuda.PyTorchModule(f"pt_hrnet_osmr_{variant}", model) + tt_model = forge.PyTorchModule(f"pt_hrnet_osmr_{variant}", model) # Model load try: @@ -113,16 +113,16 @@ def test_hrnet_osmr_pytorch(variant, test_device): ) def generate_model_hrnet_imgcls_timm_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters + # STEP 1: Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object - # tenstorrent/pybuda#950 + # tenstorrent/forge#950 compiler_cfg.balancer_policy = "CNN" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model """ 
default_cfgs = { 'hrnet_w18_small' @@ -138,7 +138,7 @@ def generate_model_hrnet_imgcls_timm_pytorch(test_device, variant): """ model = download_model(timm.create_model, variant, pretrained=True) model.eval() - tt_model = pybuda.PyTorchModule(f"pt_hrnet_timm_{variant}", model) + tt_model = forge.PyTorchModule(f"pt_hrnet_timm_{variant}", model) ## Preprocessing try: diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_inception_v4.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_inception_v4.py similarity index 70% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_inception_v4.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_inception_v4.py index 79ca4a4a5..f95031bc5 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_inception_v4.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_inception_v4.py @@ -3,9 +3,9 @@ # SPDX-License-Identifier: Apache-2.0 ## Inception V4 -# STEP 0: import PyBuda library +# STEP 0: import Forge library import pytest -import pybuda +import forge import os import urllib from loguru import logger @@ -19,36 +19,36 @@ from timm.data.transforms_factory import create_transform torch.multiprocessing.set_sharing_strategy("file_system") -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy def generate_model_inceptionV4_imgcls_osmr_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{694:704, 676:704, 167:182, 158:160, 39:48}" - os.environ["PYBUDA_MANUAL_SPLICE_DECOMP_TH"] = "158" - os.environ["PYBUDA_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_PAD_SPARSE_MM"] = "{694:704, 676:704, 167:182, 158:160, 39:48}" + os.environ["FORGE_MANUAL_SPLICE_DECOMP_TH"] = "158" + os.environ["FORGE_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg.balancer_op_override("_fused_op_4", "t_stream_shape", (158,1)) # TM error compiler_cfg.balancer_op_override("_fused_op_7", "t_stream_shape", (158,1)) # TM error if test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.balancer_op_override("conv2d_551.dc.sparse_matmul.10.dc.sparse_matmul.1.lc2", "grid_shape", (1,4)) # Temp mitigations for net2pipe errors, should be removed. 
# - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" elif test_device.arch == BackendDevice.Grayskull: - compiler_cfg.balancer_op_override("_fused_op_2", "t_stream_shape", (676,1)) # TM error (ref pybuda#1527) + compiler_cfg.balancer_op_override("_fused_op_2", "t_stream_shape", (676,1)) # TM error (ref forge#1527) # Load model framework_model = download_model(ptcv_get_model, variant, pretrained=True) - pybuda_model = pybuda.PyTorchModule("pt_inception_v4_osmr", framework_model) + forge_model = forge.PyTorchModule("pt_inception_v4_osmr", framework_model) # Load and pre-process image img_tensor = get_image() @@ -56,7 +56,7 @@ def generate_model_inceptionV4_imgcls_osmr_pytorch(test_device, variant): # Compile & Verify pcc = 0.91 if test_device.arch == BackendDevice.Grayskull else 0.97 - return pybuda_model, [img_tensor], {"pcc": pcc} + return forge_model, [img_tensor], {"pcc": pcc} def preprocess_timm_model(model_name): @@ -112,41 +112,41 @@ def test_inception_v4_osmr_pytorch(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=other["pcc"], - # padding overrides cause tensor size mismatch during verification: tenstorrent/pybuda#627 + # padding overrides cause tensor size mismatch during verification: tenstorrent/forge#627 verify_post_autograd_passes=False, verify_post_placer=False, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ), ) def generate_model_inceptionV4_imgcls_timm_pytorch(test_device, variant): # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{694:704, 676:704, 167:182, 158:160, 39:48}" - os.environ["PYBUDA_MANUAL_SPLICE_DECOMP_TH"] = "158" - os.environ["PYBUDA_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_PAD_SPARSE_MM"] = "{694:704, 676:704, 167:182, 158:160, 39:48}" + os.environ["FORGE_MANUAL_SPLICE_DECOMP_TH"] = "158" + os.environ["FORGE_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg.balancer_op_override("_fused_op_4", "t_stream_shape", (158,1)) # TM error compiler_cfg.balancer_op_override("_fused_op_7", "t_stream_shape", (158,1)) # TM error if test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.balancer_op_override("conv2d_551.dc.sparse_matmul.10.dc.sparse_matmul.1.lc2", "grid_shape", (1,4)) # Temp mitigations for net2pipe errors, should be removed. 
# - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" elif test_device.arch == BackendDevice.Grayskull: - compiler_cfg.balancer_op_override("_fused_op_2", "t_stream_shape", (676,1)) # TM error (ref pybuda#1527) + compiler_cfg.balancer_op_override("_fused_op_2", "t_stream_shape", (676,1)) # TM error (ref forge#1527) # Load model & Preprocess image framework_model, img_tensor = download_model(preprocess_timm_model, variant) - pybuda_model = pybuda.PyTorchModule("pt_inception_v4_timm", framework_model) + forge_model = forge.PyTorchModule("pt_inception_v4_timm", framework_model) # Compile & Verify pcc = 0.96 if test_device.arch == BackendDevice.Grayskull else 0.97 - return pybuda_model, [img_tensor], {} + return forge_model, [img_tensor], {} def test_inception_v4_timm_pytorch(test_device): @@ -164,9 +164,9 @@ def test_inception_v4_timm_pytorch(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.97, - # padding overrides cause tensor size mismatch during verification: tenstorrent/pybuda#627 + # padding overrides cause tensor size mismatch during verification: tenstorrent/forge#627 verify_post_autograd_passes=False, verify_post_placer=False, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ), ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mlp_mixer.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_mlp_mixer.py similarity index 84% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_mlp_mixer.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_mlp_mixer.py index 518940d4c..6fe174ece 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mlp_mixer.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_mlp_mixer.py @@ -3,12 +3,12 @@ # SPDX-License-Identifier: Apache-2.0 import pytest -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind -import pybuda +import forge import os import torch @@ -56,7 +56,7 @@ def test_mlp_mixer_timm_pytorch(variant, test_device): elif variant == "mixer_l16_224_in21k": pytest.skip("Bus Error during placer/balancer") elif variant == "mixer_s16_224": - pytest.skip("/home/jenkinsad/pybuda/third_party/budabackend//src/overlay/blob_gen.rb:250:in `ParseStreamString': undefined method `map' for nil:NilClass (NoMethodError)") + pytest.skip("/home/jenkinsad/forge/third_party/budabackend//src/overlay/blob_gen.rb:250:in `ParseStreamString': undefined method `map' for nil:NilClass (NoMethodError)") elif variant == "mixer_s32_224": pytest.skip("Hangs on Grayskull") @@ -67,8 +67,8 @@ def test_mlp_mixer_timm_pytorch(variant, test_device): transform = create_transform(**config) - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = 
pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" try: @@ -79,8 +79,8 @@ def test_mlp_mixer_timm_pytorch(variant, test_device): image = torch.rand(1, 3, 256, 256) pixel_values = transform(image).unsqueeze(0) - # STEP 2: Create PyBuda module from PyTorch model - tt_model = pybuda.PyTorchModule(variant+"_pt", model) + # STEP 2: Create Forge module from PyTorch model + tt_model = forge.PyTorchModule(variant+"_pt", model) pcc = 0.92 if test_device.arch == BackendDevice.Grayskull and variant == "mixer_b16_224_miil" else 0.99 verify_module( diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1.py similarity index 83% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1.py index 7f7b532c9..da8d782f1 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1.py @@ -4,11 +4,11 @@ import pytest from test.utils import download_model import torch -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy import os import torch.nn as nn @@ -161,14 +161,14 @@ def forward(self, input): def generate_model_mobilenetV1_base_custom_pytorch(test_device, variant): - # Set PyBUDA configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # Create PyBUDA module from PyTorch model + # Create Forge module from PyTorch model model = MobileNetV1(9) - tt_model = pybuda.PyTorchModule("mobilenet_v1", model) + tt_model = forge.PyTorchModule("mobilenet_v1", model) input_shape = (1, 3, 64, 64) @@ -194,7 +194,7 @@ def test_mobilenetv1_basic(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -203,21 +203,21 @@ def test_mobilenetv1_basic(test_device): from transformers import AutoImageProcessor, AutoModelForImageClassification def generate_model_mobilenetv1_imgcls_hf_pytorch(test_device, variant): - # Set PyBuda configuration parameters + # Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = 
forge._C.DataFormat.Float16_b - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model preprocessor = download_model(AutoImageProcessor.from_pretrained, variant ) model = download_model(AutoModelForImageClassification.from_pretrained, variant ) - tt_model = pybuda.PyTorchModule("mobilenet_v1__hf_075_192", model) + tt_model = forge.PyTorchModule("mobilenet_v1__hf_075_192", model) # Image load and pre-processing into pixel_values url = "http://images.cocodataset.org/val2017/000000039769.jpg" @@ -246,25 +246,25 @@ def test_mobilenetv1_192(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.95, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) def generate_model_mobilenetV1I224_imgcls_hf_pytorch(test_device, variant): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model preprocessor = download_model(AutoImageProcessor.from_pretrained, variant ) model = download_model(AutoModelForImageClassification.from_pretrained, variant ) - tt_model = pybuda.PyTorchModule("mobilenet_v1__hf_1_224", model) + tt_model = forge.PyTorchModule("mobilenet_v1__hf_1_224", model) # Image load and pre-processing into pixel_values url = "http://images.cocodataset.org/val2017/000000039769.jpg" @@ -292,6 +292,6 @@ def test_mobilenetv1_224(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.95, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1_ssd.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1_ssd.py similarity index 74% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1_ssd.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1_ssd.py index 3c2348be1..074adf3e2 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1_ssd.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v1_ssd.py @@ -3,13 +3,13 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind import os -import pybuda +import forge import sys sys.path = list(set(sys.path + ["third_party/confidential_customer_models/model_2/pytorch/"])) @@ -20,14 +20,14 @@ def 
test_mobilenet_v1_ssd_pytorch_1x1(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - # STEP 1: Set PyBuda configuration parameters + # STEP 1: Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_102"] = -1 compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_131"] = -1 @@ -36,13 +36,13 @@ def test_mobilenet_v1_ssd_pytorch_1x1(test_device): class_names = [name.strip() for name in open(label_path).readlines()] number_of_classes = len(class_names) - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model_path = "third_party/confidential_customer_models/model_2/pytorch/mobilenetv1_ssd/models/mobilenet-v1-ssd-mp-0_675.pth" net = create_mobilenetv1_ssd(number_of_classes) net.load(model_path) net.eval() - tt_model = pybuda.PyTorchModule("pt_mobilenet_v1_ssd", net) + tt_model = forge.PyTorchModule("pt_mobilenet_v1_ssd", net) input_shape = (1, 3, 300, 300) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v2.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v2.py similarity index 77% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v2.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v2.py index 186ecb517..6f59ea312 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v2.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v2.py @@ -3,13 +3,13 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy import os -import pybuda +import forge import urllib import timm import torch @@ -22,20 +22,20 @@ from transformers import MobileNetV2FeatureExtractor, MobileNetV2ForSemanticSegmentation def generate_model_mobilenetV2_imgcls_torchhub_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters + # STEP 1: Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Grayskull: compiler_cfg.balancer_policy = "CNN" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(torch.hub.load, variant, "mobilenet_v2", pretrained=True ) - tt_model = pybuda.PyTorchModule("mobilenet_v2", model) + tt_model = forge.PyTorchModule("mobilenet_v2", model) # Image preprocessing url = 
"http://images.cocodataset.org/val2017/000000039769.jpg" @@ -64,26 +64,26 @@ def test_mobilenetv2_basic(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.95, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) def generate_model_mobilenetV2I96_imgcls_hf_pytorch(test_device, variant): - # Set PyBuda configuration parameters + # Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model preprocessor = download_model(AutoImageProcessor.from_pretrained, variant ) model = download_model(AutoModelForImageClassification.from_pretrained, variant ) - tt_model = pybuda.PyTorchModule("mobilenet_v2__hf_035_96", model) + tt_model = forge.PyTorchModule("mobilenet_v2__hf_035_96", model) # Image load and pre-processing into pixel_values url = "http://images.cocodataset.org/val2017/000000039769.jpg" @@ -109,26 +109,26 @@ def test_mobilenetv2_96(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.8, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) def generate_model_mobilenetV2I160_imgcls_hf_pytorch(test_device, variant): - # Set PyBuda configuration parameters + # Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model preprocessor = download_model(AutoImageProcessor.from_pretrained, variant ) model = download_model(AutoModelForImageClassification.from_pretrained, variant ) - tt_model = pybuda.PyTorchModule("mobilenet_v2__hf_075_160", model) + tt_model = forge.PyTorchModule("mobilenet_v2__hf_075_160", model) # Image load and pre-processing into pixel_values url = "http://images.cocodataset.org/val2017/000000039769.jpg" @@ -154,28 +154,28 @@ def test_mobilenetv2_160(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.9, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) def generate_model_mobilenetV2I244_imgcls_hf_pytorch(test_device, variant): - # Set PyBuda configuration parameters + # Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - 
compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model preprocessor = download_model(AutoImageProcessor.from_pretrained, variant ) model = download_model(AutoModelForImageClassification.from_pretrained, variant ) - tt_model = pybuda.PyTorchModule("mobilenet_v2__hf_1_224", model) + tt_model = forge.PyTorchModule("mobilenet_v2__hf_1_224", model) # Image load and pre-processing into pixel_values url = "http://images.cocodataset.org/val2017/000000039769.jpg" @@ -202,23 +202,23 @@ def test_mobilenetv2_224(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.9, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) def generate_model_mobilenetV2_imgcls_timm_pytorch(test_device, variant): - # Set PyBuda configuration parameters + # Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model model = download_model(timm.create_model, variant, pretrained=True) - tt_model = pybuda.PyTorchModule("mobilenet_v2__hf_timm", model) + tt_model = forge.PyTorchModule("mobilenet_v2__hf_timm", model) # Image load and pre-processing into pixel_values try: @@ -255,7 +255,7 @@ def test_mobilenetv2_timm(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.95, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -270,14 +270,14 @@ def generate_model_mobilenetV2_semseg_hf_pytorch(test_device, variant): pytest.skip("Failing on GS with: Could not reconcile constraints: path[conv2d_554.dc.matmul.8 -> add_567]") # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" # Load model framework_model = download_model(MobileNetV2ForSemanticSegmentation.from_pretrained, variant) - pybuda_model = pybuda.PyTorchModule("pt_mobilenet_v2_deeplab_v3", framework_model) + forge_model = forge.PyTorchModule("pt_mobilenet_v2_deeplab_v3", framework_model) # I 3x513x513 # # Load and pre-process image @@ -307,7 +307,7 @@ def generate_model_mobilenetV2_semseg_hf_pytorch(test_device, variant): # cpu_predicted_mask = cpu_out.logits.argmax(1).squeeze(0) # 
print("Predicted mask", cpu_predicted_mask) - return pybuda_model, [img_tensor], {} + return forge_model, [img_tensor], {} variants = [ @@ -331,6 +331,6 @@ def test_mobilenetv2_deeplabv3(variant, test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.85, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ), ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v3.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v3.py similarity index 79% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v3.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v3.py index 1c92a8d1d..440e7ceaa 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v3.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_mobilenet_v3.py @@ -3,15 +3,15 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy import os import urllib -import pybuda +import forge import requests import torch from PIL import Image @@ -22,20 +22,20 @@ from loguru import logger def generate_model_mobilenetV3_imgcls_torchhub_pytorch(test_device, variant): - # Set PyBuda configuration parameters + # Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", variant, pretrained=True ) - tt_model = pybuda.PyTorchModule("mobilenet_v3_large_pt", model) + tt_model = forge.PyTorchModule("mobilenet_v3_large_pt", model) # Run inference on Tenstorrent device url = "http://images.cocodataset.org/val2017/000000039769.jpg" @@ -65,18 +65,18 @@ def test_mobilenetv3_basic(variant, test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], enabled=False # TODO: small variant has very low PCC, large variant has high PCC ) ) def generate_model_mobilenetV3_imgcls_timm_pytorch(test_device, variant): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - 
compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model # Both options are good # model = timm.create_model('mobilenetv3_small_100', pretrained=True) if variant == "mobilenetv3_small_100": @@ -88,7 +88,7 @@ def generate_model_mobilenetV3_imgcls_timm_pytorch(test_device, variant): f"hf_hub:timm/mobilenetv3_large_100.ra_in1k", pretrained=True ) - tt_model = pybuda.PyTorchModule(variant, model) + tt_model = forge.PyTorchModule(variant, model) # Image load and pre-processing into pixel_values try: @@ -119,7 +119,7 @@ def test_mobilenetv3_timm(variant, test_device): test_device, variant, ) - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" verify_module( model, @@ -130,7 +130,7 @@ def test_mobilenetv3_timm(variant, test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], enabled=False # TODO: small variant has very low PCC, large variant has high PCC ) ) @@ -140,7 +140,7 @@ def test_mobilenetv3_timm(variant, test_device): @pytest.mark.skip(reason="Not supported") def test_mobilenetv3_timm_1x1(variant, test_device): pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" model, inputs, _ = generate_model_mobilenetV3_imgcls_timm_pytorch( test_device, variant, diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_openpose.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_openpose.py similarity index 92% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_openpose.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_openpose.py index 89b87b7ed..f6a533055 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_openpose.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_openpose.py @@ -11,11 +11,11 @@ from torchvision import transforms from pytorchcv.model_provider import get_model as ptcv_get_model -import pybuda -from pybuda import VerifyConfig, CompileDepth -from pybuda.verify.config import TestKind, BackendDevice, DataFormat, NebulaGalaxy -from pybuda._C.backend_api import BackendType -from pybuda.verify.backend import verify_module +import forge +from forge import VerifyConfig, CompileDepth +from forge.verify.config import TestKind, BackendDevice, DataFormat, NebulaGalaxy +from forge._C.backend_api import BackendType +from forge.verify.backend import verify_module from test.utils import download_model @@ -294,35 +294,35 @@ def forward(self, x): def generate_model_openpose_posdet_custom_pytorch(test_device, variant): # Init config - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() if test_device.arch == BackendDevice.Grayskull: # Limit to BE Golden verify as hang occures due to the fork-join buffering - # tenstorrent/pybuda#880 + # tenstorrent/forge#880 compiler_cfg.compile_depth = CompileDepth.BACKEND_GOLDEN_VERIFY if variant == "body_basic" and test_device.arch == BackendDevice.Grayskull: # Possibilities of finding out better way of handling extra blob gen size - 
# tenstorrent/pybuda#881 + # tenstorrent/forge#881 os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{12*1024}" if variant == "hand_basic" and test_device.arch == BackendDevice.Wormhole_B0: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{96*1024}" # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_enumerate_u_kt = False compiler_cfg.graph_solver_self_cut_type = "FastCut" compiler_cfg.default_df_override = DataFormat.Float16_b compiler_cfg.enable_auto_fusing = False # Data type errors while using AMP = 1 - # tenstorrent/pybuda#856 + # tenstorrent/forge#856 # compiler_cfg.amp_level = 2 # No valid grids found for many conv ops - # tenstorrent/pybuda#872 - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + # tenstorrent/forge#872 + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" # Load model if variant == "body_basic": @@ -340,7 +340,7 @@ def generate_model_openpose_posdet_custom_pytorch(test_device, variant): ) framework_model_dict = transfer(framework_model, torch.load(model_path)) framework_model.load_state_dict(framework_model_dict) - pybuda_model = pybuda.PyTorchModule("open_pose_" + variant + "_pt", framework_model) + forge_model = forge.PyTorchModule("open_pose_" + variant + "_pt", framework_model) # Load & pre-process image img_tensor = get_image_tensor(sample_path) @@ -348,7 +348,7 @@ def generate_model_openpose_posdet_custom_pytorch(test_device, variant): # Sanity run cpu_out = framework_model(img_tensor) - return pybuda_model, [img_tensor], {} + return forge_model, [img_tensor], {} @pytest.mark.parametrize("variant", variants) @@ -368,7 +368,7 @@ def test_openpose_basic(variant, test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], pcc=0.9, ), ) @@ -388,15 +388,15 @@ def generate_model_openpose_posdet_osmr_pytorch(test_device, variant): pytest.skip("Grayskull failing with data mismatch PCC = 0.7433900704259362") # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" compiler_cfg.enable_auto_fusing = False - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16 + compiler_cfg.default_df_override = forge._C.DataFormat.Float16 # Load model framework_model = download_model(ptcv_get_model, variant, pretrained=True) framework_model.eval() - pybuda_model = pybuda.PyTorchModule("openpose_" + variant + "_pt", framework_model) + forge_model = forge.PyTorchModule("openpose_" + variant + "_pt", framework_model) # Load & pre-process image sample_path = "third_party/confidential_customer_models/model_2/pytorch/openpose/samples/body.jpeg" @@ -405,7 +405,7 @@ def generate_model_openpose_posdet_osmr_pytorch(test_device, variant): # Sanity run cpu_out = framework_model(img_tensor) - return pybuda_model, [img_tensor], {} + return forge_model, [img_tensor], {} variants = [ diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_perceiverio.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_perceiverio.py similarity index 79% rename from 
pybuda/test/model_demos/high_prio/cnn/pytorch/test_perceiverio.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_perceiverio.py index fb28fa897..06e5a3d76 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_perceiverio.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_perceiverio.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import torch import os import requests @@ -15,9 +15,9 @@ PerceiverForImageClassificationFourier, ) -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind def get_sample_data(model_name): @@ -46,22 +46,22 @@ def get_sample_data(model_name): @pytest.mark.parametrize("variant", variants) def test_perceiverio_for_image_classification_pytorch(test_device, variant): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" verify_enabled = True pcc_value = 0.99 # Temp mitigations for net2pipe errors, should be removed. # if variant == "deepmind/vision-perceiver-conv": - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: if variant == "deepmind/vision-perceiver-conv": os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{10*1024}" @@ -70,23 +70,23 @@ def test_perceiverio_for_image_classification_pytorch(test_device, variant): "deepmind/vision-perceiver-learned", "deepmind/vision-perceiver-fourier", ]: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" compiler_cfg.enable_auto_fusing = False if variant == "deepmind/vision-perceiver-fourier": compiler_cfg.balancer_op_override( "hslice_41.dc.sparse_matmul.2.lc2", "t_stream_shape", (1, 2) ) - if test_device.devtype == pybuda.BackendType.Silicon: + if test_device.devtype == forge.BackendType.Silicon: pcc_value = 0.97 if variant == "deepmind/vision-perceiver-learned": - if test_device.devtype == pybuda.BackendType.Silicon: + if test_device.devtype == forge.BackendType.Silicon: pcc_value = 0.92 - elif test_device.arch == pybuda.BackendDevice.Grayskull: + elif test_device.arch == forge.BackendDevice.Grayskull: - if test_device.devtype == pybuda.BackendType.Silicon: + if test_device.devtype == forge.BackendType.Silicon: verify_enabled = False if variant in [ @@ -101,7 +101,7 @@ def test_perceiverio_for_image_classification_pytorch(test_device, variant): "deepmind/vision-perceiver-fourier", ]: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{101*1024}" - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" if variant == 
"deepmind/vision-perceiver-fourier": compiler_cfg.balancer_op_override( @@ -126,7 +126,7 @@ def test_perceiverio_for_image_classification_pytorch(test_device, variant): model.eval() - tt_model = pybuda.PyTorchModule( + tt_model = forge.PyTorchModule( "pt_" + str(variant.split("/")[-1].replace("-", "_")), model ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_rcnn.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_rcnn.py similarity index 85% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_rcnn.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_rcnn.py index c3277c43a..8146af20d 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_rcnn.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_rcnn.py @@ -1,16 +1,16 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import cv2 import torch.nn.init as init import torchvision import torch.nn as nn import torchvision.transforms as transforms import os, sys -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind def test_rcnn_pytorch(test_device): @@ -36,7 +36,7 @@ def test_rcnn_pytorch(test_device): param.requires_grad = False # Image - img = cv2.imread("pybuda/test/model_demos/utils/cnn/pytorch/images/car.jpg") + img = cv2.imread("forge/test/model_demos/utils/cnn/pytorch/images/car.jpg") transform = transforms.Compose( [ @@ -58,14 +58,14 @@ def test_rcnn_pytorch(test_device): rects[:, 3] += rects[:, 1] print("Suggested number of proposals: %d" % len(rects)) - # PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.Float16_b + os.environ["FORGE_RIBBON2"] = "1" - # Pybuda model - tt_model = pybuda.PyTorchModule("rcnn", model) + # Forge model + tt_model = forge.PyTorchModule("rcnn", model) # Proposals generated by selective search were fed to a model in a loop manner to compute features. 
# [Refer line No.151 in https://github.com/object-detection-algorithm/R-CNN/blob/master/py/car_detector.py] diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_resnet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_resnet.py similarity index 77% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_resnet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_resnet.py index 22b297fde..43e263ef9 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_resnet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_resnet.py @@ -3,13 +3,13 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy import os -import pybuda +import forge import requests from PIL import Image from transformers import AutoFeatureExtractor, ResNetForImageClassification @@ -26,11 +26,11 @@ def generate_model_resnet_imgcls_hf_pytorch(test_device, variant): feature_extractor = download_model(AutoFeatureExtractor.from_pretrained, model_ckpt) model = download_model(ResNetForImageClassification.from_pretrained, model_ckpt) - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" # Load data sample try: @@ -43,7 +43,7 @@ def generate_model_resnet_imgcls_hf_pytorch(test_device, variant): # Data preprocessing inputs = feature_extractor(image, return_tensors="pt") pixel_values = inputs["pixel_values"] - model = pybuda.PyTorchModule("pt_resnet50", model) + model = forge.PyTorchModule("pt_resnet50", model) return model, [pixel_values], {} @@ -57,7 +57,7 @@ def test_resnet(test_device, enable_default_dram_parameters): test_device, "microsoft/resnet-50", ) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() verify_module( model, @@ -69,7 +69,7 @@ def test_resnet(test_device, enable_default_dram_parameters): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.9, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -80,12 +80,12 @@ def generate_model_resnet_imgcls_timm_pytorch(test_device, variant): config = resolve_data_config({}, model=model) transform = create_transform(**config) - # Set PyBuda configuration parameters + # Set Forge configuration parameters compiler_cfg = ( - pybuda.config._get_global_compiler_config() + forge.config._get_global_compiler_config() ) # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = 
forge._C.DataFormat.Float16_b # Load data sample try: @@ -98,7 +98,7 @@ def generate_model_resnet_imgcls_timm_pytorch(test_device, variant): # Data preprocessing pixel_values = transform(image).unsqueeze(0) - model = pybuda.PyTorchModule("pt_resnet50", model) + model = forge.PyTorchModule("pt_resnet50", model) return model, [pixel_values], {} @@ -118,6 +118,6 @@ def test_resnet_timm(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.9, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_resnext.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_resnext.py similarity index 69% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_resnext.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_resnext.py index 61b856c0e..49f423919 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_resnext.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_resnext.py @@ -3,14 +3,14 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind import os from loguru import logger -import pybuda +import forge import torch from PIL import Image from torchvision import transforms @@ -45,23 +45,23 @@ def get_image_tensor(): return input_batch def test_resnext_50_torchhub_pytorch(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Grayskull: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{72*1024}" - os.environ["PYBUDA_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" + os.environ["FORGE_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "resnext50_32x4d", pretrained=True ) model.eval() - tt_model = pybuda.PyTorchModule("pt_resnext50_torchhub", model) + tt_model = forge.PyTorchModule("pt_resnext50_torchhub", model) input_batch = get_image_tensor() @@ -82,25 +82,25 @@ def test_resnext_50_torchhub_pytorch(test_device): ) def test_resnext_101_torchhub_pytorch(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" 
- os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_BALANCER_PREPASS_DISABLED"] = "1" + os.environ["FORGE_BALANCER_PREPASS_DISABLED"] = "1" elif test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{80*1024}" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "resnext101_32x8d", pretrained=True ) model.eval() - tt_model = pybuda.PyTorchModule("pt_resnext101_torchhub", model) + tt_model = forge.PyTorchModule("pt_resnext101_torchhub", model) input_batch = get_image_tensor() @@ -125,20 +125,20 @@ def test_resnext_101_32x8d_fb_wsl_pytorch(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Grayskull failing with: Chip = 0, Core x = 1, y = 7(logical x = 0, y = 5): has more than 24 prefetch buf streams") - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_BALANCER_PREPASS_DISABLED"] = "1" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_BALANCER_PREPASS_DISABLED"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model # 4 variants model = download_model(torch.hub.load, "facebookresearch/WSL-Images", "resnext101_32x8d_wsl" ) model.eval() - tt_model = pybuda.PyTorchModule("pt_resnext101_fb_wsl", model) + tt_model = forge.PyTorchModule("pt_resnext101_fb_wsl", model) input_batch = get_image_tensor() @@ -159,21 +159,21 @@ def test_resnext_101_32x8d_fb_wsl_pytorch(test_device): ) def test_resnext_14_osmr_pytorch(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{24*1024}" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(ptcv_get_model, "resnext14_32x4d", pretrained=True) model.eval() - tt_model = pybuda.PyTorchModule("pt_resnext14_osmr", model) + tt_model = forge.PyTorchModule("pt_resnext14_osmr", 
model) input_batch = get_image_tensor() @@ -195,21 +195,21 @@ def test_resnext_14_osmr_pytorch(test_device): ) def test_resnext_26_osmr_pytorch(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" if test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{72*1024}" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(ptcv_get_model, "resnext26_32x4d", pretrained=True) model.eval() - tt_model = pybuda.PyTorchModule("pt_resnext26_osmr", model) + tt_model = forge.PyTorchModule("pt_resnext26_osmr", model) input_batch = get_image_tensor() @@ -231,20 +231,20 @@ def test_resnext_26_osmr_pytorch(test_device): ) def test_resnext_50_osmr_pytorch(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{72*1024}" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(ptcv_get_model, "resnext50_32x4d", pretrained=True) model.eval() - tt_model = pybuda.PyTorchModule("pt_resnext50_osmr", model) + tt_model = forge.PyTorchModule("pt_resnext50_osmr", model) input_batch = get_image_tensor() @@ -265,23 +265,23 @@ def test_resnext_50_osmr_pytorch(test_device): ) def test_resnext_101_osmr_pytorch(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_BALANCER_PREPASS_DISABLED"] = "1" + os.environ["FORGE_BALANCER_PREPASS_DISABLED"] = "1" elif test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + 
os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{80*1024}" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(ptcv_get_model, "resnext101_64x4d", pretrained=True) model.eval() - tt_model = pybuda.PyTorchModule("pt_resnext101_osmr", model) + tt_model = forge.PyTorchModule("pt_resnext101_osmr", model) input_batch = get_image_tensor() diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_retinanet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_retinanet.py similarity index 86% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_retinanet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_retinanet.py index d275c5639..b3d1372b2 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_retinanet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_retinanet.py @@ -2,20 +2,20 @@ # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge from PIL import Image import requests from torchvision import transforms import os import pytest -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda import VerifyConfig +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge import VerifyConfig import sys sys.path.append("third_party/confidential_customer_models/cv_demos/retinanet/model/") from model_implementation import Model -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice def img_preprocess(): @@ -47,12 +47,12 @@ def img_preprocess(): @pytest.mark.parametrize("variant", variants) def test_retinanet(variant, test_device): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_RIBBON2"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "73728" if test_device.arch == BackendDevice.Wormhole_B0: @@ -105,9 +105,9 @@ def test_retinanet(variant, test_device): if test_device.arch == BackendDevice.Grayskull: # Temp mitigations for net2pipe errors, should be removed. 
# - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" if variant == "retinanet_rn18fpn": compiler_cfg.balancer_op_override( @@ -141,7 +141,7 @@ def test_retinanet(variant, test_device): ) model = Model.load(checkpoint_path) model.eval() - tt_model = pybuda.PyTorchModule(f"pt_{variant}", model) + tt_model = forge.PyTorchModule(f"pt_{variant}", model) # Prepare input input_batch = img_preprocess() diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_segformer_imgcls.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_segformer_imgcls.py similarity index 72% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_segformer_imgcls.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_segformer_imgcls.py index 7a073824b..765208ca6 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_segformer_imgcls.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_segformer_imgcls.py @@ -2,10 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind from transformers import ( AutoImageProcessor, SegformerForImageClassification, @@ -40,15 +40,15 @@ def get_sample_data(model_name): @pytest.mark.parametrize("variant", variants_img_classification) def test_segformer_image_classification_pytorch(test_device, variant): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" pcc_value = 0.99 - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: if variant in [ "nvidia/mit-b1", @@ -57,14 +57,14 @@ def test_segformer_image_classification_pytorch(test_device, variant): "nvidia/mit-b4", "nvidia/mit-b5", ]: - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - if variant == "nvidia/mit-b0" and test_device.devtype == pybuda.BackendType.Silicon: + if variant == "nvidia/mit-b0" and test_device.devtype == forge.BackendType.Silicon: pcc_value = 0.97 - elif test_device.arch == pybuda.BackendDevice.Grayskull: + elif test_device.arch == forge.BackendDevice.Grayskull: - if variant in ["nvidia/mit-b1"] and test_device.devtype == pybuda.BackendType.Silicon: + if variant in ["nvidia/mit-b1"] and test_device.devtype == forge.BackendType.Silicon: pcc_value = 0.97 # Set model configurations @@ -80,8 +80,8 @@ def test_segformer_image_classification_pytorch(test_device, variant): # Load the sample image pixel_values = get_sample_data(variant) - # Create PyBuda module from PyTorch model - tt_model = 
pybuda.PyTorchModule("pt_" + str(variant.split("/")[-1].replace("-", "_")), model) + # Create Forge module from PyTorch model + tt_model = forge.PyTorchModule("pt_" + str(variant.split("/")[-1].replace("-", "_")), model) # Run inference on Tenstorrent device verify_module( @@ -93,7 +93,7 @@ def test_segformer_image_classification_pytorch(test_device, variant): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, verify_tvm_compile=True, pcc=pcc_value, ), diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_segformer_semseg.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_segformer_semseg.py similarity index 74% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_segformer_semseg.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_segformer_semseg.py index 9c9e3edc8..93e294295 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_segformer_semseg.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_segformer_semseg.py @@ -2,10 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind from transformers import ( AutoImageProcessor, SegformerForSemanticSegmentation, @@ -38,15 +38,15 @@ def get_sample_data(model_name): @pytest.mark.parametrize("variant", variants_semseg) def test_segformer_semantic_segmentation_pytorch(test_device, variant): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" pcc_value = 0.99 - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: if variant in [ "nvidia/segformer-b1-finetuned-ade-512-512", "nvidia/segformer-b2-finetuned-ade-512-512", @@ -54,7 +54,7 @@ def test_segformer_semantic_segmentation_pytorch(test_device, variant): "nvidia/segformer-b4-finetuned-ade-512-512", ]: - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if ( variant @@ -62,11 +62,11 @@ def test_segformer_semantic_segmentation_pytorch(test_device, variant): "nvidia/segformer-b0-finetuned-ade-512-512", "nvidia/segformer-b2-finetuned-ade-512-512", ] - and test_device.devtype == pybuda.BackendType.Silicon + and test_device.devtype == forge.BackendType.Silicon ): pcc_value = 0.98 - elif test_device.arch == pybuda.BackendDevice.Grayskull: + elif test_device.arch == forge.BackendDevice.Grayskull: if variant == "nvidia/segformer-b2-finetuned-ade-512-512": compiler_cfg.place_on_new_epoch("concatenate_1098.dc.concatenate.0") @@ -77,7 +77,7 @@ def test_segformer_semantic_segmentation_pytorch(test_device, variant): if variant == "nvidia/segformer-b4-finetuned-ade-512-512": compiler_cfg.place_on_new_epoch("concatenate_2748.dc.concatenate.0") - if test_device.devtype == 
pybuda.BackendType.Silicon: + if test_device.devtype == forge.BackendType.Silicon: pcc_value = 0.98 # Load the model from HuggingFace @@ -87,8 +87,8 @@ def test_segformer_semantic_segmentation_pytorch(test_device, variant): # Load the sample image pixel_values = get_sample_data(variant) - # Create PyBuda module from PyTorch model - tt_model = pybuda.PyTorchModule("pt_" + str(variant.split("/")[-1].replace("-", "_")), model) + # Create Forge module from PyTorch model + tt_model = forge.PyTorchModule("pt_" + str(variant.split("/")[-1].replace("-", "_")), model) # Run inference on Tenstorrent device verify_module( @@ -100,7 +100,7 @@ def test_segformer_semantic_segmentation_pytorch(test_device, variant): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, verify_tvm_compile=True, pcc=pcc_value, ), diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_ssd300_resnet50.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_ssd300_resnet50.py similarity index 87% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_ssd300_resnet50.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_ssd300_resnet50.py index 3d030863c..c9a517934 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_ssd300_resnet50.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_ssd300_resnet50.py @@ -1,16 +1,16 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import numpy as np import torch import os import skimage import requests -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendDevice +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge import VerifyConfig +from forge._C.backend_api import BackendDevice def load_image(image_path): @@ -61,14 +61,14 @@ def prepare_input(img_uri): def test_pytorch_ssd300_resnet50(test_device): - # STEP 1 : Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # STEP 1 : Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b compiler_cfg.amp_level = 1 if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "90112" if test_device.arch == BackendDevice.Wormhole_B0: @@ -89,7 +89,7 @@ def test_pytorch_ssd300_resnet50(test_device): checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu")) model.load_state_dict(checkpoint["model"]) model.eval() - tt_model = pybuda.PyTorchModule("ssd300_resnet50", model) + tt_model = forge.PyTorchModule("ssd300_resnet50", model) # STEP 3 : prepare input img = "http://images.cocodataset.org/val2017/000000397133.jpg" diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_swin.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_swin.py similarity index 80% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_swin.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_swin.py index 9410d1b6d..8e6ba3b4b 100644 --- 
a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_swin.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_swin.py @@ -1,8 +1,8 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -# STEP 0: import PyBuda library -import pybuda +# STEP 0: import Forge library +import forge from transformers import ViTImageProcessor, SwinForImageClassification import timm @@ -21,17 +21,17 @@ def test_swin_v1_tiny_4_224_hf_pytorch(test_device): pytest.skip() # Working on it - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.retain_tvm_python_files = True compiler_cfg.enable_tvm_constant_prop = True os.environ["TVM_BACKTRACE"]="1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model feature_extractor = ViTImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") # model = SwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224", torchscript=True) model = download_model(timm.create_model, "swin_tiny_patch4_window7_224", pretrained=True) - tt_model = pybuda.PyTorchModule("Swin_v1_tiny_4_224", model) + tt_model = forge.PyTorchModule("Swin_v1_tiny_4_224", model) # STEP 3: Run inference on Tenstorrent device img_tensor = feature_extractor(images=image, return_tensors="pt").pixel_values @@ -39,7 +39,7 @@ def test_swin_v1_tiny_4_224_hf_pytorch(test_device): # from pthflops import count_ops # flops = count_ops(model, img_tensor) #output = model(img_tensor).logits - output_q = pybuda.run_inference(tt_model, inputs=([img_tensor])) + output_q = forge.run_inference(tt_model, inputs=([img_tensor])) output = output_q.get()[0].value().detach().float().numpy() predicted_class_idx = output.argmax(-1).item() diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_unet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_unet.py similarity index 75% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_unet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_unet.py index f550e9190..9443ab325 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_unet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_unet.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import os import urllib from test.utils import download_model @@ -17,37 +17,37 @@ import numpy as np import pytest -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind from pytorchcv.model_provider import get_model as ptcv_get_model import segmentation_models_pytorch as smp from segmentation_models_pytorch.encoders import get_preprocessing_fn def generate_model_unet_imgseg_osmr_pytorch(test_device, variant): - # Also, golden test segfaults when pushing params to golden: tenstorrent/pybuda#637 + # Also, golden test segfaults when pushing params to golden: tenstorrent/forge#637 - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object 
+ # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.enable_enumerate_u_kt = False - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_FORCE_RESIZE_DENSE_MM"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_FORCE_RESIZE_DENSE_MM"] = "1" + os.environ["FORGE_RIBBON2"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_BALANCER_PREPASS_DISABLED"] = "1" + os.environ["FORGE_BALANCER_PREPASS_DISABLED"] = "1" # Temp mitigations for net2pipe errors, should be removed. # - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" elif test_device.arch == BackendDevice.Grayskull: compiler_cfg.balancer_policy = "CNN" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(ptcv_get_model, variant, pretrained=False) - tt_model = pybuda.PyTorchModule("unet_cityscapes_osmr_pt", model) + tt_model = forge.PyTorchModule("unet_cityscapes_osmr_pt", model) # STEP 3: Run inference on Tenstorrent device img_tensor = x = torch.randn(1, 3, 224, 224) @@ -103,14 +103,14 @@ def get_imagenet_sample(): def test_unet_holocron_pytorch(test_device): from holocron.models.segmentation.unet import unet_tvvgg11 - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(unet_tvvgg11, pretrained= True).eval() - tt_model = pybuda.PyTorchModule("unet_holocron_pt", model) + tt_model = forge.PyTorchModule("unet_holocron_pt", model) img_tensor = get_imagenet_sample() @@ -128,7 +128,7 @@ def test_unet_holocron_pytorch(test_device): # STEP 3: Run inference on Tenstorrent device #output = model(img_tensor) - # output_q = pybuda.run_inference(tt_model, inputs=([img_tensor])) + # output_q = forge.run_inference(tt_model, inputs=([img_tensor])) # output = output_q.get()[0].value() # #print(output) @@ -140,19 +140,19 @@ def test_unet_holocron_pytorch(test_device): #print(mask) #print(pred_mask.mean(), "--", mask.mean()) - #File "/home/mbahnas/GitLab/PYBUDA/pybuda_0317/pybuda/pybuda/pybuda/op/eval/pybuda/resize.py", line 61, in shape + #File "/home/mbahnas/GitLab/FORGE/forge_0317/forge/forge/forge/op/eval/forge/resize.py", line 61, in shape #AssertionError: Only support upsample with integer scale factor def generate_model_unet_imgseg_smp_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + 
compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"]= "FastCut" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"]= "FastCut" compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_1488"] = 3 - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model #encoder_name = "vgg19" encoder_name = "resnet101" #encoder_name = "vgg19_bn" @@ -165,7 +165,7 @@ def generate_model_unet_imgseg_smp_pytorch(test_device, variant): ) model.eval() - tt_model = pybuda.PyTorchModule("unet_qubvel_pt", model) + tt_model = forge.PyTorchModule("unet_qubvel_pt", model) # Image preprocessing params = download_model(smp.encoders.get_preprocessing_params, encoder_name) @@ -199,16 +199,16 @@ def test_unet_qubvel_pytorch(test_device): ) def generate_model_unet_imgseg_torchhub_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if test_device.arch == BackendDevice.Grayskull: compiler_cfg.balancer_op_override("conv2d_transpose_174.dc.conv2d.17.dc.matmul.11", "grid_shape", (4,4)) - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(torch.hub.load, "mateuszbuda/brain-segmentation-pytorch", variant, @@ -218,7 +218,7 @@ def generate_model_unet_imgseg_torchhub_pytorch(test_device, variant): pretrained=True, ) model.eval() - tt_model = pybuda.PyTorchModule("pt_unet_torchhub", model) + tt_model = forge.PyTorchModule("pt_unet_torchhub", model) # Download an example input image url, filename = ("https://github.com/mateuszbuda/brain-segmentation-pytorch/raw/master/assets/TCGA_CS_4944.png", "TCGA_CS_4944.png") @@ -238,7 +238,7 @@ def generate_model_unet_imgseg_torchhub_pytorch(test_device, variant): def test_unet_torchhub_pytorch(test_device): - pybuda.config.override_op_size("_fused_op_6", (2, 2)) + forge.config.override_op_size("_fused_op_6", (2, 2)) model, inputs, _ = generate_model_unet_imgseg_torchhub_pytorch( test_device, "unet", @@ -261,7 +261,7 @@ def test_unet_torchhub_pytorch(test_device): # # STEP 3: Run inference on Tenstorrent device # #output = model(img_batch) - # output_q = pybuda.run_inference(tt_model, inputs=([img_batch])) + # output_q = forge.run_inference(tt_model, inputs=([img_batch])) # output = output_q.get() # print(output) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_vgg.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_vgg.py similarity index 75% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_vgg.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_vgg.py index 35965dc3d..638999af8 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_vgg.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_vgg.py @@ -3,12 +3,12 @@ # 
SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy -import pybuda +import forge import os from pytorchcv.model_provider import get_model as ptcv_get_model @@ -28,20 +28,20 @@ variants = ["vgg11", "vgg13", "vgg16", "vgg19", "bn_vgg19", "bn_vgg19b"] @pytest.mark.parametrize("variant", variants) def test_vgg_osmr_pytorch(variant, test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if (test_device.arch == BackendDevice.Wormhole_B0): os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "65536" - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" # Temp mitigations for net2pipe errors, should be removed. # - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model # Variants: #['vgg11', 'vgg13', 'vgg16', 'vgg19', # 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', @@ -49,7 +49,7 @@ def test_vgg_osmr_pytorch(variant, test_device): #model = src_VGG_Osmr.vgg11(pretrained=True) model = download_model(ptcv_get_model, variant, pretrained=True) model.eval() - tt_model = pybuda.PyTorchModule(f"pt_{variant}_osmr", model) + tt_model = forge.PyTorchModule(f"pt_{variant}_osmr", model) # Image preprocessing try: @@ -78,19 +78,19 @@ def test_vgg_osmr_pytorch(variant, test_device): test_kind=TestKind.INFERENCE, pcc=0.9, enabled=False, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) def test_vgg_19_hf_pytorch(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Wormhole_B0: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "65536" - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" ''' 
# https://pypi.org/project/vgg-pytorch/ @@ -100,10 +100,10 @@ def test_vgg_19_hf_pytorch(test_device): vgg16, vgg16_bn vgg19, vgg19_bn ''' - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(VGG.from_pretrained, 'vgg19') model.eval() - tt_model = pybuda.PyTorchModule("pt_vgg_19_hf", model) + tt_model = forge.PyTorchModule("pt_vgg_19_hf", model) # Image preprocessing try: @@ -131,7 +131,7 @@ def test_vgg_19_hf_pytorch(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, enabled=False, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -156,15 +156,15 @@ def test_vgg_bn19_timm_pytorch(test_device): model_name = 'vgg19_bn' model, image_tensor = download_model(preprocess_timm_model, model_name) - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Wormhole_B0: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "65536" - # STEP 2: Create PyBuda module from PyTorch model - tt_model = pybuda.PyTorchModule(model_name+"_timm_pt", model) + # STEP 2: Create Forge module from PyTorch model + tt_model = forge.PyTorchModule(model_name+"_timm_pt", model) # STEP 3: Run inference on Tenstorrent device verify_module( @@ -176,22 +176,22 @@ def test_vgg_bn19_timm_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], pcc=0.9 ) ) def test_vgg_bn19_torchhub_pytorch(test_device): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Wormhole_B0: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "65536" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model # Variants: #model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg11', pretrained=True) #model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg11_bn', pretrained=True) @@ -202,7 +202,7 @@ def test_vgg_bn19_torchhub_pytorch(test_device): #model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg19', pretrained=True) model = download_model(torch.hub.load, 'pytorch/vision:v0.10.0', 'vgg19_bn', pretrained=True) model.eval() - tt_model = pybuda.PyTorchModule("pt_vgg_bn19_torchhub", model) + tt_model = 
forge.PyTorchModule("pt_vgg_bn19_torchhub", model) # Image preprocessing @@ -230,7 +230,7 @@ def test_vgg_bn19_torchhub_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], pcc = 0.98 if test_device.arch == BackendDevice.Grayskull else 0.99 ) ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_vilt.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_vilt.py similarity index 89% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_vilt.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_vilt.py index d0d568d42..59dfc34b8 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_vilt.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_vilt.py @@ -3,13 +3,13 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendDevice +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind +from forge._C.backend_api import BackendDevice import os -import pybuda +import forge import requests import torch from PIL import Image @@ -98,11 +98,11 @@ def forward(self, embedding_output, attention_mask, head_mask=None): def generate_model_vilt_question_answering_hf_pytorch(test_device, variant): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # Set model configurations config = ViltConfig.from_pretrained(variant) @@ -125,7 +125,7 @@ def generate_model_vilt_question_answering_hf_pytorch(test_device, variant): text_vision_embedding_model = ViLtEmbeddingWrapper(model) vilt_model = ViltModelWrapper(model,task="qa") - tt_model = pybuda.PyTorchModule("ViLt_question_answering", vilt_model) + tt_model = forge.PyTorchModule("ViLt_question_answering", vilt_model) embedding_output, attention_mask = text_vision_embedding_model(**encoding) @@ -154,11 +154,11 @@ def test_vilt_question_answering_hf_pytorch(variant, test_device): def generate_model_vilt_maskedlm_hf_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # Set model configurations config = ViltConfig.from_pretrained(variant) @@ -183,7 +183,7 @@ def generate_model_vilt_maskedlm_hf_pytorch(test_device, variant): text_vision_embedding_model = ViLtEmbeddingWrapper(model) vilt_model = ViltModelWrapper(model = model, 
task = "maskedlm", text_seq_len = encoding["input_ids"].shape[1]) - tt_model = pybuda.PyTorchModule("ViLt_maskedlm", vilt_model) + tt_model = forge.PyTorchModule("ViLt_maskedlm", vilt_model) embedding_output, attention_mask = text_vision_embedding_model(**encoding) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_vit.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_vit.py similarity index 80% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_vit.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_vit.py index 95229ca30..7dde54a4f 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_vit.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_vit.py @@ -3,14 +3,14 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, DataFormat +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, DataFormat import os -import pybuda +import forge import requests import torch from datasets import load_dataset @@ -25,19 +25,19 @@ def generate_model_vit_imgcls_hf_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.balancer_policy = "Ribbon" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model image_processor = download_model(AutoImageProcessor.from_pretrained, variant ) model = download_model(ViTForImageClassification.from_pretrained, variant ) - tt_model = pybuda.PyTorchModule("ViT_classif_16_224", model) + tt_model = forge.PyTorchModule("ViT_classif_16_224", model) # STEP 3: Run inference on Tenstorrent device img_tensor = image_processor(image_1, return_tensors="pt").pixel_values @@ -53,7 +53,7 @@ def test_vit_classify_224_hf_pytorch(variant, test_device): test_device, variant, ) - if "PYBUDA_NEB_GALAXY_CI" in os.environ: + if "FORGE_NEB_GALAXY_CI" in os.environ: chip_ids = [0, 11, 10, 9, 8, 7, 19, 20, 21, 22, 23, 24, 6, 5, 14, 13, 12, 16, 15, 3, 4, 26, 25, 32, 31, 30, 29, 28, 27, 1, 2, 18, 17] else: chip_ids = [0] @@ -79,9 +79,9 @@ def test_vit_classify_224_hf_pytorch_1x1(variant, test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" if "large" in variant: - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "20000" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "20000" model, inputs, _ = generate_model_vit_imgcls_hf_pytorch( @@ -116,20 +116,20 @@ def test_vit_classification_1x1_demo(test_device, mode, variant): pytest.skip("Not supported") # Setup for 1x1 grid - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - 
os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.enable_tvm_cpu_fallback = False # Load image preprocessor and model image_processor = download_model(AutoImageProcessor.from_pretrained, variant) framework_model = download_model(ViTForImageClassification.from_pretrained, variant) model_name = "_".join(variant.split('/')[-1].split('-')[:2]) + f"_{mode}" - tt_model = pybuda.PyTorchModule(model_name, framework_model) + tt_model = forge.PyTorchModule(model_name, framework_model) # Load and preprocess image dataset = load_dataset("huggingface/cats-image") @@ -151,7 +151,7 @@ def test_vit_classification_1x1_demo(test_device, mode, variant): ) elif mode == "demo": # Run inference on Tenstorrent device - output_q = pybuda.run_inference(tt_model, inputs=([input_image])) + output_q = forge.run_inference(tt_model, inputs=([input_image])) output = output_q.get()[0].value().detach().float().numpy() # Postprocessing diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_vovnet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_vovnet.py similarity index 78% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_vovnet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_vovnet.py index 7217c8b2a..35de7ccd9 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_vovnet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_vovnet.py @@ -3,12 +3,12 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind -import pybuda +import forge import os import torch @@ -50,14 +50,14 @@ def get_image(): def generate_model_vovnet_imgcls_osmr_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model = download_model(ptcv_get_model, variant, pretrained=True) - tt_model = pybuda.PyTorchModule(f"{variant}_osmr_pt", model) + tt_model = forge.PyTorchModule(f"{variant}_osmr_pt", model) image_tensor = get_image() @@ -107,15 +107,15 @@ def preprocess_steps(model_type): def generate_model_vovnet39_imgcls_stigma_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + 
os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model, image_tensor = download_model(preprocess_steps, vovnet39) - tt_model = pybuda.PyTorchModule("vovnet_39_stigma_pt", model) + tt_model = forge.PyTorchModule("vovnet_39_stigma_pt", model) return tt_model, [image_tensor], {} @@ -126,7 +126,7 @@ def test_vovnet_v1_39_stigma_pytorch(test_device, enable_default_dram_parameters test_device, None, ) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.default_dram_parameters = enable_default_dram_parameters verify_module( @@ -145,14 +145,14 @@ def test_vovnet_v1_39_stigma_pytorch(test_device, enable_default_dram_parameters def generate_model_vovnet57_imgcls_stigma_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model model, image_tensor = download_model(preprocess_steps, vovnet57) - tt_model = pybuda.PyTorchModule("vovnet_57_stigma_pt", model) + tt_model = forge.PyTorchModule("vovnet_57_stigma_pt", model) return tt_model, [image_tensor], {} @@ -197,18 +197,18 @@ def preprocess_timm_model(model_name): def generate_model_vovnet_imgcls_timm_pytorch(test_device, variant): model, image_tensor = download_model(preprocess_timm_model, variant) - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # tenstorrent/pybuda#915 + # tenstorrent/forge#915 if test_device.arch == BackendDevice.Grayskull and variant == "ese_vovnet39b": compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - # STEP 2: Create PyBuda module from PyTorch model - tt_model = pybuda.PyTorchModule(variant+"_pt", model) + # STEP 2: Create Forge module from PyTorch model + tt_model = forge.PyTorchModule(variant+"_pt", model) return tt_model, [image_tensor], {} diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_wideresnet.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_wideresnet.py similarity index 84% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_wideresnet.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_wideresnet.py index 75c276ab5..58709c215 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_wideresnet.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_wideresnet.py @@ -3,14 +3,14 @@ # SPDX-License-Identifier: Apache-2.0 import pytest import os -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from 
pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind from test.model_demos.models.wideresnet import ( generate_model_wideresnet_imgcls_pytorch, generate_model_wideresnet_imgcls_timm, ) -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice variants = ["wide_resnet50_2", "wide_resnet101_2"] @@ -25,7 +25,7 @@ def test_wideresnet_pytorch(variant, test_device): variant, ) - os.environ["PYBUDA_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" + os.environ["FORGE_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" verify_module( model, @@ -55,7 +55,7 @@ def test_wideresnet_timm(variant, test_device): variant, ) - os.environ["PYBUDA_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" + os.environ["FORGE_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" verify_module( model, diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_xception.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_xception.py similarity index 69% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_xception.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_xception.py index 546dc7d2b..312fa7cca 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_xception.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_xception.py @@ -3,10 +3,10 @@ # SPDX-License-Identifier: Apache-2.0 import pytest import os -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendDevice +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind +from forge._C.backend_api import BackendDevice from test.model_demos.models.xception import generate_model_xception_imgcls_timm variants = ["xception", "xception41", "xception65", "xception71"] @@ -15,12 +15,12 @@ @pytest.mark.parametrize("variant", variants, ids=variants) def test_xception_timm(variant, test_device): if test_device.arch == BackendDevice.Grayskull and variant == "xception": - os.environ["PYBUDA_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" + os.environ["FORGE_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" # Temp mitigations for net2pipe errors, should be removed. 
# - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" ( model, inputs, diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_yolo_v3.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_yolo_v3.py similarity index 69% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_yolo_v3.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_yolo_v3.py index cb0a6cf0c..60f6cfefd 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_yolo_v3.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_yolo_v3.py @@ -8,11 +8,11 @@ from PIL import Image import torch -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy # https://github.com/holli/yolov3_pytorch sys.path = list(set(sys.path + ["third_party/confidential_customer_models/model_2/pytorch/"])) @@ -24,17 +24,17 @@ def generate_model_yolotinyV3_imgcls_holli_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b model = Yolov3Tiny(num_classes=80, use_wrong_previous_anchors=True) model.load_state_dict(torch.load('third_party/confidential_customer_models/model_2/pytorch/yolo_v3/weights/yolov3_tiny_coco_01.h5')) model.eval() - # STEP 2: Create PyBuda module from PyTorch model - tt_model = pybuda.PyTorchModule("pytorch_yolov3_tiny_holli", model) + # STEP 2: Create Forge module from PyTorch model + tt_model = forge.PyTorchModule("pytorch_yolov3_tiny_holli", model) sz = 512 imgfile = "third_party/confidential_customer_models/model_2/pytorch/yolo_v3/person.jpg" @@ -59,24 +59,24 @@ def test_yolov3_tiny_holli_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], pcc=0.97, ) ) def generate_model_yoloV3_imgcls_holli_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge._C.Float16_b + 
os.environ["FORGE_RIBBON2"] = "1" model = Yolov3(num_classes=80) model.load_state_dict(torch.load('third_party/confidential_customer_models/model_2/pytorch/yolo_v3/weights/yolov3_coco_01.h5', map_location=torch.device('cpu'))) model.eval() - # STEP 2: Create PyBuda module from PyTorch model - tt_model = pybuda.PyTorchModule("pytorch_yolov3_holli", model) + # STEP 2: Create Forge module from PyTorch model + tt_model = forge.PyTorchModule("pytorch_yolov3_holli", model) sz = 512 imgfile = "third_party/confidential_customer_models/model_2/pytorch/yolo_v3/person.jpg" @@ -86,7 +86,7 @@ def generate_model_yoloV3_imgcls_holli_pytorch(test_device, variant): pcc = 0.9 if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" pcc = 0.86 return tt_model, [img_tensor], {"pcc": pcc} @@ -98,7 +98,7 @@ def test_yolov3_holli_pytorch(test_device): ) if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" verify_module( model, @@ -109,7 +109,7 @@ def test_yolov3_holli_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], pcc=other["pcc"], ) ) @@ -118,9 +118,9 @@ def test_yolov3_holli_pytorch_1x1(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_RIBBON2"] = "1" model, inputs, other = generate_model_yoloV3_imgcls_holli_pytorch( test_device, None, ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_yolo_v5.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_yolo_v5.py similarity index 67% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_yolo_v5.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_yolo_v5.py index 2e7e8d7a1..0e807036f 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_yolo_v5.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_yolo_v5.py @@ -8,17 +8,17 @@ import torch.nn as nn from PIL import Image -import pybuda +import forge -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, DataFormat ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.backend import verify_module +from forge.verify.config import TestKind def generate_model_yoloV5I320_imgcls_torchhub_pytorch(test_device, variant, size): @@ -27,25 +27,25 @@ def generate_model_yoloV5I320_imgcls_torchhub_pytorch(test_device, variant, size compiler_cfg.enable_tm_cpu_fallback = False compiler_cfg.enable_conv_prestride = True compiler_cfg.enable_tvm_constant_prop = True - 
os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" if test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_tm_cpu_fallback = True - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" elif test_device.arch == BackendDevice.Wormhole_B0: if size == "m": - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_RIBBON2"] = "1" compiler_cfg.default_df_override = DataFormat.Float16_b - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16, 3:4}" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = f"{64*1024}" + os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16, 3:4}" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_EXTRA_L1_MARGIN"] = f"{64*1024}" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{6*1024}" if size == "l" or size == "x": compiler_cfg.enable_enumerate_u_kt = False - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_BALANCER_PREPASS_DISABLED"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_BALANCER_PREPASS_DISABLED"] = "1" if size == "l" or size == "m" or size == "x": compiler_cfg.enable_auto_fusing = False @@ -77,7 +77,7 @@ def test_yolov5_320x320(test_device, size): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework = True, + verify_forge_codegen_vs_framework = True, ), ) @@ -87,60 +87,60 @@ def generate_model_yoloV5I640_imgcls_torchhub_pytorch(test_device, variant, size compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" - os.environ["PYBUDA_DISABLE_CAP_SPARSE_MM_FIDELITY"] = "1" - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_DISABLE_CAP_SPARSE_MM_FIDELITY"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" if test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_enumerate_u_kt = False - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" compiler_cfg.enable_tm_cpu_fallback = True compiler_cfg.enable_conv_prestride = True - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16, 3:4}" + os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16, 3:4}" if size in ["s", "m", "l", "x", "n"]: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{65*1024}" if size in ["l", "x"]: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{80*1024}" os.environ[ - "PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE" + "FORGE_GRAPHSOLVER_SELF_CUT_TYPE" ] = "FastCut" compiler_cfg.enable_enumerate_u_kt = True - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_RIBBON2"] = "1" if size in ["x"]: compiler_cfg.place_on_new_epoch("conv2d_210.dc.matmul.11") - os.environ["PYBUDA_TEMP_BALANCER_DISABLE_TARGET_PROXIMITY"] = "1" - os.environ["PYBUDA_TEMP_RIBBON2_LEGACY_UTIL_EVAL"] = "1" + 
os.environ["FORGE_TEMP_BALANCER_DISABLE_TARGET_PROXIMITY"] = "1" + os.environ["FORGE_TEMP_RIBBON2_LEGACY_UTIL_EVAL"] = "1" # Temp mitigations for net2pipe errors, should be removed. # - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" if size in ["m"]: - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" - os.environ["PYBUDA_TEMP_BALANCER_DISABLE_TARGET_PROXIMITY"] = "1" + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_TEMP_BALANCER_DISABLE_TARGET_PROXIMITY"] = "1" compiler_cfg.place_on_new_epoch("conv2d_27.dc.matmul.8") if size in ["l"]: compiler_cfg.place_on_new_epoch("conv2d_313.dc.matmul.8") # Temp mitigations for net2pipe errors, should be removed. # - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" elif test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16, 3:4}" - os.environ["PYBUDA_MAX_GRAPH_CUT_RETRY"] = "100" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "0" + os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16, 3:4}" + os.environ["FORGE_MAX_GRAPH_CUT_RETRY"] = "100" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "0" if size == "s" or size == "m" or size == "l" or size == "x": - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg.enable_conv_prestride = True compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.default_df_override = DataFormat.Float16_b @@ -153,26 +153,26 @@ def generate_model_yoloV5I640_imgcls_torchhub_pytorch(test_device, variant, size compiler_cfg.balancer_op_override("concatenate_19.dc.concatenate.30.dc.concatenate.1.dc.buffer.0", "t_stream_shape", (3,1)) if size == "m": compiler_cfg.balancer_op_override("concatenate_332.dc.concatenate.7", "grid_shape", (1,1)) - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{112*1024}" - os.environ["PYBUDA_TEMP_RIBBON2_LEGACY_UTIL_EVAL"] = "1" + os.environ["FORGE_TEMP_RIBBON2_LEGACY_UTIL_EVAL"] = "1" if size == "l": compiler_cfg.enable_auto_transposing_placement = True compiler_cfg.enable_tm_cpu_fallback = True - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" compiler_cfg.balancer_op_override("conv2d_328.dc.matmul.8", "grid_shape", (5,2)) if size == "x": compiler_cfg.balancer_op_override("concatenate_363.dc.concatenate.0", "grid_shape", (1,1)) 
compiler_cfg.balancer_op_override("conv2d_41.dc.matmul.8", "t_stream_shape", (1,1)) - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" compiler_cfg.enable_tm_cpu_fallback = True - os.environ["PYBUDA_DISABLE_CAP_SPARSE_MM_FIDELITY"] = "0" - os.environ["PYBUDA_TEMP_BALANCER_DISABLE_TARGET_PROXIMITY"] = "1" + os.environ["FORGE_DISABLE_CAP_SPARSE_MM_FIDELITY"] = "0" + os.environ["FORGE_TEMP_BALANCER_DISABLE_TARGET_PROXIMITY"] = "1" # Temp mitigations for net2pipe errors, should be removed. # - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" name = "yolov5" + size model = download_model(torch.hub.load, variant, name, pretrained=True) @@ -189,7 +189,7 @@ def generate_model_yoloV5I640_imgcls_torchhub_pytorch(test_device, variant, size ) def test_yolov5_640x640(test_device, size): if size in ["l"] and test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" + os.environ["FORGE_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" model, inputs, _ = generate_model_yoloV5I640_imgcls_torchhub_pytorch( test_device, "ultralytics/yolov5", @@ -204,7 +204,7 @@ def test_yolov5_640x640(test_device, size): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework = True, + verify_forge_codegen_vs_framework = True, ), ) @@ -213,20 +213,20 @@ def generate_model_yoloV5I480_imgcls_torchhub_pytorch(test_device, variant, size compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_tm_cpu_fallback = True - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{113:128}" + os.environ["FORGE_PAD_SPARSE_MM"] = "{113:128}" if size == "x": - os.environ["PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" compiler_cfg.balancer_op_override("concatenate_40.dc.concatenate.30.dc.concatenate.1.dc.buffer.0", "t_stream_shape", (6,1)) compiler_cfg.balancer_op_override("conv2d_41.dc.matmul.8", "grid_shape", (5,5)) elif size == "m": - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" compiler_cfg.balancer_op_override("concatenate_26.dc.concatenate.30.dc.concatenate.1.dc.buffer.0", "t_stream_shape", (6,1)) os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{32*1024}" else: @@ -236,9 +236,9 @@ def generate_model_yoloV5I480_imgcls_torchhub_pytorch(test_device, variant, size # env vars needed to support 640x640 yolov5 working compiler_cfg.default_df_override = DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" if size != "x": - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16, 3:4}" + 
os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16, 3:4}" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{64*1024}" if size == "s": @@ -247,17 +247,17 @@ def generate_model_yoloV5I480_imgcls_torchhub_pytorch(test_device, variant, size compiler_cfg.default_dram_parameters = True if size == "m": - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "10" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "10" compiler_cfg.balancer_op_override("concatenate_26.dc.concatenate.30.dc.concatenate.1.dc.buffer.0", "t_stream_shape", (6,1)) elif size == "l": compiler_cfg.enable_auto_fusing = False compiler_cfg.place_on_new_epoch("concatenate_208.dc.concatenate.0") elif size == "x": - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" # These are planned to be on by default - os.environ["PYBUDA_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" + os.environ["FORGE_RIBBON2_CALCULATE_TARGET_CYCLES"] = "1" name = "yolov5" + size model = download_model(torch.hub.load, variant, name, pretrained=True) @@ -273,11 +273,11 @@ def generate_model_yoloV5I480_imgcls_torchhub_pytorch(test_device, variant, size ) def test_yolov5_480x480(test_device, size): if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" if size in ["m", "l"] and test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" if size in ["s"] and test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" + os.environ["FORGE_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" model, inputs, _ = generate_model_yoloV5I480_imgcls_torchhub_pytorch( test_device, "ultralytics/yolov5", @@ -292,7 +292,7 @@ def test_yolov5_480x480(test_device, size): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework = True, + verify_forge_codegen_vs_framework = True, verify_post_placer=False ), ) @@ -301,10 +301,10 @@ def test_yolov5_480x480(test_device, size): @pytest.mark.skip(reason="Not supported") def test_yolov5_1280x1280(test_device): # env vars needed to support 640x640 yolov5 working - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16}" - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16}" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_PADDING_PASS_BUFFER_QUEUE"] = "1" + os.environ["FORGE_PADDING_PASS_BUFFER_QUEUE"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" @@ -329,6 +329,6 @@ def test_yolov5_1280x1280(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework = True, + verify_forge_codegen_vs_framework = True, ), ) diff --git a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_yolo_v6.py b/forge/test/model_demos/high_prio/cnn/pytorch/test_yolo_v6.py similarity index 85% rename from pybuda/test/model_demos/high_prio/cnn/pytorch/test_yolo_v6.py rename to forge/test/model_demos/high_prio/cnn/pytorch/test_yolo_v6.py index 060ab1076..da879f366 100644 --- a/pybuda/test/model_demos/high_prio/cnn/pytorch/test_yolo_v6.py +++ b/forge/test/model_demos/high_prio/cnn/pytorch/test_yolo_v6.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 
Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import os import pytest import requests @@ -10,10 +10,10 @@ import torch from PIL import Image from yolov6 import YOLOV6 -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendDevice +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge import VerifyConfig +from forge._C.backend_api import BackendDevice # preprocessing steps referred form https://github.com/meituan/YOLOv6/blob/main/inference.ipynb @@ -95,23 +95,23 @@ def process_image(path, img_size, stride, half): @pytest.mark.parametrize("variant", variants) def test_yolo_v6_pytorch(variant, test_device): - # STEP 1 : Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # STEP 1 : Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b if variant in ["yolov6m", "yolov6l"]: - os.environ["PYBUDA_FORK_JOIN_BUF_QUEUES"] = "1" - os.environ["PYBUDA_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" - os.environ["PYBUDA_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" - os.environ["PYBUDA_MAX_FORK_JOIN_BUF"] = "1" + os.environ["FORGE_FORK_JOIN_BUF_QUEUES"] = "1" + os.environ["FORGE_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" + os.environ["FORGE_FORK_JOIN_SKIP_EXPANDING_BUFFERS"] = "1" + os.environ["FORGE_MAX_FORK_JOIN_BUF"] = "1" # Temp mitigations for net2pipe errors, should be removed. 
# - os.environ["PYBUDA_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" - os.environ["PYBUDA_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" - os.environ["PYBUDA_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_FUSED_ESTIMATES"] = "0" + os.environ["FORGE_TEMP_SCALE_SPARSE_ESTIMATE_ARGS"] = "0" + os.environ["FORGE_TEMP_ENABLE_NEW_SPARSE_ESTIMATES"] = "0" if test_device.arch == BackendDevice.Grayskull and variant == "yolov6m": compiler_cfg.balancer_op_override( @@ -124,7 +124,7 @@ def test_yolo_v6_pytorch(variant, test_device): ) if test_device.arch == BackendDevice.Wormhole_B0 and variant == "yolov6l": - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" if test_device.arch == BackendDevice.Grayskull and variant == "yolov6l": compiler_cfg.balancer_op_override( @@ -152,7 +152,7 @@ def test_yolo_v6_pytorch(variant, test_device): model = model.model model.eval() - tt_model = pybuda.PyTorchModule(f"{variant}_pt", model) + tt_model = forge.PyTorchModule(f"{variant}_pt", model) # STEP 3 : prepare input url = "http://images.cocodataset.org/val2017/000000397133.jpg" diff --git a/pybuda/test/model_demos/high_prio/cnn/tflite/__init__.py b/forge/test/model_demos/high_prio/cnn/tflite/__init__.py similarity index 100% rename from pybuda/test/model_demos/high_prio/cnn/tflite/__init__.py rename to forge/test/model_demos/high_prio/cnn/tflite/__init__.py diff --git a/pybuda/test/model_demos/high_prio/cnn/tflite/test_efficientnet_lite.py b/forge/test/model_demos/high_prio/cnn/tflite/test_efficientnet_lite.py similarity index 84% rename from pybuda/test/model_demos/high_prio/cnn/tflite/test_efficientnet_lite.py rename to forge/test/model_demos/high_prio/cnn/tflite/test_efficientnet_lite.py index 48e51296b..a8a958a15 100644 --- a/pybuda/test/model_demos/high_prio/cnn/tflite/test_efficientnet_lite.py +++ b/forge/test/model_demos/high_prio/cnn/tflite/test_efficientnet_lite.py @@ -7,19 +7,19 @@ import torch.nn as nn from PIL import Image -from pybuda import ( +from forge import ( TFLiteModule, VerifyConfig, BackendType, DataFormat, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind, BackendDevice +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind, BackendDevice from tvm import relay import tflite import tensorflow as tf -import pybuda +import forge import os @@ -27,14 +27,14 @@ def test_efficientnet_lite0_1x1(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.graph_solver_self_cut_type = "FastCut" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16 + compiler_cfg.default_df_override = forge.DataFormat.Float16 tflite_path = "third_party/confidential_customer_models/model_2/tflite/efficientnet-lite0-fp32.tflite" @@ -57,14 +57,14 @@ def test_efficientnet_lite4_1x1(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" 
- os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.graph_solver_self_cut_type = "FastCut" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16 + compiler_cfg.default_df_override = forge.DataFormat.Float16 tflite_path = "third_party/confidential_customer_models/model_2/tflite/efficientnet-lite4-fp32.tflite" @@ -80,7 +80,7 @@ def test_efficientnet_lite4_1x1(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, verify_pipeline_result_vs_framework=False, - verify_pybuda_codegen_vs_framework=False, + verify_forge_codegen_vs_framework=False, ), ) @@ -101,9 +101,9 @@ def test_efficientnet_lite0(test_device): compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_69"] = 5 import os - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{7:8, 25:32, 98:128}" - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{5:8, 15:16, 3:4,21:24}" - os.environ["PYBUDA_MANUAL_SPLICE_DECOMP_TH"] = "98" + os.environ["FORGE_PAD_SPARSE_MM"] = "{7:8, 25:32, 98:128}" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{5:8, 15:16, 3:4,21:24}" + os.environ["FORGE_MANUAL_SPLICE_DECOMP_TH"] = "98" module = TFLiteModule("tflite_efficientnet_lite0", tflite_path) input_shape = (1, 224, 224, 3) @@ -136,8 +136,8 @@ def test_efficientnet_lite1(test_device): compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_81"] = 5 import os - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{113:128}" - os.environ["PYBUDA_MANUAL_SPLICE_DECOMP_TH"] = "113" + os.environ["FORGE_PAD_SPARSE_MM"] = "{113:128}" + os.environ["FORGE_MANUAL_SPLICE_DECOMP_TH"] = "113" module = TFLiteModule("tflite_efficientnet_lite1", tflite_path) @@ -165,9 +165,9 @@ def test_efficientnet_lite2(test_device): # compiler_cfg.enable_conv_prestride = True import os - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{529:532, 35:48}" - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{5:8, 17:20, 23:24, 39:40}" - os.environ["PYBUDA_MANUAL_SPLICE_DECOMP_TH"] = "133" + os.environ["FORGE_PAD_SPARSE_MM"] = "{529:532, 35:48}" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{5:8, 17:20, 23:24, 39:40}" + os.environ["FORGE_MANUAL_SPLICE_DECOMP_TH"] = "133" tflite_path = "third_party/confidential_customer_models/model_2/tflite/efficientnet-lite2-fp32.tflite" compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_0"] = 3 @@ -196,9 +196,9 @@ def test_efficientnet_lite3(test_device): compiler_cfg.graph_solver_self_cut_type = "FastCut" import os - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{613:640}" - os.environ["PYBUDA_MANUAL_SPLICE_DECOMP_TH"] = "613" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_PAD_SPARSE_MM"] = "{613:640}" + os.environ["FORGE_MANUAL_SPLICE_DECOMP_TH"] = "613" tflite_path = "third_party/confidential_customer_models/model_2/tflite/efficientnet-lite3-fp32.tflite" @@ -230,8 +230,8 @@ def test_efficientnet_lite4(test_device): compiler_cfg.graph_solver_self_cut_type = "FastCut" import os - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16}" - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{51:54, 11:16, 6:8, 5:8, 21:24, 30:32}" + os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16}" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{51:54, 11:16, 6:8, 5:8, 21:24, 30:32}" tflite_path = 
"third_party/confidential_customer_models/model_2/tflite/efficientnet-lite4-fp32.tflite" compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_88"] = 5 @@ -262,6 +262,6 @@ def test_efficientnet_lite4(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, verify_pipeline_result_vs_framework=False, - verify_pybuda_codegen_vs_framework=False, + verify_forge_codegen_vs_framework=False, ), ) diff --git a/pybuda/test/model_demos/high_prio/cnn/tflite/test_hand_landmarker.py b/forge/test/model_demos/high_prio/cnn/tflite/test_hand_landmarker.py similarity index 77% rename from pybuda/test/model_demos/high_prio/cnn/tflite/test_hand_landmarker.py rename to forge/test/model_demos/high_prio/cnn/tflite/test_hand_landmarker.py index af46db2ad..af269de65 100644 --- a/pybuda/test/model_demos/high_prio/cnn/tflite/test_hand_landmarker.py +++ b/forge/test/model_demos/high_prio/cnn/tflite/test_hand_landmarker.py @@ -8,18 +8,18 @@ import torch.nn as nn from PIL import Image -from pybuda import ( +from forge import ( TFLiteModule, VerifyConfig, BackendType, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind, BackendDevice +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind, BackendDevice from tvm import relay import tflite import tensorflow as tf -import pybuda +import forge import os @@ -27,15 +27,15 @@ def test_hand_landmark_lite_1x1(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_ENABLE_SINGLE_BUFFER_FALLBACK"] = "1" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_ENABLE_SINGLE_BUFFER_FALLBACK"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.graph_solver_self_cut_type = "ConsumerOperandDataEdgesFirst" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b tflite_path = "third_party/confidential_customer_models/model_2/tflite/hand_landmark_lite.tflite" @@ -61,8 +61,8 @@ def test_palm_detection_lite_1x1(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" diff --git a/pybuda/test/model_demos/high_prio/cnn/tflite/test_mobilenet_ssd.py b/forge/test/model_demos/high_prio/cnn/tflite/test_mobilenet_ssd.py similarity index 73% rename from pybuda/test/model_demos/high_prio/cnn/tflite/test_mobilenet_ssd.py rename to forge/test/model_demos/high_prio/cnn/tflite/test_mobilenet_ssd.py index f6cbbe39a..9848de2fc 100644 --- a/pybuda/test/model_demos/high_prio/cnn/tflite/test_mobilenet_ssd.py +++ b/forge/test/model_demos/high_prio/cnn/tflite/test_mobilenet_ssd.py @@ -8,18 +8,18 @@ import torch.nn as nn from PIL import Image -from pybuda import ( +from forge import ( TFLiteModule, VerifyConfig, 
BackendType, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind, BackendDevice +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind, BackendDevice from tvm import relay import tflite import tensorflow as tf -import pybuda +import forge import os @@ -27,14 +27,14 @@ def test_mobilenet_ssd_1x1(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.graph_solver_self_cut_type = "FastCut" - compiler_cfg.default_df_override=pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override=forge.DataFormat.Float16_b compiler_cfg.cpu_fallback_ops = set(["concatenate"]) tflite_path = "third_party/confidential_customer_models/model_2/tflite/ssd_mobilenet_v2.tflite" diff --git a/pybuda/test/model_demos/high_prio/cnn/tflite/test_pose_landmark.py b/forge/test/model_demos/high_prio/cnn/tflite/test_pose_landmark.py similarity index 84% rename from pybuda/test/model_demos/high_prio/cnn/tflite/test_pose_landmark.py rename to forge/test/model_demos/high_prio/cnn/tflite/test_pose_landmark.py index 98ec9790c..78df26d16 100644 --- a/pybuda/test/model_demos/high_prio/cnn/tflite/test_pose_landmark.py +++ b/forge/test/model_demos/high_prio/cnn/tflite/test_pose_landmark.py @@ -8,18 +8,18 @@ import torch.nn as nn from PIL import Image -from pybuda import ( +from forge import ( TFLiteModule, VerifyConfig, BackendType, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind, BackendDevice +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind, BackendDevice from tvm import relay import tflite import tensorflow as tf -import pybuda +import forge import os @@ -27,16 +27,16 @@ def test_pose_landmark_lite_1x1(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - os.environ["PYBUDA_SPLIT_RESIZE2D"] = "128" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_MAX_CONCAT_INPUTS"] = "6" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_SPLIT_RESIZE2D"] = "128" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_MAX_CONCAT_INPUTS"] = "6" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.graph_solver_self_cut_type = "FastCut" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b compiler_cfg.enable_single_buffer_fallback = True tflite_path = "third_party/confidential_customer_models/model_2/tflite/pose_landmark_lite.tflite" @@ -63,16 +63,16 @@ def test_pose_landmark_heavy_1x1(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip() - 
os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - os.environ["PYBUDA_SPLIT_RESIZE2D"] = "128" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_MAX_CONCAT_INPUTS"] = "6" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_SPLIT_RESIZE2D"] = "128" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_MAX_CONCAT_INPUTS"] = "6" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.graph_solver_self_cut_type = "FastCut" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b compiler_cfg.amp_level = 1 compiler_cfg.enable_single_buffer_fallback = True @@ -112,7 +112,7 @@ def test_pose_landmark_lite(test_device): compiler_cfg.balancer_op_override("conv2d_21.dc.matmul.11", "grid_shape", (4,3)) # blobgen compiler_cfg.balancer_op_override("conv2d_26.dc.matmul.11", "grid_shape", (4,5)) elif test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{11:12}" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_CONCAT"] = "{11:12}" compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_21"] = 5 compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_26"] = 5 diff --git a/pybuda/test/model_demos/high_prio/nlp/__init__.py b/forge/test/model_demos/high_prio/nlp/__init__.py similarity index 100% rename from pybuda/test/model_demos/high_prio/nlp/__init__.py rename to forge/test/model_demos/high_prio/nlp/__init__.py diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/__init__.py b/forge/test/model_demos/high_prio/nlp/pytorch/__init__.py similarity index 100% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/__init__.py rename to forge/test/model_demos/high_prio/nlp/pytorch/__init__.py diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_albert.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_albert.py similarity index 74% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_albert.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_albert.py index 133d7e903..01e4ea06d 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_albert.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_albert.py @@ -6,13 +6,13 @@ import os -import pybuda +import forge from transformers import AlbertForMaskedLM, AlbertTokenizer, AlbertForTokenClassification, AlbertForSequenceClassification, AlbertForQuestionAnswering -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind sizes = ["base", "large", "xlarge", "xxlarge"] variants = ["v1", "v2"] @@ -25,45 +25,45 @@ def test_albert_masked_lm_pytorch(size, variant, test_device): tokenizer = download_model(AlbertTokenizer.from_pretrained, model_ckpt) model = download_model(AlbertForMaskedLM.from_pretrained, model_ckpt) - pybuda.config.set_configuration_options( - default_df_override=pybuda.DataFormat.Float16, + forge.config.set_configuration_options( + default_df_override=forge.DataFormat.Float16, amp_level=2, ) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = 
forge.config._get_global_compiler_config() if ("xxlarge" in model_ckpt): if test_device.arch == BackendDevice.Grayskull: - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_auto_fusing = False compiler_cfg.amp_level = 2 - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "2000000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "2000000" if variant == "v2": compiler_cfg.enable_enumerate_u_kt = False elif test_device.arch == BackendDevice.Wormhole_B0: # until tenstorrent/budabackend#1120 is resolved - pybuda.config.set_configuration_options( + forge.config.set_configuration_options( enable_auto_fusing=False, enable_enumerate_u_kt=False, amp_level=1, - default_df_override=pybuda.DataFormat.Float16_b, + default_df_override=forge.DataFormat.Float16_b, ) os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{105*1024}" - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "2000000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "2000000" elif "xlarge" == size: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{8*1024}" if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "2000000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "2000000" elif "large" == size: if test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" + os.environ["FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" elif test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" elif "base" == size: if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" # Load data sample sample_text = "The capital of France is [MASK]." @@ -79,7 +79,7 @@ def test_albert_masked_lm_pytorch(size, variant, test_device): model(**input_tokens) verify_module( - pybuda.PyTorchModule("pt_albertbert_masked_lm", model), + forge.PyTorchModule("pt_albertbert_masked_lm", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'])], verify_cfg=VerifyConfig( @@ -98,13 +98,13 @@ def test_albert_masked_lm_pytorch(size, variant, test_device): @pytest.mark.parametrize("size", sizes, ids=sizes) def test_albert_token_classification_pytorch(size, variant, test_device): - # Set PyBUDA configuration parameters - pybuda.config.set_configuration_options( - default_df_override=pybuda.DataFormat.Float16, + # Set Forge configuration parameters + forge.config.set_configuration_options( + default_df_override=forge.DataFormat.Float16, amp_level=2, ) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() # NOTE: These model variants are pre-trined only. They need to be fine-tuned # on a downstream task. Code is for demonstration purposes only. 
@@ -112,26 +112,26 @@ def test_albert_token_classification_pytorch(size, variant, test_device): # albert-base-v2, albert-large-v2, albert-xlarge-v2, albert-xxlarge-v2 model_ckpt = f"albert-{size}-{variant}" if "xxlarge" in model_ckpt: - pybuda.config.set_configuration_options( + forge.config.set_configuration_options( enable_auto_fusing=False, enable_enumerate_u_kt=False, ) os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{105*1024}" - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "2000000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "2000000" elif "xlarge" in model_ckpt: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{8*1024}" if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "2000000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "2000000" elif "large" == size: if test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" + os.environ["FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" elif test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" elif "base" == size: if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" # Load ALBERT tokenizer and model from HuggingFace @@ -152,7 +152,7 @@ def test_albert_token_classification_pytorch(size, variant, test_device): model(**input_tokens) verify_module( - pybuda.PyTorchModule("pt_albertbert_token_classification", model), + forge.PyTorchModule("pt_albertbert_token_classification", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'])], verify_cfg=VerifyConfig( diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_bart.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_bart.py similarity index 87% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_bart.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_bart.py index e0c669d0c..38d596366 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_bart.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_bart.py @@ -9,27 +9,27 @@ from transformers import BartConfig, BartModel, BartTokenizer, BartForSequenceClassification from transformers.models.bart.modeling_bart import shift_tokens_right, BartAttention -import pybuda -from pybuda import ( +import forge +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, CPUDevice, TTDevice, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from loguru import logger -from pybuda.op.eval.common import compare_tensor_to_golden, calculate_pcc +from forge.op.eval.common import compare_tensor_to_golden, calculate_pcc from typing import Optional @@ -79,9 +79,9 @@ def test_pt_bart_classifier(test_device): # Compile & feed data pt_mod = BartWrapper(model.model) mod = PyTorchModule("bart", pt_mod) - tt0 = pybuda.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=mod) + tt0 = forge.TTDevice("tt0", devtype=test_device.devtype, 
arch=test_device.arch, module=mod) tt0.push_to_inputs(inputs) - output_q = pybuda.run_inference() + output_q = forge.run_inference() # Verify output outputs = output_q.get()[0].value().detach().float() diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_bert.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_bert.py similarity index 75% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_bert.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_bert.py index 7cc2bc2d3..6ce56c060 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_bert.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_bert.py @@ -6,13 +6,13 @@ import os -import pybuda +import forge from transformers import BertForMaskedLM, BertTokenizer, BertForTokenClassification, BertForSequenceClassification, BertForQuestionAnswering -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy def generate_model_bert_maskedlm_hf_pytorch(test_device, variant): # Load Bert tokenizer and model from HuggingFace @@ -20,8 +20,8 @@ def generate_model_bert_maskedlm_hf_pytorch(test_device, variant): tokenizer = BertTokenizer.from_pretrained(model_ckpt) model = BertForMaskedLM.from_pretrained(model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load data sample sample_text = "The capital of France is [MASK]." 
@@ -35,7 +35,7 @@ def generate_model_bert_maskedlm_hf_pytorch(test_device, variant): return_tensors="pt", ) - model = pybuda.PyTorchModule("pt_bert_masked_lm", model) + model = forge.PyTorchModule("pt_bert_masked_lm", model) return model, [input_tokens['input_ids']], {} @@ -54,7 +54,7 @@ def test_bert_masked_lm_pytorch(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.9, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -65,10 +65,10 @@ def generate_model_bert_qa_hf_pytorch(test_device, variant): tokenizer = download_model(BertTokenizer.from_pretrained, model_ckpt) model = download_model(BertForQuestionAnswering.from_pretrained, model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" # Load data sample from SQuADv1.1 context = """Super Bowl 50 was an American football game to determine the champion of the National Football League @@ -92,7 +92,7 @@ def generate_model_bert_qa_hf_pytorch(test_device, variant): return_tensors="pt", ) - model = pybuda.PyTorchModule("pt_bert_question_answering", model) + model = forge.PyTorchModule("pt_bert_question_answering", model) return model, [input_tokens['input_ids']], {} @@ -112,7 +112,7 @@ def test_bert_question_answering_pytorch(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.95, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -123,8 +123,8 @@ def generate_model_bert_seqcls_hf_pytorch(test_device, variant): tokenizer = download_model(BertTokenizer.from_pretrained, model_ckpt) model = download_model(BertForSequenceClassification.from_pretrained, model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load data sample review = "the movie was great!" 
@@ -138,7 +138,7 @@ def generate_model_bert_seqcls_hf_pytorch(test_device, variant): return_tensors="pt", ) - model = pybuda.PyTorchModule("pt_bert_sequence_classification", model) + model = forge.PyTorchModule("pt_bert_sequence_classification", model) return model, [input_tokens['input_ids']], {} @@ -149,7 +149,7 @@ def test_bert_sequence_classification_pytorch(test_device): ) if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" verify_module( model, @@ -161,7 +161,7 @@ def test_bert_sequence_classification_pytorch(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, enabled=False, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -171,8 +171,8 @@ def generate_model_bert_tkcls_hf_pytorch(test_device, variant): tokenizer = download_model(BertTokenizer.from_pretrained, model_ckpt) model = download_model(BertForTokenClassification.from_pretrained, model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load data sample sample_text = "HuggingFace is a company based in Paris and New York" @@ -186,7 +186,7 @@ def generate_model_bert_tkcls_hf_pytorch(test_device, variant): return_tensors="pt", ) - model = pybuda.PyTorchModule("pt_bert_token_classification", model) + model = forge.PyTorchModule("pt_bert_token_classification", model) return model, [input_tokens['input_ids']], {} @@ -205,6 +205,6 @@ def test_bert_token_classification_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_codegen.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_codegen.py similarity index 81% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_codegen.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_codegen.py index 5fce40d7d..32e5f12e9 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_codegen.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_codegen.py @@ -9,11 +9,11 @@ from test.utils import download_model from transformers import AutoTokenizer, CodeGenForCausalLM -import pybuda -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind, NebulaGalaxy -from pybuda.verify.backend import verify_module -from pybuda._C.backend_api import BackendDevice, BackendType +import forge +from forge import VerifyConfig +from forge.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge._C.backend_api import BackendDevice, BackendType variants = [ "Salesforce/codegen-350M-mono", @@ -24,11 +24,11 @@ @pytest.mark.parametrize("variant", variants, ids=variants) def test_codegen(test_device, variant): # 
Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False compiler_cfg.default_dram_parameters = False compiler_cfg.enable_enumerate_u_kt = False - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{32*1024}" pcc = 0.98 if test_device.arch == BackendDevice.Grayskull: @@ -75,9 +75,9 @@ def forward(self, input_ids, attention_mask): attn_mask = attn_mask.to(torch.float32) out = framework_model(input_ids, attn_mask) - pybuda_model = pybuda.PyTorchModule("pt_codegen", framework_model) + forge_model = forge.PyTorchModule("pt_codegen", framework_model) verify_module( - pybuda_model, + forge_model, input_shapes=[(input_ids.shape, attn_mask.shape,)], inputs=[(input_ids, attn_mask,)], verify_cfg=VerifyConfig( @@ -85,7 +85,7 @@ def forward(self, input_ids, attention_mask): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], pcc=pcc, ), ) diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_distilbert.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_distilbert.py similarity index 76% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_distilbert.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_distilbert.py index af509ab64..a18b53fa2 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_distilbert.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_distilbert.py @@ -3,14 +3,14 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy import os -import pybuda +import forge from transformers import DistilBertForMaskedLM, DistilBertTokenizer, DistilBertForQuestionAnswering, DistilBertForTokenClassification, DistilBertForSequenceClassification variants = ["distilbert-base-uncased", "distilbert-base-cased", "distilbert-base-multilingual-cased"] @@ -25,8 +25,8 @@ def test_distilbert_masked_lm_pytorch(variant, test_device): tokenizer = download_model(DistilBertTokenizer.from_pretrained, variant) model = download_model(DistilBertForMaskedLM.from_pretrained, variant) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load data sample sample_text = "The capital of France is [MASK]." 
@@ -41,7 +41,7 @@ def test_distilbert_masked_lm_pytorch(variant, test_device): ) verify_module( - pybuda.PyTorchModule("pt_distilbert_masked_lm", model), + forge.PyTorchModule("pt_distilbert_masked_lm", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'],)], verify_cfg=VerifyConfig( @@ -50,7 +50,7 @@ def test_distilbert_masked_lm_pytorch(variant, test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.95, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -60,8 +60,8 @@ def test_distilbert_question_answering_pytorch(test_device): tokenizer = download_model(DistilBertTokenizer.from_pretrained, model_ckpt) model = download_model(DistilBertForQuestionAnswering.from_pretrained, model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load data sample from SQuADv1.1 context = """Super Bowl 50 was an American football game to determine the champion of the National Football League @@ -86,7 +86,7 @@ def test_distilbert_question_answering_pytorch(test_device): ) verify_module( - pybuda.PyTorchModule("pt_distilbert_question_answering", model), + forge.PyTorchModule("pt_distilbert_question_answering", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape)], inputs=[(input_tokens['input_ids'],input_tokens['attention_mask'])], verify_cfg=VerifyConfig( @@ -95,7 +95,7 @@ def test_distilbert_question_answering_pytorch(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.9, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -106,8 +106,8 @@ def test_distilbert_sequence_classification_pytorch(test_device): tokenizer = download_model(DistilBertTokenizer.from_pretrained, model_ckpt) model = download_model(DistilBertForSequenceClassification.from_pretrained, model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load data sample review = "the movie was great!" 
@@ -122,7 +122,7 @@ def test_distilbert_sequence_classification_pytorch(test_device): ) verify_module( - pybuda.PyTorchModule("pt_distilbert_sequence_classification", model), + forge.PyTorchModule("pt_distilbert_sequence_classification", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'],)], verify_cfg=VerifyConfig( @@ -130,7 +130,7 @@ def test_distilbert_sequence_classification_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -140,8 +140,8 @@ def test_distilbert_token_classification_pytorch(test_device): tokenizer = download_model(DistilBertTokenizer.from_pretrained, model_ckpt) model = download_model(DistilBertForTokenClassification.from_pretrained, model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load data sample sample_text = "HuggingFace is a company based in Paris and New York" @@ -158,7 +158,7 @@ def test_distilbert_token_classification_pytorch(test_device): pcc = 0.98 if test_device.devtype == BackendType.Silicon else 0.99 verify_module( - pybuda.PyTorchModule("pt_distilbert_token_classification", model), + forge.PyTorchModule("pt_distilbert_token_classification", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'],)], verify_cfg=VerifyConfig( @@ -167,6 +167,6 @@ def test_distilbert_token_classification_pytorch(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=pcc, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_dpr.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_dpr.py similarity index 75% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_dpr.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_dpr.py index f97d86bf7..f09455132 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_dpr.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_dpr.py @@ -3,14 +3,14 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy import os -import pybuda +import forge from transformers import DPRContextEncoder, DPRContextEncoderTokenizer, DPRReader, DPRReaderTokenizer, 
DPRQuestionEncoder, DPRQuestionEncoderTokenizer variants = ["facebook/dpr-ctx_encoder-single-nq-base", "facebook/dpr-ctx_encoder-multiset-base"] @@ -23,8 +23,8 @@ def test_dpr_context_encoder_pytorch(variant, test_device): tokenizer = download_model(DPRContextEncoderTokenizer.from_pretrained, model_ckpt) model = download_model(DPRContextEncoder.from_pretrained, model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load data sample sample_text = "Hello, is my dog cute?" @@ -39,7 +39,7 @@ def test_dpr_context_encoder_pytorch(variant, test_device): ) verify_module( - pybuda.PyTorchModule("pt_dpr_context_encoder", model), + forge.PyTorchModule("pt_dpr_context_encoder", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape, input_tokens['token_type_ids'].shape)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'], input_tokens['token_type_ids'])], verify_cfg=VerifyConfig( @@ -48,7 +48,7 @@ def test_dpr_context_encoder_pytorch(variant, test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.98, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -61,8 +61,8 @@ def test_dpr_question_encoder_pytorch(variant, test_device): tokenizer = download_model(DPRQuestionEncoderTokenizer.from_pretrained, model_ckpt) model = download_model(DPRQuestionEncoder.from_pretrained, model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load data sample sample_text = "Hello, is my dog cute?" 
@@ -77,7 +77,7 @@ def test_dpr_question_encoder_pytorch(variant, test_device): ) verify_module( - pybuda.PyTorchModule("pt_dpr_question_encoder", model), + forge.PyTorchModule("pt_dpr_question_encoder", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape, input_tokens['token_type_ids'].shape)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'], input_tokens['token_type_ids'])], verify_cfg=VerifyConfig( @@ -85,7 +85,7 @@ def test_dpr_question_encoder_pytorch(variant, test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -98,11 +98,11 @@ def test_dpr_reader_pytorch(variant, test_device): tokenizer = download_model(DPRReaderTokenizer.from_pretrained, model_ckpt) model = download_model(DPRReader.from_pretrained, model_ckpt) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if test_device.arch == BackendDevice.Wormhole_B0: - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" # Data preprocessing input_tokens = tokenizer( @@ -116,7 +116,7 @@ def test_dpr_reader_pytorch(variant, test_device): ) verify_module( - pybuda.PyTorchModule("pt_dpr_reader", model), + forge.PyTorchModule("pt_dpr_reader", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'])], verify_cfg=VerifyConfig( @@ -125,6 +125,6 @@ def test_dpr_reader_pytorch(variant, test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, enabled=False, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_falcon.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_falcon.py similarity index 95% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_falcon.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_falcon.py index 785a021f1..82654f6e0 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_falcon.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_falcon.py @@ -4,7 +4,7 @@ # Falcon-7B Demo Script import pytest -from pybuda import BackendDevice, BackendType +from forge import BackendDevice, BackendType from test.model_demos.models.falcon.model import Falcon def test_falcon_pytorch(test_device): diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_fuyu_8b.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_fuyu_8b.py similarity index 89% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_fuyu_8b.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_fuyu_8b.py index e2211967f..3bff758d7 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_fuyu_8b.py +++ 
b/forge/test/model_demos/high_prio/nlp/pytorch/test_fuyu_8b.py @@ -3,18 +3,18 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig, TTDeviceImage -from pybuda._C.backend_api import BackendType, BackendDevice, DeviceMode -from pybuda.verify.config import TestKind, NebulaGalaxy -from pybuda.pybudaglobal import TILE_DIM -from pybuda.utils import align_up_tile +from forge.verify.backend import verify_module +from forge import VerifyConfig, TTDeviceImage +from forge._C.backend_api import BackendType, BackendDevice, DeviceMode +from forge.verify.config import TestKind, NebulaGalaxy +from forge.forgeglobal import TILE_DIM +from forge.utils import align_up_tile import requests import os import torch.nn as nn -import pybuda +import forge import torch from PIL import Image @@ -144,14 +144,14 @@ def forward(self, inputs_embeds, attention_mask, position_ids, *past_key_values) def test_fuyu8b(test_device): pytest.skip("Already past-cache version is up") - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() #compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b #compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.enable_tvm_cpu_fallback = False compiler_cfg.convert_framework_params_to_tvm = False - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "FastCut" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "FastCut" #compiler_cfg.amp_level = 2 #compiler_cfg.default_dram_parameters = False @@ -167,12 +167,12 @@ def test_fuyu8b(test_device): image_processor = FuyuImageProcessor() processor = FuyuProcessor(image_processor=image_processor, tokenizer=tokenizer) - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model fuyu_model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b", config=config) #fuyu_model = FuyuForCausalLM(config=config) model = FuyuModelWrapper(fuyu_model) model.eval() - tt_model = pybuda.PyTorchModule("pt_fuyu_8b", model) + tt_model = forge.PyTorchModule("pt_fuyu_8b", model) # Prepare inputs text_prompt = "Generate a coco-style caption.\n" @@ -200,19 +200,19 @@ def test_fuyu8b_past_cache(test_device): if test_device.arch == BackendDevice.Grayskull: pytest.skip("Still under development") - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.enable_tvm_cpu_fallback = False compiler_cfg.compile_subgraphs = True compiler_cfg.convert_framework_params_to_tvm = False compiler_cfg.enable_link_past_cache_ios = True compiler_cfg.amp_level = 2 compiler_cfg.default_dram_parameters = True - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "FastCut" - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "FastCut" + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" os.environ["TT_BACKEND_USE_PIPEGEN1"] = "1" os.environ["FUYU8B_FULL_LAYERS"] = "1" # flag to 
run the model wit full-layers, does not affect compile process @@ -260,7 +260,7 @@ def test_fuyu8b_past_cache(test_device): image_processor = FuyuImageProcessor() processor = FuyuProcessor(image_processor=image_processor, tokenizer=tokenizer) - # Create PyBuda module from PyTorch model + # Create Forge module from PyTorch model fuyu_model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b", config=config) # Prepare inputs @@ -312,21 +312,21 @@ def test_fuyu8b_past_cache(test_device): # Instantiate modules if "FUYU8B_FULL_LAYERS" in os.environ and os.environ["FUYU8B_FULL_LAYERS"]: - img_decoder = pybuda.PyTorchModule("pt_fuyu8b_past_cache_img", FuyuModelImgDecoderWrapper(fuyu_model)) # feed inputs_embeds - txt_decoder = pybuda.PyTorchModule("pt_fuyu8b_past_cache_txt", FuyuModelTxtDecoderWrapper(fuyu_model)) # feed inputs_embeds + img_decoder = forge.PyTorchModule("pt_fuyu8b_past_cache_img", FuyuModelImgDecoderWrapper(fuyu_model)) # feed inputs_embeds + txt_decoder = forge.PyTorchModule("pt_fuyu8b_past_cache_txt", FuyuModelTxtDecoderWrapper(fuyu_model)) # feed inputs_embeds else: - img_decoder = pybuda.PyTorchModule(f"pt_fuyu8b_past_cache_img_{num_layers}", FuyuModelImgDecoderWrapper(fuyu_model)) # feed inputs_embeds - txt_decoder = pybuda.PyTorchModule(f"pt_fuyu8b_past_cache_txt_{num_layers}", FuyuModelTxtDecoderWrapper(fuyu_model)) # feed inputs_embeds + img_decoder = forge.PyTorchModule(f"pt_fuyu8b_past_cache_img_{num_layers}", FuyuModelImgDecoderWrapper(fuyu_model)) # feed inputs_embeds + txt_decoder = forge.PyTorchModule(f"pt_fuyu8b_past_cache_txt_{num_layers}", FuyuModelTxtDecoderWrapper(fuyu_model)) # feed inputs_embeds # Place modules - tt0 = pybuda.TTDevice( + tt0 = forge.TTDevice( "tt0", devtype=test_device.devtype, device_mode=test_device.devmode, arch=test_device.arch, module=[img_decoder, txt_decoder]) - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=((img_decoder_inputs), (txt_decoder_inputs)) ) @@ -339,12 +339,12 @@ def test_fuyu8b_past_cache(test_device): if idx == 0: tt0.set_active_subgraph(0) tt0.push_to_inputs([inputs_embeds, img_attention_mask]) - pybuda.run_generate(input_count=1, write_index=0) # past-cache output to be MAX_LENGTH instead of 32 + forge.run_generate(input_count=1, write_index=0) # past-cache output to be MAX_LENGTH instead of 32 ans = output_q.get() tt0.set_active_subgraph(1) else: tt0.push_to_inputs([inputs_embeds, attention_mask, position_ids]) - pybuda.run_generate(input_count=1, write_index=current_token_index // TILE_DIM,) + forge.run_generate(input_count=1, write_index=current_token_index // TILE_DIM,) ans = output_q.get() hidden_states = ans[0].value().detach() diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_gemma_2b.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_gemma_2b.py similarity index 90% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_gemma_2b.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_gemma_2b.py index 1bbcfdf6d..7ff45e23e 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_gemma_2b.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_gemma_2b.py @@ -9,20 +9,20 @@ from transformers import AutoTokenizer, GemmaForCausalLM from transformers import AutoTokenizer, AutoModelForCausalLM -import pybuda -from pybuda import ( +import forge +from forge import ( VerifyConfig, PyTorchModule, CompileDepth, ) from test.utils import download_model -from pybuda.pybudaglobal import TILE_DIM -from pybuda.verify.config 
import TestKind -from pybuda._C import DataFormat, MathFidelity -from pybuda._C.backend_api import BackendDevice -from pybuda._C.backend_api import BackendType -from pybuda.verify.backend import verify_module -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +from forge.forgeglobal import TILE_DIM +from forge.verify.config import TestKind +from forge._C import DataFormat, MathFidelity +from forge._C.backend_api import BackendDevice +from forge._C.backend_api import BackendType +from forge.verify.backend import verify_module +from forge.transformers.pipeline import pipeline as forge_pipeline def cpu_sanity_run_0(): @@ -62,7 +62,7 @@ def test_gemma_2b_rotary_embedding(test_device, variant): torch.manual_seed(42) # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() # Load model class Wrapper(torch.nn.Module): @@ -112,7 +112,7 @@ def test_gemma_2b_rms_norm(test_device, variant): torch.manual_seed(42) # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() # Load model class Wrapper(torch.nn.Module): @@ -161,7 +161,7 @@ def test_gemma_2b_attention(test_device, variant): torch.manual_seed(42) # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() # Load model class Wrapper(torch.nn.Module): @@ -212,7 +212,7 @@ def test_gemma_2b_mlp(test_device, variant): torch.manual_seed(42) # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() # Load model class Wrapper(torch.nn.Module): @@ -261,7 +261,7 @@ def test_gemma_2b_single_decoder(test_device, variant): torch.manual_seed(42) # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() # Load model class Wrapper(torch.nn.Module): @@ -312,9 +312,9 @@ def test_gemma_2b(test_device, variant): torch.manual_seed(42) # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" config = download_model(GemmaConfig.from_pretrained, variant) config_dict = config.to_dict() @@ -363,12 +363,12 @@ def test_gemma_2b_1x1(test_device, variant): torch.manual_seed(42) # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b config = download_model(GemmaConfig.from_pretrained, variant) config_dict = config.to_dict() @@ -414,15 +414,15 @@ def test_gemma_2b_gen(test_device, variant): torch.manual_seed(42) # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" if test_device.arch != BackendDevice.Grayskull: - 
compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b # Configure all matmul ops to operate on HiFi4 with Bfp8_b inputs/params and Float16 accumulation - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='matmul', math_fidelity=MathFidelity.HiFi4, input_df={0:[DataFormat.Bfp8_b, False], 1:[DataFormat.Bfp8_b, False]}, @@ -430,7 +430,7 @@ def test_gemma_2b_gen(test_device, variant): ) # Configure all other ops to run on HiFi4 with Float16 accumulation - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='^((?!matmul).)*$', math_fidelity=MathFidelity.HiFi4, accumulate_df=DataFormat.Float16_b @@ -466,11 +466,11 @@ def test_gemma_2b_gen(test_device, variant): print(f"{pt_ans}") # Initialize and Run text2text generator on Tenstorrent device - text2text_generator = pybuda_pipeline( + text2text_generator = forge_pipeline( "text2text-generation", model=pytorch_model, tokenizer=tokenizer, - pybuda_max_length=32, + forge_max_length=32, ) generated_tt_text = text2text_generator( prompt, @@ -496,19 +496,19 @@ def test_gemma_2b_1x1_gen(test_device, variant): # Random seed for reproducibility torch.manual_seed(42) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() if test_device.devtype == BackendType.Silicon and "CI_PROJECT_DIR" in os.environ: pytest.skip("Failing on CI with Read 0xffffffff from ARC scratch[6]: you should reset the board") # Configurations compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + os.environ["FORGE_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" # Configure all matmul ops to operate on HiFi4 with Bfp8_b inputs/params and Float16 accumulation - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='matmul', math_fidelity=MathFidelity.HiFi4, input_df={0:[DataFormat.Bfp8_b, False], 1:[DataFormat.Bfp8_b, False]}, @@ -516,7 +516,7 @@ def test_gemma_2b_1x1_gen(test_device, variant): ) # Configure all other ops to run on HiFi4 with Float16 accumulation - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='^((?!matmul).)*$', math_fidelity=MathFidelity.HiFi4, accumulate_df=DataFormat.Float16_b @@ -549,11 +549,11 @@ def test_gemma_2b_1x1_gen(test_device, variant): print(f"{pt_ans}") # Initialize and Run text2text generator on Tenstorrent device - text2text_generator = pybuda_pipeline( + text2text_generator = forge_pipeline( "text2text-generation", model=pytorch_model, tokenizer=tokenizer, - pybuda_max_length=32, + forge_max_length=32, ) generated_tt_text = text2text_generator( prompt, diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_gpt2.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_gpt2.py similarity index 77% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_gpt2.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_gpt2.py index 2cbcecf6f..083bcbe6e 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_gpt2.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_gpt2.py @@ -4,15 +4,15 @@ import pytest from test.utils import download_model import torch -import pybuda -from 
pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind import os -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +from forge.transformers.pipeline import pipeline as forge_pipeline from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config @@ -25,8 +25,8 @@ def test_gpt2_text_gen(test_device): config = GPT2Config(**config_dict) model = download_model(GPT2LMHeadModel.from_pretrained, "gpt2", config=config) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Wrapper to get around past key values class Wrapper(torch.nn.Module): @@ -40,12 +40,12 @@ def forward(self, input_ids, attention_mask): decoder_input_ids = torch.zeros(1, 64, dtype=torch.int64) attn_mask = torch.ones(1, 256) - if "PYBUDA_NEB_GALAXY_CI" in os.environ: + if "FORGE_NEB_GALAXY_CI" in os.environ: chip_ids = [0, 11, 10, 9, 8, 7, 19, 20, 21, 22, 23, 24, 6, 5, 14, 13, 12, 16, 15, 3, 4, 26, 25, 32, 31, 30, 29, 28, 27, 1, 2, 18, 17] else: chip_ids = [0] - tt_model = pybuda.PyTorchModule("gpt2_generation", Wrapper(model)) + tt_model = forge.PyTorchModule("gpt2_generation", Wrapper(model)) verify_module( tt_model, input_shapes=[(input_ids.shape, attn_mask.shape,)], @@ -75,8 +75,8 @@ def forward(self, input_ids, attention_mask, *kv): def test_gpt2_past_cache(test_device): pytest.skip() #Still working on this. 
os.environ["GOLDEN_WORMHOLE_B0"] = "1" - os.environ["PYBUDA_DEVMODE"] = "1" - compiler_cfg = pybuda.config._get_global_compiler_config() + os.environ["FORGE_DEVMODE"] = "1" + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.compile_subgraphs = True compiler_cfg.enable_tvm_cpu_fallback = False compiler_cfg.enable_auto_fusing = False @@ -97,20 +97,20 @@ def test_gpt2_past_cache(test_device): inputs = tokenizer(prefix_text, max_length=run_length, pad_to_max_length=True, truncation=True, return_tensors="pt") inputs = [inputs["input_ids"].int(), inputs["attention_mask"].float()] - tt0 = pybuda.TTDevice("tt0") - tt0.place_module(module=pybuda.PyTorchModule("gpt2", Wrapper(model))) + tt0 = forge.TTDevice("tt0") + tt0.place_module(module=forge.PyTorchModule("gpt2", Wrapper(model))) tt0.push_to_inputs(inputs) - output_q = pybuda.initialize_pipeline(training=False,) - pybuda.run_forward() + output_q = forge.initialize_pipeline(training=False,) + forge.run_forward() res = output_q.get() tt0.remove_modules() - tt0.place_module(module=pybuda.PyTorchModule("gpt2", Wrapper(model))) + tt0.place_module(module=forge.PyTorchModule("gpt2", Wrapper(model))) inputs.extend([res[1].value(), res[2].value(), res[3].value(), res[4].value()]) inputs[1] = torch.cat((inputs[1], (torch.zeros((1,32)))), 1) inputs[0] = inputs[0][:,:32] tt0.push_to_inputs(inputs) - output_q = pybuda.initialize_pipeline(training=False,) - pybuda.run_forward() + output_q = forge.initialize_pipeline(training=False,) + forge.run_forward() breakpoint() diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_gptneo.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_gptneo.py similarity index 84% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_gptneo.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_gptneo.py index 23a29af84..08a7e8266 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_gptneo.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_gptneo.py @@ -4,16 +4,16 @@ import pytest from test.utils import download_model import torch -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind import os import torch -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +from forge.transformers.pipeline import pipeline as forge_pipeline from transformers import ( AutoTokenizer, GPTNeoForCausalLM, @@ -35,10 +35,10 @@ def test_gptneo_causal_lm(variant, test_device): torch.manual_seed(42) # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" compiler_cfg.balancer_policy = "Ribbon" if variant == "EleutherAI/gpt-neo-2.7B" and test_device.arch == BackendDevice.Wormhole_B0: @@ -75,12 +75,12 @@ def forward(self, input_ids, attention_mask): input_ids = inputs["input_ids"] attn_mask = inputs["attention_mask"] - if "PYBUDA_NEB_GALAXY_CI" in os.environ: + if "FORGE_NEB_GALAXY_CI" in os.environ: chip_ids = [0, 11, 10, 9, 8, 7, 19, 20, 21, 22, 
23, 24, 6, 5, 14, 13, 12, 16, 15, 3, 4, 26, 25, 32, 31, 30, 29, 28, 27, 1, 2, 18, 17] else: chip_ids = [0] - tt_model = pybuda.PyTorchModule("gptneo_generation", Wrapper(model)) + tt_model = forge.PyTorchModule("gptneo_generation", Wrapper(model)) verify_module( tt_model, input_shapes=[ @@ -120,11 +120,11 @@ def test_gptneo_sequence_classification(variant, test_device): # EleutherAI/gpt-neo-2.7B # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if variant in ["EleutherAI/gpt-neo-1.3B", "EleutherAI/gpt-neo-2.7B"]: - os.environ["PYBUDA_LEGACY_KERNEL_BROADCAST"] = "1" + os.environ["FORGE_LEGACY_KERNEL_BROADCAST"] = "1" tokenizer = download_model(AutoTokenizer.from_pretrained, variant) tokenizer.pad_token = tokenizer.eos_token @@ -153,7 +153,7 @@ def forward(self, input_ids, attention_mask): return self.model(input_ids, None, attention_mask) verify_module( - pybuda.PyTorchModule("pt_gptneo_seq_classification", Wrapper(model)), + forge.PyTorchModule("pt_gptneo_seq_classification", Wrapper(model)), input_shapes=[ ( input_tokens["input_ids"].shape, diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_mistral.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_mistral.py similarity index 89% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_mistral.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_mistral.py index a7460d812..8a3a71c8f 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_mistral.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_mistral.py @@ -7,15 +7,15 @@ import torch from transformers import AutoModelForCausalLM, AutoTokenizer, MistralConfig -import pybuda -from pybuda import VerifyConfig -from pybuda import PyTorchModule -from pybuda._C.backend_api import BackendDevice, DeviceMode -from pybuda._C import DataFormat, MathFidelity -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +import forge +from forge import VerifyConfig +from forge import PyTorchModule +from forge._C.backend_api import BackendDevice, DeviceMode +from forge._C import DataFormat, MathFidelity +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from typing import Optional -from pybuda.transformers.pipeline import NLPPipelineWrapper +from forge.transformers.pipeline import NLPPipelineWrapper variants = ['mistralai/Mistral-7B-v0.1'] @@ -39,7 +39,7 @@ def test_mistral_decoder_layer(variant, test_device): sample_inputs = torch.randn(batch_size, seqlen, hidden_dim) verify_module( - pybuda.PyTorchModule( + forge.PyTorchModule( f"mistral_decoder_layer_seqlen_{seqlen}_bs_{batch_size}", module), input_shapes=[(sample_inputs.shape,)], inputs=[(sample_inputs,)], @@ -64,10 +64,10 @@ def test_mistral(variant, test_device): configuration.use_cache = False configuration.return_dict = False - pybuda.set_configuration_options(default_df_override=pybuda.DataFormat.Float16_b, balancer_policy='Ribbon') + forge.set_configuration_options(default_df_override=forge.DataFormat.Float16_b, balancer_policy='Ribbon') # configuration for all ops that are not matmul - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='^((?!matmul).)*$', math_fidelity=MathFidelity.HiFi4, accumulate_df=DataFormat.Float16_b @@ -75,7 +75,7 @@ def test_mistral(variant, 
test_device): # configuration for all matmul ops # when inputs to matmuls are Bfp8_b, the whole model can fit to single chip - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='matmul', math_fidelity=MathFidelity.HiFi4, input_df={0:[DataFormat.Bfp8_b, False], 1:[DataFormat.Bfp8_b, False]}, @@ -96,7 +96,7 @@ def test_mistral(variant, test_device): sample_inputs = tokenizer(prompt, return_tensors = 'pt')['input_ids'] verify_module( - pybuda.PyTorchModule( + forge.PyTorchModule( f"full_model_seqlen_{sample_inputs.shape[-1]}_bs_{batch_size}_layers_{configuration.num_hidden_layers}", module), input_shapes=[(sample_inputs.shape,)], inputs=[(sample_inputs, )], @@ -120,10 +120,10 @@ def test_mistral_decode(variant, test_device): configuration.use_cache = False configuration.return_dict = False - pybuda.set_configuration_options(default_df_override=pybuda.DataFormat.Float16_b, balancer_policy='Ribbon') + forge.set_configuration_options(default_df_override=forge.DataFormat.Float16_b, balancer_policy='Ribbon') # configuration for all ops that are not matmul - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='^((?!matmul).)*$', math_fidelity=MathFidelity.HiFi4, accumulate_df=DataFormat.Float16_b @@ -131,7 +131,7 @@ def test_mistral_decode(variant, test_device): # configuration for all matmul ops # when inputs to matmuls are Bfp8_b, the whole model can fit to single chip - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='matmul', math_fidelity=MathFidelity.HiFi4, input_df={0:[DataFormat.Bfp8_b, False], 1:[DataFormat.Bfp8_b, False]}, @@ -198,10 +198,10 @@ def test_mistral_kv_cache(variant, test_device): configuration.return_dict = False max_new_tokens = 10 - pybuda.set_configuration_options(default_df_override=pybuda.DataFormat.Float16_b, balancer_policy='Ribbon') + forge.set_configuration_options(default_df_override=forge.DataFormat.Float16_b, balancer_policy='Ribbon') # configuration for all ops that are not matmul - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='^((?!matmul).)*$', math_fidelity=MathFidelity.HiFi4, accumulate_df=DataFormat.Float16_b @@ -209,7 +209,7 @@ def test_mistral_kv_cache(variant, test_device): # configuration for all matmul ops # when inputs to matmuls are Bfp8_b, the whole model can fit to single chip - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type='matmul', math_fidelity=MathFidelity.HiFi4, input_df={0:[DataFormat.Bfp8_b, False], 1:[DataFormat.Bfp8_b, False]}, @@ -238,7 +238,7 @@ def test_mistral_kv_cache(variant, test_device): # perform prefill with torch model on cpu logits, past_key_values = model(*inputs) - tt1 = pybuda.TTDevice("tt1", devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("mistral_model_base", BaseModelWrapper(model))) + tt1 = forge.TTDevice("tt1", devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("mistral_model_base", BaseModelWrapper(model))) next_token = sample(logits) output_ids = torch.cat([output_ids, next_token], axis=1) @@ -250,7 +250,7 @@ def test_mistral_kv_cache(variant, test_device): inputs += (past_key_values[i][0], past_key_values[i][1]) # compile model before measuring perf - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=inputs, _sequential=True, _device_mode = DeviceMode.CompileAndRun) + output_q = forge.initialize_pipeline(training=False, sample_inputs=inputs, 
_sequential=True, _device_mode = DeviceMode.CompileAndRun) start_time = time.time() for i in range(max_new_tokens): @@ -261,7 +261,7 @@ def test_mistral_kv_cache(variant, test_device): inputs = (next_token, mask, position_ids, *past_key_values) tt1.push_to_inputs(inputs) - pybuda.run_forward(input_count=1, _sequential=True) + forge.run_forward(input_count=1, _sequential=True) outputs = output_q.get() logits = outputs[0].value().to(dtype=torch.float) diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_opt.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_opt.py similarity index 80% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_opt.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_opt.py index c377e0007..1a9427356 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_opt.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_opt.py @@ -3,14 +3,14 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, DataFormat, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, DataFormat, NebulaGalaxy import os -import pybuda -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +import forge +from forge.transformers.pipeline import pipeline as forge_pipeline from transformers import AutoTokenizer, OPTForCausalLM, OPTConfig, OPTForQuestionAnswering, OPTForSequenceClassification variants = ["facebook/opt-125m", "facebook/opt-350m", "facebook/opt-1.3b"] @@ -19,13 +19,13 @@ def test_opt_causal_lm(variant, test_device): # Load tokenizer and model from HuggingFace # Variants: "facebook/opt-125m", "facebook/opt-350m", "facebook/opt-1.3b" - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.default_df_override = DataFormat.Float16_b if variant == "facebook/opt-1.3b": compiler_cfg.amp_level = 2 # Disable expanding output buffer of fork nodes - causes out of memory issue in blobgen. - os.environ["PYBUDA_FORK_JOIN_EXPAND_FORK_OUTPUT_BUF"] = "0" + os.environ["FORGE_FORK_JOIN_EXPAND_FORK_OUTPUT_BUF"] = "0" if variant == "facebook/opt-350m": os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "65536" @@ -49,7 +49,7 @@ def test_opt_causal_lm(variant, test_device): ) verify_module( - pybuda.PyTorchModule("pt_opt_causal_lm", model), + forge.PyTorchModule("pt_opt_causal_lm", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'],)], verify_cfg=VerifyConfig( @@ -58,7 +58,7 @@ def test_opt_causal_lm(variant, test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.7, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -69,7 +69,7 @@ def test_opt_qa(variant, test_device): # NOTE: These model variants are pre-trined only. They need to be fine-tuned # on a downstream task. Code is for demonstration purposes only. 
- compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.default_df_override = DataFormat.Float16_b if variant == "facebook/opt-1.3b": compiler_cfg.default_df_override = DataFormat.Float16 @@ -93,7 +93,7 @@ def test_opt_qa(variant, test_device): ) verify_module( - pybuda.PyTorchModule("pt_opt_question_answering", model), + forge.PyTorchModule("pt_opt_question_answering", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'],)], verify_cfg=VerifyConfig( @@ -102,14 +102,14 @@ def test_opt_qa(variant, test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.7, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @pytest.mark.parametrize("variant", variants, ids=variants) def test_opt_sequence_classification(variant, test_device): - # Set PyBUDA configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.cpu_fallback_ops.add("adv_index") compiler_cfg.default_df_override = DataFormat.Float16_b if variant == "facebook/opt-1.3b" or variant == "facebook/opt-350m": @@ -138,7 +138,7 @@ def test_opt_sequence_classification(variant, test_device): ) verify_module( - pybuda.PyTorchModule("pt_opt_sequence_classification", model), + forge.PyTorchModule("pt_opt_sequence_classification", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'],)], verify_cfg=VerifyConfig( @@ -147,6 +147,6 @@ def test_opt_sequence_classification(variant, test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.93, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_roberta.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_roberta.py similarity index 70% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_roberta.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_roberta.py index 73aa2080f..9a972e792 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_roberta.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_roberta.py @@ -3,15 +3,15 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy import csv import os import urllib.request -import pybuda +import forge import torch from transformers import AutoModelForMaskedLM, AutoTokenizer, AutoModelForSequenceClassification @@ -21,8 +21,8 @@ def 
test_roberta_masked_lm(test_device): tokenizer = download_model(AutoTokenizer.from_pretrained, "xlm-roberta-base") model = download_model(AutoModelForMaskedLM.from_pretrained, "xlm-roberta-base") - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Input processing text = "Hello I'm a model." @@ -37,7 +37,7 @@ def test_roberta_masked_lm(test_device): attention_mask[input_tokens != 1] = 1 verify_module( - pybuda.PyTorchModule("pt_roberta", model), + forge.PyTorchModule("pt_roberta", model), input_shapes=[(input_tokens.shape, attention_mask.shape,)], inputs=[(input_tokens, attention_mask,)], verify_cfg=VerifyConfig( @@ -46,7 +46,7 @@ def test_roberta_masked_lm(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, pcc=0.95, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -59,8 +59,8 @@ def test_roberta_sentiment_pytorch(test_device): "cardiffnlp/twitter-roberta-base-sentiment" ) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Example from multi-nli validation set text = """Great road trip views! 
@ Shartlesville, Pennsylvania""" @@ -75,7 +75,7 @@ def test_roberta_sentiment_pytorch(test_device): ) verify_module( - pybuda.PyTorchModule("pt_roberta", model), + forge.PyTorchModule("pt_roberta", model), input_shapes=[(input_tokens.shape,)], inputs=[(input_tokens,)], verify_cfg=VerifyConfig( @@ -83,6 +83,6 @@ def test_roberta_sentiment_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_squeezebert.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_squeezebert.py similarity index 66% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_squeezebert.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_squeezebert.py index 776fa6cef..a63a11e01 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_squeezebert.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_squeezebert.py @@ -3,14 +3,14 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy import os -import pybuda +import forge from transformers import AutoModelForSequenceClassification, AutoTokenizer @@ -21,8 +21,8 @@ def test_squeezebert_sequence_classification_pytorch(test_device): "squeezebert/squeezebert-mnli" ) - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Example from multi-nli validation set text = """Hello, my dog is cute""" @@ -37,7 +37,7 @@ def test_squeezebert_sequence_classification_pytorch(test_device): ) verify_module( - pybuda.PyTorchModule("pt_bart", model), + forge.PyTorchModule("pt_bart", model), input_shapes=[(input_tokens.shape,)], inputs=[(input_tokens,)], verify_cfg=VerifyConfig( @@ -45,6 +45,6 @@ def test_squeezebert_sequence_classification_pytorch(test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_t5.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_t5.py similarity index 83% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_t5.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_t5.py index f9eadb41a..f2aea25e2 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_t5.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_t5.py @@ -3,33 +3,33 @@ # 
SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, NebulaGalaxy -from pybuda.pybudaglobal import TILE_DIM -from pybuda import CompileDepth +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, NebulaGalaxy +from forge.forgeglobal import TILE_DIM +from forge import CompileDepth import queue import os -import pybuda +import forge import torch -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +from forge.transformers.pipeline import pipeline as forge_pipeline from transformers import T5ForConditionalGeneration, T5Tokenizer, T5Config @pytest.mark.skip(reason="Not supported") def test_t5_loop_tiny_tile(test_device): import os - os.environ["PYBUDA_ENABLE_TINY_TILE"] = "1" - # Add PyBUDA configurations - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_ENABLE_TINY_TILE"] = "1" + # Add Forge configurations + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" # os.environ["TT_BACKEND_PROFILER"] = "1" - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b compiler_cfg.default_dram_parameters = False compiler_cfg.input_queues_on_host = True compiler_cfg.enable_auto_fusing = False @@ -53,19 +53,19 @@ def __init__(self, model): def forward(self, decoder_input_ids, encoder_outputs): return self.model(None, None, decoder_input_ids, None, None, None, None, (encoder_outputs,)) - tt_model = pybuda.PyTorchModule("t5_generation_loop", Wrapper(model)) + tt_model = forge.PyTorchModule("t5_generation_loop", Wrapper(model)) decoder_input_ids = torch.randint(0, model.config.vocab_size, (1, 1), dtype=torch.int32) encoder_outputs = torch.randn(1, 1, 512) - tt0 = pybuda.TTDevice("tt0") + tt0 = forge.TTDevice("tt0") tt0.place_module(tt_model) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=(decoder_input_ids, encoder_outputs,)) + output_q = forge.initialize_pipeline(training=False, sample_inputs=(decoder_input_ids, encoder_outputs,)) import time start_time = time.time() for i in range(100): tt0.push_to_inputs((decoder_input_ids, encoder_outputs)) - pybuda.run_forward() + forge.run_forward() output = output_q.get(timeout=0.5) print("TIME: ", time.time() - start_time) @@ -79,16 +79,16 @@ def test_t5_generation(variant, test_device): pytest.skip("Grayskull test failing with TM ERROR (producer = matmul_49, consumer = matmul_53): input using kernel_broadcast but post-TM input canonical form is not periodic") # import os - # os.environ["PYBUDA_ENABLE_TINY_TILE"] = "1" - # Add PyBUDA configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + # os.environ["FORGE_ENABLE_TINY_TILE"] = "1" + # Add Forge configurations + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.enable_auto_fusing = False # tenstorrent/pybuda#844 + 
compiler_cfg.enable_auto_fusing = False # tenstorrent/forge#844 compiler_cfg.amp_level = 1 compiler_cfg.enable_enumerate_u_kt = False - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if "large" in variant: - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" # Load tokenizer and model from HuggingFace # Variants: t5-small, t5-base, t5-large @@ -109,7 +109,7 @@ def __init__(self, model): def forward(self, decoder_input_ids, encoder_outputs): return self.model(None, None, decoder_input_ids, None, None, None, None, (encoder_outputs,)) - tt_model = pybuda.PyTorchModule("t5_generation", Wrapper(model)) + tt_model = forge.PyTorchModule("t5_generation", Wrapper(model)) decoder_input_ids = torch.randint(0, model.config.vocab_size, (1, 1), dtype=torch.int32) if "t5-small" in variant: @@ -131,7 +131,7 @@ def forward(self, decoder_input_ids, encoder_outputs): verify_post_autograd_passes=False, verify_post_placer=False, enabled=False, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ) ) @@ -175,21 +175,21 @@ def forward(self, decoder_input_ids, decoder_attention_mask, encoder_last_hidden def test_t5_past_cache_enc_dec(variant, test_device): import os - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "120000" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "120000" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" if "flan" in variant: - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "35000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "35000" else: - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "26000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "26000" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" os.environ["TT_BACKEND_EPOCH_BIN_NUM_SLOTS"] = "64" - os.environ["PYBUDA_ROTATE_PAST_CACHE_PARAMS"] = "1" - compiler_cfg = pybuda.config._get_global_compiler_config() + os.environ["FORGE_ROTATE_PAST_CACHE_PARAMS"] = "1" + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b compiler_cfg.default_dram_parameters = False compiler_cfg.input_queues_on_host = True compiler_cfg.enable_auto_fusing = False @@ -199,14 +199,14 @@ def test_t5_past_cache_enc_dec(variant, test_device): compiler_cfg.enable_link_past_cache_ios = True if test_device.arch == BackendDevice.Grayskull: - os.environ["PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" + os.environ["FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" compiler_cfg.balancer_op_override("matmul_5865", "t_stream_shape", (1, 1)) if test_device.arch == BackendDevice.Wormhole_B0: if variant == "google/flan-t5-large": - os.environ["PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" + os.environ["FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" - # pybuda.set_configuration_options(performance_trace=pybuda.PerfTraceLevel.VERBOSE) + # forge.set_configuration_options(performance_trace=forge.PerfTraceLevel.VERBOSE) model_name = variant config = T5Config.from_pretrained(model_name) config_dict = config.to_dict() @@ -224,10 +224,10 @@ 
def test_t5_past_cache_enc_dec(variant, test_device): if "n_layers" in locals(): num_blocks = n_layers for i in range(num_blocks): - pybuda.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [1, 1]) - pybuda.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [1, 1]) - pybuda.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [15, 1]) - pybuda.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [15, 1]) + forge.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [1, 1]) + forge.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [1, 1]) + forge.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [15, 1]) + forge.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [15, 1]) input_length = 64 input_text = "translate English to German: The house is wonderful. We have really enjoyed living here for the past eight years. The only problem that I have with it is that it is too small and the parks are not very close." @@ -253,16 +253,16 @@ def test_t5_past_cache_enc_dec(variant, test_device): decoder_no_ca_inputs += [torch.zeros(enc_past_cache_self_shape), torch.zeros(enc_past_cache_self_shape), torch.zeros(enc_past_cache_cross_shape), torch.zeros(enc_past_cache_cross_shape)] - encoder_module = pybuda.PyTorchModule("T5_encoder", T5_encoder(model)) - decoder_module_cross_attention = pybuda.PyTorchModule("T5_decoder_with_ca", T5_decoder(model)) - decoder_module_no_cross_attention = pybuda.PyTorchModule("T5_decoder_no_ca", T5_decoder(model)) - tt0 = pybuda.TTDevice( + encoder_module = forge.PyTorchModule("T5_encoder", T5_encoder(model)) + decoder_module_cross_attention = forge.PyTorchModule("T5_decoder_with_ca", T5_decoder(model)) + decoder_module_no_cross_attention = forge.PyTorchModule("T5_decoder_no_ca", T5_decoder(model)) + tt0 = forge.TTDevice( "tt0", devtype=test_device.devtype, arch=test_device.arch, module=[encoder_module, decoder_module_cross_attention, decoder_module_no_cross_attention]) - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=( (input_ids, encoder_attention_mask), @@ -275,7 +275,7 @@ def test_t5_past_cache_enc_dec(variant, test_device): start = time.time() tt0.set_active_subgraph(0) tt0.push_to_inputs((input_ids, encoder_attention_mask)) - pybuda.run_forward() + forge.run_forward() ans = output_q.get() encoder_last_hidden_state = ans[0].value().detach() first_current_index = max_length - TILE_DIM @@ -291,13 +291,13 @@ def test_t5_past_cache_enc_dec(variant, test_device): tt0.set_active_subgraph(1) generate_inputs = (decoder_input_ids, decoder_attention_mask, encoder_last_hidden_state, encoder_attention_mask) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) ans = output_q.get() else: tt0.set_active_subgraph(2) generate_inputs = (decoder_input_ids, decoder_attention_mask, encoder_attention_mask) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) ans = output_q.get() lm_head_out = ans[0].value().detach() @@ -309,7 +309,7 @@ def test_t5_past_cache_enc_dec(variant, test_device): past_cache_pages = current_token_index // 
TILE_DIM # after one page of past cache, we have to rotate. tt0.set_active_subgraph(3) - pybuda.run_generate(input_count=0, write_index=0) + forge.run_generate(input_count=0, write_index=0) pages_current = 1 decoder_attention_mask[0, -(past_cache_pages + pages_current) * TILE_DIM:] = 1 @@ -326,24 +326,24 @@ def test_t5_past_cache_enc_dec(variant, test_device): variants = ["t5-small", "t5-base", "t5-large", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large"] @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.skip(reason="Redundant") -def test_t5_past_cache_pybuda_pipeline(variant, test_device): +def test_t5_past_cache_forge_pipeline(variant, test_device): import os - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "169536" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "30000" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "169536" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "30000" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b compiler_cfg.default_dram_parameters = False compiler_cfg.enable_amp_light() compiler_cfg.input_queues_on_host = True compiler_cfg.enable_auto_fusing = False if "large" in variant: - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "69536" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "69536" model_name = variant @@ -439,8 +439,8 @@ def test_t5_past_cache_pybuda_pipeline(variant, test_device): inputs += (torch.zeros(enc_past_cache_shape), torch.zeros(enc_past_cache_shape), torch.unsqueeze(blocks.unshape(torch.nn.functional.pad(model_out[1][i][2], pad_shape)), 0), torch.unsqueeze(blocks.unshape(torch.nn.functional.pad(model_out[1][i][3], pad_shape)), 0)) - tt0 = pybuda.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=pybuda.PyTorchModule("t5", blocks)) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=inputs) + tt0 = forge.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=forge.PyTorchModule("t5", blocks)) + output_q = forge.initialize_pipeline(training=False, sample_inputs=inputs) abs_index = 480 current_token_index = 0 @@ -459,7 +459,7 @@ def wrap_generate(inputs): decoder_attention_mask[0, abs_index + (current_token_index % TILE_DIM)] = 1 generate_inputs = (encoder_last_hidden_state, decoder_input_ids, decoder_attention_mask, encoder_attention_mask) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=current_token_index // TILE_DIM) + forge.run_generate(input_count=1, write_index=current_token_index // TILE_DIM) ans = output_q.get() lm_head_out = ans[0].value().detach() stops.append(time.time()) @@ -472,7 +472,7 @@ def wrap_generate(inputs): decoder_input_ids[0, :] = tokenizer.pad_token_id return lm_head_out - text_generator = pybuda_pipeline("text2text-generation", model=model, tokenizer=tokenizer, forward_fn=wrap_generate) + text_generator = forge_pipeline("text2text-generation", model=model, tokenizer=tokenizer, forward_fn=wrap_generate) answer = text_generator( input_text, @@ -495,18 +495,18 @@ def wrap_generate(inputs): variants = 
["t5-small", "t5-base", "t5-large", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large"] @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.skip(reason="Redundant") -def test_t5_pybuda_pipeline(variant, test_device): +def test_t5_forge_pipeline(variant, test_device): # Too slow for post-commit ci import os - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "30000" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "30000" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b compiler_cfg.default_dram_parameters = False compiler_cfg.input_queues_on_host = True compiler_cfg.enable_auto_fusing = False @@ -530,7 +530,7 @@ def test_t5_pybuda_pipeline(variant, test_device): input_text = "translate English to German: The house is wonderful." - text_generator = pybuda_pipeline("text2text-generation", model=model, tokenizer=tokenizer, pybuda_max_length=32) + text_generator = forge_pipeline("text2text-generation", model=model, tokenizer=tokenizer, forge_max_length=32) answer = text_generator( input_text, @@ -547,17 +547,17 @@ def test_t5_small_tiny_tile(test_device): pytest.skip("Grayskull test failing with TM ERROR (producer = matmul_49, consumer = matmul_53): input using kernel_broadcast but post-TM input canonical form is not periodic") import os - os.environ["PYBUDA_ENABLE_TINY_TILE"] = "1" - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" - # Add PyBUDA configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + os.environ["FORGE_ENABLE_TINY_TILE"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" + # Add Forge configurations + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.enable_auto_fusing = False # tenstorrent/pybuda#844 + compiler_cfg.enable_auto_fusing = False # tenstorrent/forge#844 compiler_cfg.amp_level = 1 compiler_cfg.enable_enumerate_u_kt = False - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - # tenstorrent/pybuda#1353 + # tenstorrent/forge#1353 if test_device.devtype == BackendType.Golden: compiler_cfg.compile_depth = CompileDepth.BACKEND_GOLDEN_VERIFY @@ -580,7 +580,7 @@ def __init__(self, model): def forward(self, decoder_input_ids, encoder_outputs): return self.model(None, None, decoder_input_ids, None, None, None, None, (encoder_outputs,)) - tt_model = pybuda.PyTorchModule("t5_small_tiny_tile", Wrapper(model)) + tt_model = forge.PyTorchModule("t5_small_tiny_tile", Wrapper(model)) decoder_input_ids = torch.randint(0, model.config.vocab_size, (1, 1), dtype=torch.int32) encoder_outputs = torch.randn(1, 1, 512) diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_whisper_0.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_whisper_0.py similarity index 81% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_whisper_0.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_whisper_0.py index 31192c5c4..979b0b52b 100644 --- 
a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_whisper_0.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_whisper_0.py @@ -20,15 +20,15 @@ ) from datasets import load_dataset from typing import Optional -from pybuda.pybudaglobal import TILE_DIM -import pybuda +from forge.forgeglobal import TILE_DIM +import forge from test.utils import download_model -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind, NebulaGalaxy -from pybuda import PyTorchModule, VerifyConfig -from pybuda.config import _get_global_compiler_config -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +from forge.verify import verify_module +from forge.verify.config import TestKind, NebulaGalaxy +from forge import PyTorchModule, VerifyConfig +from forge.config import _get_global_compiler_config +from forge._C.backend_api import BackendType, BackendDevice +from forge.transformers.pipeline import pipeline as forge_pipeline from test.model_demos.models.whisper import Whisper_encoder, Whisper_decoder, generate_model_whisper_decoder_past_cache import time @@ -45,8 +45,8 @@ def generate_model_whisper_congen_hf_pytorch(test_device, variant): # Configurations compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False # Run full model on silicon - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" if "medium" in variant or "large" in variant: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "65536" @@ -90,7 +90,7 @@ def forward(self, decoder_input_ids, encoder_hidden_states): # Hitting silicon data mismatches with GELU if variant == "openai/whisper-base" or variant == "openai/whisper-medium" or variant == "openai/whisper-large": - os.environ["PYBUDA_DECOMPOSE_GELU"] = "1" + os.environ["FORGE_DECOMPOSE_GELU"] = "1" if variant == "openai/whisper-small": pcc = 0.94 if test_device.devtype == BackendType.Silicon else 0.99 @@ -106,10 +106,10 @@ def forward(self, decoder_input_ids, encoder_hidden_states): config=model_config, ) framework_model = Wrapper(framework_model) - pybuda_model = PyTorchModule("pt_whisper", framework_model) + forge_model = PyTorchModule("pt_whisper", framework_model) # Load and preprocess sample audio - sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") sample_audio = sample["audio"]["array"] inputs = processor(sample_audio, return_tensors="pt") @@ -124,7 +124,7 @@ def forward(self, decoder_input_ids, encoder_hidden_states): # Sanity run out = framework_model(decoder_input_ids, encoder_outputs) - return pybuda_model, [decoder_input_ids, encoder_outputs], {"pcc": pcc} + return forge_model, [decoder_input_ids, encoder_outputs], {"pcc": pcc} @pytest.mark.skip(reason="Redundant") @pytest.mark.parametrize("variant", variants, ids=variants) @@ -151,7 +151,7 @@ def test_whisper(test_device, variant): test_kind=TestKind.INFERENCE, pcc=other["pcc"], enabled=False if variant == "openai/whisper-medium" else True, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], ), 
) @@ -164,13 +164,13 @@ def test_whisper_pipeline(test_device, variant): pytest.skip("Grayskull test failing with no valid grids (50 nodes)") # Configurations - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.enable_auto_fusing = False # tenstorrent/pybuda#844 + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.enable_auto_fusing = False # tenstorrent/forge#844 compiler_cfg.amp_level = 2 compiler_cfg.enable_link_past_cache_ios = False compiler_cfg.enable_tvm_cpu_fallback = False # Run full model on silicon compiler_cfg.enable_enumerate_u_kt = False - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b # Load model (with tokenizer and feature extractor) framework_model = download_model( @@ -187,13 +187,13 @@ def test_whisper_pipeline(test_device, variant): feature_extractor=copy.deepcopy(feature_extractor), ) - ### Load PyBuda pipeline - asr_pipeline = pybuda_pipeline( + ### Load Forge pipeline + asr_pipeline = forge_pipeline( "automatic-speech-recognition", model=framework_model, tokenizer=tokenizer, feature_extractor=feature_extractor, - pybuda_max_length=32, + forge_max_length=32, ) # Load & preprocess sample audio @@ -201,7 +201,7 @@ def test_whisper_pipeline(test_device, variant): # data_set = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # sample = processor(data_set[0]["audio"]["array"], return_tensors="pt") ### Load preprocessed from local - sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") sample_audio = sample["audio"]["array"] ### Load direct audio file # sample_audio = "audio_demos/whisper/data_sample/1272-128104-0000.flac" @@ -230,22 +230,22 @@ def test_whisper_encoder(test_device, variant): compiler_cfg.enable_tvm_cpu_fallback = False compiler_cfg.input_queues_on_host = True compiler_cfg.enable_link_past_cache_ios = True - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.amp_level = 1 compiler_cfg.default_dram_parameters = False - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" - os.environ["PYBUDA_PAD_OUTPUT_BUFFER_THRESHOLD_TILES"] = "1536" - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "35000" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER_THRESHOLD_TILES"] = "1536" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "35000" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" - os.environ["PYBUDA_NOP_ON_DIRECT_SHORT_PATH"] = "1" - os.environ["PYBUDA_SKIP_SMALL_UKT"] = "1" + os.environ["FORGE_NOP_ON_DIRECT_SHORT_PATH"] = "1" + os.environ["FORGE_SKIP_SMALL_UKT"] = "1" elif test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "2000000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "2000000" if variant == "openai/whisper-small": os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "65536" @@ -256,7 +256,7 @@ def test_whisper_encoder(test_device, variant): if variant == "openai/whisper-base": pcc = 0.93 if test_device.devtype == BackendType.Silicon else 0.99 if test_device.arch == BackendDevice.Wormhole_B0: - 
os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "55000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "55000" config = WhisperConfig.from_pretrained(variant) config.return_dict = False @@ -270,7 +270,7 @@ def test_whisper_encoder(test_device, variant): config.max_source_positions = padded_len else: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{150*1024}" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = f"{100*1024}" + os.environ["FORGE_EXTRA_L1_MARGIN"] = f"{100*1024}" model = download_model( WhisperForConditionalGeneration.from_pretrained, @@ -299,10 +299,10 @@ def forward(self, input_features): processor = download_model(AutoProcessor.from_pretrained, variant) model = Wrapper(model) - pybuda_model = PyTorchModule("pt_whisper", model) + forge_model = PyTorchModule("pt_whisper", model) # Load and preprocess sample audio - sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") sample_audio = sample["audio"]["array"] inputs = processor(sample_audio, return_tensors="pt") @@ -311,14 +311,14 @@ def forward(self, input_features): else: input_features = inputs.input_features - tt0 = pybuda.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=pybuda_model) - output_q = pybuda.initialize_pipeline(training=False,sample_inputs=(input_features, ),) + tt0 = forge.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=forge_model) + output_q = forge.initialize_pipeline(training=False,sample_inputs=(input_features, ),) start = time.time() tokens_to_generate = 10 if test_device.devtype == BackendType.Silicon else 3 for _ in range(tokens_to_generate): tt0.push_to_inputs(input_features) - pybuda.run_forward(input_count=1) + forge.run_forward(input_count=1) ans = output_q.get() end = time.time() diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_whisper_1.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_whisper_1.py similarity index 83% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_whisper_1.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_whisper_1.py index 5253923a6..1e34d0f44 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_whisper_1.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_whisper_1.py @@ -20,15 +20,15 @@ ) from datasets import load_dataset from typing import Optional -from pybuda.pybudaglobal import TILE_DIM -import pybuda +from forge.forgeglobal import TILE_DIM +import forge from test.utils import download_model -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda import PyTorchModule, VerifyConfig -from pybuda.config import _get_global_compiler_config -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge import PyTorchModule, VerifyConfig +from forge.config import _get_global_compiler_config +from forge._C.backend_api import BackendType, BackendDevice +from forge.transformers.pipeline import pipeline as forge_pipeline from test.model_demos.models.whisper import Whisper_encoder, Whisper_decoder, generate_model_whisper_decoder_past_cache variants = [ @@ -47,14 +47,14 @@ def test_whisper_dec_past_cache(test_device, variant): model, inputs, other = generate_model_whisper_decoder_past_cache(test_device, variant) compile_inputs = other["compile_inputs"] max_length = 
other["max_length"] - tt0 = pybuda.TTDevice( + tt0 = forge.TTDevice( "tt0", devtype=test_device.devtype, arch=test_device.arch, module=model ) - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=compile_inputs, ) @@ -66,7 +66,7 @@ def test_whisper_dec_past_cache(test_device, variant): tokens_to_generate = 64 if test_device.devtype == BackendType.Silicon else 3 for _ in range(tokens_to_generate): tt0.push_to_inputs(inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) ans = output_q.get() end = time.time() @@ -83,37 +83,37 @@ def test_whisper_enc_dec(test_device, variant): compiler_cfg.compile_subgraphs = True compiler_cfg.enable_link_past_cache_ios = True compiler_cfg.backend_opt_level = 4 - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_RIBBON2"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.amp_level = 1 - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" - os.environ["PYBUDA_PAD_OUTPUT_BUFFER_THRESHOLD_TILES"] = "1536" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER_THRESHOLD_TILES"] = "1536" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" - os.environ["PYBUDA_NOP_ON_DIRECT_SHORT_PATH"] = "1" - os.environ["PYBUDA_SKIP_SMALL_UKT"] = "1" + os.environ["FORGE_NOP_ON_DIRECT_SHORT_PATH"] = "1" + os.environ["FORGE_SKIP_SMALL_UKT"] = "1" if variant == "openai/whisper-base": - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "None" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "None" compiler_cfg.enable_auto_fusing = False if variant == "openai/whisper-small": - os.environ["PYBUDA_DISABLE_SELF_CUT_FOR_SUBGRAPHS"] = "1, 2" + os.environ["FORGE_DISABLE_SELF_CUT_FOR_SUBGRAPHS"] = "1, 2" if variant == "openai/whisper-medium": - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "None" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "None" compiler_cfg.enable_auto_fusing = False compiler_cfg.balancer_op_override("layernorm_66.dc.add.14", "t_stream_shape", (1,1)) compiler_cfg.balancer_op_override("layernorm_1193.dc.add.14", "t_stream_shape", (1,1)) if variant == "openai/whisper-large": os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "0" - os.environ["PYBUDA_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" + os.environ["FORGE_TEMP_ELT_UNARY_ESTIMATES_LEGACY"] = "1" elif test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False @@ -125,12 +125,12 @@ def test_whisper_enc_dec(test_device, variant): compiler_cfg.amp_level = 1 else: # compiler_cfg.enable_enumerate_u_kt = False - os.environ["PYBUDA_TEMP_RIBBON2_LEGACY_UTIL_EVAL"] = "1" + os.environ["FORGE_TEMP_RIBBON2_LEGACY_UTIL_EVAL"] = "1" run_encoder_on_tt = ("tiny" in variant) or ("base" in variant) or ("small" in variant) or ("medium" in variant) pad_model = True - # pybuda.set_configuration_options(performance_trace=pybuda.PerfTraceLevel.VERBOSE) + # forge.set_configuration_options(performance_trace=forge.PerfTraceLevel.VERBOSE) processor = download_model(AutoProcessor.from_pretrained, variant) config = WhisperConfig.from_pretrained(variant) config.return_dict = False @@ -141,7 +141,7 @@ def test_whisper_enc_dec(test_device, 
variant): config.max_source_positions = 1536 else: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{150*1024}" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = f"{100*1024}" + os.environ["FORGE_EXTRA_L1_MARGIN"] = f"{100*1024}" max_length = config.max_length model = download_model( @@ -157,20 +157,20 @@ def test_whisper_enc_dec(test_device, variant): feature_extractor = download_model(WhisperFeatureExtractor.from_pretrained, variant) tokenizer = WhisperTokenizer.from_pretrained(variant) - encoder_module = pybuda.PyTorchModule("Whisper_encoder", Whisper_encoder(model)) - decoder_module_cross_attention = pybuda.PyTorchModule("Whisper_decoder_with_ca", Whisper_decoder(model)) - decoder_module_no_cross_attention = pybuda.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) + encoder_module = forge.PyTorchModule("Whisper_encoder", Whisper_encoder(model)) + decoder_module_cross_attention = forge.PyTorchModule("Whisper_decoder_with_ca", Whisper_decoder(model)) + decoder_module_no_cross_attention = forge.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) #ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") smaller_dataset = [] if True:#test_device.devtype != BackendType.Silicon: for i in range(1): - sample = torch.load(f"pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-000{i}.pt") + sample = torch.load(f"forge/test/model_demos/utils/nlp/pytorch/1272-128104-000{i}.pt") smaller_dataset.append(sample) #smaller_dataset.append(ds[i]) ds = smaller_dataset inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") - # sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + # sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") # sample_audio = sample["audio"]["array"] # inputs = processor(sample_audio, return_tensors="pt") @@ -204,13 +204,13 @@ def test_whisper_enc_dec(test_device, variant): torch.zeros(enc_past_cache_cross_shape), torch.zeros(enc_past_cache_cross_shape)] if run_encoder_on_tt: - tt0 = pybuda.TTDevice( + tt0 = forge.TTDevice( "tt0", devtype=test_device.devtype, arch=test_device.arch, module=[encoder_module, decoder_module_cross_attention, decoder_module_no_cross_attention]) - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=( (input_features,), @@ -218,13 +218,13 @@ def test_whisper_enc_dec(test_device, variant): (decoder_no_ca_inputs), )) else: - tt0 = pybuda.TTDevice( + tt0 = forge.TTDevice( "tt0", devtype=test_device.devtype, arch=test_device.arch, module=[decoder_module_cross_attention, decoder_module_no_cross_attention]) - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=( (decoder_with_ca_inputs), @@ -259,7 +259,7 @@ def test_whisper_enc_dec(test_device, variant): if run_encoder_on_tt: tt0.set_active_subgraph(0) tt0.push_to_inputs((input_features, )) - pybuda.run_forward() + forge.run_forward() ans = output_q.get() encoder_last_hidden_state = ans[0].value().detach() first_active_subgraph = 1 @@ -275,14 +275,14 @@ def test_whisper_enc_dec(test_device, variant): tt0.set_active_subgraph(first_active_subgraph) generate_inputs = (decoder_input_ids, decoder_attention_mask, encoder_last_hidden_state, position_embeds) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=current_token_index//TILE_DIM) + forge.run_generate(input_count=1, write_index=current_token_index//TILE_DIM) ans = output_q.get() 
tt0.set_active_subgraph(first_active_subgraph+1) start_2 = time.time() else: generate_inputs = (decoder_input_ids, decoder_attention_mask, position_embeds) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=current_token_index//TILE_DIM) + forge.run_generate(input_count=1, write_index=current_token_index//TILE_DIM) ans = output_q.get() lm_head_out = ans[0].value().detach() @@ -319,15 +319,15 @@ def test_whisper_enc_dec_pipeline(test_device, variant): compiler_cfg.enable_tvm_cpu_fallback = False # Run full model on silicon compiler_cfg.input_queues_on_host = True compiler_cfg.compile_subgraphs = True - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.enable_link_past_cache_ios = True - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" os.environ["TT_BACKEND_PROFILER"] = "1" - # pybuda.set_configuration_options(performance_trace=pybuda.PerfTraceLevel.VERBOSE) + # forge.set_configuration_options(performance_trace=forge.PerfTraceLevel.VERBOSE) feature_extractor = download_model(WhisperFeatureExtractor.from_pretrained, variant) processor = download_model(AutoProcessor.from_pretrained, variant) config = WhisperConfig.from_pretrained(variant) @@ -339,15 +339,15 @@ def test_whisper_enc_dec_pipeline(test_device, variant): return_dict=False, ) tokenizer = WhisperTokenizer.from_pretrained(variant) - encoder_module = pybuda.PyTorchModule("Whisper_encoder", Whisper_encoder(model)) - decoder_module_cross_attention = pybuda.PyTorchModule("Whisper_decoder_with_ca", Whisper_decoder(model)) - decoder_module_no_cross_attention = pybuda.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) + encoder_module = forge.PyTorchModule("Whisper_encoder", Whisper_encoder(model)) + decoder_module_cross_attention = forge.PyTorchModule("Whisper_decoder_with_ca", Whisper_decoder(model)) + decoder_module_no_cross_attention = forge.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) for i in range(config.decoder_layers): - pybuda.config.override_t_stream_shape(f"model.model.decoder.layers.{i}.self_attn.k_proj.weight_cache_nop", [13, 1]) - pybuda.config.override_t_stream_shape(f"model.model.decoder.layers.{i}.self_attn.v_proj.weight_cache_nop", [13, 1]) + forge.config.override_t_stream_shape(f"model.model.decoder.layers.{i}.self_attn.k_proj.weight_cache_nop", [13, 1]) + forge.config.override_t_stream_shape(f"model.model.decoder.layers.{i}.self_attn.v_proj.weight_cache_nop", [13, 1]) - sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") sample_audio = sample["audio"]["array"] inputs = processor(sample_audio, return_tensors="pt") @@ -376,14 +376,14 @@ def test_whisper_enc_dec_pipeline(test_device, variant): decoder_no_ca_inputs += [torch.zeros(enc_past_cache_self_shape), torch.zeros(enc_past_cache_self_shape), torch.zeros(enc_past_cache_cross_shape), torch.zeros(enc_past_cache_cross_shape)] - tt0 = pybuda.TTDevice( + tt0 = forge.TTDevice( "tt0", devtype=test_device.devtype, arch=test_device.arch, module=[decoder_module_cross_attention, decoder_module_no_cross_attention]) # module=[encoder_module, decoder_module_cross_attention, 
decoder_module_no_cross_attention]) - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=( # (input_features,), @@ -411,7 +411,7 @@ def wrap_generate(inputs): generate_inputs = (decoder_input_ids, decoder_attention_mask, encoder_last_hidden_state, position_embeds) tt0.set_active_subgraph(0) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) ans = output_q.get() lm_head_out = ans[0].value().detach() lm_head_out = lm_head_out[:, :(current_token_index % TILE_DIM) + 1, :] @@ -424,7 +424,7 @@ def wrap_generate(inputs): return lm_head_out - asr_pipeline = pybuda_pipeline( + asr_pipeline = forge_pipeline( "automatic-speech-recognition", model=model, tokenizer=tokenizer, diff --git a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_xglm.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_xglm.py similarity index 79% rename from pybuda/test/model_demos/high_prio/nlp/pytorch/test_xglm.py rename to forge/test/model_demos/high_prio/nlp/pytorch/test_xglm.py index 1610372c5..6cd35804b 100644 --- a/pybuda/test/model_demos/high_prio/nlp/pytorch/test_xglm.py +++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_xglm.py @@ -3,23 +3,23 @@ # SPDX-License-Identifier: Apache-2.0 import pytest from test.utils import download_model -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind, DataFormat, NebulaGalaxy +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind, DataFormat, NebulaGalaxy import os -import pybuda -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline +import forge +from forge.transformers.pipeline import pipeline as forge_pipeline from transformers import AutoTokenizer, XGLMForCausalLM, XGLMConfig variants = ["facebook/xglm-564M", "facebook/xglm-1.7B"] @pytest.mark.parametrize("variant", variants, ids=variants) def test_xglm_causal_lm(variant, test_device): - # Set PyBUDA configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.cpu_fallback_ops.add("take") compiler_cfg.default_df_override = DataFormat.Float16_b compiler_cfg.enable_enumerate_u_kt = False @@ -54,7 +54,7 @@ def test_xglm_causal_lm(variant, test_device): pcc = 0.98 if test_device.devtype == BackendType.Silicon and test_device.arch == BackendDevice.Wormhole_B0 else 0.99 verify_module( - pybuda.PyTorchModule("pt_xglm_causal_lm", model), + forge.PyTorchModule("pt_xglm_causal_lm", model), input_shapes=[(input_tokens['input_ids'].shape, input_tokens['attention_mask'].shape,)], inputs=[(input_tokens['input_ids'], input_tokens['attention_mask'],)], verify_cfg=VerifyConfig( @@ -62,7 +62,7 @@ def test_xglm_causal_lm(variant, test_device): devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - chip_ids=NebulaGalaxy.chip_ids if "PYBUDA_NEB_GALAXY_CI" in os.environ and int(os.environ.get("PYBUDA_NEB_GALAXY_CI"))==1 else [0], + chip_ids=NebulaGalaxy.chip_ids if "FORGE_NEB_GALAXY_CI" in os.environ and int(os.environ.get("FORGE_NEB_GALAXY_CI"))==1 else [0], pcc=pcc, ) ) diff --git a/pybuda/test/model_demos/models/__init__.py 
b/forge/test/model_demos/models/__init__.py similarity index 100% rename from pybuda/test/model_demos/models/__init__.py rename to forge/test/model_demos/models/__init__.py diff --git a/pybuda/test/model_demos/models/deit.py b/forge/test/model_demos/models/deit.py similarity index 75% rename from pybuda/test/model_demos/models/deit.py rename to forge/test/model_demos/models/deit.py index ae8452029..6219cdaa2 100644 --- a/pybuda/test/model_demos/models/deit.py +++ b/forge/test/model_demos/models/deit.py @@ -7,19 +7,19 @@ from datasets import load_dataset from transformers import AutoFeatureExtractor, ViTForImageClassification -import pybuda +import forge from test.utils import download_model def generate_model_deit_imgcls_hf_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.balancer_policy = "Ribbon" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model image_processor = download_model(AutoFeatureExtractor.from_pretrained, variant) model = download_model(ViTForImageClassification.from_pretrained, variant) - tt_model = pybuda.PyTorchModule("DeiT_classif_16_224", model) + tt_model = forge.PyTorchModule("DeiT_classif_16_224", model) # STEP 3: Run inference on Tenstorrent device dataset = load_dataset("huggingface/cats-image") diff --git a/pybuda/test/model_demos/models/dla.py b/forge/test/model_demos/models/dla.py similarity index 100% rename from pybuda/test/model_demos/models/dla.py rename to forge/test/model_demos/models/dla.py diff --git a/pybuda/test/model_demos/models/falcon/__init__.py b/forge/test/model_demos/models/falcon/__init__.py similarity index 100% rename from pybuda/test/model_demos/models/falcon/__init__.py rename to forge/test/model_demos/models/falcon/__init__.py diff --git a/pybuda/test/model_demos/models/falcon/configuration_RW.py b/forge/test/model_demos/models/falcon/configuration_RW.py similarity index 100% rename from pybuda/test/model_demos/models/falcon/configuration_RW.py rename to forge/test/model_demos/models/falcon/configuration_RW.py diff --git a/pybuda/test/model_demos/models/falcon/model.py b/forge/test/model_demos/models/falcon/model.py similarity index 98% rename from pybuda/test/model_demos/models/falcon/model.py rename to forge/test/model_demos/models/falcon/model.py index 46694fb75..ef7676920 100644 --- a/pybuda/test/model_demos/models/falcon/model.py +++ b/forge/test/model_demos/models/falcon/model.py @@ -7,7 +7,7 @@ import os -import pybuda +import forge import torch from torch.nn import functional as F from transformers import AutoTokenizer @@ -89,7 +89,7 @@ def __init__( def initialize(self): - pybuda.set_configuration_options( + forge.set_configuration_options( backend_output_dir="tt_build/decode_demo" ) @@ -172,7 +172,7 @@ def inference(self, prompts): self.batch_size == 1 ), "Pretty sure this code assumes batch size == 1, FIXME" - # tensor of right size and shape needed for pybuda to compile. initialise kv with zeros + # tensor of right size and shape needed for forge to compile. initialise kv with zeros # value in tensor doesn't matter. 
we're going to prefill this in anyways # TODO: replace constants 32 and 64 past_key_values = tuple( diff --git a/pybuda/test/model_demos/models/falcon/pybudify.py b/forge/test/model_demos/models/falcon/pybudify.py similarity index 73% rename from pybuda/test/model_demos/models/falcon/pybudify.py rename to forge/test/model_demos/models/falcon/pybudify.py index 9ed3c6323..0d0e279d8 100644 --- a/pybuda/test/model_demos/models/falcon/pybudify.py +++ b/forge/test/model_demos/models/falcon/pybudify.py @@ -54,81 +54,81 @@ def __init__( os.environ["LOGGER_LEVEL"] = log_level os.environ["LOGURU_LEVEL"] = log_level - # pybuda workarounds + # forge workarounds os.environ[ "GOLDEN_WORMHOLE_B0" ] = "1" # golden should always simulate a B0 as that's all we use now os.environ[ - "PYBUDA_CONVERT_PARAMS_TO_TVM" + "FORGE_CONVERT_PARAMS_TO_TVM" ] = "0" # faster compile times... why would this ever be 1? os.environ[ "TT_BACKEND_TIMEOUT" ] = "0" # default is too aggressive for large models? - # os.environ["PYBUDA_ENABLE_BROADCAST_SPLITTING"] = "1" - # os.environ["PYBUDA_DISABLE_FORK_JOIN_BUF"] = "1" - # os.environ["PYBUDA_DRAM_PICK_CAPACITY"] = "1" + # os.environ["FORGE_ENABLE_BROADCAST_SPLITTING"] = "1" + # os.environ["FORGE_DISABLE_FORK_JOIN_BUF"] = "1" + # os.environ["FORGE_DRAM_PICK_CAPACITY"] = "1" # os.environ["WHA0_DISABLE_RELAY_BUFS"] = "1" - # os.environ["PYBUDA_FUSE_STOP_ON_RECIPROCAL"] = "1" - # os.environ["PYBUDA_PLACER_SNAKE"] = "1" Not what we want for dual chip placement - # os.environ["PYBUDA_DISABLE_INTERACTIVE_PLACER"] = "1" # Until interactive placer supports multi-chip placement overrides - # os.environ["PYBUDA_PLACER_SNAKE"] = "1" - # os.environ["PYBUDA_ETH_LINKS_NEBULA"] = "1" + # os.environ["FORGE_FUSE_STOP_ON_RECIPROCAL"] = "1" + # os.environ["FORGE_PLACER_SNAKE"] = "1" Not what we want for dual chip placement + # os.environ["FORGE_DISABLE_INTERACTIVE_PLACER"] = "1" # Until interactive placer supports multi-chip placement overrides + # os.environ["FORGE_PLACER_SNAKE"] = "1" + # os.environ["FORGE_ETH_LINKS_NEBULA"] = "1" - pybuda = self.pybuda = __import__( - "pybuda" - ) # let us set log levels before importing pybuda + forge = self.forge = __import__( + "forge" + ) # let us set log levels before importing forge if device == "pytorch": pass else: devtype = { - "golden": pybuda.BackendType.Golden, - "silicon": pybuda.BackendType.Silicon, + "golden": forge.BackendType.Golden, + "silicon": forge.BackendType.Silicon, }[device] - module = pybuda.PyTorchModule(netlist_name, self.bound_module) + module = forge.PyTorchModule(netlist_name, self.bound_module) if precision == "fp32": - fallback = pybuda.DataFormat.Float32 + fallback = forge.DataFormat.Float32 elif precision == "fp16": - fallback = pybuda.DataFormat.Float16 + fallback = forge.DataFormat.Float16 elif precision == "bf16": - fallback = pybuda.DataFormat.Float16_b + fallback = forge.DataFormat.Float16_b elif precision == "fp8": - fallback = pybuda.DataFormat.Bfp8 + fallback = forge.DataFormat.Bfp8 elif precision == "fp8b": - fallback = pybuda.DataFormat.Bfp8_b + fallback = forge.DataFormat.Bfp8_b else: raise ValueError('Precision "%s" not implemented' % precision) if bfp8_matmuls: - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex="matmul_.*", input_df={ - 0: [pybuda.DataFormat.Bfp8_b, True], - 1: [pybuda.DataFormat.Bfp8_b, True], - 2: [pybuda.DataFormat.Bfp8_b, True], + 0: [forge.DataFormat.Bfp8_b, True], + 1: [forge.DataFormat.Bfp8_b, True], + 2: [forge.DataFormat.Bfp8_b, True], }, ) if 
decode_mode: # Required for decode or we get invalid DF error. Important: DO not set intermed, acc_df or we hang on prefill. - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( op_type="splice", - output_df=pybuda.DataFormat.Float16_b, + output_df=forge.DataFormat.Float16_b, input_df={ - 0: [pybuda.DataFormat.Float16_b, True], - 1: [pybuda.DataFormat.Float16_b, True], - 2: [pybuda.DataFormat.Float16_b, True], + 0: [forge.DataFormat.Float16_b, True], + 1: [forge.DataFormat.Float16_b, True], + 2: [forge.DataFormat.Float16_b, True], }, ) - # pybuda.config.configure_mixed_precision( + # forge.config.configure_mixed_precision( # name_regex="matmul_.*", - # input_df={1: [pybuda.DataFormat.Bfp8_b, True]}) + # input_df={1: [forge.DataFormat.Bfp8_b, True]}) - # pybuda.override_op_size('matmul_61', (1,2)) + # forge.override_op_size('matmul_61', (1,2)) if lora: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "147456" @@ -142,23 +142,23 @@ def __init__( q_offset = 70 h4h_offset = 70 for layer_num in range(len(self.bound_module.layers)): - pybuda.config.insert_fracture_group( - [(f"matmul_{26+layer_num*q_offset}", pybuda.k_dim, 2)] + forge.config.insert_fracture_group( + [(f"matmul_{26+layer_num*q_offset}", forge.k_dim, 2)] ) - pybuda.config.insert_fracture_group( - [(f"matmul_{23+layer_num*h4h_offset}", pybuda.k_dim, 4)] + forge.config.insert_fracture_group( + [(f"matmul_{23+layer_num*h4h_offset}", forge.k_dim, 4)] ) if padded_fracture: offset = 73 factor = 2 for layer_num in range(len(self.bound_module.layers)): - pybuda.config.insert_fracture_group( + forge.config.insert_fracture_group( [ (f"matmul_{18+layer_num*offset}", -1, factor), ( f"matmul_{23+layer_num*offset}", - pybuda.k_dim, + forge.k_dim, factor, ), ] @@ -168,23 +168,23 @@ def __init__( layer_num > 0 and layer_num < len(self.bound_module.layers) - 1 ): - pybuda.set_epoch_break(f"multiply_{0+layer_num*offset}") + forge.set_epoch_break(f"multiply_{0+layer_num*offset}") # 4 bit precision for fracturing required otherwise DRAM error occurs for 32 layers - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex="fractured_1_matmul_.*", input_df={ - 0: [pybuda.DataFormat.Bfp8_b, True], - 1: [pybuda.DataFormat.Bfp4_b, True], - 2: [pybuda.DataFormat.Bfp8_b, True], + 0: [forge.DataFormat.Bfp8_b, True], + 1: [forge.DataFormat.Bfp4_b, True], + 2: [forge.DataFormat.Bfp8_b, True], }, ) - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex="fractured_0_matmul_.*", input_df={ - 0: [pybuda.DataFormat.Bfp8_b, True], - 1: [pybuda.DataFormat.Bfp4_b, True], - 2: [pybuda.DataFormat.Bfp8_b, True], + 0: [forge.DataFormat.Bfp8_b, True], + 1: [forge.DataFormat.Bfp4_b, True], + 2: [forge.DataFormat.Bfp8_b, True], }, ) @@ -192,12 +192,12 @@ def __init__( offset = 73 for layer_num in range(len(self.bound_module.layers)): if layer_num < len(self.bound_module.layers) // 2: - pybuda.config.insert_fracture_group( + forge.config.insert_fracture_group( [ (f"matmul_{18+layer_num*offset}", -1, 2), ( f"matmul_{23+layer_num*offset}", - pybuda.k_dim, + forge.k_dim, 2, ), ] @@ -207,18 +207,18 @@ def __init__( layer_num > 0 and layer_num < len(self.bound_module.layers) - 1 ): - pybuda.set_epoch_break(f"multiply_{0+layer_num*offset}") + forge.set_epoch_break(f"multiply_{0+layer_num*offset}") # Running padded fracture full (Dragon's exploration) if padded_fracture_full: # 4 bit precision for fracturing required otherwise DRAM error occurs for 32 layers # TODO 
change this to only affect MLP fractured matmuls and not attention - pybuda.config.configure_mixed_precision( + forge.config.configure_mixed_precision( name_regex="fractured_*._matmul_.*", input_df={ - 0: [pybuda.DataFormat.Bfp8_b, True], - 1: [pybuda.DataFormat.Bfp4_b, True], - 2: [pybuda.DataFormat.Bfp8_b, True], + 0: [forge.DataFormat.Bfp8_b, True], + 1: [forge.DataFormat.Bfp4_b, True], + 2: [forge.DataFormat.Bfp8_b, True], }, ) @@ -230,7 +230,7 @@ def __init__( for layer_num in range(len(self.bound_module.layers)): # Since we move around the users dimension, full attn fracturing won't be possible in a single group - pybuda.config.insert_fracture_group( + forge.config.insert_fracture_group( [ # Q # (f"matmul_{26+layer_num*q_offset}", -2, attn_factor), @@ -284,14 +284,14 @@ def __init__( exits = [f"matmul_{68 + layer_num*q_offset}"] # exits = [f'add_{70 + layer_num*q_offset}'] attn_constr = self.add_sched( - pybuda, + forge, entries, exits, ops, attn_factor, attn_constr, ) - pybuda.config.add_schedule_constraint(attn_constr) + forge.config.add_schedule_constraint(attn_constr) # MLP fracture if fracture_mlp > 0: @@ -305,17 +305,17 @@ def __init__( ) # Manual scheduling to support MLP fracture 2-chip on full size falcon-7B - pybuda.set_epoch_break( + forge.set_epoch_break( f"softmax_{55+mlp_offset}.dc.reduce_max.0" ) - pybuda.config.override_op_placement( + forge.config.override_op_placement( f"concatenate_{48+mlp_offset}.dc.concatenate.2", chip_id=1, temporal_epoch_break=True, ) # MLP fracture - pybuda.config.insert_fracture_group( + forge.config.insert_fracture_group( [ # Can't do fracturing of weights due to transpose # mlp.dense_h_to_4h @@ -323,7 +323,7 @@ def __init__( # mlp.dense_4h_to_h ( f"matmul_{23+mlp_offset}", - pybuda.k_dim, + forge.k_dim, mlp_factor, ), ] @@ -338,26 +338,26 @@ def __init__( ] exits = [f"add_{70 + mlp_offset}"] mlp_constr = self.add_sched( - pybuda, entries, exits, ops, mlp_factor, mlp_constr + forge, entries, exits, ops, mlp_factor, mlp_constr ) - pybuda.config.add_schedule_constraint(mlp_constr) + forge.config.add_schedule_constraint(mlp_constr) perf_level = { None: None, "none": None, - "light": pybuda.PerfTraceLevel.LIGHT, - "verbose": pybuda.PerfTraceLevel.VERBOSE, + "light": forge.PerfTraceLevel.LIGHT, + "verbose": forge.PerfTraceLevel.VERBOSE, }[perf] - pybuda.set_configuration_options( + forge.set_configuration_options( default_df_override=fallback, - accumulate_df=pybuda.DataFormat.Float32, + accumulate_df=forge.DataFormat.Float32, amp_level=amp_level, enable_auto_fusing=fuse, performance_trace=perf_level, backend_opt_level=4, enable_auto_transposing_placement=True, ) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.input_queues_on_host = host_queues if self.masked_odkv: @@ -365,16 +365,16 @@ def __init__( # compiler_cfg.manual_t_streaming = True - # pybuda.config.override_t_stream_dir(f"concatenate_50.dc.sparse_matmul.4.lc2", "c") - # pybuda.config.override_t_stream_dir(f"concatenate_67.dc.sparse_matmul.4.lc2", "c") + # forge.config.override_t_stream_dir(f"concatenate_50.dc.sparse_matmul.4.lc2", "c") + # forge.config.override_t_stream_dir(f"concatenate_67.dc.sparse_matmul.4.lc2", "c") # import pdb; pdb.set_trace() - # pybuda.config.set_epoch_break("transpose_58.dc.sparse_matmul.4.lc2") + # forge.config.set_epoch_break("transpose_58.dc.sparse_matmul.4.lc2") - # pybuda.config.set_epoch_break("matmul_64") + # forge.config.set_epoch_break("matmul_64") - # 
pybuda.config.add_schedule_constraint(['transpose_58.dc.sparse_matmul.4.lc2', 'add_59']) + # forge.config.add_schedule_constraint(['transpose_58.dc.sparse_matmul.4.lc2', 'add_59']) if num_layers == 1: names = "input__56, input__57" @@ -390,26 +390,26 @@ def __init__( print(f'names" {names}') names_dict = {name: (i + 1) for i, name in enumerate(names)} - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() - # pybuda.config.insert_fracture_group([(f"concatenate_50", 2, 2)]) - # pybuda.config.insert_fracture_group([(f"concatenate_67", 2, 2)]) + # forge.config.insert_fracture_group([(f"concatenate_50", 2, 2)]) + # forge.config.insert_fracture_group([(f"concatenate_67", 2, 2)]) - # pybuda.config.configure_mixed_precision( + # forge.config.configure_mixed_precision( # name_regex="concatenate_50.dc.sparse_matmul.4.lc2", - # input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp8_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + # input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp8_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) - # pybuda.config.configure_mixed_precision( + # forge.config.configure_mixed_precision( # name_regex="concatenate_50.dc.sparse_matmul.4.lc2", - # input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp8_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + # input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp8_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) compiler_cfg.loopback_outputs = names_dict elif self.odkv: # compiler_cfg.manual_t_streaming = True - # pybuda.config.override_t_stream_dir(f"concatenate_50.dc.sparse_matmul.4.lc2", "c") - # pybuda.config.override_t_stream_dir(f"concatenate_67.dc.sparse_matmul.4.lc2", "c") + # forge.config.override_t_stream_dir(f"concatenate_50.dc.sparse_matmul.4.lc2", "c") + # forge.config.override_t_stream_dir(f"concatenate_67.dc.sparse_matmul.4.lc2", "c") if num_layers == 1: names = "input__54, input__55" @@ -424,34 +424,34 @@ def __init__( print(f'names" {names}') names_dict = {name: (i + 1) for i, name in enumerate(names)} - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() - # pybuda.config.insert_fracture_group([(f"concatenate_50", 2, 2)]) - # pybuda.config.insert_fracture_group([(f"concatenate_67", 2, 2)]) + # forge.config.insert_fracture_group([(f"concatenate_50", 2, 2)]) + # forge.config.insert_fracture_group([(f"concatenate_67", 2, 2)]) - # pybuda.config.configure_mixed_precision( + # forge.config.configure_mixed_precision( # name_regex="concatenate_50.dc.sparse_matmul.4.lc2", - # input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp8_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + # input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp8_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) - # pybuda.config.configure_mixed_precision( + # forge.config.configure_mixed_precision( # name_regex="concatenate_50.dc.sparse_matmul.4.lc2", - # input_df={0: [pybuda.DataFormat.Bfp8_b, True], 1: [pybuda.DataFormat.Bfp8_b, True], 2: [pybuda.DataFormat.Bfp8_b, True]}) + # input_df={0: [forge.DataFormat.Bfp8_b, True], 1: [forge.DataFormat.Bfp8_b, True], 2: [forge.DataFormat.Bfp8_b, True]}) compiler_cfg.loopback_outputs = names_dict - pybuda_arch = { - "grayskull": pybuda.BackendDevice.Grayskull, - "wormhole_b0": pybuda.BackendDevice.Wormhole_B0, + forge_arch = { + "grayskull": forge.BackendDevice.Grayskull, + 
"wormhole_b0": forge.BackendDevice.Wormhole_B0, }[arch] if tti_load is not None: - self.tt0 = pybuda.TTDevice.load_image(img_path=tti_load) + self.tt0 = forge.TTDevice.load_image(img_path=tti_load) else: - self.tt0 = pybuda.TTDevice( + self.tt0 = forge.TTDevice( "tt0", module=module, fp32_fallback=fallback, - arch=pybuda_arch, + arch=forge_arch, devtype=devtype, chip_ids=list(range(num_chips)), ) @@ -460,11 +460,11 @@ def __init__( self.output_q = mp.Queue() if verify: - self.verify_cfg = pybuda.VerifyConfig( + self.verify_cfg = forge.VerifyConfig( verify_all=True, verify_last=True, - devtype=pybuda.BackendType.Silicon, - arch=pybuda_arch, + devtype=forge.BackendType.Silicon, + arch=forge_arch, ) else: self.verify_cfg = None @@ -473,28 +473,28 @@ def __init__( self.micro_batch_size = micro_batch_size def run_async(self, *args): - """Send inputs to pybuda and run forward pass asynchronously. + """Send inputs to forge and run forward pass asynchronously. Outputs can be read from self.output_q.""" assert ( self.device != "pytorch" - ), "run_async() is only supported for pybuda devices" + ), "run_async() is only supported for forge devices" if self.odkv or self.masked_odkv: self.ensure_initialized(*args) - # print(f'pybuda pushing data') - self.pybuda.sync() + # print(f'forge pushing data') + self.forge.sync() in_args = ( list(args[0]) + list(args[1]) + list(args[2]) + list(args[3]) ) self.tt0.push_to_inputs( in_args ) # don't pass in kv over and over again - self.pybuda.run_generate( + self.forge.run_generate( input_count=1, write_index=0 ) # , _sequential=True) else: self.ensure_initialized(*args) self.tt0.push_to_inputs(*args) - self.pybuda.run_forward(input_count=1) + self.forge.run_forward(input_count=1) def ensure_initialized(self, *args): if not self.initialized and self.device != "pytorch": @@ -507,7 +507,7 @@ def ensure_initialized(self, *args): ) print(f"Saved image to {self.tti_save}") sys.exit(0) - self.pybuda.initialize_pipeline( + self.forge.initialize_pipeline( training=False, sample_inputs=args, output_queue=self.output_q, @@ -530,7 +530,7 @@ def __call__(self, *args, **kwargs): if self.masked_odkv: # print('run_generate1') - self.pybuda.sync() + self.forge.sync() in_args = ( list(args[0]) + list(args[1]) @@ -542,11 +542,11 @@ def __call__(self, *args, **kwargs): self.tt0.push_to_inputs( in_args ) # don't pass in kv over and over again - self.pybuda.run_generate( + self.forge.run_generate( input_count=1, write_index=0, _sequential=True ) elif self.odkv: - self.pybuda.sync() + self.forge.sync() in_args = ( list(args[0]) + list(args[1]) @@ -556,18 +556,18 @@ def __call__(self, *args, **kwargs): self.tt0.push_to_inputs( in_args ) # don't pass in kv over and over again - self.pybuda.run_generate( + self.forge.run_generate( input_count=1, write_index=0, _sequential=True ) else: self.tt0.push_to_inputs(*args) - self.pybuda.run_forward(input_count=1, _sequential=True) + self.forge.run_forward(input_count=1, _sequential=True) ys = self.output_q.get() outputs = tuple( [ y.value().float() for y in ys - if isinstance(y, self.pybuda.tensor.TensorFromPytorch) + if isinstance(y, self.forge.tensor.TensorFromPytorch) ] ) if len(outputs) == 1: @@ -587,7 +587,7 @@ def __call__(self, *args, **kwargs): result = outputs return result - def add_sched(self, pybuda, entries, exits, ops, factor, constr): + def add_sched(self, forge, entries, exits, ops, factor, constr): for elem in entries: constr.append(elem) for lst in ops: @@ -599,19 +599,19 @@ def add_sched(self, pybuda, entries, exits, ops, 
factor, constr): print( f"[add_sched]: Override op temp. epoch: {fop}, chip {f}" ) - pybuda.config.override_op_placement( + forge.config.override_op_placement( fop, chip_id=f, temporal_epoch_break=True ) else: print( f"[add_sched]: Override op spatial epoch: {fop}, chip {f}" ) - pybuda.config.override_op_placement( + forge.config.override_op_placement( fop, chip_id=f, spatial_epoch_break=True ) constr.append(fop) # for elem in exits: # constr.append(elem) - # pybuda.config.override_op_placement(exits[0], temporal_epoch_break=True) + # forge.config.override_op_placement(exits[0], temporal_epoch_break=True) print(f"[add_sched] sched: {constr}") return constr diff --git a/pybuda/test/model_demos/models/falcon/tt_modeling_RW_pad_masked_odkv.py b/forge/test/model_demos/models/falcon/tt_modeling_RW_pad_masked_odkv.py similarity index 100% rename from pybuda/test/model_demos/models/falcon/tt_modeling_RW_pad_masked_odkv.py rename to forge/test/model_demos/models/falcon/tt_modeling_RW_pad_masked_odkv.py diff --git a/pybuda/test/model_demos/models/ghostnet.py b/forge/test/model_demos/models/ghostnet.py similarity index 71% rename from pybuda/test/model_demos/models/ghostnet.py rename to forge/test/model_demos/models/ghostnet.py index a9feae6d2..21034755b 100644 --- a/pybuda/test/model_demos/models/ghostnet.py +++ b/forge/test/model_demos/models/ghostnet.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge from test.utils import download_model from PIL import Image import timm @@ -12,16 +12,16 @@ def generate_model_ghostnet_imgcls_timm(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model framework_model = download_model(timm.create_model, variant, pretrained=True) framework_model.eval() - tt_model = pybuda.PyTorchModule("pt_ghostnet_100_timm", framework_model) + tt_model = forge.PyTorchModule("pt_ghostnet_100_timm", framework_model) # STEP 3: Prepare input url, filename = ( diff --git a/pybuda/test/model_demos/models/t5.py b/forge/test/model_demos/models/t5.py similarity index 81% rename from pybuda/test/model_demos/models/t5.py rename to forge/test/model_demos/models/t5.py index 9be295689..091545738 100644 --- a/pybuda/test/model_demos/models/t5.py +++ b/forge/test/model_demos/models/t5.py @@ -3,10 +3,10 @@ # SPDX-License-Identifier: Apache-2.0 from test.utils import download_model -from pybuda.pybudaglobal import TILE_DIM +from forge.forgeglobal import TILE_DIM #import queue import os -import pybuda +import forge import torch from transformers import T5ForConditionalGeneration, T5Tokenizer, T5Config @@ -48,14 +48,14 @@ def forward(self, decoder_input_ids, decoder_attention_mask, encoder_last_hidden def generate_t5_past_cache_enc_dec(test_device, variant): - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = 
"64" os.environ["TT_BACKEND_EPOCH_BIN_NUM_SLOTS"] = "64" - os.environ["PYBUDA_ROTATE_PAST_CACHE_PARAMS"] = "1" - compiler_cfg = pybuda.config._get_global_compiler_config() + os.environ["FORGE_ROTATE_PAST_CACHE_PARAMS"] = "1" + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b compiler_cfg.default_dram_parameters = False compiler_cfg.enable_amp_light() compiler_cfg.compile_subgraphs = True @@ -75,10 +75,10 @@ def generate_t5_past_cache_enc_dec(test_device, variant): if "n_layers" in locals(): num_blocks = n_layers for i in range(num_blocks): - pybuda.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [1, 1]) - pybuda.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [1, 1]) - pybuda.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [15, 1]) - pybuda.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [15, 1]) + forge.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [1, 1]) + forge.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [1, 1]) + forge.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [15, 1]) + forge.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [15, 1]) input_length = 64 input_text = "translate English to German: The house is wonderful. We have really enjoyed living here for the past eight years. The only problem that I have with it is that it is too small and the parks are not very close." 
@@ -104,9 +104,9 @@ def generate_t5_past_cache_enc_dec(test_device, variant): decoder_no_ca_inputs += [torch.zeros(enc_past_cache_self_shape), torch.zeros(enc_past_cache_self_shape), torch.zeros(enc_past_cache_cross_shape), torch.zeros(enc_past_cache_cross_shape)] - encoder_module = pybuda.PyTorchModule("T5_encoder", T5_encoder(model)) - decoder_module_cross_attention = pybuda.PyTorchModule("T5_decoder_with_ca", T5_decoder(model)) - decoder_module_no_cross_attention = pybuda.PyTorchModule("T5_decoder_no_ca", T5_decoder(model)) + encoder_module = forge.PyTorchModule("T5_encoder", T5_encoder(model)) + decoder_module_cross_attention = forge.PyTorchModule("T5_decoder_with_ca", T5_decoder(model)) + decoder_module_no_cross_attention = forge.PyTorchModule("T5_decoder_no_ca", T5_decoder(model)) first_current_index = max_length - TILE_DIM decoder_attention_mask[0, first_current_index] = 1 diff --git a/pybuda/test/model_demos/models/whisper.py b/forge/test/model_demos/models/whisper.py similarity index 85% rename from pybuda/test/model_demos/models/whisper.py rename to forge/test/model_demos/models/whisper.py index a3ab883e1..b3bd8fa15 100644 --- a/pybuda/test/model_demos/models/whisper.py +++ b/forge/test/model_demos/models/whisper.py @@ -13,11 +13,11 @@ ) from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask -import pybuda +import forge from test.utils import download_model -from pybuda.pybudaglobal import TILE_DIM -from pybuda.config import _get_global_compiler_config -from pybuda._C.backend_api import BackendType, BackendDevice +from forge.forgeglobal import TILE_DIM +from forge.config import _get_global_compiler_config +from forge._C.backend_api import BackendType, BackendDevice class Whisper_encoder(torch.nn.Module): @@ -72,28 +72,28 @@ def generate_model_whisper_decoder_past_cache(test_device, variant): compiler_cfg.enable_tvm_cpu_fallback = False # Run full model on silicon compiler_cfg.input_queues_on_host = True compiler_cfg.enable_link_past_cache_ios = True - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" if test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.amp_level = 1 - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" - os.environ["PYBUDA_NOP_ON_DIRECT_SHORT_PATH"] = "1" - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "23000" - os.environ["PYBUDA_SKIP_SMALL_UKT"] = "1" + os.environ["FORGE_NOP_ON_DIRECT_SHORT_PATH"] = "1" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "23000" + os.environ["FORGE_SKIP_SMALL_UKT"] = "1" elif test_device.arch == BackendDevice.Grayskull: compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "2000000" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "2000000" if variant in ["openai/whisper-base", "openai/whisper-medium", "openai/whisper-large"]: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = "65536" - # pybuda.set_configuration_options(performance_trace=pybuda.PerfTraceLevel.VERBOSE) + # forge.set_configuration_options(performance_trace=forge.PerfTraceLevel.VERBOSE) processor = download_model(AutoProcessor.from_pretrained, variant) config = WhisperConfig.from_pretrained(variant) max_length = config.max_length @@ -104,9 +104,9 @@ def 
generate_model_whisper_decoder_past_cache(test_device, variant): ) feature_extractor = download_model(WhisperFeatureExtractor.from_pretrained, variant) tokenizer = WhisperTokenizer.from_pretrained(variant) - decoder_module_no_cross_attention = pybuda.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) + decoder_module_no_cross_attention = forge.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) - sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") sample_audio = sample["audio"]["array"] inputs = processor(sample_audio, return_tensors="pt") @@ -137,20 +137,20 @@ def generate_model_whisper_enc_dec(test_device, variant): compiler_cfg.enable_tvm_cpu_fallback = False # Run full model on silicon compiler_cfg.compile_subgraphs = True compiler_cfg.enable_link_past_cache_ios = True - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" - os.environ["PYBUDA_PAD_OUTPUT_BUFFER_THRESHOLD_TILES"] = "1536" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER_THRESHOLD_TILES"] = "1536" if variant == "openai/whisper-base": - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "None" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "None" compiler_cfg.enable_auto_fusing = False run_encoder_on_tt = ("tiny" in variant) or ("base" in variant) or ("small" in variant) pad_model = True - # pybuda.set_configuration_options(performance_trace=pybuda.PerfTraceLevel.VERBOSE) + # forge.set_configuration_options(performance_trace=forge.PerfTraceLevel.VERBOSE) processor = download_model(AutoProcessor.from_pretrained, variant) config = WhisperConfig.from_pretrained(variant) config.return_dict = False @@ -170,11 +170,11 @@ def generate_model_whisper_enc_dec(test_device, variant): feature_extractor = download_model(WhisperFeatureExtractor.from_pretrained, variant) tokenizer = WhisperTokenizer.from_pretrained(variant) - encoder_module = pybuda.PyTorchModule("Whisper_encoder", Whisper_encoder(model)) - decoder_module_cross_attention = pybuda.PyTorchModule("Whisper_decoder_with_ca", Whisper_decoder(model)) - decoder_module_no_cross_attention = pybuda.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) + encoder_module = forge.PyTorchModule("Whisper_encoder", Whisper_encoder(model)) + decoder_module_cross_attention = forge.PyTorchModule("Whisper_decoder_with_ca", Whisper_decoder(model)) + decoder_module_no_cross_attention = forge.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) - sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") sample_audio = sample["audio"]["array"] inputs = feature_extractor(sample_audio, return_tensors="pt") diff --git a/pybuda/test/model_demos/models/wideresnet.py b/forge/test/model_demos/models/wideresnet.py similarity index 71% rename from pybuda/test/model_demos/models/wideresnet.py rename to forge/test/model_demos/models/wideresnet.py index 68ff26bfd..1b0413b41 100644 --- a/pybuda/test/model_demos/models/wideresnet.py +++ b/forge/test/model_demos/models/wideresnet.py @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge from test.utils 
import download_model import timm import torch @@ -14,17 +14,17 @@ def generate_model_wideresnet_imgcls_pytorch(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model framework_model = download_model(torch.hub.load,"pytorch/vision:v0.10.0", variant, pretrained=True) framework_model.eval() model_name = f"pt_{variant}" - tt_model = pybuda.PyTorchModule(model_name,framework_model) + tt_model = forge.PyTorchModule(model_name,framework_model) # STEP 3: Prepare input url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") @@ -44,16 +44,16 @@ def generate_model_wideresnet_imgcls_pytorch(test_device, variant): def generate_model_wideresnet_imgcls_timm(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = (pybuda.config._get_global_compiler_config()) + # STEP 1: Set Forge configuration parameters + compiler_cfg = (forge.config._get_global_compiler_config()) compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model framework_model = download_model(timm.create_model, variant, pretrained=True) framework_model.eval() - tt_model = pybuda.PyTorchModule( f"pt_{variant}_timm", framework_model) + tt_model = forge.PyTorchModule( f"pt_{variant}_timm", framework_model) # STEP 3: Prepare input config = resolve_data_config({}, model=framework_model) diff --git a/pybuda/test/model_demos/models/xception.py b/forge/test/model_demos/models/xception.py similarity index 69% rename from pybuda/test/model_demos/models/xception.py rename to forge/test/model_demos/models/xception.py index 65ef958a8..e8c98066a 100644 --- a/pybuda/test/model_demos/models/xception.py +++ b/forge/test/model_demos/models/xception.py @@ -2,32 +2,32 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import urllib from test.utils import download_model from PIL import Image import timm from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice def generate_model_xception_imgcls_timm(test_device, variant): - # STEP 1: Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # STEP 1: Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b if variant == "xception" and test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.balancer_policy = "CNN" else: compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + 
os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - # STEP 2: Create PyBuda module from PyTorch model + # STEP 2: Create Forge module from PyTorch model framework_model = download_model(timm.create_model, variant, pretrained=True) framework_model.eval() - tt_model = pybuda.PyTorchModule(f"pt_{variant}_timm", framework_model) + tt_model = forge.PyTorchModule(f"pt_{variant}_timm", framework_model) # STEP 3: Prepare input config = resolve_data_config({}, model=framework_model) diff --git a/pybuda/test/model_demos/utils/__init__.py b/forge/test/model_demos/utils/__init__.py similarity index 100% rename from pybuda/test/model_demos/utils/__init__.py rename to forge/test/model_demos/utils/__init__.py diff --git a/pybuda/test/model_demos/utils/cnn/onnx/images/carvana.jpg b/forge/test/model_demos/utils/cnn/onnx/images/carvana.jpg similarity index 100% rename from pybuda/test/model_demos/utils/cnn/onnx/images/carvana.jpg rename to forge/test/model_demos/utils/cnn/onnx/images/carvana.jpg diff --git a/pybuda/test/model_demos/utils/cnn/pytorch/images/car.jpg b/forge/test/model_demos/utils/cnn/pytorch/images/car.jpg similarity index 100% rename from pybuda/test/model_demos/utils/cnn/pytorch/images/car.jpg rename to forge/test/model_demos/utils/cnn/pytorch/images/car.jpg diff --git a/pybuda/test/model_demos/utils/cnn/pytorch/images/girl.png b/forge/test/model_demos/utils/cnn/pytorch/images/girl.png similarity index 100% rename from pybuda/test/model_demos/utils/cnn/pytorch/images/girl.png rename to forge/test/model_demos/utils/cnn/pytorch/images/girl.png diff --git a/pybuda/test/model_demos/utils/cnn/pytorch/images/img.jpeg b/forge/test/model_demos/utils/cnn/pytorch/images/img.jpeg similarity index 100% rename from pybuda/test/model_demos/utils/cnn/pytorch/images/img.jpeg rename to forge/test/model_demos/utils/cnn/pytorch/images/img.jpeg diff --git a/pybuda/test/model_demos/utils/cnn/pytorch/saved/efficientnet_lite/src_efficientnet_lite.py b/forge/test/model_demos/utils/cnn/pytorch/saved/efficientnet_lite/src_efficientnet_lite.py similarity index 100% rename from pybuda/test/model_demos/utils/cnn/pytorch/saved/efficientnet_lite/src_efficientnet_lite.py rename to forge/test/model_demos/utils/cnn/pytorch/saved/efficientnet_lite/src_efficientnet_lite.py diff --git a/pybuda/test/model_demos/utils/cnn/pytorch/saved/mobilenetv1_ssd/vision/nn/mobilenet.py b/forge/test/model_demos/utils/cnn/pytorch/saved/mobilenetv1_ssd/vision/nn/mobilenet.py similarity index 100% rename from pybuda/test/model_demos/utils/cnn/pytorch/saved/mobilenetv1_ssd/vision/nn/mobilenet.py rename to forge/test/model_demos/utils/cnn/pytorch/saved/mobilenetv1_ssd/vision/nn/mobilenet.py diff --git a/pybuda/test/model_demos/utils/cnn/pytorch/saved/yolo_v3/holli_src/license b/forge/test/model_demos/utils/cnn/pytorch/saved/yolo_v3/holli_src/license similarity index 100% rename from pybuda/test/model_demos/utils/cnn/pytorch/saved/yolo_v3/holli_src/license rename to forge/test/model_demos/utils/cnn/pytorch/saved/yolo_v3/holli_src/license diff --git a/pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt b/forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt similarity index 100% rename from pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt rename to forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt diff --git a/pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0001.pt b/forge/test/model_demos/utils/nlp/pytorch/1272-128104-0001.pt similarity index 100% rename from 
pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0001.pt rename to forge/test/model_demos/utils/nlp/pytorch/1272-128104-0001.pt diff --git a/pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0002.pt b/forge/test/model_demos/utils/nlp/pytorch/1272-128104-0002.pt similarity index 100% rename from pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0002.pt rename to forge/test/model_demos/utils/nlp/pytorch/1272-128104-0002.pt diff --git a/pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0003.pt b/forge/test/model_demos/utils/nlp/pytorch/1272-128104-0003.pt similarity index 100% rename from pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0003.pt rename to forge/test/model_demos/utils/nlp/pytorch/1272-128104-0003.pt diff --git a/pybuda/test/module_utils.py b/forge/test/module_utils.py similarity index 81% rename from pybuda/test/module_utils.py rename to forge/test/module_utils.py index 1710b272b..9d7e28069 100644 --- a/pybuda/test/module_utils.py +++ b/forge/test/module_utils.py @@ -5,13 +5,13 @@ # Container for various modules used for testing # -import pybuda -from pybuda import PyBudaModule -from pybuda.op.nn import Conv2dModule -from pybuda.op.eval.sparse_utils import calculate_conv2d_output_dimensions, conv2d_padding_to_canonical +import forge +from forge import ForgeModule +from forge.op.nn import Conv2dModule +from forge.op.eval.sparse_utils import calculate_conv2d_output_dimensions, conv2d_padding_to_canonical -class Conv2dTModule(PyBudaModule): +class Conv2dTModule(ForgeModule): """ ConvTModule """ @@ -62,7 +62,7 @@ def forward(self, act): y = self.conv(act) if self.add_reshape_transpose_to_end: - y = pybuda.op.Reshape("", y, (1, 1, self.out_channels, outy * outx)) - y = pybuda.op.Transpose("", y, 2, 3) + y = forge.op.Reshape("", y, (1, 1, self.out_channels, outy * outx)) + y = forge.op.Transpose("", y, 2, 3) return y diff --git a/pybuda/test/nightly/cnn/building_blocks/test_building_blocks.py b/forge/test/nightly/cnn/building_blocks/test_building_blocks.py similarity index 96% rename from pybuda/test/nightly/cnn/building_blocks/test_building_blocks.py rename to forge/test/nightly/cnn/building_blocks/test_building_blocks.py index c3c00b666..c07a0e7e1 100644 --- a/pybuda/test/nightly/cnn/building_blocks/test_building_blocks.py +++ b/forge/test/nightly/cnn/building_blocks/test_building_blocks.py @@ -48,7 +48,7 @@ ] def get_path_to_test(test_name): - """Util function which returns path from pybuda repo root to the function pytest receives.""" + """Util function which returns path from forge repo root to the function pytest receives.""" def normalize_string(string): return string.decode("utf-8").strip() diff --git a/pybuda/test/nightly/cnn/building_blocks/test_mobilenet.py b/forge/test/nightly/cnn/building_blocks/test_mobilenet.py similarity index 97% rename from pybuda/test/nightly/cnn/building_blocks/test_mobilenet.py rename to forge/test/nightly/cnn/building_blocks/test_mobilenet.py index 971ee9079..0185c0def 100644 --- a/pybuda/test/nightly/cnn/building_blocks/test_mobilenet.py +++ b/forge/test/nightly/cnn/building_blocks/test_mobilenet.py @@ -9,14 +9,14 @@ import pytest from torch import nn -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendType, BackendDevice +from forge.config import _get_global_compiler_config +from forge.verify.backend import 
verify_module +from forge.verify.config import TestKind +from forge._C.backend_api import BackendType, BackendDevice class DepthwiseSeparableConv(nn.Sequential): @@ -184,7 +184,7 @@ def test_mobilenet_v1_depthwise_separable_conv( ) @pytest.mark.xfail(reason="XFAIL due to: " - "tenstorrent/pybuda#416") + "tenstorrent/forge#416") @pytest.mark.parametrize( "image_size, input_size_divider, in_channels_base, out_channels_base, " "stride, width_multiplier, arch", @@ -318,7 +318,7 @@ def test_mobilenet_v2_inverted_residual( ) @pytest.mark.xfail(reason="XFAIL due to: " - "tenstorrent/pybuda#417") + "tenstorrent/forge#417") @pytest.mark.parametrize( "image_size, input_size_divider, in_channels_base, out_channels_base, " "stride, width_multiplier, expansion_factor, arch", diff --git a/pybuda/test/nightly/cnn/building_blocks/test_resnet.py b/forge/test/nightly/cnn/building_blocks/test_resnet.py similarity index 96% rename from pybuda/test/nightly/cnn/building_blocks/test_resnet.py rename to forge/test/nightly/cnn/building_blocks/test_resnet.py index 5b083be23..ece81978f 100644 --- a/pybuda/test/nightly/cnn/building_blocks/test_resnet.py +++ b/forge/test/nightly/cnn/building_blocks/test_resnet.py @@ -9,11 +9,11 @@ import pytest import torch.nn as nn -from pybuda.config import _get_global_compiler_config -from pybuda import PyTorchModule, VerifyConfig -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendType, BackendDevice +from forge.config import _get_global_compiler_config +from forge import PyTorchModule, VerifyConfig +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge._C.backend_api import BackendType, BackendDevice class BasicResidualBlock(nn.Module): @@ -222,7 +222,7 @@ def test_resnet_basic_block(input_size, in_channels, out_channels, stride, arch) ) @pytest.mark.xfail(reason="XFAIL due to: " - "tenstorrent/pybuda#369") + "tenstorrent/forge#369") @pytest.mark.parametrize( "input_size, in_channels, out_channels, stride, arch", [ @@ -300,9 +300,9 @@ def test_resnet_bottleneck_block(input_size, in_channels, out_channels, stride, ) @pytest.mark.xfail(reason="XFAIL due to: " - "tenstorrent/pybuda#369, " - "tenstorrent/pybuda#416, " - "tenstorrent/pybuda#417") + "tenstorrent/forge#369, " + "tenstorrent/forge#416, " + "tenstorrent/forge#417") @pytest.mark.parametrize( "input_size, in_channels, out_channels, stride, arch", [ @@ -382,9 +382,9 @@ def test_resnext_bottleneck_block(input_size, in_channels, out_channels, stride, ) @pytest.mark.xfail(reason="XFAIL due to: " - "tenstorrent/pybuda#369, " - "tenstorrent/pybuda#416, " - "tenstorrent/pybuda#417") + "tenstorrent/forge#369, " + "tenstorrent/forge#416, " + "tenstorrent/forge#417") @pytest.mark.parametrize( "input_size, in_channels, out_channels, stride, arch", [ diff --git a/pybuda/test/nightly/cnn/building_blocks/test_unet.py b/forge/test/nightly/cnn/building_blocks/test_unet.py similarity index 97% rename from pybuda/test/nightly/cnn/building_blocks/test_unet.py rename to forge/test/nightly/cnn/building_blocks/test_unet.py index a00fac1a2..492d56d2f 100644 --- a/pybuda/test/nightly/cnn/building_blocks/test_unet.py +++ b/forge/test/nightly/cnn/building_blocks/test_unet.py @@ -10,12 +10,12 @@ import torch import torch.nn as nn -from pybuda import PyTorchModule, VerifyConfig -from pybuda.module import PyTorchModule -from pybuda.config import _get_global_compiler_config -from pybuda.verify.backend import verify_module 
-from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendType, BackendDevice +from forge import PyTorchModule, VerifyConfig +from forge.module import PyTorchModule +from forge.config import _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge._C.backend_api import BackendType, BackendDevice class DoubleConvBatchnormRelu(nn.Module): """2x(Conv + BatchNorm + ReLU) block. @@ -197,7 +197,7 @@ def test_unet_double_conv_batchnorm_relu(input_size, in_channels, out_channels, ) @pytest.mark.xfail(reason="XFAIL due to: " - "tenstorrent/pybuda#369") + "tenstorrent/forge#369") @pytest.mark.parametrize( "input_size, in_channels, out_channels, arch", [ @@ -279,7 +279,7 @@ def test_unet_double_conv_relu(input_size, in_channels, out_channels, arch): ) @pytest.mark.xfail(reason="XFAIL due to: " - "tenstorrent/pybuda#422") + "tenstorrent/forge#422") @pytest.mark.parametrize( "input_size, in_channels, out_channels, arch", [(512, 256, 512, BackendDevice.Wormhole_B0)] diff --git a/pybuda/test/nightly/cnn/building_blocks/test_vit.py b/forge/test/nightly/cnn/building_blocks/test_vit.py similarity index 96% rename from pybuda/test/nightly/cnn/building_blocks/test_vit.py rename to forge/test/nightly/cnn/building_blocks/test_vit.py index 519e3c962..8f6f91f46 100644 --- a/pybuda/test/nightly/cnn/building_blocks/test_vit.py +++ b/forge/test/nightly/cnn/building_blocks/test_vit.py @@ -25,14 +25,14 @@ import pytest from transformers import ViTModel, ViTConfig -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendType, BackendDevice +from forge.config import _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge._C.backend_api import BackendType, BackendDevice # TODO probably needs to be broken down into smaller building blocks since encoder itself is a @@ -102,7 +102,7 @@ def test_vit_encoder( ) @pytest.mark.xfail(reason="XFAIL due to: " - "tenstorrent/pybuda#424") + "tenstorrent/forge#424") @pytest.mark.parametrize( "image_size, num_channels, patch_size, num_hidden_layers, num_attention_heads, intermed_expansion_factor, arch", [ diff --git a/pybuda/test/nn/__init__.py b/forge/test/nn/__init__.py similarity index 100% rename from pybuda/test/nn/__init__.py rename to forge/test/nn/__init__.py diff --git a/pybuda/test/nn/architectures/cnn/__init__.py b/forge/test/nn/architectures/cnn/__init__.py similarity index 100% rename from pybuda/test/nn/architectures/cnn/__init__.py rename to forge/test/nn/architectures/cnn/__init__.py diff --git a/pybuda/test/nn/architectures/cnn/resnet/__init__.py b/forge/test/nn/architectures/cnn/resnet/__init__.py similarity index 100% rename from pybuda/test/nn/architectures/cnn/resnet/__init__.py rename to forge/test/nn/architectures/cnn/resnet/__init__.py diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/__init__.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/__init__.py similarity index 100% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/__init__.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/__init__.py diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/__init__.py 
b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/__init__.py similarity index 100% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/__init__.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/__init__.py diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/basic.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/basic.py similarity index 83% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/basic.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/basic.py index 1388e702a..1ef7d24f9 100644 --- a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/basic.py +++ b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/basic.py @@ -6,15 +6,15 @@ # # from turtle import forward -import pybuda -import pybuda.op +import forge +import forge.op -from pybuda import PyBudaModule -from pybuda.op.nn import Conv2dModule +from forge import ForgeModule +from forge.op.nn import Conv2dModule -class TestBasicBlock(PyBudaModule): +class TestBasicBlock(ForgeModule): def __init__( self, @@ -50,14 +50,14 @@ def forward(self, activations): # Left side of the block, two convolutional layers with relu conv1 = self.conv1(activations) - relu1 = pybuda.op.Relu("relu1", conv1) + relu1 = forge.op.Relu("relu1", conv1) conv2 = self.conv2(relu1) # Right side of the block, just identity tensor, activations # added to convolution from the left side - add1 = pybuda.op.Add("out", conv2, activations) + add1 = forge.op.Add("out", conv2, activations) # Sum of outputs from the left and right sides with applied relu - relu2 = pybuda.op.Relu("relu2", add1) + relu2 = forge.op.Relu("relu2", add1) return relu2 \ No newline at end of file diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/conftest.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/conftest.py similarity index 100% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/conftest.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/conftest.py diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic.py similarity index 92% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic.py index 52e0b0b9c..89501f489 100644 --- a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic.py +++ b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic.py @@ -11,10 +11,10 @@ import torch import numpy as np -import pybuda -import pybuda.op -from pybuda import Tensor -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import Tensor +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . 
import TestBasicBlock @@ -76,7 +76,7 @@ def test_basic_block( requires_grad=True ) ) - _, _, _, outputs, _ = pybuda_compile( + _, _, _, outputs, _ = forge_compile( tt0, "conv2d", activations, diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic_single.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic_single.py similarity index 83% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic_single.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic_single.py index ece726bf4..68a772477 100644 --- a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic_single.py +++ b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_basic_single.py @@ -9,23 +9,23 @@ # import torch -# import pybuda -# import pybuda.op +# import forge +# import forge.op -# from pybuda import ( -# PyBudaModule, +# from forge import ( +# ForgeModule, # TTDevice, # TTDeviceType, # Tensor, -# pybuda_compile, +# forge_compile, # CompilerConfig, # VerifyConfig, # ) -# from pybuda.op.nn import Conv2dModule -# from pybuda.op.convolution import Conv2d -# from pybuda.utils import align_up_tile -# from pybuda.op.eval import compare_tensor_to_golden +# from forge.op.nn import Conv2dModule +# from forge.op.convolution import Conv2d +# from forge.utils import align_up_tile +# from forge.op.eval import compare_tensor_to_golden # from . import TestBasicBlock @@ -71,7 +71,7 @@ # requires_grad=True # ) # ) -# _, _, _, outputs, _ = pybuda_compile( +# _, _, _, outputs, _ = forge_compile( # tt0, # "conv2d", # activations, diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_command.sh b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_command.sh similarity index 100% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_command.sh rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_command.sh diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/__init__.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/__init__.py similarity index 100% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/__init__.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/__init__.py diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/bottleneck.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/bottleneck.py similarity index 85% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/bottleneck.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/bottleneck.py index b0ce884d8..123a083da 100644 --- a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/bottleneck.py +++ b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/bottleneck.py @@ -5,14 +5,14 @@ # Test for Resnet Bottleneck Block # -import pybuda -import pybuda.op -from pybuda import PyBudaModule +import forge +import forge.op +from forge import ForgeModule -from pybuda.op.nn import Conv2dModule +from forge.op.nn import Conv2dModule -class TestBottleneckBlock(PyBudaModule): +class TestBottleneckBlock(ForgeModule): def __init__( self, @@ -66,16 +66,16 @@ def forward(self, activations): # Left side of the block, three convolutional layers with relu conv1 = self.conv1(activations) - relu1 = pybuda.op.Relu("relu1", conv1) + relu1 = forge.op.Relu("relu1", conv1) conv2 = self.conv2(relu1) - 
relu2 = pybuda.op.Relu("relu2", conv2) + relu2 = forge.op.Relu("relu2", conv2) conv3 = self.conv3(relu2) # Right side of the block, just identity tensor, activations # added to convolution from the left side - add1 = pybuda.op.Add("out", conv3, activations) + add1 = forge.op.Add("out", conv3, activations) # Sum of outputs from the left and right sides with applied relu - relu3 = pybuda.op.Relu("relu3", add1) + relu3 = forge.op.Relu("relu3", add1) return relu3 \ No newline at end of file diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/conftest.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/conftest.py similarity index 100% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/conftest.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/conftest.py diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck.py similarity index 92% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck.py index afcbadebf..2f654386b 100644 --- a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck.py +++ b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck.py @@ -11,10 +11,10 @@ import torch import numpy as np -import pybuda -import pybuda.op -from pybuda import Tensor -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import Tensor +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . import TestBottleneckBlock @@ -76,7 +76,7 @@ def test_bottleneck_block( requires_grad=True ) ) - _, _, _, outputs, _ = pybuda_compile( + _, _, _, outputs, _ = forge_compile( tt0, "conv2d", activations, diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck_single.py b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck_single.py similarity index 84% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck_single.py rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck_single.py index 843b2e9ae..057ac4044 100644 --- a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck_single.py +++ b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_bottleneck_single.py @@ -9,21 +9,21 @@ # import torch -# import pybuda -# from pybuda import ( -# PyBudaModule, +# import forge +# from forge import ( +# ForgeModule, # TTDevice, # TTDeviceType, # Tensor, -# pybuda_compile, +# forge_compile, # CompilerConfig, # VerifyConfig, # ) -# from pybuda.op.nn import Conv2dModule -# from pybuda.op.convolution import Conv2d -# from pybuda.utils import align_up_tile -# from pybuda.op.eval import compare_tensor_to_golden -# import pybuda.op +# from forge.op.nn import Conv2dModule +# from forge.op.convolution import Conv2d +# from forge.utils import align_up_tile +# from forge.op.eval import compare_tensor_to_golden +# import forge.op # from . 
import TestBottleneckBlock @@ -75,7 +75,7 @@ # requires_grad=True # ) # ) -# _, _, _, outputs, _ = pybuda_compile( +# _, _, _, outputs, _ = forge_compile( # tt0, # "conv2d", # activations, diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_command.sh b/forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_command.sh similarity index 100% rename from pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_command.sh rename to forge/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_command.sh diff --git a/pybuda/test/nn/functional/__init__.py b/forge/test/nn/functional/__init__.py similarity index 100% rename from pybuda/test/nn/functional/__init__.py rename to forge/test/nn/functional/__init__.py diff --git a/pybuda/test/nn/functional/softmax/__init__.py b/forge/test/nn/functional/softmax/__init__.py similarity index 100% rename from pybuda/test/nn/functional/softmax/__init__.py rename to forge/test/nn/functional/softmax/__init__.py diff --git a/pybuda/test/nn/functional/softmax/models/__init__.py b/forge/test/nn/functional/softmax/models/__init__.py similarity index 100% rename from pybuda/test/nn/functional/softmax/models/__init__.py rename to forge/test/nn/functional/softmax/models/__init__.py diff --git a/pybuda/test/nn/functional/softmax/models/model_0.py b/forge/test/nn/functional/softmax/models/model_0.py similarity index 83% rename from pybuda/test/nn/functional/softmax/models/model_0.py rename to forge/test/nn/functional/softmax/models/model_0.py index a8e0e8029..84a3dcad4 100644 --- a/pybuda/test/nn/functional/softmax/models/model_0.py +++ b/forge/test/nn/functional/softmax/models/model_0.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 0 -# Softmax operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Softmax operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Uniform, Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaSoftmaxTest(PyBudaModule): +class BudaSoftmaxTest(ForgeModule): """ Buda Test 0 @@ -50,7 +50,7 @@ def __init__( # print(f"shape: {self.shape}, dim: {self.dim}") - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) input = BudaSoftmaxTest.INPUTS_DISTRIBUTION( BudaSoftmaxTest.INPUTS_RANGE_MIN, @@ -66,7 +66,7 @@ def __init__( def forward(self, x): # Layer 2 - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) # Layer 3 sm1 = nn.Softmax("sm1", mul, dim=self.dim, stable=self.stable) diff --git a/pybuda/test/nn/functional/softmax/models/model_1.py b/forge/test/nn/functional/softmax/models/model_1.py similarity index 78% rename from pybuda/test/nn/functional/softmax/models/model_1.py rename to forge/test/nn/functional/softmax/models/model_1.py index 7252e3a8a..408e03936 100644 --- a/pybuda/test/nn/functional/softmax/models/model_1.py +++ b/forge/test/nn/functional/softmax/models/model_1.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# Softmax operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda 
architectures +# Softmax operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Uniform, Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaSoftmaxTest(PyBudaModule): +class BudaSoftmaxTest(ForgeModule): """ Buda Test 1 @@ -48,7 +48,7 @@ def __init__( self.dim = dim self.stable = stable - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) input = BudaSoftmaxTest.INPUTS_DISTRIBUTION( BudaSoftmaxTest.INPUTS_RANGE_MIN, @@ -64,7 +64,7 @@ def __init__( def forward(self, x): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x, self.train_param) + mul1 = forge.op.Multiply("mul1", x, self.train_param) # Layer 3 sm1 = nn.Softmax("sm1", x, dim=self.dim, stable=self.stable) @@ -72,11 +72,11 @@ def forward(self, x): sm3 = nn.Softmax("sm3", self.train_param, dim=self.dim, stable=self.stable) # Layer 4 - add1 = pybuda.op.Add("add1", sm1, sm2) - mul2 = pybuda.op.Multiply("mul2", sm2, sm3) + add1 = forge.op.Add("add1", sm1, sm2) + mul2 = forge.op.Multiply("mul2", sm2, sm3) # Layer 5 - mul3 = pybuda.op.Multiply("mul3", add1, mul2) + mul3 = forge.op.Multiply("mul3", add1, mul2) sm4 = nn.Softmax("sm4", mul3, dim=self.dim, stable=self.stable) return sm4 diff --git a/pybuda/test/nn/functional/softmax/models/model_2.py b/forge/test/nn/functional/softmax/models/model_2.py similarity index 69% rename from pybuda/test/nn/functional/softmax/models/model_2.py rename to forge/test/nn/functional/softmax/models/model_2.py index 9b133526b..b5e70bd3e 100644 --- a/pybuda/test/nn/functional/softmax/models/model_2.py +++ b/forge/test/nn/functional/softmax/models/model_2.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# Softmax operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Softmax operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Uniform, Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaSoftmaxTest(PyBudaModule): +class BudaSoftmaxTest(ForgeModule): """ Buda Test 2 @@ -48,8 +48,8 @@ def __init__( self.dim = dim self.stable = stable - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for i in range(2): @@ -68,37 +68,37 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - add1 = pybuda.op.Add("add1", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + add1 = forge.op.Add("add1", x2, self.train_param2) # Layer 3 sm1 = nn.Softmax("sm1", self.train_param1, dim=self.dim, stable=self.stable) - mul2 = pybuda.op.Multiply("mul2", add1, sm1) + mul2 = forge.op.Multiply("mul2", add1, sm1) sm2 = nn.Softmax("sm2", self.train_param2, dim=self.dim, 
stable=self.stable) # Layer 4 - add2 = pybuda.op.Add("add2", x1, mul1) - mul3 = pybuda.op.Multiply("mul3", sm1, add2) - mul4 = pybuda.op.Multiply("mul4", mul2, sm2) + add2 = forge.op.Add("add2", x1, mul1) + mul3 = forge.op.Multiply("mul3", sm1, add2) + mul4 = forge.op.Multiply("mul4", mul2, sm2) # Layer 5 sm3 = nn.Softmax("sm3", add2, dim=self.dim, stable=self.stable) sm4 = nn.Softmax("sm4", mul3, dim=self.dim, stable=self.stable) sm5 = nn.Softmax("sm5", mul2, dim=self.dim, stable=self.stable) - mul5 = pybuda.op.Multiply("mul5", sm5, self.train_param2) + mul5 = forge.op.Multiply("mul5", sm5, self.train_param2) # Layer 6 - mul6 = pybuda.op.Multiply("mul6", sm3, sm4) - add3 = pybuda.op.Add("add3", mul5, sm2) - mul7 = pybuda.op.Multiply("mul7", sm5, add3) + mul6 = forge.op.Multiply("mul6", sm3, sm4) + add3 = forge.op.Add("add3", mul5, sm2) + mul7 = forge.op.Multiply("mul7", sm5, add3) # Layer 7 sm6 = nn.Softmax("sm6", mul6, dim=self.dim, stable=self.stable) sm7 = nn.Softmax("sm7", mul7, dim=self.dim, stable=self.stable) # Layer 8 - add4 = pybuda.op.Add("add4", sm6, sm7) - add5 = pybuda.op.Add("add5", mul4, add4) + add4 = forge.op.Add("add4", sm6, sm7) + add5 = forge.op.Add("add5", mul4, add4) return add5 diff --git a/pybuda/test/nn/functional/softmax/models/model_3.py b/forge/test/nn/functional/softmax/models/model_3.py similarity index 74% rename from pybuda/test/nn/functional/softmax/models/model_3.py rename to forge/test/nn/functional/softmax/models/model_3.py index 0ebd945bd..23f44c582 100644 --- a/pybuda/test/nn/functional/softmax/models/model_3.py +++ b/forge/test/nn/functional/softmax/models/model_3.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 3 -# Softmax operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Softmax operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Uniform, Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaSoftmaxTest(PyBudaModule): +class BudaSoftmaxTest(ForgeModule): """ Buda Test 3 @@ -48,8 +48,8 @@ def __init__( self.dim = dim self.stable = stable - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for i in range(2): @@ -68,8 +68,8 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) # Layer 3 sm1 = nn.Softmax("sm1", mul1, dim=self.dim, stable=self.stable) @@ -79,23 +79,23 @@ def forward(self, x1, x2): # Layer 4 sm5 = nn.Softmax("sm5", sm1, dim=self.dim, stable=self.stable) - add1 = pybuda.op.Add("add1", sm2, sm3) + add1 = forge.op.Add("add1", sm2, sm3) sm6 = nn.Softmax("sm6", sm4, dim=self.dim, stable=self.stable) # Layer 5 - mul3 = pybuda.op.Multiply("mul3", sm5, add1) - add2 = pybuda.op.Add("add2", self.train_param2, sm6) + mul3 = forge.op.Multiply("mul3", sm5, add1) + add2 = forge.op.Add("add2", 
self.train_param2, sm6) # Layer 6 - add3 = pybuda.op.Add("add3", mul3, add1) - mul4 = pybuda.op.Multiply("mul4", add2, sm4) + add3 = forge.op.Add("add3", mul3, add1) + mul4 = forge.op.Multiply("mul4", add2, sm4) # Layer 7 sm7 = nn.Softmax("sm7", add3, dim=self.dim, stable=self.stable) sm8 = nn.Softmax("sm8", mul4, dim=self.dim, stable=self.stable) # Layer 8 - mul5 = pybuda.op.Multiply("mul5", sm7, sm8) + mul5 = forge.op.Multiply("mul5", sm7, sm8) return mul5 diff --git a/pybuda/test/nn/functional/softmax/models/model_4.py b/forge/test/nn/functional/softmax/models/model_4.py similarity index 68% rename from pybuda/test/nn/functional/softmax/models/model_4.py rename to forge/test/nn/functional/softmax/models/model_4.py index b0f9cfdbe..37d32f089 100644 --- a/pybuda/test/nn/functional/softmax/models/model_4.py +++ b/forge/test/nn/functional/softmax/models/model_4.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 4 -# Softmax operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Softmax operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Uniform, Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaSoftmaxTest(PyBudaModule): +class BudaSoftmaxTest(ForgeModule): """ Buda Test 4 @@ -48,9 +48,9 @@ def __init__( self.dim = dim self.stable=stable - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for i in range(3): @@ -69,9 +69,9 @@ def __init__( def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) # Layer 3 sm1 = nn.Softmax("sm1", mul1, dim=self.dim, stable=self.stable) @@ -79,9 +79,9 @@ def forward(self, x1, x2, x3): sm3 = nn.Softmax("sm3", mul3, dim=self.dim, stable=self.stable) # Layer 4 - mul4 = pybuda.op.Multiply("mul4", sm1, self.train_param1) - mul5 = pybuda.op.Multiply("mul5", sm2, self.train_param2) - mul6 = pybuda.op.Multiply("mul6", sm3, self.train_param3) + mul4 = forge.op.Multiply("mul4", sm1, self.train_param1) + mul5 = forge.op.Multiply("mul5", sm2, self.train_param2) + mul6 = forge.op.Multiply("mul6", sm3, self.train_param3) # Layer 5 sm4 = nn.Softmax("sm4", mul4, dim=self.dim, stable=self.stable) @@ -89,13 +89,13 @@ def forward(self, x1, x2, x3): sm6 = nn.Softmax("sm6", mul6, dim=self.dim, stable=self.stable) # Layer 6 - add1 = pybuda.op.Add("add1", sm4, self.train_param1) - add2 = pybuda.op.Add("add2", sm5, self.train_param2) - add3 = pybuda.op.Add("add3", sm6, self.train_param3) + add1 = forge.op.Add("add1", sm4, self.train_param1) + add2 = forge.op.Add("add2", sm5, self.train_param2) 
+ add3 = forge.op.Add("add3", sm6, self.train_param3) # Layer 7 - mul7 = pybuda.op.Multiply("mul7", add1, sm2) - mul8 = pybuda.op.Multiply("mul8", add2, self.train_param2) + mul7 = forge.op.Multiply("mul7", add1, sm2) + mul8 = forge.op.Multiply("mul8", add2, self.train_param2) # Layer 8 sm7 = nn.Softmax("sm7", mul7, dim=self.dim, stable=self.stable) diff --git a/pybuda/test/nn/functional/softmax/models/model_5.py b/forge/test/nn/functional/softmax/models/model_5.py similarity index 70% rename from pybuda/test/nn/functional/softmax/models/model_5.py rename to forge/test/nn/functional/softmax/models/model_5.py index fd7d45a89..9d13c63c0 100644 --- a/pybuda/test/nn/functional/softmax/models/model_5.py +++ b/forge/test/nn/functional/softmax/models/model_5.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 5 -# Softmax operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Softmax operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Uniform, Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaSoftmaxTest(PyBudaModule): +class BudaSoftmaxTest(ForgeModule): """ Buda Test 5 @@ -48,9 +48,9 @@ def __init__( self.dim = dim self.stable=stable - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for i in range(3): @@ -69,9 +69,9 @@ def __init__( def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param2) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param1) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param2) + mul2 = forge.op.Multiply("mul2", x2, self.train_param1) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) sm1 = nn.Softmax("sm1", mul2, dim=self.dim, stable=self.stable) sm2 = nn.Softmax("sm2", mul1, dim=self.dim, stable=self.stable) @@ -79,9 +79,9 @@ def forward(self, x1, x2, x3): sm4 = nn.Softmax("sm4", mul2, dim=self.dim, stable=self.stable) # Layer 3 - add1 = pybuda.op.Add("add1", self.train_param1, sm2) - add2 = pybuda.op.Add("add2", sm4, sm1) - add3 = pybuda.op.Add("add3", sm3, x3) + add1 = forge.op.Add("add1", self.train_param1, sm2) + add2 = forge.op.Add("add2", sm4, sm1) + add3 = forge.op.Add("add3", sm3, x3) sm5 = nn.Softmax("sm5", add2, dim=self.dim, stable=self.stable) sm6 = nn.Softmax("sm6", add1, dim=self.dim, stable=self.stable) @@ -89,9 +89,9 @@ def forward(self, x1, x2, x3): sm8 = nn.Softmax("sm8", add2, dim=self.dim, stable=self.stable) # Layer 4 - mul4 = pybuda.op.Multiply("mul4", x2, sm7) - mul5 = pybuda.op.Multiply("mul5", sm6, sm8) - mul6 = pybuda.op.Multiply("mul6", sm5, self.train_param2) + mul4 = forge.op.Multiply("mul4", x2, sm7) + mul5 = forge.op.Multiply("mul5", sm6, sm8) + mul6 = forge.op.Multiply("mul6", sm5, self.train_param2) sm9 = nn.Softmax("sm9", mul5, dim=self.dim, stable=self.stable) 
sm10 = nn.Softmax("sm10", mul4, dim=self.dim, stable=self.stable) @@ -99,8 +99,8 @@ def forward(self, x1, x2, x3): sm12 = nn.Softmax("sm12", mul5, dim=self.dim, stable=self.stable) # Layer 5 - mul7 = pybuda.op.Multiply("mul7", sm9, sm11) - mul8 = pybuda.op.Multiply("mul8", sm10, sm12) + mul7 = forge.op.Multiply("mul7", sm9, sm11) + mul8 = forge.op.Multiply("mul8", sm10, sm12) return mul7, mul8 diff --git a/pybuda/test/nn/functional/softmax/test_softmax.py b/forge/test/nn/functional/softmax/test_softmax.py similarity index 93% rename from pybuda/test/nn/functional/softmax/test_softmax.py rename to forge/test/nn/functional/softmax/test_softmax.py index df7124c7b..94230b422 100644 --- a/pybuda/test/nn/functional/softmax/test_softmax.py +++ b/forge/test/nn/functional/softmax/test_softmax.py @@ -11,13 +11,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . import models -MODELS_PATH = "./pybuda/test/nn/functional/softmax/models" +MODELS_PATH = "./forge/test/nn/functional/softmax/models" SHAPE_NO = 5 SHAPE_SIZE_MIN = 2 @@ -80,7 +80,7 @@ def test_softmax( tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/nn/layers/normalization/__init__.py b/forge/test/nn/layers/normalization/__init__.py similarity index 100% rename from pybuda/test/nn/layers/normalization/__init__.py rename to forge/test/nn/layers/normalization/__init__.py diff --git a/pybuda/test/nn/layers/normalization/models/__init__.py b/forge/test/nn/layers/normalization/models/__init__.py similarity index 100% rename from pybuda/test/nn/layers/normalization/models/__init__.py rename to forge/test/nn/layers/normalization/models/__init__.py diff --git a/pybuda/test/nn/layers/normalization/models/model_1.py b/forge/test/nn/layers/normalization/models/model_1.py similarity index 71% rename from pybuda/test/nn/layers/normalization/models/model_1.py rename to forge/test/nn/layers/normalization/models/model_1.py index e49d66eee..4cd944140 100644 --- a/pybuda/test/nn/layers/normalization/models/model_1.py +++ b/forge/test/nn/layers/normalization/models/model_1.py @@ -8,15 +8,15 @@ import torch -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -35,9 +35,9 @@ def __init__( self.dim = dim self.epsilon = epsilon - self.gamma = pybuda.Parameter(*self.gamma_shape, requires_grad=True) - self.beta = pybuda.Parameter(*self.beta_shape, requires_grad=True) - self.train_param = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.gamma = forge.Parameter(*self.gamma_shape, requires_grad=True) + self.beta = forge.Parameter(*self.beta_shape, requires_grad=True) + self.train_param = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.input_shape))] self.set_parameter("train_param", torch.rand(*self.input_shape, requires_grad=True)) @@ -47,7 +47,7 @@ def __init__( def forward(self, x): # Layer 2 - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) # Layer 3 ln 
= nn.Layernorm("ln", mul, self.gamma, self.beta, self.dim, self.epsilon) diff --git a/pybuda/test/nn/layers/normalization/models/model_10.py b/forge/test/nn/layers/normalization/models/model_10.py similarity index 74% rename from pybuda/test/nn/layers/normalization/models/model_10.py rename to forge/test/nn/layers/normalization/models/model_10.py index fd9f7bf8b..f9cc1166d 100644 --- a/pybuda/test/nn/layers/normalization/models/model_10.py +++ b/forge/test/nn/layers/normalization/models/model_10.py @@ -8,15 +8,15 @@ import torch -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -36,15 +36,15 @@ def __init__( self.epsilon = epsilon self.gamma = { - f"gamma{i}": pybuda.Parameter(*self.gamma_shape, requires_grad=True) + f"gamma{i}": forge.Parameter(*self.gamma_shape, requires_grad=True) for i in range(1, 60) } self.beta = { - f"beta{i}": pybuda.Parameter(*self.beta_shape, requires_grad=True) + f"beta{i}": forge.Parameter(*self.beta_shape, requires_grad=True) for i in range(1, 60) } - self.train_param1 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [] for i in range(1, 3): @@ -135,13 +135,13 @@ def forward(self, x1, x2): ) # Layer 4 - mul1 = pybuda.op.Multiply("mul1", ln_layer3[0], ln_layer3[2]) - mul2 = pybuda.op.Multiply("mul2", ln_layer3[1], ln_layer3[3]) - mul3 = pybuda.op.Multiply("mul3", ln_layer3[2], ln_layer3[4]) - mul4 = pybuda.op.Multiply("mul4", ln_layer3[5], ln_layer3[8]) - mul5 = pybuda.op.Multiply("mul5", ln_layer3[6], ln_layer3[9]) - add1 = pybuda.op.Add("add1", ln_layer3[7], ln20) - add2 = pybuda.op.Add("add2", ln19, ln21) + mul1 = forge.op.Multiply("mul1", ln_layer3[0], ln_layer3[2]) + mul2 = forge.op.Multiply("mul2", ln_layer3[1], ln_layer3[3]) + mul3 = forge.op.Multiply("mul3", ln_layer3[2], ln_layer3[4]) + mul4 = forge.op.Multiply("mul4", ln_layer3[5], ln_layer3[8]) + mul5 = forge.op.Multiply("mul5", ln_layer3[6], ln_layer3[9]) + add1 = forge.op.Add("add1", ln_layer3[7], ln20) + add2 = forge.op.Add("add2", ln19, ln21) # Layer 5 input_layer5 = [mul1, mul2, mul3, mul4, mul5, add1, add2] @@ -171,13 +171,13 @@ def forward(self, x1, x2): ) # Layer 6 - mul6 = pybuda.op.Multiply("mul6", ln_layer5[0], ln_layer5[2]) - add3 = pybuda.op.Add("add3", ln_layer5[1], ln_layer5[3]) - mul7 = pybuda.op.Multiply("mul7", ln_layer5[4], ln_layer5[7]) - add4 = pybuda.op.Add("add4", ln_layer5[5], ln_layer5[8]) - mul8 = pybuda.op.Multiply("mul8", ln_layer5[6], ln_layer5[9]) - add5 = pybuda.op.Add("add5", ln_layer5[10], ln_layer5[12]) - mul9 = pybuda.op.Multiply("mul9", ln_layer5[11], ln_layer5[13]) + mul6 = forge.op.Multiply("mul6", ln_layer5[0], ln_layer5[2]) + add3 = forge.op.Add("add3", ln_layer5[1], ln_layer5[3]) + mul7 = forge.op.Multiply("mul7", ln_layer5[4], ln_layer5[7]) + add4 = forge.op.Add("add4", ln_layer5[5], ln_layer5[8]) + mul8 = forge.op.Multiply("mul8", ln_layer5[6], ln_layer5[9]) + add5 = forge.op.Add("add5", ln_layer5[10], ln_layer5[12]) + mul9 = forge.op.Multiply("mul9", ln_layer5[11], ln_layer5[13]) # Layer 7 input_layer7 = [mul6, add3, mul7, add4, mul8, add5, mul9] @@ -195,10 +195,10 @@ def 
forward(self, x1, x2): ) # Layer 8 - add6 = pybuda.op.Add("add6", ln_layer7[0], ln_layer7[1]) - add7 = pybuda.op.Add("add7", ln_layer7[2], ln_layer7[3]) - mul10 = pybuda.op.Multiply("mul10", ln_layer7[4], ln_layer7[5]) - mul11 = pybuda.op.Multiply("mul11", ln_layer7[5], ln_layer7[6]) + add6 = forge.op.Add("add6", ln_layer7[0], ln_layer7[1]) + add7 = forge.op.Add("add7", ln_layer7[2], ln_layer7[3]) + mul10 = forge.op.Multiply("mul10", ln_layer7[4], ln_layer7[5]) + mul11 = forge.op.Multiply("mul11", ln_layer7[5], ln_layer7[6]) # Layer 9 input_layer9 = [add6, ln_layer5[4], add7, ln_layer5[10], mul10, ln_layer5[13], mul11, ln_layer5[12]] @@ -216,11 +216,11 @@ def forward(self, x1, x2): ) # Layer 10 - mul12 = pybuda.op.Multiply("mul12", ln_layer9[0], ln_layer9[2]) - mul13 = pybuda.op.Multiply("mul13", ln_layer9[1], ln_layer9[3]) - mul14 = pybuda.op.Multiply("mul14", ln_layer9[4], ln_layer9[6]) - mul15 = pybuda.op.Multiply("mul15", ln_layer9[5], ln_layer9[7]) - mul16 = pybuda.op.Multiply("mul16", ln_layer7[1], ln_layer7[4]) + mul12 = forge.op.Multiply("mul12", ln_layer9[0], ln_layer9[2]) + mul13 = forge.op.Multiply("mul13", ln_layer9[1], ln_layer9[3]) + mul14 = forge.op.Multiply("mul14", ln_layer9[4], ln_layer9[6]) + mul15 = forge.op.Multiply("mul15", ln_layer9[5], ln_layer9[7]) + mul16 = forge.op.Multiply("mul16", ln_layer7[1], ln_layer7[4]) # Layer 11 input_layer11 = [mul16, mul12, mul13, mul14, mul15] @@ -238,9 +238,9 @@ def forward(self, x1, x2): ) # Layer 12 - add8 = pybuda.op.Add("add8", ln_layer11[1], ln_layer11[2]) - mul17 = pybuda.op.Multiply("mul17", ln_layer9[4], ln_layer7[6]) - add9 = pybuda.op.Add("add9", ln_layer11[3], ln_layer11[4]) + add8 = forge.op.Add("add8", ln_layer11[1], ln_layer11[2]) + mul17 = forge.op.Multiply("mul17", ln_layer9[4], ln_layer7[6]) + add9 = forge.op.Add("add9", ln_layer11[3], ln_layer11[4]) # Layer 13 ln56 = nn.Layernorm( @@ -267,7 +267,7 @@ def forward(self, x1, x2): self.epsilon ) - add10 = pybuda.op.Add("add10", ln_layer11[0], ln57) + add10 = forge.op.Add("add10", ln_layer11[0], ln57) # Layer 14 ln59 = nn.Layernorm("ln59", add10, self.gamma['gamma59'], self.beta['beta59'], self.dim, self.epsilon) diff --git a/pybuda/test/nn/layers/normalization/models/model_2.py b/forge/test/nn/layers/normalization/models/model_2.py similarity index 74% rename from pybuda/test/nn/layers/normalization/models/model_2.py rename to forge/test/nn/layers/normalization/models/model_2.py index adf4bd50f..42f6f4319 100644 --- a/pybuda/test/nn/layers/normalization/models/model_2.py +++ b/forge/test/nn/layers/normalization/models/model_2.py @@ -8,15 +8,15 @@ import torch -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -36,15 +36,15 @@ def __init__( self.epsilon = epsilon self.gamma = { - f"gamma{i}": pybuda.Parameter(*self.gamma_shape, requires_grad=True) + f"gamma{i}": forge.Parameter(*self.gamma_shape, requires_grad=True) for i in range(1, 4) } self.beta = { - f"beta{i}": pybuda.Parameter(*self.beta_shape, requires_grad=True) + f"beta{i}": forge.Parameter(*self.beta_shape, requires_grad=True) for i in range(1, 4) } - self.train_param1 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.input_shape, requires_grad=True) 
+ self.train_param2 = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [] for i in range(1, 3): @@ -57,8 +57,8 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) # Layer 3 ln1 = nn.Layernorm( @@ -79,7 +79,7 @@ def forward(self, x1, x2): ) # Layer 4 - mul3 = pybuda.op.Multiply("mul3", ln1, ln2) + mul3 = forge.op.Multiply("mul3", ln1, ln2) # Layer 5 ln3 = nn.Layernorm( diff --git a/pybuda/test/nn/layers/normalization/models/model_3.py b/forge/test/nn/layers/normalization/models/model_3.py similarity index 77% rename from pybuda/test/nn/layers/normalization/models/model_3.py rename to forge/test/nn/layers/normalization/models/model_3.py index 25e92675b..d775478fa 100644 --- a/pybuda/test/nn/layers/normalization/models/model_3.py +++ b/forge/test/nn/layers/normalization/models/model_3.py @@ -8,15 +8,15 @@ import torch -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -36,15 +36,15 @@ def __init__( self.epsilon = epsilon self.gamma = { - f"gamma{i}": pybuda.Parameter(*self.gamma_shape, requires_grad=True) + f"gamma{i}": forge.Parameter(*self.gamma_shape, requires_grad=True) for i in range(1, 12) } self.beta = { - f"beta{i}": pybuda.Parameter(*self.beta_shape, requires_grad=True) + f"beta{i}": forge.Parameter(*self.beta_shape, requires_grad=True) for i in range(1, 12) } - self.train_param1 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [] for i in range(1, 3): @@ -57,7 +57,7 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) ln1 = nn.Layernorm( "ln1", x2, @@ -84,13 +84,13 @@ def forward(self, x1, x2): self.dim, self.epsilon ) - mul2 = pybuda.op.Multiply("mul2", ln1, ln2) + mul2 = forge.op.Multiply("mul2", ln1, ln2) # Layer 4 - add1 = pybuda.op.Add("add1", ln3, x2) - add2 = pybuda.op.Add("add2", ln3, self.train_param2) - add3 = pybuda.op.Add("add3", x1, mul2) - add4 = pybuda.op.Add("add4", self.train_param2, mul2) + add1 = forge.op.Add("add1", ln3, x2) + add2 = forge.op.Add("add2", ln3, self.train_param2) + add3 = forge.op.Add("add3", x1, mul2) + add4 = forge.op.Add("add4", self.train_param2, mul2) # Layer 5 ln4 = nn.Layernorm( @@ -127,8 +127,8 @@ def forward(self, x1, x2): ) # Layer 6 - mul3 = pybuda.op.Multiply("mul3", ln4, ln5) - mul4 = pybuda.op.Multiply("mul4", ln6, ln7) + mul3 = forge.op.Multiply("mul3", ln4, ln5) + mul4 = forge.op.Multiply("mul4", ln6, ln7) # Layer 7 ln8 = nn.Layernorm( @@ -149,8 +149,8 @@ def forward(self, x1, x2): ) # Layer 8 - mul5 = pybuda.op.Multiply("mul5", ln8, ln6) - add5 = pybuda.op.Add("add5", ln5, ln9) + mul5 = forge.op.Multiply("mul5", ln8, ln6) + add5 = forge.op.Add("add5", ln5, ln9) # Layer 9 ln10 = nn.Layernorm( diff --git a/pybuda/test/nn/layers/normalization/models/model_4.py 
b/forge/test/nn/layers/normalization/models/model_4.py similarity index 75% rename from pybuda/test/nn/layers/normalization/models/model_4.py rename to forge/test/nn/layers/normalization/models/model_4.py index 570723c99..c1ab5f1f0 100644 --- a/pybuda/test/nn/layers/normalization/models/model_4.py +++ b/forge/test/nn/layers/normalization/models/model_4.py @@ -8,15 +8,15 @@ import torch -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -36,15 +36,15 @@ def __init__( self.epsilon = epsilon self.gamma = { - f"gamma{i}": pybuda.Parameter(*self.gamma_shape, requires_grad=True) + f"gamma{i}": forge.Parameter(*self.gamma_shape, requires_grad=True) for i in range(1, 9) } self.beta = { - f"beta{i}": pybuda.Parameter(*self.beta_shape, requires_grad=True) + f"beta{i}": forge.Parameter(*self.beta_shape, requires_grad=True) for i in range(1, 9) } - self.train_param1 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [] for i in range(1, 3): @@ -57,10 +57,10 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, x2) - mul2 = pybuda.op.Multiply("mul2", x1, self.train_param1) - mul3 = pybuda.op.Multiply("mul3", self.train_param1, self.train_param2) - mul4 = pybuda.op.Multiply("mul4", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, x2) + mul2 = forge.op.Multiply("mul2", x1, self.train_param1) + mul3 = forge.op.Multiply("mul3", self.train_param1, self.train_param2) + mul4 = forge.op.Multiply("mul4", x2, self.train_param2) # Layer 3 ln1 = nn.Layernorm( @@ -115,12 +115,12 @@ def forward(self, x1, x2): ) # Layer 5 - mul5 = pybuda.op.Multiply("mul5", ln5, ln2) - mul6 = pybuda.op.Multiply("mul6", ln6, ln4) + mul5 = forge.op.Multiply("mul5", ln5, ln2) + mul6 = forge.op.Multiply("mul6", ln6, ln4) # Layer 6 - add1 = pybuda.op.Add("add1", mul5, mul6) - add2 = pybuda.op.Add("add2", ln1, ln4) + add1 = forge.op.Add("add1", mul5, mul6) + add2 = forge.op.Add("add2", ln1, ln4) # Layer 7 ln7 = nn.Layernorm( diff --git a/pybuda/test/nn/layers/normalization/models/model_5.py b/forge/test/nn/layers/normalization/models/model_5.py similarity index 72% rename from pybuda/test/nn/layers/normalization/models/model_5.py rename to forge/test/nn/layers/normalization/models/model_5.py index 098aafca5..5c48f243c 100644 --- a/pybuda/test/nn/layers/normalization/models/model_5.py +++ b/forge/test/nn/layers/normalization/models/model_5.py @@ -8,15 +8,15 @@ import torch -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -36,16 +36,16 @@ def __init__( self.epsilon = epsilon self.gamma = { - f"gamma{i}": pybuda.Parameter(*self.gamma_shape, requires_grad=True) + f"gamma{i}": forge.Parameter(*self.gamma_shape, requires_grad=True) for i in range(1, 16) } self.beta = { - f"beta{i}": pybuda.Parameter(*self.beta_shape, requires_grad=True) + f"beta{i}": 
forge.Parameter(*self.beta_shape, requires_grad=True) for i in range(1, 16) } - self.train_param1 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [] for i in range(1, 4): @@ -58,9 +58,9 @@ def __init__( def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) # Layer 3 ln1 = nn.Layernorm( @@ -89,13 +89,13 @@ def forward(self, x1, x2, x3): ) # Layer 4 - add1 = pybuda.op.Add("add1", ln1, x2) - add2 = pybuda.op.Add("add2", ln2, x3) - add3 = pybuda.op.Add("add3", ln3, self.train_param2) + add1 = forge.op.Add("add1", ln1, x2) + add2 = forge.op.Add("add2", ln2, x3) + add3 = forge.op.Add("add3", ln3, self.train_param2) # Layer 5 - add4 = pybuda.op.Add("add4", add1, ln2) - add5 = pybuda.op.Add("add5", add2, self.train_param1) + add4 = forge.op.Add("add4", add1, ln2) + add5 = forge.op.Add("add5", add2, self.train_param1) ln4 = nn.Layernorm( "ln4", add3, @@ -106,9 +106,9 @@ def forward(self, x1, x2, x3): ) # Layer 6 - mul4 = pybuda.op.Multiply("mul4", ln1, add4) - mul5 = pybuda.op.Multiply("mul5", x2, add5) - add6 = pybuda.op.Add("add6", self.train_param2, ln4) + mul4 = forge.op.Multiply("mul4", ln1, add4) + mul5 = forge.op.Multiply("mul5", x2, add5) + add6 = forge.op.Add("add6", self.train_param2, ln4) # Layer 7 ln5 = nn.Layernorm( @@ -137,11 +137,11 @@ def forward(self, x1, x2, x3): ) # Layer 8 - add7 = pybuda.op.Add("add7", add2, ln6) - add8 = pybuda.op.Add("add8", add5, ln3) - mul6 = pybuda.op.Multiply("mul6", ln5, mul5) - mul7 = pybuda.op.Multiply("mul7", mul4, ln6) - mul8 = pybuda.op.Multiply("mul8", mul5, ln7) + add7 = forge.op.Add("add7", add2, ln6) + add8 = forge.op.Add("add8", add5, ln3) + mul6 = forge.op.Multiply("mul6", ln5, mul5) + mul7 = forge.op.Multiply("mul7", mul4, ln6) + mul8 = forge.op.Multiply("mul8", mul5, ln7) # Layer 9 ln8 = nn.Layernorm( @@ -186,8 +186,8 @@ def forward(self, x1, x2, x3): ) # Layer 10 - mul9 = pybuda.op.Multiply("mul9", ln8, ln9) - mul10 = pybuda.op.Multiply("mul10", ln10, ln11) + mul9 = forge.op.Multiply("mul9", ln8, ln9) + mul10 = forge.op.Multiply("mul10", ln10, ln11) # Layer 11 ln13 = nn.Layernorm( @@ -208,11 +208,11 @@ def forward(self, x1, x2, x3): ) # Layer 12 - add9 = pybuda.op.Add("add9", ln13, ln14) - add10 = pybuda.op.Add("add10", ln14, ln12) + add9 = forge.op.Add("add9", ln13, ln14) + add10 = forge.op.Add("add10", ln14, ln12) # Layer 13 - mul11 = pybuda.op.Multiply("mul11", add9, add10) + mul11 = forge.op.Multiply("mul11", add9, add10) # Layer 14 ln15 = nn.Layernorm( diff --git a/pybuda/test/nn/layers/normalization/models/model_6.py b/forge/test/nn/layers/normalization/models/model_6.py similarity index 77% rename from pybuda/test/nn/layers/normalization/models/model_6.py rename to forge/test/nn/layers/normalization/models/model_6.py index d2ec79386..e3e4c6b4e 100644 --- 
a/pybuda/test/nn/layers/normalization/models/model_6.py +++ b/forge/test/nn/layers/normalization/models/model_6.py @@ -9,15 +9,15 @@ import torch import pprint -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -37,15 +37,15 @@ def __init__( self.epsilon = epsilon self.gamma = { - f"gamma{i}": pybuda.Parameter(*self.gamma_shape, requires_grad=True) + f"gamma{i}": forge.Parameter(*self.gamma_shape, requires_grad=True) for i in range(1, 22) } self.beta = { - f"beta{i}": pybuda.Parameter(*self.beta_shape, requires_grad=True) + f"beta{i}": forge.Parameter(*self.beta_shape, requires_grad=True) for i in range(1, 22) } - self.train_param1 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [] for i in range(1, 3): @@ -58,10 +58,10 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x1, self.train_param1) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param1) - mul4 = pybuda.op.Multiply("mul4", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x1, self.train_param1) + mul3 = forge.op.Multiply("mul3", x2, self.train_param1) + mul4 = forge.op.Multiply("mul4", x2, self.train_param2) # Layer 3 ln1 = nn.Layernorm( @@ -98,10 +98,10 @@ def forward(self, x1, x2): ) # Layer 4 - mul5 = pybuda.op.Multiply("mul5", ln1, x1) - add1 = pybuda.op.Add("add1", ln2, self.train_param1) - add2 = pybuda.op.Add("add2", ln3, x2) - mul6 = pybuda.op.Multiply("mul6", ln4, self.train_param2) + mul5 = forge.op.Multiply("mul5", ln1, x1) + add1 = forge.op.Add("add1", ln2, self.train_param1) + add2 = forge.op.Add("add2", ln3, x2) + mul6 = forge.op.Multiply("mul6", ln4, self.train_param2) # Layer 5 ln5 = nn.Layernorm( @@ -137,10 +137,10 @@ def forward(self, x1, x2): ) # Layer 6 - add3 = pybuda.op.Add("add3", ln5, ln2) - add4 = pybuda.op.Add("add4", ln6, ln3) - add5 = pybuda.op.Add("add5", ln7, ln4) - add6 = pybuda.op.Add("add6", ln3, ln8) + add3 = forge.op.Add("add3", ln5, ln2) + add4 = forge.op.Add("add4", ln6, ln3) + add5 = forge.op.Add("add5", ln7, ln4) + add6 = forge.op.Add("add6", ln3, ln8) # Layer 7 ln9 = nn.Layernorm( @@ -177,10 +177,10 @@ def forward(self, x1, x2): ) # Layer 8 - mul7 = pybuda.op.Multiply("mul7", ln1, ln9) - mul8 = pybuda.op.Multiply("mul8", ln2, ln10) - mul9 = pybuda.op.Multiply("mul9", ln7, ln11) - mul10 = pybuda.op.Multiply("mul10", ln8, ln12) + mul7 = forge.op.Multiply("mul7", ln1, ln9) + mul8 = forge.op.Multiply("mul8", ln2, ln10) + mul9 = forge.op.Multiply("mul9", ln7, ln11) + mul10 = forge.op.Multiply("mul10", ln8, ln12) # Layer 9 ln13 = nn.Layernorm( @@ -217,8 +217,8 @@ def forward(self, x1, x2): ) # Layer 10 - mul11 = pybuda.op.Multiply("mul11", ln13, ln14) - mul12 = pybuda.op.Multiply("mul12", ln15, ln16) + mul11 = forge.op.Multiply("mul11", ln13, ln14) + mul12 = forge.op.Multiply("mul12", ln15, ln16) # Layer 11 ln17 = nn.Layernorm( @@ -239,9 +239,9 @@ def forward(self, x1, x2): ) # Layer 12 - mul13 = pybuda.op.Multiply("mul13", ln14, ln15) - add7 = 
pybuda.op.Add("add7", mul7, ln17) - add8 = pybuda.op.Add("add8", add6, ln18) + mul13 = forge.op.Multiply("mul13", ln14, ln15) + add7 = forge.op.Add("add7", mul7, ln17) + add8 = forge.op.Add("add8", add6, ln18) # Layer 13 ln19 = nn.Layernorm( @@ -262,7 +262,7 @@ def forward(self, x1, x2): ) # Layer 14 - add9 = pybuda.op.Add("add9", mul13, ln19) + add9 = forge.op.Add("add9", mul13, ln19) # Layer 15 ln21 = nn.Layernorm( diff --git a/pybuda/test/nn/layers/normalization/models/model_7.py b/forge/test/nn/layers/normalization/models/model_7.py similarity index 76% rename from pybuda/test/nn/layers/normalization/models/model_7.py rename to forge/test/nn/layers/normalization/models/model_7.py index 5c0022615..6963ba32c 100644 --- a/pybuda/test/nn/layers/normalization/models/model_7.py +++ b/forge/test/nn/layers/normalization/models/model_7.py @@ -8,15 +8,15 @@ import torch -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -36,16 +36,16 @@ def __init__( self.epsilon = epsilon self.gamma = { - f"gamma{i}": pybuda.Parameter(*self.gamma_shape, requires_grad=True) + f"gamma{i}": forge.Parameter(*self.gamma_shape, requires_grad=True) for i in range(1, 21) } self.beta = { - f"beta{i}": pybuda.Parameter(*self.beta_shape, requires_grad=True) + f"beta{i}": forge.Parameter(*self.beta_shape, requires_grad=True) for i in range(1, 21) } - self.train_param1 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [] for i in range(1, 4): @@ -84,9 +84,9 @@ def forward(self, x1, x2, x3): ) # Layer 3 - mul1 = pybuda.op.Multiply("mul1", ln1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", ln2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", ln3, self.train_param3) + mul1 = forge.op.Multiply("mul1", ln1, self.train_param1) + mul2 = forge.op.Multiply("mul2", ln2, self.train_param2) + mul3 = forge.op.Multiply("mul3", ln3, self.train_param3) # Layer 4 ln4 = nn.Layernorm( @@ -115,9 +115,9 @@ def forward(self, x1, x2, x3): ) # Layer 5 - add1 = pybuda.op.Add("add1", ln4, ln2) - add2 = pybuda.op.Add("add2", ln5, ln3) - add3 = pybuda.op.Add("add3", ln6, self.train_param3) + add1 = forge.op.Add("add1", ln4, ln2) + add2 = forge.op.Add("add2", ln5, ln3) + add3 = forge.op.Add("add3", ln6, self.train_param3) # Layer 6 ln7 = nn.Layernorm( @@ -146,9 +146,9 @@ def forward(self, x1, x2, x3): ) # Layer 7 - mul4 = pybuda.op.Multiply("mul4", ln1, ln7) - mul5 = pybuda.op.Multiply("mul5", ln2, ln8) - add4 = pybuda.op.Add("add4", ln3, ln9) + mul4 = forge.op.Multiply("mul4", ln1, ln7) + mul5 = forge.op.Multiply("mul5", ln2, ln8) + add4 = forge.op.Add("add4", ln3, ln9) # Layer 8 ln10 = nn.Layernorm( @@ -177,14 +177,14 @@ def forward(self, x1, x2, x3): ) # Layer 9 - mul6 = pybuda.op.Multiply("mul6", ln10, ln4) - mul7 = pybuda.op.Multiply("mul7", ln11, ln5) - mul8 = pybuda.op.Multiply("mul8", ln12, ln6) + mul6 = forge.op.Multiply("mul6", ln10, ln4) + mul7 = forge.op.Multiply("mul7", ln11, ln5) + mul8 = 
forge.op.Multiply("mul8", ln12, ln6) # Layer 10 - mul9 = pybuda.op.Multiply("mul9", mul6, ln5) - mul10 = pybuda.op.Multiply("mul10", ln2, mul8) - add5 = pybuda.op.Add("add5", ln4, mul7) + mul9 = forge.op.Multiply("mul9", mul6, ln5) + mul10 = forge.op.Multiply("mul10", ln2, mul8) + add5 = forge.op.Add("add5", ln4, mul7) # Layer 11 ln13 = nn.Layernorm( @@ -213,9 +213,9 @@ def forward(self, x1, x2, x3): ) # Layer 12 - add6 = pybuda.op.Add("add6", mul6, ln13) - mul11 = pybuda.op.Multiply("mul11", ln11, ln14) - mul12 = pybuda.op.Multiply("mul12", ln9, ln15) + add6 = forge.op.Add("add6", mul6, ln13) + mul11 = forge.op.Multiply("mul11", ln11, ln14) + mul12 = forge.op.Multiply("mul12", ln9, ln15) # Layer 13 ln16 = nn.Layernorm( @@ -244,8 +244,8 @@ def forward(self, x1, x2, x3): ) # Layer 14 - mul13 = pybuda.op.Multiply("mul13", ln16, ln17) - add7 = pybuda.op.Add("add7", ln17, ln18) + mul13 = forge.op.Multiply("mul13", ln16, ln17) + add7 = forge.op.Add("add7", ln17, ln18) # Layer 15 ln19 = nn.Layernorm( @@ -266,6 +266,6 @@ def forward(self, x1, x2, x3): ) # Layer 16 - add8 = pybuda.op.Add("add8", ln19, ln20) + add8 = forge.op.Add("add8", ln19, ln20) return add8 diff --git a/pybuda/test/nn/layers/normalization/models/model_8.py b/forge/test/nn/layers/normalization/models/model_8.py similarity index 77% rename from pybuda/test/nn/layers/normalization/models/model_8.py rename to forge/test/nn/layers/normalization/models/model_8.py index fa393b633..acbdc105f 100644 --- a/pybuda/test/nn/layers/normalization/models/model_8.py +++ b/forge/test/nn/layers/normalization/models/model_8.py @@ -8,15 +8,15 @@ import torch -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -36,17 +36,17 @@ def __init__( self.epsilon = epsilon self.gamma = { - f"gamma{i}": pybuda.Parameter(*self.gamma_shape, requires_grad=True) + f"gamma{i}": forge.Parameter(*self.gamma_shape, requires_grad=True) for i in range(1, 25) } self.beta = { - f"beta{i}": pybuda.Parameter(*self.beta_shape, requires_grad=True) + f"beta{i}": forge.Parameter(*self.beta_shape, requires_grad=True) for i in range(1, 25) } - self.train_param1 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [] for i in range(1, 5): @@ -92,18 +92,18 @@ def forward(self, x1, x2, x3, x4): self.epsilon ) - mul1 = pybuda.op.Multiply("mul1", self.train_param1, self.train_param4) - mul2 = pybuda.op.Multiply("mul2", self.train_param1, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", self.train_param2, self.train_param3) - mul4 = pybuda.op.Multiply("mul4", self.train_param3, self.train_param4) + mul1 = forge.op.Multiply("mul1", self.train_param1, self.train_param4) + mul2 = forge.op.Multiply("mul2", self.train_param1, self.train_param2) + mul3 = forge.op.Multiply("mul3", self.train_param2, 
self.train_param3) + mul4 = forge.op.Multiply("mul4", self.train_param3, self.train_param4) # Layer 3 - mul5 = pybuda.op.Multiply("mul5", ln1, ln2) - add1 = pybuda.op.Add("add1", mul1, mul2) - mul6 = pybuda.op.Multiply("mul6", ln2, ln3) - add2 = pybuda.op.Add("add2", mul2, mul3) - add3 = pybuda.op.Add("add3", ln3, ln4) - add4 = pybuda.op.Add("add4", mul3, mul4) + mul5 = forge.op.Multiply("mul5", ln1, ln2) + add1 = forge.op.Add("add1", mul1, mul2) + mul6 = forge.op.Multiply("mul6", ln2, ln3) + add2 = forge.op.Add("add2", mul2, mul3) + add3 = forge.op.Add("add3", ln3, ln4) + add4 = forge.op.Add("add4", mul3, mul4) # Layer 4 ln5 = nn.Layernorm( @@ -189,11 +189,11 @@ def forward(self, x1, x2, x3, x4): self.epsilon ) - add5 = pybuda.op.Add("add5", ln5, ln11) - add6 = pybuda.op.Add("add6", ln6, ln12) - add7 = pybuda.op.Add("add7", ln7, ln13) - mul7 = pybuda.op.Multiply("mul7", ln8, ln14) - mul8 = pybuda.op.Multiply("mul8", ln9, ln10) + add5 = forge.op.Add("add5", ln5, ln11) + add6 = forge.op.Add("add6", ln6, ln12) + add7 = forge.op.Add("add7", ln7, ln13) + mul7 = forge.op.Multiply("mul7", ln8, ln14) + mul8 = forge.op.Multiply("mul8", ln9, ln10) # Layer 6 ln15 = nn.Layernorm( @@ -238,11 +238,11 @@ def forward(self, x1, x2, x3, x4): ) # Layer 7 - mul9 = pybuda.op.Multiply("mul9", ln5, ln15) - mul10 = pybuda.op.Multiply("mul10", ln6, ln16) - add8 = pybuda.op.Add("add8", ln7, ln17) - add9 = pybuda.op.Add("add9", ln8, ln18) - add10 = pybuda.op.Add("add10", ln10, ln19) + mul9 = forge.op.Multiply("mul9", ln5, ln15) + mul10 = forge.op.Multiply("mul10", ln6, ln16) + add8 = forge.op.Add("add8", ln7, ln17) + add9 = forge.op.Add("add9", ln8, ln18) + add10 = forge.op.Add("add10", ln10, ln19) # Layer 8 ln20 = nn.Layernorm( diff --git a/pybuda/test/nn/layers/normalization/models/model_9.py b/forge/test/nn/layers/normalization/models/model_9.py similarity index 73% rename from pybuda/test/nn/layers/normalization/models/model_9.py rename to forge/test/nn/layers/normalization/models/model_9.py index 61f5224e5..ac946d228 100644 --- a/pybuda/test/nn/layers/normalization/models/model_9.py +++ b/forge/test/nn/layers/normalization/models/model_9.py @@ -8,15 +8,15 @@ import torch -import pybuda -import pybuda.op -from pybuda.op import nn +import forge +import forge.op +from forge.op import nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): def __init__( self, @@ -36,16 +36,16 @@ def __init__( self.epsilon = epsilon self.gamma = { - f"gamma{i}": pybuda.Parameter(*self.gamma_shape, requires_grad=True) + f"gamma{i}": forge.Parameter(*self.gamma_shape, requires_grad=True) for i in range(1, 57) } self.beta = { - f"beta{i}": pybuda.Parameter(*self.beta_shape, requires_grad=True) + f"beta{i}": forge.Parameter(*self.beta_shape, requires_grad=True) for i in range(1, 57) } - self.train_param1 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.input_shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.input_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.input_shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.input_shape, requires_grad=True) self.inputs = [] for i in range(1, 4): @@ -111,12 +111,12 @@ def forward(self, x1, x2, x3): ) # Layer 4 - mul1 = pybuda.op.Multiply("mul1", ln_layer3[0], ln_layer3[2]) - mul2 = 
pybuda.op.Multiply("mul2", ln_layer3[1], ln_layer3[3]) - mul3 = pybuda.op.Multiply("mul3", ln_layer3[4], ln_layer3[6]) - mul4 = pybuda.op.Multiply("mul4", ln_layer3[5], ln_layer3[7]) - mul5 = pybuda.op.Multiply("mul5", ln_layer3[8], ln_layer3[10]) - mul6 = pybuda.op.Multiply("mul6", ln_layer3[9], ln_layer3[11]) + mul1 = forge.op.Multiply("mul1", ln_layer3[0], ln_layer3[2]) + mul2 = forge.op.Multiply("mul2", ln_layer3[1], ln_layer3[3]) + mul3 = forge.op.Multiply("mul3", ln_layer3[4], ln_layer3[6]) + mul4 = forge.op.Multiply("mul4", ln_layer3[5], ln_layer3[7]) + mul5 = forge.op.Multiply("mul5", ln_layer3[8], ln_layer3[10]) + mul6 = forge.op.Multiply("mul6", ln_layer3[9], ln_layer3[11]) # Layer 5 input_layer5 = [mul1, mul2, mul3, mul4, mul5, mul6] @@ -146,9 +146,9 @@ def forward(self, x1, x2, x3): ) # Layer 6 - mul7 = pybuda.op.Multiply("mul7", ln_layer5[2], self.train_param1) - mul8 = pybuda.op.Multiply("mul8", ln_layer5[6], self.train_param2) - mul9 = pybuda.op.Multiply("mul9", ln_layer5[10], self.train_param3) + mul7 = forge.op.Multiply("mul7", ln_layer5[2], self.train_param1) + mul8 = forge.op.Multiply("mul8", ln_layer5[6], self.train_param2) + mul9 = forge.op.Multiply("mul9", ln_layer5[10], self.train_param3) # Layer 7 ln31 = nn.Layernorm( @@ -177,17 +177,17 @@ def forward(self, x1, x2, x3): ) # Layer 8 - add1 = pybuda.op.Add("add1", ln_layer5[1], ln31) - add2 = pybuda.op.Add("add2", ln_layer5[4], ln32) - add3 = pybuda.op.Add("add3", ln_layer5[8], ln33) + add1 = forge.op.Add("add1", ln_layer5[1], ln31) + add2 = forge.op.Add("add2", ln_layer5[4], ln32) + add3 = forge.op.Add("add3", ln_layer5[8], ln33) # Layer 9 - mul10 = pybuda.op.Multiply("mul10", ln_layer5[0], add1) - mul11 = pybuda.op.Multiply("mul11", ln_layer5[3], add2) - add4 = pybuda.op.Add("add4", ln_layer5[5], ln32) - add5 = pybuda.op.Add("add5", ln_layer5[7], add3) - mul12 = pybuda.op.Multiply("mul12", ln_layer5[9], ln33) - mul13 = pybuda.op.Multiply("mul13", mul9, ln_layer5[11]) + mul10 = forge.op.Multiply("mul10", ln_layer5[0], add1) + mul11 = forge.op.Multiply("mul11", ln_layer5[3], add2) + add4 = forge.op.Add("add4", ln_layer5[5], ln32) + add5 = forge.op.Add("add5", ln_layer5[7], add3) + mul12 = forge.op.Multiply("mul12", ln_layer5[9], ln33) + mul13 = forge.op.Multiply("mul13", mul9, ln_layer5[11]) # Layer 10 input_layer10 = [mul10, mul11, add4, add5, mul12, mul13] @@ -217,12 +217,12 @@ def forward(self, x1, x2, x3): ) # Layer 11 - add6 = pybuda.op.Add("add6", ln_layer10[0], ln_layer10[1]) - add7 = pybuda.op.Add("add7", ln_layer10[2], ln_layer10[3]) - add8 = pybuda.op.Add("add8", ln_layer10[4], ln_layer10[5]) - add9 = pybuda.op.Add("add9", ln_layer10[6], ln_layer10[7]) - add10 = pybuda.op.Add("add10", ln_layer10[8], ln_layer10[9]) - add11 = pybuda.op.Add("add11", ln_layer10[10], ln_layer10[11]) + add6 = forge.op.Add("add6", ln_layer10[0], ln_layer10[1]) + add7 = forge.op.Add("add7", ln_layer10[2], ln_layer10[3]) + add8 = forge.op.Add("add8", ln_layer10[4], ln_layer10[5]) + add9 = forge.op.Add("add9", ln_layer10[6], ln_layer10[7]) + add10 = forge.op.Add("add10", ln_layer10[8], ln_layer10[9]) + add11 = forge.op.Add("add11", ln_layer10[10], ln_layer10[11]) # Layer 12 input_layer12 = [add6, add7, add8, add9, add10, add11] @@ -240,9 +240,9 @@ def forward(self, x1, x2, x3): ) # Layer 13 - add12 = pybuda.op.Add("add12", ln_layer12[0], ln_layer12[2]) - add13 = pybuda.op.Add("add13", ln_layer12[1], ln_layer12[4]) - add14 = pybuda.op.Add("add14", ln_layer12[3], ln_layer12[5]) + add12 = forge.op.Add("add12", ln_layer12[0], 
ln_layer12[2]) + add13 = forge.op.Add("add13", ln_layer12[1], ln_layer12[4]) + add14 = forge.op.Add("add14", ln_layer12[3], ln_layer12[5]) # Layer 14 ln52 = nn.Layernorm( @@ -271,8 +271,8 @@ def forward(self, x1, x2, x3): ) # Layer 15 - mul14 = pybuda.op.Multiply("mul14", ln52, ln53) - mul15 = pybuda.op.Multiply("mul15", ln53, ln54) + mul14 = forge.op.Multiply("mul14", ln52, ln53) + mul15 = forge.op.Multiply("mul15", ln53, ln54) # Layer 16 ln55 = nn.Layernorm( diff --git a/pybuda/test/nn/layers/normalization/test_layernorm.py b/forge/test/nn/layers/normalization/test_layernorm.py similarity index 93% rename from pybuda/test/nn/layers/normalization/test_layernorm.py rename to forge/test/nn/layers/normalization/test_layernorm.py index 0d94a13a3..704564259 100644 --- a/pybuda/test/nn/layers/normalization/test_layernorm.py +++ b/forge/test/nn/layers/normalization/test_layernorm.py @@ -12,13 +12,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . import models -MODELS_PATH = "./pybuda/test/nn/layers/normalization/models/" +MODELS_PATH = "./forge/test/nn/layers/normalization/models/" SHAPE_NO = 1 SHAPE_SIZE = 4 @@ -95,7 +95,7 @@ def test_layernorm( tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/eltwise_binary/__init__.py b/forge/test/operators/eltwise_binary/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_binary/__init__.py rename to forge/test/operators/eltwise_binary/__init__.py diff --git a/pybuda/test/operators/eltwise_binary/conftest.py b/forge/test/operators/eltwise_binary/conftest.py similarity index 100% rename from pybuda/test/operators/eltwise_binary/conftest.py rename to forge/test/operators/eltwise_binary/conftest.py diff --git a/pybuda/test/operators/eltwise_binary/models/__init__.py b/forge/test/operators/eltwise_binary/models/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_binary/models/__init__.py rename to forge/test/operators/eltwise_binary/models/__init__.py diff --git a/pybuda/test/operators/eltwise_binary/models/model_1.py b/forge/test/operators/eltwise_binary/models/model_1.py similarity index 77% rename from pybuda/test/operators/eltwise_binary/models/model_1.py rename to forge/test/operators/eltwise_binary/models/model_1.py index c7cb73106..ff451321e 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_1.py +++ b/forge/test/operators/eltwise_binary/models/model_1.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 1 @@ -23,7 +23,7 @@ class BudaElementWiseBinaryTest(PyBudaModule): One operand represents input and the other one is trainable paramater. 
Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. @@ -35,7 +35,7 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 1" self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) diff --git a/pybuda/test/operators/eltwise_binary/models/model_10.py b/forge/test/operators/eltwise_binary/models/model_10.py similarity index 74% rename from pybuda/test/operators/eltwise_binary/models/model_10.py rename to forge/test/operators/eltwise_binary/models/model_10.py index ee6fa1fe0..a9ba9f55b 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_10.py +++ b/forge/test/operators/eltwise_binary/models/model_10.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 10 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 10 In this test we have 22 operators with 3 input operands and 9 trainable operands. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -33,17 +33,17 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 10" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param5 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param6 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param5 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param6 = forge.Parameter(*self.shape, requires_grad=True) - self.train_param7 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param8 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param9 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param7 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param8 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param9 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(9): diff --git a/pybuda/test/operators/eltwise_binary/models/model_11.py b/forge/test/operators/eltwise_binary/models/model_11.py similarity index 78% rename from pybuda/test/operators/eltwise_binary/models/model_11.py rename to forge/test/operators/eltwise_binary/models/model_11.py index c885ed568..434f5b9b4 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_11.py +++ b/forge/test/operators/eltwise_binary/models/model_11.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 11 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 11 In this test we have 22 operators with three input operands and 6 trainable operands. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -33,12 +33,12 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 11" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param5 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param6 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param5 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param6 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(6): diff --git a/pybuda/test/operators/eltwise_binary/models/model_2.py b/forge/test/operators/eltwise_binary/models/model_2.py similarity index 75% rename from pybuda/test/operators/eltwise_binary/models/model_2.py rename to forge/test/operators/eltwise_binary/models/model_2.py index d8b59aedc..34f87c3a4 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_2.py +++ b/forge/test/operators/eltwise_binary/models/model_2.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 2 @@ -23,7 +23,7 @@ class BudaElementWiseBinaryTest(PyBudaModule): One operand represents input and the other one is trainable paramater. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -34,9 +34,9 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 2" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(3): diff --git a/pybuda/test/operators/eltwise_binary/models/model_3.py b/forge/test/operators/eltwise_binary/models/model_3.py similarity index 77% rename from pybuda/test/operators/eltwise_binary/models/model_3.py rename to forge/test/operators/eltwise_binary/models/model_3.py index 80eba92e3..fdf57b166 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_3.py +++ b/forge/test/operators/eltwise_binary/models/model_3.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 3 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 3 In this test we have 10 operations, and three input tensors and three trainable variables. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -33,9 +33,9 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 3" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(3): diff --git a/pybuda/test/operators/eltwise_binary/models/model_4.py b/forge/test/operators/eltwise_binary/models/model_4.py similarity index 75% rename from pybuda/test/operators/eltwise_binary/models/model_4.py rename to forge/test/operators/eltwise_binary/models/model_4.py index ab18c9126..4772d31ff 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_4.py +++ b/forge/test/operators/eltwise_binary/models/model_4.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 4 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 4 In this test we have six operators with three input operands and three trainable operands. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -33,9 +33,9 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 4" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(3): diff --git a/pybuda/test/operators/eltwise_binary/models/model_5.py b/forge/test/operators/eltwise_binary/models/model_5.py similarity index 76% rename from pybuda/test/operators/eltwise_binary/models/model_5.py rename to forge/test/operators/eltwise_binary/models/model_5.py index 869f89c5f..eca933ea0 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_5.py +++ b/forge/test/operators/eltwise_binary/models/model_5.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 5 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 5 In this test we have 8 operators with three input operands and three trainable operands. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -33,9 +33,9 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 5" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(3): diff --git a/pybuda/test/operators/eltwise_binary/models/model_6.py b/forge/test/operators/eltwise_binary/models/model_6.py similarity index 77% rename from pybuda/test/operators/eltwise_binary/models/model_6.py rename to forge/test/operators/eltwise_binary/models/model_6.py index 2b212d087..0cf77cf48 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_6.py +++ b/forge/test/operators/eltwise_binary/models/model_6.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 6 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 6 In this test we have 13 operators with 4 input operands and 4 trainable operands. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -33,10 +33,10 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 6" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(4)] for i in range(4): diff --git a/pybuda/test/operators/eltwise_binary/models/model_7.py b/forge/test/operators/eltwise_binary/models/model_7.py similarity index 83% rename from pybuda/test/operators/eltwise_binary/models/model_7.py rename to forge/test/operators/eltwise_binary/models/model_7.py index 891800c8c..14596908f 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_7.py +++ b/forge/test/operators/eltwise_binary/models/model_7.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 7 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 7 In this test we have 25 operators with 4 input operands and 4 trainable operands. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -33,10 +33,10 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 7" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(4)] for i in range(4): diff --git a/pybuda/test/operators/eltwise_binary/models/model_8.py b/forge/test/operators/eltwise_binary/models/model_8.py similarity index 84% rename from pybuda/test/operators/eltwise_binary/models/model_8.py rename to forge/test/operators/eltwise_binary/models/model_8.py index bb905da6d..8ca5cc345 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_8.py +++ b/forge/test/operators/eltwise_binary/models/model_8.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 8 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 8 In this test we have 22 operators with three input operands and three trainable operands. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -33,9 +33,9 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 8" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(3): diff --git a/pybuda/test/operators/eltwise_binary/models/model_9.py b/forge/test/operators/eltwise_binary/models/model_9.py similarity index 80% rename from pybuda/test/operators/eltwise_binary/models/model_9.py rename to forge/test/operators/eltwise_binary/models/model_9.py index 0df574828..bffc48821 100644 --- a/pybuda/test/operators/eltwise_binary/models/model_9.py +++ b/forge/test/operators/eltwise_binary/models/model_9.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 9 -# Binary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Binary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseBinaryTest(PyBudaModule): +class BudaElementWiseBinaryTest(ForgeModule): """ Buda Test 9 In this test we have 12 operators with three input operands and three trainable operands. Args: - operator (function): PyBuda binary element-wise operator. + operator (function): Forge binary element-wise operator. opname (str): Operation name (e.g. add, mul, sub, ...). This name test use to generate names of operation nodes in a graph/model. 
""" @@ -33,9 +33,9 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 9" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(3): diff --git a/pybuda/test/operators/eltwise_binary/test_command.sh b/forge/test/operators/eltwise_binary/test_command.sh similarity index 100% rename from pybuda/test/operators/eltwise_binary/test_command.sh rename to forge/test/operators/eltwise_binary/test_command.sh diff --git a/pybuda/test/operators/eltwise_binary/test_eltwise_binary.py b/forge/test/operators/eltwise_binary/test_eltwise_binary.py similarity index 89% rename from pybuda/test/operators/eltwise_binary/test_eltwise_binary.py rename to forge/test/operators/eltwise_binary/test_eltwise_binary.py index 8363a17ca..0d97677be 100644 --- a/pybuda/test/operators/eltwise_binary/test_eltwise_binary.py +++ b/forge/test/operators/eltwise_binary/test_eltwise_binary.py @@ -11,13 +11,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . import models -MODELS_PATH = "./pybuda/test/operators/eltwise_binary/models/" +MODELS_PATH = "./forge/test/operators/eltwise_binary/models/" SHAPE_NO = 2 SHAPE_SIZE_MIN = 2 @@ -74,13 +74,13 @@ def test_eltwise_binary( if not training and recompute: pytest.skip("Inference and recompute is the same as just inference.") - architecture = f'models.{model}.BudaElementWiseBinaryTest(operator=pybuda.op.{operation}, opname="{operation}", shape={shape})' + architecture = f'models.{model}.BudaElementWiseBinaryTest(operator=forge.op.{operation}, opname="{operation}", shape={shape})' model = eval(architecture) tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - #Fusing disabled due to tenstorrent/pybuda#548 - pybuda_compile( + #Fusing disabled due to tenstorrent/forge#548 + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/eltwise_binary/test_eltwise_binary_single.py b/forge/test/operators/eltwise_binary/test_eltwise_binary_single.py similarity index 87% rename from pybuda/test/operators/eltwise_binary/test_eltwise_binary_single.py rename to forge/test/operators/eltwise_binary/test_eltwise_binary_single.py index e9f853012..63f5b2752 100644 --- a/pybuda/test/operators/eltwise_binary/test_eltwise_binary_single.py +++ b/forge/test/operators/eltwise_binary/test_eltwise_binary_single.py @@ -5,13 +5,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . 
import models -MODELS_PATH = "./pybuda/test/operators/eltwise_binary/models/" +MODELS_PATH = "./forge/test/operators/eltwise_binary/models/" def test_eltwise_binary_single( bin_model, @@ -54,11 +54,11 @@ def test_eltwise_binary_single( print(f"Shape --> {shape}") print("\n") - architecture = f'models.{model}.BudaElementWiseBinaryTest(operator=pybuda.op.{operation}, opname="{operation}", shape={shape})' + architecture = f'models.{model}.BudaElementWiseBinaryTest(operator=forge.op.{operation}, opname="{operation}", shape={shape})' model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/eltwise_binary_comparison/__init__.py b/forge/test/operators/eltwise_binary_comparison/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_binary_comparison/__init__.py rename to forge/test/operators/eltwise_binary_comparison/__init__.py diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/__init__.py b/forge/test/operators/eltwise_binary_comparison/models/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_binary_comparison/models/__init__.py rename to forge/test/operators/eltwise_binary_comparison/models/__init__.py diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_1.py b/forge/test/operators/eltwise_binary_comparison/models/model_1.py similarity index 76% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_1.py rename to forge/test/operators/eltwise_binary_comparison/models/model_1.py index 32f7f983f..e6e2838ec 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_1.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_1.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 1 @@ -42,7 +42,7 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) input_ = torch.rand(*self.shape) * (self.rng_max - self.rng_min) + self.rng_min if self.mask: @@ -55,7 +55,7 @@ def forward(self, x): # Layer 2 comp = self.operator(self.opname, x, self.train_param) # Layer 3 - mul = pybuda.op.Multiply("mul", comp, self.train_param) + mul = forge.op.Multiply("mul", comp, self.train_param) return mul diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_10.py b/forge/test/operators/eltwise_binary_comparison/models/model_10.py similarity index 54% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_10.py rename to forge/test/operators/eltwise_binary_comparison/models/model_10.py index f4fbba464..c5167839e 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_10.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_10.py @@ -3,21 
+3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 10 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 10 @@ -42,10 +42,10 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for _ in range(4): @@ -67,12 +67,12 @@ def forward(self, x1, x2, x3, x4): comp6 = self.operator(self.opname + "6", x4, self.train_param4) # Layer 3 - mul1 = pybuda.op.Multiply("mul1", comp1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", comp2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", comp3, x3) - mul4 = pybuda.op.Multiply("mul4", comp4, self.train_param3) - mul5 = pybuda.op.Multiply("mul5", comp5, self.train_param4) - mul6 = pybuda.op.Multiply("mul6", x4, self.train_param4) + mul1 = forge.op.Multiply("mul1", comp1, self.train_param1) + mul2 = forge.op.Multiply("mul2", comp2, self.train_param2) + mul3 = forge.op.Multiply("mul3", comp3, x3) + mul4 = forge.op.Multiply("mul4", comp4, self.train_param3) + mul5 = forge.op.Multiply("mul5", comp5, self.train_param4) + mul6 = forge.op.Multiply("mul6", x4, self.train_param4) # Layer 4 comp7 = self.operator(self.opname + "7", mul1, mul2) @@ -81,15 +81,15 @@ def forward(self, x1, x2, x3, x4): comp10 = self.operator(self.opname + "10", mul5, mul6) # Layer 5 - mul7 = pybuda.op.Multiply("mul7", comp7, mul2) - mul8 = pybuda.op.Multiply("mul8", comp8, mul3) - mul9 = pybuda.op.Multiply("mul9", comp9, mul4) - mul10 = pybuda.op.Multiply("mul10", comp10, mul6) + mul7 = forge.op.Multiply("mul7", comp7, mul2) + mul8 = forge.op.Multiply("mul8", comp8, mul3) + mul9 = forge.op.Multiply("mul9", comp9, mul4) + mul10 = forge.op.Multiply("mul10", comp10, mul6) # Layer 6 - mul11 = pybuda.op.Multiply("mul11", mul7, mul3) - mul12 = pybuda.op.Multiply("mul12", mul8, mul5) - mul13 = pybuda.op.Multiply("mul13", mul9, mul10) + mul11 = forge.op.Multiply("mul11", mul7, mul3) + mul12 = forge.op.Multiply("mul12", mul8, mul5) + mul13 = forge.op.Multiply("mul13", mul9, mul10) # Layer 7 comp11 = self.operator(self.opname + "11", mul1, mul7) @@ -99,18 +99,18 @@ def forward(self, x1, x2, x3, x4): comp15 = self.operator(self.opname + "15", mul13, mul10) # Layer 8 - mul14 = pybuda.op.Multiply("mul14", comp11, mul11) - mul15 = pybuda.op.Multiply("mul15", comp12, mul12) - mul16 = pybuda.op.Multiply("mul16", comp13, mul9) - mul17 = pybuda.op.Multiply("mul17", comp14, mul13) - mul18 = pybuda.op.Multiply("mul18", comp15, mul10) + mul14 = forge.op.Multiply("mul14", 
comp11, mul11) + mul15 = forge.op.Multiply("mul15", comp12, mul12) + mul16 = forge.op.Multiply("mul16", comp13, mul9) + mul17 = forge.op.Multiply("mul17", comp14, mul13) + mul18 = forge.op.Multiply("mul18", comp15, mul10) # Layer 9 - mul19 = pybuda.op.Multiply("mul19", mul14, comp12) - mul20 = pybuda.op.Multiply("mul20", mul15, comp13) - mul21 = pybuda.op.Multiply("mul21", mul16, comp14) - mul22 = pybuda.op.Multiply("mul22", mul17, comp15) - mul23 = pybuda.op.Multiply("mul23", comp13, mul18) + mul19 = forge.op.Multiply("mul19", mul14, comp12) + mul20 = forge.op.Multiply("mul20", mul15, comp13) + mul21 = forge.op.Multiply("mul21", mul16, comp14) + mul22 = forge.op.Multiply("mul22", mul17, comp15) + mul23 = forge.op.Multiply("mul23", comp13, mul18) # Layer 10 comp16 = self.operator(self.opname + "16", mul19, mul20) @@ -118,13 +118,13 @@ def forward(self, x1, x2, x3, x4): comp18 = self.operator(self.opname + "18", mul22, mul23) # Layer 11 - mul24 = pybuda.op.Multiply("mul24", comp16, mul20) - mul25 = pybuda.op.Multiply("mul25", comp17, mul21) - mul26 = pybuda.op.Multiply("mul26", comp18, mul23) + mul24 = forge.op.Multiply("mul24", comp16, mul20) + mul25 = forge.op.Multiply("mul25", comp17, mul21) + mul26 = forge.op.Multiply("mul26", comp18, mul23) # Layer 12 - mul27 = pybuda.op.Multiply("mul27", mul24, mul25) - mul28 = pybuda.op.Multiply("mul28", mul21, mul26) + mul27 = forge.op.Multiply("mul27", mul24, mul25) + mul28 = forge.op.Multiply("mul28", mul21, mul26) return mul27, mul28 diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_2.py b/forge/test/operators/eltwise_binary_comparison/models/model_2.py similarity index 68% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_2.py rename to forge/test/operators/eltwise_binary_comparison/models/model_2.py index 4197d94d3..638545a39 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_2.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_2.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 2 @@ -42,8 +42,8 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for _ in range(2): @@ -57,20 +57,20 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) comp1 = self.operator(self.opname + "1", self.train_param1, x2) # Layer 3 - mul3 = pybuda.op.Multiply("mul3", mul1, comp1) - mul4 = pybuda.op.Multiply("mul4", comp1, mul2) + mul3 = 
forge.op.Multiply("mul3", mul1, comp1) + mul4 = forge.op.Multiply("mul4", comp1, mul2) # Layer 4 comp2 = self.operator(self.opname + "2", mul1, mul3) comp3 = self.operator(self.opname + "3", mul4, mul2) # Layer 5 - mul5 = pybuda.op.Multiply("mul5", comp2, comp3) + mul5 = forge.op.Multiply("mul5", comp2, comp3) return mul5 diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_3.py b/forge/test/operators/eltwise_binary_comparison/models/model_3.py similarity index 60% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_3.py rename to forge/test/operators/eltwise_binary_comparison/models/model_3.py index f55235d0a..7e65c389a 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_3.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_3.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 3 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 3 @@ -42,8 +42,8 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for _ in range(2): @@ -57,21 +57,21 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) comp1 = self.operator(self.opname + "1", self.train_param1, x2) comp2 = self.operator(self.opname + "2", x2, self.train_param2) comp3 = self.operator(self.opname + "3", self.train_param1, self.train_param2) # Layer 3 - mul2 = pybuda.op.Multiply("mul2", mul1, comp1) - mul3 = pybuda.op.Multiply("mul3", x2, comp2) - mul4 = pybuda.op.Multiply("mul4", self.train_param2, comp3) + mul2 = forge.op.Multiply("mul2", mul1, comp1) + mul3 = forge.op.Multiply("mul3", x2, comp2) + mul4 = forge.op.Multiply("mul4", self.train_param2, comp3) # Layer 4 - mul5 = pybuda.op.Multiply("mul5", mul2, comp2) - mul6 = pybuda.op.Multiply("mul6", comp1, mul3) - mul7 = pybuda.op.Multiply("mul7", comp2, mul4) - mul8 = pybuda.op.Multiply("mul8", x2, comp3) + mul5 = forge.op.Multiply("mul5", mul2, comp2) + mul6 = forge.op.Multiply("mul6", comp1, mul3) + mul7 = forge.op.Multiply("mul7", comp2, mul4) + mul8 = forge.op.Multiply("mul8", x2, comp3) # Layer 5 comp4 = self.operator(self.opname + "4", mul5, mul6) @@ -80,14 +80,14 @@ def forward(self, x1, x2): comp7 = self.operator(self.opname + "7", mul4, self.train_param2) # Layer 6 - mul9 = pybuda.op.Multiply("mul9", comp4, mul6) - mul10 = pybuda.op.Multiply("mul10", comp5, mul4) - mul11 = pybuda.op.Multiply("mul11", comp6, comp3) - mul12 = pybuda.op.Multiply("mul12", mul8, comp7) + mul9 = forge.op.Multiply("mul9", comp4, mul6) + mul10 = forge.op.Multiply("mul10", comp5, mul4) + mul11 = forge.op.Multiply("mul11", comp6, comp3) + mul12 = 
forge.op.Multiply("mul12", mul8, comp7) # Layer 7 - mul13 = pybuda.op.Multiply("mul13", mul9, mul10) - mul14 = pybuda.op.Multiply("mul14", mul11, mul12) + mul13 = forge.op.Multiply("mul13", mul9, mul10) + mul14 = forge.op.Multiply("mul14", mul11, mul12) return mul13, mul14 diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_4.py b/forge/test/operators/eltwise_binary_comparison/models/model_4.py similarity index 59% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_4.py rename to forge/test/operators/eltwise_binary_comparison/models/model_4.py index a1351398a..935dd4782 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_4.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_4.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 4 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 4 @@ -42,8 +42,8 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for _ in range(2): @@ -57,9 +57,9 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param1) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param1) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) # Layer 3 comp1 = self.operator(self.opname + "1", mul1, self.train_param1) @@ -67,9 +67,9 @@ def forward(self, x1, x2): comp3 = self.operator(self.opname + "3", mul3, self.train_param2) # Layer 4 - mul4 = pybuda.op.Multiply("mul4", comp1, mul2) - mul5 = pybuda.op.Multiply("mul5", comp2, mul3) - mul6 = pybuda.op.Multiply("mul6", comp3, mul1) + mul4 = forge.op.Multiply("mul4", comp1, mul2) + mul5 = forge.op.Multiply("mul5", comp2, mul3) + mul6 = forge.op.Multiply("mul6", comp3, mul1) # Layer 5 comp4 = self.operator(self.opname + "4", mul4, mul2) @@ -77,13 +77,13 @@ def forward(self, x1, x2): comp6 = self.operator(self.opname + "6", mul3, mul6) # Layer 6 - mul7 = pybuda.op.Multiply("mul7", comp4, mul5) - mul8 = pybuda.op.Multiply("mul8", comp5, self.train_param2) - mul9 = pybuda.op.Multiply("mul9", mul5, comp6) + mul7 = forge.op.Multiply("mul7", comp4, mul5) + mul8 = forge.op.Multiply("mul8", comp5, self.train_param2) + mul9 = forge.op.Multiply("mul9", mul5, comp6) # Layer 7 - mul10 = pybuda.op.Multiply("mul10", mul7, mul8) - mul11 = pybuda.op.Multiply("mul11", mul8, mul9) + mul10 = forge.op.Multiply("mul10", mul7, mul8) + mul11 = forge.op.Multiply("mul11", mul8, mul9) # Layer 8 comp7 = self.operator(self.opname + "7", mul10, mul2) @@ -96,18 +96,18 @@ def forward(self, x1, x2): 
comp12 = self.operator(self.opname + "12", mul8, mul9) # Layer 10 - mul12 = pybuda.op.Multiply("mul12", comp10, self.train_param1) - mul13 = pybuda.op.Multiply("mul13", comp11, mul9) - mul14 = pybuda.op.Multiply("mul14", comp12, mul6) + mul12 = forge.op.Multiply("mul12", comp10, self.train_param1) + mul13 = forge.op.Multiply("mul13", comp11, mul9) + mul14 = forge.op.Multiply("mul14", comp12, mul6) # Layer 11 - mul15 = pybuda.op.Multiply("mul15", mul12, mul13) - mul16 = pybuda.op.Multiply("mul16", mul13, mul14) + mul15 = forge.op.Multiply("mul15", mul12, mul13) + mul16 = forge.op.Multiply("mul16", mul13, mul14) # Layer 12 - mul17 = pybuda.op.Multiply("mul17", mul13, mul16) - mul18 = pybuda.op.Multiply("mul18", mul15, mul17) - mul19 = pybuda.op.Multiply("mul19", mul16, mul17) + mul17 = forge.op.Multiply("mul17", mul13, mul16) + mul18 = forge.op.Multiply("mul18", mul15, mul17) + mul19 = forge.op.Multiply("mul19", mul16, mul17) return mul18, mul19 diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_5.py b/forge/test/operators/eltwise_binary_comparison/models/model_5.py similarity index 61% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_5.py rename to forge/test/operators/eltwise_binary_comparison/models/model_5.py index 915d00132..6aa1d3906 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_5.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_5.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 5 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 5 @@ -42,8 +42,8 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for _ in range(2): @@ -62,44 +62,44 @@ def forward(self, x1, x2): comp3 = self.operator(self.opname + "3", self.train_param1, self.train_param2) # Layer 3 - mul1 = pybuda.op.Multiply("mul1", comp1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", comp2, x2) - mul3 = pybuda.op.Multiply("mul3", comp3, self.train_param2) + mul1 = forge.op.Multiply("mul1", comp1, self.train_param1) + mul2 = forge.op.Multiply("mul2", comp2, x2) + mul3 = forge.op.Multiply("mul3", comp3, self.train_param2) # Layer 4 - mul4 = pybuda.op.Multiply("mul4", x1, mul2) - mul5 = pybuda.op.Multiply("mul5", x2, mul3) + mul4 = forge.op.Multiply("mul4", x1, mul2) + mul5 = forge.op.Multiply("mul5", x2, mul3) # Layer 5 - mul6 = pybuda.op.Multiply("mul6", mul1, mul2) - mul7 = pybuda.op.Multiply("mul7", mul4, mul3) + mul6 = forge.op.Multiply("mul6", mul1, mul2) + mul7 = forge.op.Multiply("mul7", mul4, mul3) # Layer 6 comp4 = self.operator(self.opname + "4", mul6, mul4) comp5 = self.operator(self.opname + "5", mul7, mul5) # Layer 7 - mul8 = pybuda.op.Multiply("mul8", mul1, 
comp4) - mul9 = pybuda.op.Multiply("mul9", mul7, comp5) + mul8 = forge.op.Multiply("mul8", mul1, comp4) + mul9 = forge.op.Multiply("mul9", mul7, comp5) comp6 = self.operator(self.opname + "6", mul4, mul2) comp7 = self.operator(self.opname + "7", mul5, mul3) # Layer 8 - mul10 = pybuda.op.Multiply("mul10", comp6, mul7) - mul11 = pybuda.op.Multiply("mul11", comp7, mul5) + mul10 = forge.op.Multiply("mul10", comp6, mul7) + mul11 = forge.op.Multiply("mul11", comp7, mul5) # Layer 9 - mul12 = pybuda.op.Multiply("mul12", mul8, mul10) - mul13 = pybuda.op.Multiply("mul13", mul10, mul9) - mul14 = pybuda.op.Multiply("mul14", mul9, mul11) + mul12 = forge.op.Multiply("mul12", mul8, mul10) + mul13 = forge.op.Multiply("mul13", mul10, mul9) + mul14 = forge.op.Multiply("mul14", mul9, mul11) # Layer 10 comp8 = self.operator(self.opname + "8", mul12, mul13) comp9 = self.operator(self.opname + "9", mul13, mul11) # Layer 11 - mul15 = pybuda.op.Multiply("mul15", comp8, mul13) - mul16 = pybuda.op.Multiply("mul16", comp9, mul14) + mul15 = forge.op.Multiply("mul15", comp8, mul13) + mul16 = forge.op.Multiply("mul16", comp9, mul14) return mul15, mul16 diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_6.py b/forge/test/operators/eltwise_binary_comparison/models/model_6.py similarity index 57% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_6.py rename to forge/test/operators/eltwise_binary_comparison/models/model_6.py index 47cfd594a..1d3c1a280 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_6.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_6.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 6 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 6 @@ -42,9 +42,9 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for _ in range(3): @@ -58,11 +58,11 @@ def __init__( def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x1, x2) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param2) - mul4 = pybuda.op.Multiply("mul4", x2, x3) - mul5 = pybuda.op.Multiply("mul5", self.train_param2, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x1, x2) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) + mul4 = forge.op.Multiply("mul4", x2, x3) + mul5 = forge.op.Multiply("mul5", self.train_param2, self.train_param3) # Layer 3 comp1 = self.operator(self.opname + "1", mul1, self.train_param1) @@ 
-72,16 +72,16 @@ def forward(self, x1, x2, x3): comp5 = self.operator(self.opname + "5", mul5, self.train_param3) # Layer 4 - mul6 = pybuda.op.Multiply("mul6", comp1, mul2) - mul7 = pybuda.op.Multiply("mul7", comp2, comp3) - mul8 = pybuda.op.Multiply("mul8", comp3, mul4) - mul9 = pybuda.op.Multiply("mul9", comp4, mul5) - mul10 = pybuda.op.Multiply("mul10", comp4, comp5) + mul6 = forge.op.Multiply("mul6", comp1, mul2) + mul7 = forge.op.Multiply("mul7", comp2, comp3) + mul8 = forge.op.Multiply("mul8", comp3, mul4) + mul9 = forge.op.Multiply("mul9", comp4, mul5) + mul10 = forge.op.Multiply("mul10", comp4, comp5) # Layer 5 - mul11 = pybuda.op.Multiply("mul11", mul6, mul8) - mul12 = pybuda.op.Multiply("mul12", mul7, mul9) - mul13 = pybuda.op.Multiply("mul13", mul8, mul10) + mul11 = forge.op.Multiply("mul11", mul6, mul8) + mul12 = forge.op.Multiply("mul12", mul7, mul9) + mul13 = forge.op.Multiply("mul13", mul8, mul10) return mul11, mul12, mul13 diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_7.py b/forge/test/operators/eltwise_binary_comparison/models/model_7.py similarity index 57% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_7.py rename to forge/test/operators/eltwise_binary_comparison/models/model_7.py index 5207f43da..96e58558d 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_7.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_7.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 7 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 7 @@ -42,9 +42,9 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for _ in range(3): @@ -65,12 +65,12 @@ def forward(self, x1, x2, x3): comp5 = self.operator(self.opname + "5", self.train_param2, self.train_param3) # Layer 3 - mul1 = pybuda.op.Multiply("mul1", x1, comp2) - mul2 = pybuda.op.Multiply("mul2", self.train_param1, comp4) - mul3 = pybuda.op.Multiply("mul3", x2, comp5) - mul4 = pybuda.op.Multiply("mul4", comp3, x3) - mul5 = pybuda.op.Multiply("mul5", comp4, comp5) - mul6 = pybuda.op.Multiply("mul6", comp5, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, comp2) + mul2 = forge.op.Multiply("mul2", self.train_param1, comp4) + mul3 = forge.op.Multiply("mul3", x2, comp5) + mul4 = forge.op.Multiply("mul4", comp3, x3) + mul5 = forge.op.Multiply("mul5", comp4, comp5) + mul6 = forge.op.Multiply("mul6", comp5, self.train_param3) # Layer 4 comp6 = self.operator(self.opname + "6", mul1, mul2) @@ -80,22 +80,22 @@ def forward(self, x1, x2, x3): comp10 = 
self.operator(self.opname + "10", comp5, mul6) # Layer 5 - mul7 = pybuda.op.Multiply("mul7", comp6, mul2) - mul8 = pybuda.op.Multiply("mul8", mul1, comp9) - mul9 = pybuda.op.Multiply("mul9", comp7, mul4) - mul10 = pybuda.op.Multiply("mul10", comp8, mul5) - mul11 = pybuda.op.Multiply("mul11", mul4, comp10) + mul7 = forge.op.Multiply("mul7", comp6, mul2) + mul8 = forge.op.Multiply("mul8", mul1, comp9) + mul9 = forge.op.Multiply("mul9", comp7, mul4) + mul10 = forge.op.Multiply("mul10", comp8, mul5) + mul11 = forge.op.Multiply("mul11", mul4, comp10) # Layer 6 - mul12 = pybuda.op.Multiply("mul12", mul7, mul3) - mul13 = pybuda.op.Multiply("mul13", mul8, mul2) - mul14 = pybuda.op.Multiply("mul14", mul9, mul5) - mul15 = pybuda.op.Multiply("mul15", mul10, mul11) + mul12 = forge.op.Multiply("mul12", mul7, mul3) + mul13 = forge.op.Multiply("mul13", mul8, mul2) + mul14 = forge.op.Multiply("mul14", mul9, mul5) + mul15 = forge.op.Multiply("mul15", mul10, mul11) # Layer 7 - mul16 = pybuda.op.Multiply("mul16", mul12, mul13) - mul17 = pybuda.op.Multiply("mul17", mul13, mul14) - mul18 = pybuda.op.Multiply("mul18", mul14, mul15) + mul16 = forge.op.Multiply("mul16", mul12, mul13) + mul17 = forge.op.Multiply("mul17", mul13, mul14) + mul18 = forge.op.Multiply("mul18", mul14, mul15) return mul16, mul17, mul18 diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_8.py b/forge/test/operators/eltwise_binary_comparison/models/model_8.py similarity index 52% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_8.py rename to forge/test/operators/eltwise_binary_comparison/models/model_8.py index 0bb049574..1959f2a2b 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_8.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_8.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 8 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 8 @@ -42,9 +42,9 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for _ in range(3): @@ -58,14 +58,14 @@ def __init__( def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param2) - mul2 = pybuda.op.Multiply("mul2", self.train_param1, x3) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param2) + mul2 = forge.op.Multiply("mul2", self.train_param1, x3) + mul3 = forge.op.Multiply("mul3", x2, self.train_param3) # Layer 3 - mul4 = pybuda.op.Multiply("mul4", x1, self.train_param1) - mul5 = pybuda.op.Multiply("mul5", x2, 
self.train_param2) - mul6 = pybuda.op.Multiply("mul6", x3, self.train_param3) + mul4 = forge.op.Multiply("mul4", x1, self.train_param1) + mul5 = forge.op.Multiply("mul5", x2, self.train_param2) + mul6 = forge.op.Multiply("mul6", x3, self.train_param3) # Layer 4 comp1 = self.operator(self.opname + "1", x1, self.train_param1) @@ -75,18 +75,18 @@ def forward(self, x1, x2, x3): comp5 = self.operator(self.opname + "5", mul6, x3) # Layer 5 - mul7 = pybuda.op.Multiply("mul7", comp1, mul4) - mul8 = pybuda.op.Multiply("mul8", comp2, mul1) - mul9 = pybuda.op.Multiply("mul9", comp3, mul5) - mul10 = pybuda.op.Multiply("mul10", comp4, mul6) - mul11 = pybuda.op.Multiply("mul11", comp5, self.train_param3) + mul7 = forge.op.Multiply("mul7", comp1, mul4) + mul8 = forge.op.Multiply("mul8", comp2, mul1) + mul9 = forge.op.Multiply("mul9", comp3, mul5) + mul10 = forge.op.Multiply("mul10", comp4, mul6) + mul11 = forge.op.Multiply("mul11", comp5, self.train_param3) # Layer 6 - mul12 = pybuda.op.Multiply("mul12", mul7, comp3) - mul13 = pybuda.op.Multiply("mul13", comp1, mul9) - mul14 = pybuda.op.Multiply("mul14", mul8, comp4) - mul15 = pybuda.op.Multiply("mul15", mul10, comp5) - mul16 = pybuda.op.Multiply("mul16", comp2, mul11) + mul12 = forge.op.Multiply("mul12", mul7, comp3) + mul13 = forge.op.Multiply("mul13", comp1, mul9) + mul14 = forge.op.Multiply("mul14", mul8, comp4) + mul15 = forge.op.Multiply("mul15", mul10, comp5) + mul16 = forge.op.Multiply("mul16", comp2, mul11) # Layer 7 comp6 = self.operator(self.opname + "6", mul12, mul4) @@ -95,10 +95,10 @@ def forward(self, x1, x2, x3): comp9 = self.operator(self.opname + "9", mul15, mul16) # Layer 8 - mul17 = pybuda.op.Multiply("mul17", comp6, mul8) - mul18 = pybuda.op.Multiply("mul18", comp7, mul9) - mul19 = pybuda.op.Multiply("mul19", comp8, mul15) - mul20 = pybuda.op.Multiply("mul20", comp9, mul14) + mul17 = forge.op.Multiply("mul17", comp6, mul8) + mul18 = forge.op.Multiply("mul18", comp7, mul9) + mul19 = forge.op.Multiply("mul19", comp8, mul15) + mul20 = forge.op.Multiply("mul20", comp9, mul14) # Layer 9 comp10 = self.operator(self.opname + "10", mul17, mul18) @@ -107,15 +107,15 @@ def forward(self, x1, x2, x3): comp13 = self.operator(self.opname + "13", mul20, mul16) # Layer 10 - mul21 = pybuda.op.Multiply("mul21", comp10, mul18) - mul22 = pybuda.op.Multiply("mul22", comp11, mul19) - mul23 = pybuda.op.Multiply("mul23", comp12, mul20) - mul24 = pybuda.op.Multiply("mul24", comp13, mul15) + mul21 = forge.op.Multiply("mul21", comp10, mul18) + mul22 = forge.op.Multiply("mul22", comp11, mul19) + mul23 = forge.op.Multiply("mul23", comp12, mul20) + mul24 = forge.op.Multiply("mul24", comp13, mul15) # Layer 11 - mul25 = pybuda.op.Multiply("mul25", mul17, mul22) - mul26 = pybuda.op.Multiply("mul26", mul21, mul23) - mul27 = pybuda.op.Multiply("mul27", mul19, mul24) + mul25 = forge.op.Multiply("mul25", mul17, mul22) + mul26 = forge.op.Multiply("mul26", mul21, mul23) + mul27 = forge.op.Multiply("mul27", mul19, mul24) return mul25, mul26, mul27 diff --git a/pybuda/test/operators/eltwise_binary_comparison/models/model_9.py b/forge/test/operators/eltwise_binary_comparison/models/model_9.py similarity index 51% rename from pybuda/test/operators/eltwise_binary_comparison/models/model_9.py rename to forge/test/operators/eltwise_binary_comparison/models/model_9.py index 782a60cf8..26e848f33 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/models/model_9.py +++ b/forge/test/operators/eltwise_binary_comparison/models/model_9.py @@ -3,21 +3,21 @@ # 
SPDX-License-Identifier: Apache-2.0 # # Test 9 -# Cimparison operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Cimparison operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaComparisonTest(PyBudaModule): +class BudaComparisonTest(ForgeModule): """ Buda Test 9 @@ -42,9 +42,9 @@ def __init__( self.rng_min = rng_min self.rng_max = rng_max - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for _ in range(3): @@ -58,23 +58,23 @@ def __init__( def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) comp1 = self.operator(self.opname + "1", x1, self.train_param1) comp2 = self.operator(self.opname + "2", x2, self.train_param2) comp3 = self.operator(self.opname + "3", x3, self.train_param3) # Layer 3 - mul4 = pybuda.op.Multiply("mul4", comp1, x2) - mul5 = pybuda.op.Multiply("mul5", comp2, x3) - mul6 = pybuda.op.Multiply("mul6", self.train_param3, comp3) + mul4 = forge.op.Multiply("mul4", comp1, x2) + mul5 = forge.op.Multiply("mul5", comp2, x3) + mul6 = forge.op.Multiply("mul6", self.train_param3, comp3) # Layer 4 - mul7 = pybuda.op.Multiply("mul7", mul1, mul2) - mul8 = pybuda.op.Multiply("mul8", mul4, mul2) - mul9 = pybuda.op.Multiply("mul9", mul5, mul3) - mul10 = pybuda.op.Multiply("mul10", mul2, mul6) + mul7 = forge.op.Multiply("mul7", mul1, mul2) + mul8 = forge.op.Multiply("mul8", mul4, mul2) + mul9 = forge.op.Multiply("mul9", mul5, mul3) + mul10 = forge.op.Multiply("mul10", mul2, mul6) # Layer 5 comp4 = self.operator(self.opname + "4", mul7, mul4) @@ -82,10 +82,10 @@ def forward(self, x1, x2, x3): comp6 = self.operator(self.opname + "6", mul2, mul10) # Layer 6 - mul11 = pybuda.op.Multiply("mul11", comp4, mul8) - mul12 = pybuda.op.Multiply("mul12", comp5, mul5) - mul13 = pybuda.op.Multiply("mul13", mul9, mul6) - mul14 = pybuda.op.Multiply("mul14", comp6, mul10) + mul11 = forge.op.Multiply("mul11", comp4, mul8) + mul12 = forge.op.Multiply("mul12", comp5, mul5) + mul13 = forge.op.Multiply("mul13", mul9, mul6) + mul14 = forge.op.Multiply("mul14", comp6, mul10) # Layer 7 comp7 = self.operator(self.opname + "7", mul1, mul11) @@ -95,22 +95,22 @@ def forward(self, x1, x2, x3): comp11 = self.operator(self.opname + "11", mul14, mul6) # Layer 8 - mul15 = pybuda.op.Multiply("mul15", comp7, mul12) - mul16 = pybuda.op.Multiply("mul16", comp8, mul13) - mul17 = pybuda.op.Multiply("mul17", comp9, mul14) - mul18 = pybuda.op.Multiply("mul18", comp10, mul13) - mul19 = pybuda.op.Multiply("mul19", comp11, mul14) + mul15 = 
forge.op.Multiply("mul15", comp7, mul12) + mul16 = forge.op.Multiply("mul16", comp8, mul13) + mul17 = forge.op.Multiply("mul17", comp9, mul14) + mul18 = forge.op.Multiply("mul18", comp10, mul13) + mul19 = forge.op.Multiply("mul19", comp11, mul14) # Layer 9 - mul20 = pybuda.op.Multiply("mul20", mul15, mul16) - mul21 = pybuda.op.Multiply("mul21", mul16, comp9) - mul22 = pybuda.op.Multiply("mul22", mul17, comp10) - mul23 = pybuda.op.Multiply("mul23", mul18, mul19) + mul20 = forge.op.Multiply("mul20", mul15, mul16) + mul21 = forge.op.Multiply("mul21", mul16, comp9) + mul22 = forge.op.Multiply("mul22", mul17, comp10) + mul23 = forge.op.Multiply("mul23", mul18, mul19) # Layer 10 - mul24 = pybuda.op.Multiply("mul24", comp8, mul21) - mul25 = pybuda.op.Multiply("mul25", comp9, mul22) - mul26 = pybuda.op.Multiply("mul26", comp11, mul23) + mul24 = forge.op.Multiply("mul24", comp8, mul21) + mul25 = forge.op.Multiply("mul25", comp9, mul22) + mul26 = forge.op.Multiply("mul26", comp11, mul23) # Layer 11 comp12 = self.operator(self.opname + "12", mul20, mul24) @@ -118,12 +118,12 @@ def forward(self, x1, x2, x3): comp14 = self.operator(self.opname + "14", mul18, mul26) # Layer 12 - mul27 = pybuda.op.Multiply("mul27", comp12, mul21) - mul28 = pybuda.op.Multiply("mul28", comp13, comp14) + mul27 = forge.op.Multiply("mul27", comp12, mul21) + mul28 = forge.op.Multiply("mul28", comp13, comp14) # Layer 13 - mul29 = pybuda.op.Multiply("mul29", mul27, mul25) - mul30 = pybuda.op.Multiply("mul30", mul28, mul22) + mul29 = forge.op.Multiply("mul29", mul27, mul25) + mul30 = forge.op.Multiply("mul30", mul28, mul22) return mul29, mul30 diff --git a/pybuda/test/operators/eltwise_binary_comparison/test_eltwise_binary_comparison.py b/forge/test/operators/eltwise_binary_comparison/test_eltwise_binary_comparison.py similarity index 89% rename from pybuda/test/operators/eltwise_binary_comparison/test_eltwise_binary_comparison.py rename to forge/test/operators/eltwise_binary_comparison/test_eltwise_binary_comparison.py index 06a96b42d..e96e25dc3 100644 --- a/pybuda/test/operators/eltwise_binary_comparison/test_eltwise_binary_comparison.py +++ b/forge/test/operators/eltwise_binary_comparison/test_eltwise_binary_comparison.py @@ -12,13 +12,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . 
import models -MODELS_PATH = "./pybuda/test/operators/eltwise_binary_comparison/models/" +MODELS_PATH = "./forge/test/operators/eltwise_binary_comparison/models/" SHAPE_NO = 1 SHAPE_DIM_MIN = 1 @@ -85,7 +85,7 @@ def test_comparison( architecture = f'models.{model}.BudaComparisonTest(' +\ f'shape={shape} ,' +\ f'opname="{op}" ,' +\ - f'operator=pybuda.op.{op} ,' +\ + f'operator=forge.op.{op} ,' +\ f'mask={mask} ,' +\ f'rng_min={rng_min} ,' +\ f'rng_max={rng_max})' @@ -93,8 +93,8 @@ def test_comparison( tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - #Fusing disabled due to tenstorrent/pybuda#784 - pybuda_compile( + #Fusing disabled due to tenstorrent/forge#784 + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/eltwise_unary/__init__.py b/forge/test/operators/eltwise_unary/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_unary/__init__.py rename to forge/test/operators/eltwise_unary/__init__.py diff --git a/pybuda/test/operators/eltwise_unary/conftest.py b/forge/test/operators/eltwise_unary/conftest.py similarity index 100% rename from pybuda/test/operators/eltwise_unary/conftest.py rename to forge/test/operators/eltwise_unary/conftest.py diff --git a/pybuda/test/operators/eltwise_unary/models/__init__.py b/forge/test/operators/eltwise_unary/models/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_unary/models/__init__.py rename to forge/test/operators/eltwise_unary/models/__init__.py diff --git a/pybuda/test/operators/eltwise_unary/models/model_1.py b/forge/test/operators/eltwise_unary/models/model_1.py similarity index 75% rename from pybuda/test/operators/eltwise_unary/models/model_1.py rename to forge/test/operators/eltwise_unary/models/model_1.py index 4c1b086e1..7683f09ed 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_1.py +++ b/forge/test/operators/eltwise_unary/models/model_1.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# Unary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 1 @@ -23,7 +23,7 @@ class BudaElementWiseUnaryTest(PyBudaModule): One operand represents input and the other one is trainable paramater. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. 
@@ -36,13 +36,13 @@ def __init__(self, operator, opname, shape, **kwargs): self.testname = "Operator " + opname + " Test 1" self.shape = shape self.kwargs = kwargs - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) def forward(self, x): - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) un = self.operator(self.opname, mul, **self.kwargs) return un diff --git a/pybuda/test/operators/eltwise_unary/models/model_10.py b/forge/test/operators/eltwise_unary/models/model_10.py similarity index 65% rename from pybuda/test/operators/eltwise_unary/models/model_10.py rename to forge/test/operators/eltwise_unary/models/model_10.py index 002edbef4..ba8f61c1c 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_10.py +++ b/forge/test/operators/eltwise_unary/models/model_10.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 10 -# Unary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 10 In this test we have 11 unary operations, and 2 input tensors and 2 trainable variables. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. 
@@ -36,8 +36,8 @@ def __init__(self, operator, opname, shape, **kwargs): self.shape = shape self.kwargs = kwargs - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] @@ -47,10 +47,10 @@ def __init__(self, operator, opname, shape, **kwargs): def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, x1) - mul2 = pybuda.op.Multiply("mul2", self.train_param1, self.train_param1) - mul3 = pybuda.op.Multiply("mul3", x2, x2) - mul4 = pybuda.op.Multiply("mul4", self.train_param2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, x1) + mul2 = forge.op.Multiply("mul2", self.train_param1, self.train_param1) + mul3 = forge.op.Multiply("mul3", x2, x2) + mul4 = forge.op.Multiply("mul4", self.train_param2, self.train_param2) # Layer 3 un1 = self.operator(self.opname + "1", mul1, **self.kwargs) @@ -59,11 +59,11 @@ def forward(self, x1, x2): un4 = self.operator(self.opname + "4", mul4, **self.kwargs) # Layer 4 - mul5 = pybuda.op.Multiply("mul5", un1, un2) - mul6 = pybuda.op.Multiply("mul6", un1, un2) - mul7 = pybuda.op.Multiply("mul7", un1, un2) - mul8 = pybuda.op.Multiply("mul8", un3, un4) - mul9 = pybuda.op.Multiply("mul9", un3, un4) + mul5 = forge.op.Multiply("mul5", un1, un2) + mul6 = forge.op.Multiply("mul6", un1, un2) + mul7 = forge.op.Multiply("mul7", un1, un2) + mul8 = forge.op.Multiply("mul8", un3, un4) + mul9 = forge.op.Multiply("mul9", un3, un4) # Layer 5 un5 = self.operator(self.opname + "5", mul5, **self.kwargs) @@ -73,10 +73,10 @@ def forward(self, x1, x2): un9 = self.operator(self.opname + "9", mul9, **self.kwargs) # Layer 6 - mul10 = pybuda.op.Multiply("mul10", un5, un6) - mul11 = pybuda.op.Multiply("mul11", un6, un7) - mul12 = pybuda.op.Multiply("mul12", un7, un8) - mul13 = pybuda.op.Multiply("mul13", un8, un9) + mul10 = forge.op.Multiply("mul10", un5, un6) + mul11 = forge.op.Multiply("mul11", un6, un7) + mul12 = forge.op.Multiply("mul12", un7, un8) + mul13 = forge.op.Multiply("mul13", un8, un9) # Layer 7 un10 = self.operator(self.opname + "10", mul10, **self.kwargs) @@ -85,9 +85,9 @@ def forward(self, x1, x2): un13 = self.operator(self.opname + "13", mul13, **self.kwargs) # Layer 8 - mul14 = pybuda.op.Multiply("mul14", un10, un6) - mul15 = pybuda.op.Multiply("mul15", un11, un7) - mul16 = pybuda.op.Multiply("mul16", un12, un13) + mul14 = forge.op.Multiply("mul14", un10, un6) + mul15 = forge.op.Multiply("mul15", un11, un7) + mul16 = forge.op.Multiply("mul16", un12, un13) # Layer 9 un14 = self.operator(self.opname + "14", mul14, **self.kwargs) @@ -95,15 +95,15 @@ def forward(self, x1, x2): un16 = self.operator(self.opname + "16", mul16, **self.kwargs) # Layer 10 - mul17 = pybuda.op.Multiply("mul17", un14, un11) - mul18 = pybuda.op.Multiply("mul18", un15, un12) + mul17 = forge.op.Multiply("mul17", un14, un11) + mul18 = forge.op.Multiply("mul18", un15, un12) # Layer 11 un17 = self.operator(self.opname + "17", mul17, **self.kwargs) un18 = self.operator(self.opname + "18", mul18, **self.kwargs) # Layer 12 - mul19 = pybuda.op.Multiply("mul19", un17, un18) + mul19 = forge.op.Multiply("mul19", un17, un18) # Layer 13 un19 = self.operator(self.opname + "19", mul19, **self.kwargs) diff --git a/pybuda/test/operators/eltwise_unary/models/model_2.py 
b/forge/test/operators/eltwise_unary/models/model_2.py similarity index 68% rename from pybuda/test/operators/eltwise_unary/models/model_2.py rename to forge/test/operators/eltwise_unary/models/model_2.py index 63a1eee3e..fb3d96ae9 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_2.py +++ b/forge/test/operators/eltwise_unary/models/model_2.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# Unary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 2 In this test we have 6 unary operations, and three input tensors and three trainable variables. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. @@ -36,9 +36,9 @@ def __init__(self, operator, opname, shape, **kwargs): self.shape = shape self.kwargs = kwargs - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] @@ -48,9 +48,9 @@ def __init__(self, operator, opname, shape, **kwargs): def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - add1 = pybuda.op.Add("add1", x2, self.train_param2) - mul2 = pybuda.op.Multiply("mul2", x3, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + add1 = forge.op.Add("add1", x2, self.train_param2) + mul2 = forge.op.Multiply("mul2", x3, self.train_param3) # Layer 3 un1 = self.operator(self.opname + "1", mul1, **self.kwargs) @@ -58,14 +58,14 @@ def forward(self, x1, x2, x3): un3 = self.operator(self.opname + "3", mul2, **self.kwargs) # Layer 4 - mul3 = pybuda.op.Multiply("mul3", un1, un2) + mul3 = forge.op.Multiply("mul3", un1, un2) # Layer 5 un4 = self.operator(self.opname + "4", mul3, **self.kwargs) un5 = self.operator(self.opname + "5", un3, **self.kwargs) # Layer 6 - add2 = pybuda.op.Add("add2", un4, un5) + add2 = forge.op.Add("add2", un4, un5) un6 = self.operator(self.opname + "6", add2, **self.kwargs) diff --git a/pybuda/test/operators/eltwise_unary/models/model_3.py b/forge/test/operators/eltwise_unary/models/model_3.py similarity index 69% rename from pybuda/test/operators/eltwise_unary/models/model_3.py rename to forge/test/operators/eltwise_unary/models/model_3.py index 741db7438..8abcabf5a 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_3.py +++ b/forge/test/operators/eltwise_unary/models/model_3.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 3 -# Unary element-wise operators defined by PyBuda API -# 
These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 3 In this test we have 11 unary operations, and 3 input tensors and 3 trainable variables. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. @@ -36,9 +36,9 @@ def __init__(self, operator, opname, shape, **kwargs): self.shape = shape self.kwargs = kwargs - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] @@ -54,9 +54,9 @@ def forward(self, x1, x2, x3): un4 = self.operator(self.opname + "4", x3, **self.kwargs) # Layer 3 - add1 = pybuda.op.Add("add1", un1, un2) - mul1 = pybuda.op.Multiply("mul1", x2, un3) - sub1 = pybuda.op.Subtract("sub1", un4, self.train_param3) + add1 = forge.op.Add("add1", un1, un2) + mul1 = forge.op.Multiply("mul1", x2, un3) + sub1 = forge.op.Subtract("sub1", un4, self.train_param3) # Layer 4 un5 = self.operator(self.opname + "5", add1, **self.kwargs) @@ -64,20 +64,20 @@ def forward(self, x1, x2, x3): un7 = self.operator(self.opname + "7", sub1, **self.kwargs) # Layer 5 - add2 = pybuda.op.Add("add2", self.train_param3, un7) - mul2 = pybuda.op.Multiply("mul2", x2, un6) + add2 = forge.op.Add("add2", self.train_param3, un7) + mul2 = forge.op.Multiply("mul2", x2, un6) # Layer 6 un8 = self.operator(self.opname + "8", mul2, **self.kwargs) un9 = self.operator(self.opname + "9", add2, **self.kwargs) - sub2 = pybuda.op.Subtract("sub2", self.train_param1, un9) + sub2 = forge.op.Subtract("sub2", self.train_param1, un9) # Layer 7 - add3 = pybuda.op.Add("add3", un5, un8) + add3 = forge.op.Add("add3", un5, un8) un10 = self.operator(self.opname + "10", sub2, **self.kwargs) # Layer 8 - mul3 = pybuda.op.Multiply("mul3", add3, un10) + mul3 = forge.op.Multiply("mul3", add3, un10) # Layer 9 un11 = self.operator(self.opname + "11", mul3, **self.kwargs) diff --git a/pybuda/test/operators/eltwise_unary/models/model_4.py b/forge/test/operators/eltwise_unary/models/model_4.py similarity index 72% rename from pybuda/test/operators/eltwise_unary/models/model_4.py rename to forge/test/operators/eltwise_unary/models/model_4.py index 451b2c3c1..2aee3eb2a 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_4.py +++ b/forge/test/operators/eltwise_unary/models/model_4.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 4 -# Unary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined 
by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 4 In this test we have 11 unary operations, and 2 input tensors and 2 trainable variables. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. @@ -36,8 +36,8 @@ def __init__(self, operator, opname, shape, **kwargs): self.shape = shape self.kwargs = kwargs - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] @@ -47,8 +47,8 @@ def __init__(self, operator, opname, shape, **kwargs): def forward(self, x1, x2): # Layer 2 - add1 = pybuda.op.Add("add1", x1, self.train_param1) - mul1 = pybuda.op.Multiply("mul1", x2, self.train_param2) + add1 = forge.op.Add("add1", x1, self.train_param1) + mul1 = forge.op.Multiply("mul1", x2, self.train_param2) # Layer 3 un1 = self.operator(self.opname + "1", add1, **self.kwargs) @@ -57,8 +57,8 @@ def forward(self, x1, x2): un4 = self.operator(self.opname + "4", x2, **self.kwargs) # Layer 4 - sub1 = pybuda.op.Subtract("sub1", un1, un2) - add2 = pybuda.op.Add("add2", un3, un4) + sub1 = forge.op.Subtract("sub1", un1, un2) + add2 = forge.op.Add("add2", un3, un4) # Layer 5 un5 = self.operator(self.opname + "5", self.train_param2, **self.kwargs) @@ -67,13 +67,13 @@ def forward(self, x1, x2): un8 = self.operator(self.opname + "8", add2, **self.kwargs) # Layer 6 - mul2 = pybuda.op.Multiply("mul2", un5, un6) - add3 = pybuda.op.Add("add3", un7, un8) + mul2 = forge.op.Multiply("mul2", un5, un6) + add3 = forge.op.Add("add3", un7, un8) # Layer 7 un9 = self.operator(self.opname + "9", mul2, **self.kwargs) un10 = self.operator(self.opname + "10", add3, **self.kwargs) - add4 = pybuda.op.Add("add4", un9, un10) + add4 = forge.op.Add("add4", un9, un10) # Layer 8 un11 = self.operator(self.opname + "11", add4, **self.kwargs) diff --git a/pybuda/test/operators/eltwise_unary/models/model_5.py b/forge/test/operators/eltwise_unary/models/model_5.py similarity index 64% rename from pybuda/test/operators/eltwise_unary/models/model_5.py rename to forge/test/operators/eltwise_unary/models/model_5.py index 7226f023c..30151fb19 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_5.py +++ b/forge/test/operators/eltwise_unary/models/model_5.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 5 -# Unary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class 
BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 5 In this test we have 23 unary operations, and three input tensors and three trainable variables. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. @@ -36,9 +36,9 @@ def __init__(self, operator, opname, shape, **kwargs): self.shape = shape self.kwargs = kwargs - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] @@ -56,12 +56,12 @@ def forward(self, x1, x2, x3): un6 = self.operator(self.opname + "6", self.train_param3, **self.kwargs) # Layer 3 - add1 = pybuda.op.Add("add1", un4, un5) - sub1 = pybuda.op.Subtract("sub1", un1, un3) - add2 = pybuda.op.Add("add2", un2, un6) - sub2 = pybuda.op.Subtract("sub2", un1, un4) - add3 = pybuda.op.Add("add3", un3, un6) - sub3 = pybuda.op.Subtract("sub3", un2, un5) + add1 = forge.op.Add("add1", un4, un5) + sub1 = forge.op.Subtract("sub1", un1, un3) + add2 = forge.op.Add("add2", un2, un6) + sub2 = forge.op.Subtract("sub2", un1, un4) + add3 = forge.op.Add("add3", un3, un6) + sub3 = forge.op.Subtract("sub3", un2, un5) # Layer 4 un7 = self.operator(self.opname + "7", add1, **self.kwargs) @@ -72,12 +72,12 @@ def forward(self, x1, x2, x3): un12 = self.operator(self.opname + "12", sub3, **self.kwargs) # Layer 5 - add4 = pybuda.op.Add("add4", un7, self.train_param1) - mul1 = pybuda.op.Multiply("mul1", un8, un3) - mul2 = pybuda.op.Multiply("mul2", un9, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", un10, x3) - add5 = pybuda.op.Add("add5", un11, un6) - sub4 = pybuda.op.Subtract("sub4", un12, self.train_param3) + add4 = forge.op.Add("add4", un7, self.train_param1) + mul1 = forge.op.Multiply("mul1", un8, un3) + mul2 = forge.op.Multiply("mul2", un9, self.train_param2) + mul3 = forge.op.Multiply("mul3", un10, x3) + add5 = forge.op.Add("add5", un11, un6) + sub4 = forge.op.Subtract("sub4", un12, self.train_param3) # Layer 6 un13 = self.operator(self.opname + "13", add4, **self.kwargs) @@ -88,31 +88,31 @@ def forward(self, x1, x2, x3): un18 = self.operator(self.opname + "18", sub4, **self.kwargs) # Layer 7 - add6 = pybuda.op.Add("add6", un13, un14) - add7 = pybuda.op.Add("add7", un14, un9) - mul4 = pybuda.op.Multiply("mul4", un15, un10) - add8 = pybuda.op.Add("add8", un16, un17) - sub5 = pybuda.op.Subtract("sub5", un11, un18) + add6 = forge.op.Add("add6", un13, un14) + add7 = forge.op.Add("add7", un14, un9) + mul4 = forge.op.Multiply("mul4", un15, un10) + add8 = forge.op.Add("add8", un16, un17) + sub5 = forge.op.Subtract("sub5", un11, un18) # Layer 8 - add9 = pybuda.op.Add("add9", add6, add7) - mul5 = pybuda.op.Multiply("mul5", un15, mul4) - mul6 = pybuda.op.Multiply("mul6", add8, sub5) + add9 = forge.op.Add("add9", add6, add7) + mul5 = forge.op.Multiply("mul5", un15, mul4) + mul6 = forge.op.Multiply("mul6", add8, sub5) # Layer 9 un19 = self.operator(self.opname + "19", add9, **self.kwargs) un20 = 
self.operator(self.opname + "20", mul6, **self.kwargs) # Layer 10 - mul7 = pybuda.op.Multiply("mul7", un19, mul5) - mul8 = pybuda.op.Multiply("mul8", mul5, un20) + mul7 = forge.op.Multiply("mul7", un19, mul5) + mul8 = forge.op.Multiply("mul8", mul5, un20) # Layer 11 un21 = self.operator(self.opname + "21", mul7, **self.kwargs) un22 = self.operator(self.opname + "22", mul8, **self.kwargs) # Layer 12 - add10 = pybuda.op.Add("add10", un21, un22) + add10 = forge.op.Add("add10", un21, un22) # Layer 13 un23 = self.operator(self.opname + "23", add10, **self.kwargs) diff --git a/pybuda/test/operators/eltwise_unary/models/model_6.py b/forge/test/operators/eltwise_unary/models/model_6.py similarity index 61% rename from pybuda/test/operators/eltwise_unary/models/model_6.py rename to forge/test/operators/eltwise_unary/models/model_6.py index 1678549f6..861b25ba6 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_6.py +++ b/forge/test/operators/eltwise_unary/models/model_6.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 6 -# Unary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 6 In this test we have 15 unary operations, and 3 input tensors and 6 trainable variables. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. 
@@ -36,13 +36,13 @@ def __init__(self, operator, opname, shape, **kwargs): self.shape = shape self.kwargs = kwargs - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param5 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param6 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param5 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param6 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] @@ -60,14 +60,14 @@ def forward(self, x1, x2, x3): un6 = self.operator(self.opname + "6", self.train_param3, **self.kwargs) # Layer 3 - add1 = pybuda.op.Add("add1", un1, un2) - mul1 = pybuda.op.Multiply("mul1", un3, un4) - add2 = pybuda.op.Add("add2", un5, un6) + add1 = forge.op.Add("add1", un1, un2) + mul1 = forge.op.Multiply("mul1", un3, un4) + add2 = forge.op.Add("add2", un5, un6) # Layer 4 - mul2 = pybuda.op.Multiply("mul2", add1, self.train_param6) - mul3 = pybuda.op.Multiply("mul3", mul1, self.train_param5) - mul4 = pybuda.op.Multiply("mul4", add2, self.train_param4) + mul2 = forge.op.Multiply("mul2", add1, self.train_param6) + mul3 = forge.op.Multiply("mul3", mul1, self.train_param5) + mul4 = forge.op.Multiply("mul4", add2, self.train_param4) # Layer 5 un7 = self.operator(self.opname + "7", mul2, **self.kwargs) @@ -75,9 +75,9 @@ def forward(self, x1, x2, x3): un9 = self.operator(self.opname + "9", mul4, **self.kwargs) # Layer 6 - mul5 = pybuda.op.Multiply("mul5", un7, self.train_param4) - mul6 = pybuda.op.Multiply("mul6", un8, self.train_param5) - mul7 = pybuda.op.Multiply("mul7", un9, self.train_param6) + mul5 = forge.op.Multiply("mul5", un7, self.train_param4) + mul6 = forge.op.Multiply("mul6", un8, self.train_param5) + mul7 = forge.op.Multiply("mul7", un9, self.train_param6) # Layer 7 un10 = self.operator(self.opname + "10", mul5, **self.kwargs) @@ -85,19 +85,19 @@ def forward(self, x1, x2, x3): un12 = self.operator(self.opname + "12", mul7, **self.kwargs) # Layer 8 - mul8 = pybuda.op.Multiply("mul8", un10, self.train_param1) - mul9 = pybuda.op.Multiply("mul9", un11, self.train_param2) - mul10 = pybuda.op.Multiply("mul10", un12, self.train_param3) + mul8 = forge.op.Multiply("mul8", un10, self.train_param1) + mul9 = forge.op.Multiply("mul9", un11, self.train_param2) + mul10 = forge.op.Multiply("mul10", un12, self.train_param3) # Layer 9 un14 = self.operator(self.opname + "14", mul10, **self.kwargs) - mul11 = pybuda.op.Multiply("mul11", mul8, mul9) + mul11 = forge.op.Multiply("mul11", mul8, mul9) # Layer 10 un13 = self.operator(self.opname + "13", mul11, **self.kwargs) # Layer 11 - mul12 = pybuda.op.Multiply("mul12", un13, un14) + mul12 = forge.op.Multiply("mul12", un13, un14) # Layer 12 un15 = self.operator(self.opname + "15", mul12, **self.kwargs) diff --git a/pybuda/test/operators/eltwise_unary/models/model_7.py b/forge/test/operators/eltwise_unary/models/model_7.py similarity index 52% rename from 
pybuda/test/operators/eltwise_unary/models/model_7.py rename to forge/test/operators/eltwise_unary/models/model_7.py index 5683120d0..d163d60aa 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_7.py +++ b/forge/test/operators/eltwise_unary/models/model_7.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 7 -# Unary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 7 In this test we have 6 unary operations, and 3 input tensors and 6 trainable variables. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. @@ -36,17 +36,17 @@ def __init__(self, operator, opname, shape, **kwargs): self.shape = shape self.kwargs = kwargs - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param5 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param6 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param5 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param6 = forge.Parameter(*self.shape, requires_grad=True) - self.train_param7 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param8 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param9 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param7 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param8 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param9 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] @@ -56,9 +56,9 @@ def __init__(self, operator, opname, shape, **kwargs): def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) # Layer 3 un1 = self.operator(self.opname + "1", mul1, **self.kwargs) @@ -66,9 +66,9 @@ def forward(self, x1, x2, x3): un3 = self.operator(self.opname + "3", mul3, **self.kwargs) # Layer 4 - mul4 = pybuda.op.Multiply("mul4", un1, self.train_param4) - mul5 = pybuda.op.Multiply("mul5", un2, self.train_param5) - mul6 = 
pybuda.op.Multiply("mul6", un3, self.train_param6) + mul4 = forge.op.Multiply("mul4", un1, self.train_param4) + mul5 = forge.op.Multiply("mul5", un2, self.train_param5) + mul6 = forge.op.Multiply("mul6", un3, self.train_param6) # Layer 5 un4 = self.operator(self.opname + "4", mul4, **self.kwargs) @@ -76,9 +76,9 @@ def forward(self, x1, x2, x3): un6 = self.operator(self.opname + "6", mul6, **self.kwargs) # Layer 6 - mul7 = pybuda.op.Multiply("mul7", un4, self.train_param7) - mul8 = pybuda.op.Multiply("mul8", un5, self.train_param8) - mul9 = pybuda.op.Multiply("mul9", un6, self.train_param9) + mul7 = forge.op.Multiply("mul7", un4, self.train_param7) + mul8 = forge.op.Multiply("mul8", un5, self.train_param8) + mul9 = forge.op.Multiply("mul9", un6, self.train_param9) return un4, un5, un6, mul9, mul8, mul7 diff --git a/pybuda/test/operators/eltwise_unary/models/model_8.py b/forge/test/operators/eltwise_unary/models/model_8.py similarity index 72% rename from pybuda/test/operators/eltwise_unary/models/model_8.py rename to forge/test/operators/eltwise_unary/models/model_8.py index b75dc4537..b1b6e0e04 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_8.py +++ b/forge/test/operators/eltwise_unary/models/model_8.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 8 -# Unary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 8 In this test we have 11 unary operations, and 2 input tensors and 2 trainable variables. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. 
@@ -36,8 +36,8 @@ def __init__(self, operator, opname, shape, **kwargs): self.shape = shape self.kwargs = kwargs - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] @@ -53,10 +53,10 @@ def forward(self, x1, x2): un4 = self.operator(self.opname + "4", self.train_param2, **self.kwargs) # Layer 3 - mul1 = pybuda.op.Multiply("mul1", un1, un1) - mul2 = pybuda.op.Multiply("mul2", un2, un2) - mul3 = pybuda.op.Multiply("mul3", un3, un3) - mul4 = pybuda.op.Multiply("mul4", un4, un4) + mul1 = forge.op.Multiply("mul1", un1, un1) + mul2 = forge.op.Multiply("mul2", un2, un2) + mul3 = forge.op.Multiply("mul3", un3, un3) + mul4 = forge.op.Multiply("mul4", un4, un4) # Layer 4 un5 = self.operator(self.opname + "5", mul1, **self.kwargs) @@ -65,15 +65,15 @@ def forward(self, x1, x2): un8 = self.operator(self.opname + "8", mul4, **self.kwargs) # Layer 5 - mul5 = pybuda.op.Multiply("mul5", un5, un7) - mul6 = pybuda.op.Multiply("mul6", un6, un8) + mul5 = forge.op.Multiply("mul5", un5, un7) + mul6 = forge.op.Multiply("mul6", un6, un8) # Layer 6 un9 = self.operator(self.opname + "9", mul5, **self.kwargs) un10 = self.operator(self.opname + "10", mul6, **self.kwargs) # Layer 7 - mul7 = pybuda.op.Multiply("mul7", un9, un10) + mul7 = forge.op.Multiply("mul7", un9, un10) # Layer 8 un11 = self.operator(self.opname + "11", mul7, **self.kwargs) diff --git a/pybuda/test/operators/eltwise_unary/models/model_9.py b/forge/test/operators/eltwise_unary/models/model_9.py similarity index 70% rename from pybuda/test/operators/eltwise_unary/models/model_9.py rename to forge/test/operators/eltwise_unary/models/model_9.py index 96233122a..abda84df1 100644 --- a/pybuda/test/operators/eltwise_unary/models/model_9.py +++ b/forge/test/operators/eltwise_unary/models/model_9.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 9 -# Unary element-wise operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Unary element-wise operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaElementWiseUnaryTest(PyBudaModule): +class BudaElementWiseUnaryTest(ForgeModule): """ Buda Test 9 In this test we have 11 unary operations, and 2 input tensors and 2 trainable variables. Args: - operator (function): PyBuda unary element-wise operator. + operator (function): Forge unary element-wise operator. opname (str): Operation name (e.g. exp, sqrt, gelu, ...). This name test use to generate names of operation nodes in a graph/model. shape (tuple, list): Shape of the input tensors. 
@@ -36,8 +36,8 @@ def __init__(self, operator, opname, shape, **kwargs): self.shape = shape self.kwargs = kwargs - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] @@ -53,8 +53,8 @@ def forward(self, x1, x2): un4 = self.operator(self.opname + "4", self.train_param2, **self.kwargs) # Layer 3 - mul1 = pybuda.op.Multiply("mul1", un1, un4) - mul2 = pybuda.op.Multiply("mul2", un2, un3) + mul1 = forge.op.Multiply("mul1", un1, un4) + mul2 = forge.op.Multiply("mul2", un2, un3) # Layer 4 un5 = self.operator(self.opname + "5", mul1, **self.kwargs) @@ -65,23 +65,23 @@ def forward(self, x1, x2): un8 = self.operator(self.opname + "8", un6, **self.kwargs) # Layer 6 - mul3 = pybuda.op.Multiply("mul3", un7, un5) - mul4 = pybuda.op.Multiply("mul4", un8, un8) + mul3 = forge.op.Multiply("mul3", un7, un5) + mul4 = forge.op.Multiply("mul4", un8, un8) # Layer 7 - mul5 = pybuda.op.Multiply("mul5", mul3, mul1) - mul6 = pybuda.op.Multiply("mul6", mul4, mul2) + mul5 = forge.op.Multiply("mul5", mul3, mul1) + mul6 = forge.op.Multiply("mul6", mul4, mul2) # Layer 8 un9 = self.operator(self.opname + "9", mul5, **self.kwargs) un10 = self.operator(self.opname + "10", mul6, **self.kwargs) # Layer 9 - mul7 = pybuda.op.Multiply("mul7", un9, mul2) - mul8 = pybuda.op.Multiply("mul8", mul1, un10) + mul7 = forge.op.Multiply("mul7", un9, mul2) + mul8 = forge.op.Multiply("mul8", mul1, un10) # Layer 10 - mul9 = pybuda.op.Multiply("mul9", mul7, mul8) + mul9 = forge.op.Multiply("mul9", mul7, mul8) # Layer 11 un11 = self.operator(self.opname + "11", mul9, **self.kwargs) diff --git a/forge/test/operators/eltwise_unary/test_command.sh b/forge/test/operators/eltwise_unary/test_command.sh new file mode 100644 index 000000000..50b1e5961 --- /dev/null +++ b/forge/test/operators/eltwise_unary/test_command.sh @@ -0,0 +1,37 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Commands for running element-wise unary tests +# + +# Run single test +# +# To run using default parameters +# model, --un_model --> model_1, Note: for binary ops we have 10 models, model_[1-10] +# training, --un_train --> True +# recompute, --un_recompute --> True +# shape, --un_shape --> [1, 16, 32, 64] +# operation, --un_op --> Exp +pytest -svv test_eltwise_unary_single.py + +# Few examples with passed arguments +pytest -svv test_eltwise_unary_single.py --un_model model_3 --un_train True --un_recompute True --un_shape '[1, 32, 96, 128]' --un_op 'Log' +pytest -svv test_eltwise_unary_single.py --un_model model_1 --un_train False --un_recompute True --un_shape '[1, 32, 256, 128]' +pytest -svv test_eltwise_unary_single.py --un_model model_2 --un_train True --un_recompute False +pytest -svv test_eltwise_unary_single.py --un_model model_5 --un_train False --un_op 'Gelu' +pytest -svv test_eltwise_unary_single.py --un_model model_4 --un_shape '[1, 32, 256, 2048]' + +pytest -svv forge/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_3 --un_train False --un_recompute False --un_shape '[1, 32, 96, 128]' --un_op 'Clip' --un_kwargs_json='{"min": 0.234, "max": 0.982}' +pytest -svv forge/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_3 --un_train False --un_recompute 
False --un_shape '[1, 32, 96, 128]' --un_op 'LogicalNot' +pytest -svv forge/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_3 --un_train False --un_recompute False --un_shape '[1, 32, 96, 128]' --un_op 'CumSum' --un_kwargs_json='{"axis": 2}' +pytest -svv forge/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_5 --un_train False --un_recompute False --un_shape '[19, 20, 16]' --un_op 'Pow' --un_kwargs_json='{"exponent": 0.54881352186203}' +pytest -svv forge/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_5 --un_train False --un_recompute False --un_shape '[1, 1, 24, 9]' --un_op 'Pow' --un_kwargs_json='{"exponent": 0.5488135039273248}' +pytest -svv forge/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_5 --un_train False --un_recompute False --un_shape '[1, 1, 24, 9]' --un_op 'Tilize' + +# Issues +pytest -svv test_eltwise_unary_single.py --un_model model_4 --un_train True --un_recompute False --un_op 'Exp' --un_shape '[21, 127, 102, 19]' + + +# pytest -svv forge/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_6 --un_train True --un_recompute False --un_op 'Relu' --un_shape '[1, 12, 13]' +# pytest -svv forge/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_7 --un_train True --un_recompute True --un_op 'Exp' --un_shape '[1, 12, 13]' \ No newline at end of file diff --git a/pybuda/test/operators/eltwise_unary/test_eltwise_unary.py b/forge/test/operators/eltwise_unary/test_eltwise_unary.py similarity index 87% rename from pybuda/test/operators/eltwise_unary/test_eltwise_unary.py rename to forge/test/operators/eltwise_unary/test_eltwise_unary.py index 44e9778e3..7fff91eb6 100644 --- a/pybuda/test/operators/eltwise_unary/test_eltwise_unary.py +++ b/forge/test/operators/eltwise_unary/test_eltwise_unary.py @@ -11,13 +11,13 @@ import pytest import numpy as np -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig -from pybuda.verify.config import TestKind +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig +from forge.verify.config import TestKind from . 
import models -MODELS_PATH = "./pybuda/test/operators/eltwise_unary/models/" +MODELS_PATH = "./forge/test/operators/eltwise_unary/models/" SHAPE_NO = 2 SHAPE_SIZE_MIN = 2 @@ -62,7 +62,7 @@ def test_eltwise_unary( test_kind = op_test_kind if model == "model_9" and operation == "Reciprocal": - pytest.xfail("tenstorrent/pybuda#18") + pytest.xfail("tenstorrent/forge#18") kwargs = {} pcc = 0.99 @@ -74,7 +74,7 @@ def test_eltwise_unary( kwargs['min'] = np.random.rand() kwargs['max'] = np.random.rand() - architecture = f'models.{model}.BudaElementWiseUnaryTest(operator=pybuda.op.{operation}, opname="{operation}", shape={shape}' + architecture = f'models.{model}.BudaElementWiseUnaryTest(operator=forge.op.{operation}, opname="{operation}", shape={shape}' for k, v in kwargs.items(): architecture = f'{architecture}, {k}={v}' architecture = f'{architecture})' @@ -83,8 +83,8 @@ def test_eltwise_unary( tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - #Fusing disabled due to tenstorrent/pybuda#784 - pybuda_compile( + #Fusing disabled due to tenstorrent/forge#784 + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py b/forge/test/operators/eltwise_unary/test_eltwise_unary_single.py similarity index 89% rename from pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py rename to forge/test/operators/eltwise_unary/test_eltwise_unary_single.py index 3a89066a5..ca17bc29b 100644 --- a/pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py +++ b/forge/test/operators/eltwise_unary/test_eltwise_unary_single.py @@ -12,16 +12,16 @@ import pytest import numpy as np -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . 
import models -MODELS_PATH = "./pybuda/test/operators/eltwise_unary/models/" +MODELS_PATH = "./forge/test/operators/eltwise_unary/models/" # @pytest.mark.xfail( -# reason="tenstorrent/pybuda#1" +# reason="tenstorrent/forge#1" # ) def test_eltwise_unary( un_train, @@ -74,7 +74,7 @@ def test_eltwise_unary( print(f"Kwargs --> {kwargs}") print("\n") - architecture = f'models.{model}.BudaElementWiseUnaryTest(operator=pybuda.op.{operation}, opname="{operation}", shape={shape}' + architecture = f'models.{model}.BudaElementWiseUnaryTest(operator=forge.op.{operation}, opname="{operation}", shape={shape}' for k, v in kwargs.items(): architecture = f'{architecture}, {k}={v}' architecture = f'{architecture})' @@ -82,7 +82,7 @@ def test_eltwise_unary( model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/eltwise_unary_attr/__init__.py b/forge/test/operators/eltwise_unary_attr/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_unary_attr/__init__.py rename to forge/test/operators/eltwise_unary_attr/__init__.py diff --git a/pybuda/test/operators/eltwise_unary_attr/clip/__init__.py b/forge/test/operators/eltwise_unary_attr/clip/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_unary_attr/clip/__init__.py rename to forge/test/operators/eltwise_unary_attr/clip/__init__.py diff --git a/pybuda/test/operators/eltwise_unary_attr/clip/models/__init__.py b/forge/test/operators/eltwise_unary_attr/clip/models/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_unary_attr/clip/models/__init__.py rename to forge/test/operators/eltwise_unary_attr/clip/models/__init__.py diff --git a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_1.py b/forge/test/operators/eltwise_unary_attr/clip/models/model_1.py similarity index 75% rename from pybuda/test/operators/eltwise_unary_attr/clip/models/model_1.py rename to forge/test/operators/eltwise_unary_attr/clip/models/model_1.py index f4dbba0a0..07ccb5ebc 100644 --- a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_1.py +++ b/forge/test/operators/eltwise_unary_attr/clip/models/model_1.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# Clip operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Clip operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaClipTest(PyBudaModule): +class BudaClipTest(ForgeModule): """ Buda Test 1 @@ -45,7 +45,7 @@ def __init__( self.min_value = min_value self.max_value = max_value - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) input = BudaClipTest.INPUTS_DISTRIBUTION( BudaClipTest.INPUTS_RANGE_MIN, @@ -61,9 +61,9 @@ def __init__( def forward(self, x): # Layer 2 - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) # Layer 3 - clip = pybuda.op.Clip("clip", mul, min=self.min_value, max=self.max_value) + clip = forge.op.Clip("clip", mul, 
min=self.min_value, max=self.max_value) return clip diff --git a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_2.py b/forge/test/operators/eltwise_unary_attr/clip/models/model_2.py similarity index 56% rename from pybuda/test/operators/eltwise_unary_attr/clip/models/model_2.py rename to forge/test/operators/eltwise_unary_attr/clip/models/model_2.py index 50d1d8744..ed2b5ef21 100644 --- a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_2.py +++ b/forge/test/operators/eltwise_unary_attr/clip/models/model_2.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# Clip operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Clip operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaClipTest(PyBudaModule): +class BudaClipTest(ForgeModule): """ Buda Test 2 @@ -45,8 +45,8 @@ def __init__( self.min_value = min_value self.max_value = max_value - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for i in range(2): @@ -66,24 +66,24 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param1) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param1) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) # Layer 3 - clip1 = pybuda.op.Clip("clip1", mul1, min=self.min_value, max=self.max_value) - clip2 = pybuda.op.Clip("clip2", mul2, min=self.min_value, max=self.max_value) - clip3 = pybuda.op.Clip("clip3", mul3, min=self.min_value, max=self.max_value) + clip1 = forge.op.Clip("clip1", mul1, min=self.min_value, max=self.max_value) + clip2 = forge.op.Clip("clip2", mul2, min=self.min_value, max=self.max_value) + clip3 = forge.op.Clip("clip3", mul3, min=self.min_value, max=self.max_value) # Layer 4 - mul4 = pybuda.op.Multiply("mul4", clip1, clip2) - mul5 = pybuda.op.Multiply("mul5", clip2, clip3) + mul4 = forge.op.Multiply("mul4", clip1, clip2) + mul5 = forge.op.Multiply("mul5", clip2, clip3) # Layer 5 - clip4 = pybuda.op.Clip("clip4", mul4, min=self.min_value, max=self.max_value) - clip5 = pybuda.op.Clip("clip5", mul5, min=self.min_value, max=self.max_value) + clip4 = forge.op.Clip("clip4", mul4, min=self.min_value, max=self.max_value) + clip5 = forge.op.Clip("clip5", mul5, min=self.min_value, max=self.max_value) # Layer 6 - mul6 = pybuda.op.Multiply("mul6", clip4, clip5) + mul6 = forge.op.Multiply("mul6", clip4, clip5) return mul6 diff --git a/forge/test/operators/eltwise_unary_attr/clip/models/model_3.py b/forge/test/operators/eltwise_unary_attr/clip/models/model_3.py new file mode 100644 index 000000000..a6fb4b7ed --- /dev/null +++ b/forge/test/operators/eltwise_unary_attr/clip/models/model_3.py @@ -0,0 +1,104 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# 
SPDX-License-Identifier: Apache-2.0 +# +# Test 3 +# Clip operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch +from torch.distributions import Normal + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaClipTest(ForgeModule): + """ + Buda Test 3 + + """ + + INPUTS_RANGE_MIN = -1.0 + INPUTS_RANGE_MAX = 1.0 + INPUTS_DISTRIBUTION = Normal + + WEIGHTS_RANGE_MIN = -1.0 + WEIGHTS_RANGE_MAX = 1.0 + WEIGHTS_DISTRIBUTION = Normal + + def __init__( + self, + shape, + min_value, + max_value + ): + super().__init__("Buda Test 3") + + self.testname = "Operator Clip, Test 3" + self.shape = shape + self.min_value = min_value + self.max_value = max_value + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [] + for i in range(2): + input = BudaClipTest.INPUTS_DISTRIBUTION( + BudaClipTest.INPUTS_RANGE_MIN, + BudaClipTest.INPUTS_RANGE_MAX).sample(self.shape) + self.inputs.append(Tensor.create_from_torch(input)) + + for i in range(1, 3): + weights = BudaClipTest.WEIGHTS_DISTRIBUTION( + BudaClipTest.WEIGHTS_RANGE_MIN, + BudaClipTest.WEIGHTS_RANGE_MAX).sample(self.shape) + weights.requires_grad = True + self.set_parameter("train_param" + str(i), weights) + + + def forward(self, x1, x2): + + # Layer 2 + clip1 = forge.op.Clip("clip1", x1, min=self.min_value, max=self.max_value) + clip2 = forge.op.Clip("clip2", self.train_param1, min=self.min_value, max=self.max_value) + clip3 = forge.op.Clip("clip3", x2, min=self.min_value, max=self.max_value) + clip4 = forge.op.Clip("clip4", self.train_param2, min=self.min_value, max=self.max_value) + + # Layer 3 + mul1 = forge.op.Multiply("mul1", clip1, clip2) + mul2 = forge.op.Multiply("mul2", clip2, clip3) + mul3 = forge.op.Multiply("mul3", clip3, clip4) + + # Layer 4 + clip5 = forge.op.Clip("clip5", mul1, min=self.min_value, max=self.max_value) + clip6 = forge.op.Clip("clip6", mul2, min=self.min_value, max=self.max_value) + clip7 = forge.op.Clip("clip7", mul3, min=self.min_value, max=self.max_value) + + # Layer 5 + mul4 = forge.op.Multiply("mul4", clip5, self.train_param1) + mul5 = forge.op.Multiply("mul5", clip6, x2) + mul6 = forge.op.Multiply("mul6", clip7, clip4) + + # Layer 6 + clip8 = forge.op.Clip("clip8", mul4, min=self.min_value, max=self.max_value) + clip9 = forge.op.Clip("clip9", mul5, min=self.min_value, max=self.max_value) + clip10 = forge.op.Clip("clip10", mul6, min=self.min_value, max=self.max_value) + + # Layer 7 + add1 = forge.op.Add("add1", clip8, mul2) + add2 = forge.op.Add("add2", clip4, clip10) + mul7 = forge.op.Multiply("mul7", clip9, mul3) + + # Layer 8 + clip11 = forge.op.Clip("clip11", add1, min=self.min_value, max=self.max_value) + clip12 = forge.op.Clip("clip12", mul7, min=self.min_value, max=self.max_value) + clip13 = forge.op.Clip("clip13", add2, min=self.min_value, max=self.max_value) + + return clip11, clip12, clip13 diff --git a/forge/test/operators/eltwise_unary_attr/clip/models/model_4.py b/forge/test/operators/eltwise_unary_attr/clip/models/model_4.py new file mode 100644 index 000000000..df9039f9b --- /dev/null +++ b/forge/test/operators/eltwise_unary_attr/clip/models/model_4.py @@ -0,0 +1,132 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 4 +# Clip operators defined by Forge API +# These kinds of tests test only 
single specific operator through different Forge architectures +# + + +import torch +from torch.distributions import Normal + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaClipTest(ForgeModule): + """ + Buda Test 4 + + """ + + INPUTS_RANGE_MIN = -1.0 + INPUTS_RANGE_MAX = 1.0 + INPUTS_DISTRIBUTION = Normal + + WEIGHTS_RANGE_MIN = -1.0 + WEIGHTS_RANGE_MAX = 1.0 + WEIGHTS_DISTRIBUTION = Normal + + def __init__( + self, + shape, + min_value, + max_value + ): + super().__init__("Buda Test 4") + + self.testname = "Operator Clip, Test 4" + self.shape = shape + self.min_value = min_value + self.max_value = max_value + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [] + for i in range(3): + input = BudaClipTest.INPUTS_DISTRIBUTION( + BudaClipTest.INPUTS_RANGE_MIN, + BudaClipTest.INPUTS_RANGE_MAX).sample(self.shape) + self.inputs.append(Tensor.create_from_torch(input)) + + for i in range(1, 4): + weights = BudaClipTest.WEIGHTS_DISTRIBUTION( + BudaClipTest.WEIGHTS_RANGE_MIN, + BudaClipTest.WEIGHTS_RANGE_MAX).sample(self.shape) + weights.requires_grad = True + self.set_parameter("train_param" + str(i), weights) + + + + def forward(self, x1, x2, x3): + + # Layer 2 + add1 = forge.op.Add("add1", x1, self.train_param1) + add2 = forge.op.Add("add2", x1, x2) + add3 = forge.op.Add("add3", x2, self.train_param3) + add4 = forge.op.Add("add4", x3, self.train_param2) + + # Layer 3 + clip1 = forge.op.Clip("clip1", add1, min=self.min_value, max=self.max_value) + clip2 = forge.op.Clip("clip2", add2, min=self.min_value, max=self.max_value) + clip3 = forge.op.Clip("clip3", add3, min=self.min_value, max=self.max_value) + clip4 = forge.op.Clip("clip4", add4, min=self.min_value, max=self.max_value) + + # Layer 4 + clip5 = forge.op.Clip("clip5", self.train_param1, min=self.min_value, max=self.max_value) + clip6 = forge.op.Clip("clip6", self.train_param2, min=self.min_value, max=self.max_value) + clip7 = forge.op.Clip("clip7", self.train_param3, min=self.min_value, max=self.max_value) + + # Layer 5 + mul1 = forge.op.Multiply("mul1", clip1, clip5) + mul2 = forge.op.Multiply("mul2", clip2, clip3) + mul3 = forge.op.Multiply("mul3", clip5, clip4) + mul4 = forge.op.Multiply("mul4", clip6, clip7) + + # Layer 6 + clip8 = forge.op.Clip("clip8", mul1, min=self.min_value, max=self.max_value) + clip9 = forge.op.Clip("clip9", mul2, min=self.min_value, max=self.max_value) + clip10 = forge.op.Clip("clip10", mul3, min=self.min_value, max=self.max_value) + clip11 = forge.op.Clip("clip11", mul4, min=self.min_value, max=self.max_value) + + # Layer 7 + add5 = forge.op.Add("add5", clip8, clip5) + add6 = forge.op.Add("add6", clip9, clip6) + add7 = forge.op.Add("add7", clip10, clip7) + add8 = forge.op.Add("add8", clip4, clip11) + + # Layer 8 + clip12 = forge.op.Clip("clip12", add5, min=self.min_value, max=self.max_value) + clip13 = forge.op.Clip("clip13", add6, min=self.min_value, max=self.max_value) + clip14 = forge.op.Clip("clip14", add7, min=self.min_value, max=self.max_value) + clip15 = forge.op.Clip("clip15", add8, min=self.min_value, max=self.max_value) + + # Layer 9 + mul5 = forge.op.Multiply("mul5", clip1, clip12) + mul6 = forge.op.Multiply("mul6", mul2, clip13) + mul7 = forge.op.Multiply("mul7", clip6, clip14) + mul8 = forge.op.Multiply("mul8", clip15, clip7) + + # Layer 10 + clip16 = 
forge.op.Clip("clip16", mul5, min=self.min_value, max=self.max_value) + clip17 = forge.op.Clip("clip17", mul6, min=self.min_value, max=self.max_value) + clip18 = forge.op.Clip("clip18", mul7, min=self.min_value, max=self.max_value) + clip19 = forge.op.Clip("clip19", mul8, min=self.min_value, max=self.max_value) + + # Layer 11 + mul9 = forge.op.Multiply("mul9", clip16, clip17) + mul10 = forge.op.Multiply("mul10", clip17, clip18) + mul11 = forge.op.Multiply("mul11", clip18, clip19) + + # Layer 12 + mul12 = forge.op.Multiply("mul12", mul9, clip9) + mul13 = forge.op.Multiply("mul13", mul10, mul11) + + return mul12, mul13 diff --git a/forge/test/operators/eltwise_unary_attr/clip/models/model_5.py b/forge/test/operators/eltwise_unary_attr/clip/models/model_5.py new file mode 100644 index 000000000..8030497e5 --- /dev/null +++ b/forge/test/operators/eltwise_unary_attr/clip/models/model_5.py @@ -0,0 +1,145 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 5 +# Clip operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch +from torch.distributions import Normal + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaClipTest(ForgeModule): + """ + Buda Test 5 + + """ + + INPUTS_RANGE_MIN = -1.0 + INPUTS_RANGE_MAX = 1.0 + INPUTS_DISTRIBUTION = Normal + + WEIGHTS_RANGE_MIN = -1.0 + WEIGHTS_RANGE_MAX = 1.0 + WEIGHTS_DISTRIBUTION = Normal + + def __init__( + self, + shape, + min_value, + max_value + ): + super().__init__("Buda Test 5") + + self.testname = "Operator Clip, Test 5" + self.shape = shape + self.min_value = min_value + self.max_value = max_value + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [] + for i in range(3): + input = BudaClipTest.INPUTS_DISTRIBUTION( + BudaClipTest.INPUTS_RANGE_MIN, + BudaClipTest.INPUTS_RANGE_MAX).sample(self.shape) + self.inputs.append(Tensor.create_from_torch(input)) + + for i in range(1, 4): + weights = BudaClipTest.WEIGHTS_DISTRIBUTION( + BudaClipTest.WEIGHTS_RANGE_MIN, + BudaClipTest.WEIGHTS_RANGE_MAX).sample(self.shape) + weights.requires_grad = True + self.set_parameter("train_param" + str(i), weights) + + + + def forward(self, x1, x2, x3): + + # Layer 2 + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) + + # Layer 3 + mul4 = forge.op.Multiply("mul4", x2, self.train_param1) + mul5 = forge.op.Multiply("mul5", x3, self.train_param2) + clip1 = forge.op.Clip("clip1", mul1, min=self.min_value, max=self.max_value) + clip2 = forge.op.Clip("clip2", mul2, min=self.min_value, max=self.max_value) + clip3 = forge.op.Clip("clip3", mul3, min=self.min_value, max=self.max_value) + + # Layer 4 + clip4 = forge.op.Clip("clip4", mul4, min=self.min_value, max=self.max_value) + clip5 = forge.op.Clip("clip5", mul5, min=self.min_value, max=self.max_value) + + # Layer 5 + add1 = forge.op.Add("add1", clip1, self.train_param1) + add2 = forge.op.Add("add2", clip4, x2) + add3 = forge.op.Add("add3", clip2, self.train_param2) + add4 = forge.op.Add("add4", clip5, x3) + add5 = forge.op.Add("add5", clip3, self.train_param3) + + # Layer 6 + clip6 = forge.op.Clip("clip6", add1, 
min=self.min_value, max=self.max_value) + clip7 = forge.op.Clip("clip7", add2, min=self.min_value, max=self.max_value) + clip8 = forge.op.Clip("clip8", add3, min=self.min_value, max=self.max_value) + clip9 = forge.op.Clip("clip9", add4, min=self.min_value, max=self.max_value) + clip10 = forge.op.Clip("clip10", add5, min=self.min_value, max=self.max_value) + + # Layer 7 + mul6 = forge.op.Multiply("mul6", clip6, clip4) + mul7 = forge.op.Multiply("mul7", mul1, clip7) + mul8 = forge.op.Multiply("mul8", mul2, clip8) + mul9 = forge.op.Multiply("mul9", clip3, clip9) + mul10 = forge.op.Multiply("mul10", add3, clip10) + + # Layer 8 + clip11 = forge.op.Clip("clip11", mul6, min=self.min_value, max=self.max_value) + clip12 = forge.op.Clip("clip12", mul7, min=self.min_value, max=self.max_value) + clip13 = forge.op.Clip("clip13", mul8, min=self.min_value, max=self.max_value) + clip14 = forge.op.Clip("clip14", mul9, min=self.min_value, max=self.max_value) + clip15 = forge.op.Clip("clip15", mul10, min=self.min_value, max=self.max_value) + + # Layer 9 + mul11 = forge.op.Multiply("mul11", clip11, clip8) + mul12 = forge.op.Multiply("mul12", clip12, clip5) + mul13 = forge.op.Multiply("mul13", clip13, clip7) + mul14 = forge.op.Multiply("mul14", clip14, add5) + mul15 = forge.op.Multiply("mul15", clip13, mul5) + + # Layer 10 + clip16 = forge.op.Clip("clip16", mul11, min=self.min_value, max=self.max_value) + clip17 = forge.op.Clip("clip17", mul12, min=self.min_value, max=self.max_value) + clip18 = forge.op.Clip("clip18", mul13, min=self.min_value, max=self.max_value) + clip19 = forge.op.Clip("clip19", mul14, min=self.min_value, max=self.max_value) + clip20 = forge.op.Clip("clip20", mul15, min=self.min_value, max=self.max_value) + + # Layer 11 + mul16 = forge.op.Multiply("mul16", clip16, clip12) + mul17 = forge.op.Multiply("mul17", clip17, clip13) + mul18 = forge.op.Multiply("mul18", clip18, clip19) + mul19 = forge.op.Multiply("mul19", clip13, clip20) + + # Layer 12 + clip21 = forge.op.Clip("clip21", mul16, min=self.min_value, max=self.max_value) + clip22 = forge.op.Clip("clip22", mul17, min=self.min_value, max=self.max_value) + clip23 = forge.op.Clip("clip23", mul18, min=self.min_value, max=self.max_value) + clip24 = forge.op.Clip("clip24", mul19, min=self.min_value, max=self.max_value) + + # Layer 13 + mul20 = forge.op.Multiply("mul20", clip21, mul12) + mul21 = forge.op.Multiply("mul21", clip22, clip18) + mul22 = forge.op.Multiply("mul22", clip23, clip24) + + return mul20, mul21, mul22 diff --git a/pybuda/test/operators/eltwise_unary_attr/clip/test_clip.py b/forge/test/operators/eltwise_unary_attr/clip/test_clip.py similarity index 90% rename from pybuda/test/operators/eltwise_unary_attr/clip/test_clip.py rename to forge/test/operators/eltwise_unary_attr/clip/test_clip.py index 44b54106d..4a28805bf 100644 --- a/pybuda/test/operators/eltwise_unary_attr/clip/test_clip.py +++ b/forge/test/operators/eltwise_unary_attr/clip/test_clip.py @@ -11,13 +11,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . 
import models -MODELS_PATH = "./pybuda/test/operators/eltwise_unary_attr/clip/models" +MODELS_PATH = "./forge/test/operators/eltwise_unary_attr/clip/models" SHAPE_NO = 2 SHAPE_DIM_MIN = 1 @@ -80,8 +80,8 @@ def test_clip( tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - #Fusing disabled due to tenstorrent/pybuda#784 - pybuda_compile( + #Fusing disabled due to tenstorrent/forge#784 + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/__init__.py b/forge/test/operators/eltwise_unary_attr/leaky_relu/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_unary_attr/leaky_relu/__init__.py rename to forge/test/operators/eltwise_unary_attr/leaky_relu/__init__.py diff --git a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/__init__.py b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/__init__.py similarity index 100% rename from pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/__init__.py rename to forge/test/operators/eltwise_unary_attr/leaky_relu/models/__init__.py diff --git a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_1.py b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_1.py similarity index 74% rename from pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_1.py rename to forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_1.py index 3d2235b92..e0f42ea80 100644 --- a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_1.py +++ b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_1.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# LeakyRelu operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# LeakyRelu operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaLeakyReluTest(PyBudaModule): +class BudaLeakyReluTest(ForgeModule): """ Buda Test 1 @@ -43,7 +43,7 @@ def __init__( self.shape = shape self.alpha = alpha - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) input = BudaLeakyReluTest.INPUTS_DISTRIBUTION( BudaLeakyReluTest.INPUTS_RANGE_MIN, @@ -59,9 +59,9 @@ def __init__( def forward(self, x): # Layer 2 - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) # Layer 3 - lrelu = pybuda.op.LeakyRelu("lrelu", mul, alpha=self.alpha) + lrelu = forge.op.LeakyRelu("lrelu", mul, alpha=self.alpha) return lrelu diff --git a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_2.py b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_2.py similarity index 56% rename from pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_2.py rename to forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_2.py index 1fc865e1a..e20536a93 100644 --- a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_2.py +++ b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_2.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# LeakyRelu 
operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# LeakyRelu operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch from torch.distributions import Normal -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaLeakyReluTest(PyBudaModule): +class BudaLeakyReluTest(ForgeModule): """ Buda Test 2 @@ -43,8 +43,8 @@ def __init__( self.shape = shape self.alpha = alpha - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [] for i in range(2): @@ -64,24 +64,24 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) # Layer 3 - lrelu1 = pybuda.op.LeakyRelu("lrelu1", mul1, alpha=self.alpha) - lrelu2 = pybuda.op.LeakyRelu("lrelu2", mul2, alpha=self.alpha) - lrelu3 = pybuda.op.LeakyRelu("lrelu3", mul3, alpha=self.alpha) + lrelu1 = forge.op.LeakyRelu("lrelu1", mul1, alpha=self.alpha) + lrelu2 = forge.op.LeakyRelu("lrelu2", mul2, alpha=self.alpha) + lrelu3 = forge.op.LeakyRelu("lrelu3", mul3, alpha=self.alpha) # Layer 4 - mul4 = pybuda.op.Multiply("mul4", lrelu1, lrelu2) - mul5 = pybuda.op.Multiply("mul5", lrelu2, lrelu3) + mul4 = forge.op.Multiply("mul4", lrelu1, lrelu2) + mul5 = forge.op.Multiply("mul5", lrelu2, lrelu3) # Layer 5 - lrelu4 = pybuda.op.LeakyRelu("lrelu4", mul4, alpha=self.alpha) - lrelu5 = pybuda.op.LeakyRelu("lrelu5", mul5, alpha=self.alpha) + lrelu4 = forge.op.LeakyRelu("lrelu4", mul4, alpha=self.alpha) + lrelu5 = forge.op.LeakyRelu("lrelu5", mul5, alpha=self.alpha) # Layer 6 - mul6 = pybuda.op.Multiply("mul6", lrelu4, lrelu5) + mul6 = forge.op.Multiply("mul6", lrelu4, lrelu5) return mul6 \ No newline at end of file diff --git a/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_3.py b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_3.py new file mode 100644 index 000000000..9c46ed149 --- /dev/null +++ b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_3.py @@ -0,0 +1,101 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 3 +# LeakyRelu operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch +from torch.distributions import Normal + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaLeakyReluTest(ForgeModule): + """ + Buda Test 3 + + """ + + INPUTS_RANGE_MIN = -1.0 + INPUTS_RANGE_MAX = 1.0 + INPUTS_DISTRIBUTION = Normal + + WEIGHTS_RANGE_MIN = -1.0 + WEIGHTS_RANGE_MAX = 1.0 + WEIGHTS_DISTRIBUTION = Normal + + def __init__( + self, + shape, + alpha + ): + super().__init__("Buda Test 3") + + self.testname = "Operator LeakyRelu, Test 3" 
+ self.shape = shape + self.alpha = alpha + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [] + for i in range(2): + input = BudaLeakyReluTest.INPUTS_DISTRIBUTION( + BudaLeakyReluTest.INPUTS_RANGE_MIN, + BudaLeakyReluTest.INPUTS_RANGE_MAX).sample(self.shape) + self.inputs.append(Tensor.create_from_torch(input)) + + for i in range(1, 3): + weights = BudaLeakyReluTest.WEIGHTS_DISTRIBUTION( + BudaLeakyReluTest.WEIGHTS_RANGE_MIN, + BudaLeakyReluTest.WEIGHTS_RANGE_MAX).sample(self.shape) + weights.requires_grad = True + self.set_parameter("train_param" + str(i), weights) + + + def forward(self, x1, x2): + + # Layer 2 + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param1) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) + + # Layer 3 + lrelu1 = forge.op.LeakyRelu("lrelu1", mul1, alpha=self.alpha) + lrelu2 = forge.op.LeakyRelu("lrelu2", mul2, alpha=self.alpha) + lrelu3 = forge.op.LeakyRelu("lrelu3", mul3, alpha=self.alpha) + + # Layer 4 + mul4 = forge.op.Multiply("mul4", lrelu1, self.train_param1) + mul5 = forge.op.Multiply("mul5", lrelu3, self.train_param2) + add1 = forge.op.Add("add1", lrelu2, x2) + + # Layer 5 + lrelu4 = forge.op.LeakyRelu("lrelu4", mul4, alpha=self.alpha) + lrelu5 = forge.op.LeakyRelu("lrelu5", add1, alpha=self.alpha) + lrelu6 = forge.op.LeakyRelu("lrelu6", mul5, alpha=self.alpha) + + # Layer 6 + mul6 = forge.op.Multiply("mul6", lrelu4, lrelu2) + mul7 = forge.op.Multiply("mul7", mul2, lrelu6) + add2 = forge.op.Add("add2", lrelu5, mul3) + + # Layer 7 + lrelu7 = forge.op.LeakyRelu("lrelu7", mul6, alpha=self.alpha) + lrelu8 = forge.op.LeakyRelu("lrelu8", add2, alpha=self.alpha) + lrelu9 = forge.op.LeakyRelu("lrelu9", mul7, alpha=self.alpha) + + # Layer 8 + mul8 = forge.op.Multiply("mul8", lrelu7, mul4) + mul9 = forge.op.Multiply("mul9", lrelu8, lrelu6) + mul10 = forge.op.Multiply("mul10", lrelu9, lrelu3) + + return mul8, mul9, mul10 \ No newline at end of file diff --git a/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_4.py b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_4.py new file mode 100644 index 000000000..f1e496d18 --- /dev/null +++ b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_4.py @@ -0,0 +1,140 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 4 +# LeakyRelu operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch +from torch.distributions import Normal + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaLeakyReluTest(ForgeModule): + """ + Buda Test 4 + + """ + + INPUTS_RANGE_MIN = -1.0 + INPUTS_RANGE_MAX = 1.0 + INPUTS_DISTRIBUTION = Normal + + WEIGHTS_RANGE_MIN = -1.0 + WEIGHTS_RANGE_MAX = 1.0 + WEIGHTS_DISTRIBUTION = Normal + + def __init__( + self, + shape, + alpha + ): + super().__init__("Buda Test 4") + + self.testname = "Operator LeakyRelu, Test 4" + self.shape = shape + self.alpha = alpha + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [] + for i in range(3): + input = BudaLeakyReluTest.INPUTS_DISTRIBUTION( + 
BudaLeakyReluTest.INPUTS_RANGE_MIN, + BudaLeakyReluTest.INPUTS_RANGE_MAX).sample(self.shape) + self.inputs.append(Tensor.create_from_torch(input)) + + for i in range(1, 4): + weights = BudaLeakyReluTest.WEIGHTS_DISTRIBUTION( + BudaLeakyReluTest.WEIGHTS_RANGE_MIN, + BudaLeakyReluTest.WEIGHTS_RANGE_MAX).sample(self.shape) + weights.requires_grad = True + self.set_parameter("train_param" + str(i), weights) + + + + def forward(self, x1, x2, x3): + + # Layer 2 + add1 = forge.op.Add("add1", x1, self.train_param1) + add2 = forge.op.Add("add2", x2, self.train_param1) + add3 = forge.op.Add("add3", x3, self.train_param2) + mul1 = forge.op.Multiply("mul1", x2, self.train_param2) + mul2 = forge.op.Multiply("mul2", x3, self.train_param3) + + # Layer 3 + lrelu1 = forge.op.LeakyRelu("lrelu1", add1, alpha=self.alpha) + lrelu2 = forge.op.LeakyRelu("lrelu2", add2, alpha=self.alpha) + lrelu3 = forge.op.LeakyRelu("lrelu3", mul1, alpha=self.alpha) + lrelu4 = forge.op.LeakyRelu("lrelu4", add3, alpha=self.alpha) + lrelu5 = forge.op.LeakyRelu("lrelu5", mul2, alpha=self.alpha) + + # Layer 4 + mul3 = forge.op.Multiply("mul3", lrelu1, self.train_param1) + mul4 = forge.op.Multiply("mul4", lrelu2, x2) + mul5 = forge.op.Multiply("mul5", lrelu3, self.train_param2) + mul6 = forge.op.Multiply("mul6", lrelu4, x3) + add4 = forge.op.Add("add4", lrelu5, self.train_param3) + + # Layer 5 + lrelu6 = forge.op.LeakyRelu("lrelu6", mul3, alpha=self.alpha) + lrelu7 = forge.op.LeakyRelu("lrelu7", mul4, alpha=self.alpha) + lrelu8 = forge.op.LeakyRelu("lrelu8", mul5, alpha=self.alpha) + lrelu9 = forge.op.LeakyRelu("lrelu9", mul6, alpha=self.alpha) + lrelu10 = forge.op.LeakyRelu("lrelu10", add4, alpha=self.alpha) + + # Layer 6 + mul7 = forge.op.Multiply("mul7", lrelu6, add2) + mul8 = forge.op.Multiply("mul8", lrelu8, lrelu4) + mul9 = forge.op.Multiply("mul9", lrelu9, lrelu5) + mul10 = forge.op.Multiply("mul10", lrelu10, self.train_param3) + add5 = forge.op.Add("add5", lrelu7, lrelu3) + + # Layer 7 + lrelu11 = forge.op.LeakyRelu("lrelu11", mul7, alpha=self.alpha) + lrelu12 = forge.op.LeakyRelu("lrelu12", add5, alpha=self.alpha) + lrelu13 = forge.op.LeakyRelu("lrelu13", mul8, alpha=self.alpha) + lrelu14 = forge.op.LeakyRelu("lrelu14", mul9, alpha=self.alpha) + lrelu15 = forge.op.LeakyRelu("lrelu15", mul10, alpha=self.alpha) + + # Layer 8 + add6 = forge.op.Add("add6", lrelu11, mul3) + add7 = forge.op.Add("add7", lrelu12, mul8) + mul11 = forge.op.Multiply("mul11", lrelu13, mul5) + mul12 = forge.op.Multiply("mul12", lrelu14, add4) + mul13 = forge.op.Multiply("mul13", mul5, lrelu15) + + # Layer 9 + lrelu16 = forge.op.LeakyRelu("lrelu16", add6, alpha=self.alpha) + lrelu17 = forge.op.LeakyRelu("lrelu17", add7, alpha=self.alpha) + lrelu18 = forge.op.LeakyRelu("lrelu18", mul11, alpha=self.alpha) + lrelu19 = forge.op.LeakyRelu("lrelu19", mul12, alpha=self.alpha) + lrelu20 = forge.op.LeakyRelu("lrelu20", mul13, alpha=self.alpha) + + # Layer 10 + mul14 = forge.op.Multiply("mul14", lrelu16, mul7) + mul15 = forge.op.Multiply("mul15", lrelu17, mul8) + mul16 = forge.op.Multiply("mul16", lrelu18, lrelu19) + mul17 = forge.op.Multiply("mul17", add5, lrelu20) + + # Layer 11 + lrelu21 = forge.op.LeakyRelu("lrelu21", mul14, alpha=self.alpha) + lrelu22 = forge.op.LeakyRelu("lrelu22", mul15, alpha=self.alpha) + lrelu23 = forge.op.LeakyRelu("lrelu23", mul16, alpha=self.alpha) + lrelu24 = forge.op.LeakyRelu("lrelu24", mul17, alpha=self.alpha) + + # Layer 12 + add8 = forge.op.Add("add8", lrelu21, lrelu23) + add9 = forge.op.Add("add9", lrelu22, lrelu24) + + return 
add8, add9 diff --git a/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_5.py b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_5.py new file mode 100644 index 000000000..b27e10806 --- /dev/null +++ b/forge/test/operators/eltwise_unary_attr/leaky_relu/models/model_5.py @@ -0,0 +1,121 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 4 +# LeakyRelu operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch +from torch.distributions import Normal + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaLeakyReluTest(ForgeModule): + """ + Buda Test 4 + + """ + + INPUTS_RANGE_MIN = -1.0 + INPUTS_RANGE_MAX = 1.0 + INPUTS_DISTRIBUTION = Normal + + WEIGHTS_RANGE_MIN = -1.0 + WEIGHTS_RANGE_MAX = 1.0 + WEIGHTS_DISTRIBUTION = Normal + + def __init__( + self, + shape, + alpha + ): + super().__init__("Buda Test 4") + + self.testname = "Operator LeakyRelu, Test 4" + self.shape = shape + self.alpha = alpha + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [] + for i in range(3): + input = BudaLeakyReluTest.INPUTS_DISTRIBUTION( + BudaLeakyReluTest.INPUTS_RANGE_MIN, + BudaLeakyReluTest.INPUTS_RANGE_MAX).sample(self.shape) + self.inputs.append(Tensor.create_from_torch(input)) + + for i in range(1, 4): + weights = BudaLeakyReluTest.WEIGHTS_DISTRIBUTION( + BudaLeakyReluTest.WEIGHTS_RANGE_MIN, + BudaLeakyReluTest.WEIGHTS_RANGE_MAX).sample(self.shape) + weights.requires_grad = True + self.set_parameter("train_param" + str(i), weights) + + + + def forward(self, x1, x2, x3): + + # Layer 2 + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) + + # Layer 3 + lrelu1 = forge.op.LeakyRelu("lrelu1", mul1, alpha=self.alpha) + lrelu2 = forge.op.LeakyRelu("lrelu2", mul2, alpha=self.alpha) + lrelu3 = forge.op.LeakyRelu("lrelu3", mul3, alpha=self.alpha) + + # Layer 4 + mul4 = forge.op.Multiply("mul4", lrelu1, x2) + mul5 = forge.op.Multiply("mul5", lrelu2, x3) + mul6 = forge.op.Multiply("mul6", self.train_param2, lrelu3) + + # Layer 5 + lrelu4 = forge.op.LeakyRelu("lrelu4", mul4, alpha=self.alpha) + lrelu5 = forge.op.LeakyRelu("lrelu5", mul5, alpha=self.alpha) + lrelu6 = forge.op.LeakyRelu("lrelu6", mul6, alpha=self.alpha) + + # Layer 6 + mul7 = forge.op.Multiply("mul7", lrelu4, mul2) + mul8 = forge.op.Multiply("mul8", lrelu5, mul3) + mul9 = forge.op.Multiply("mul9", lrelu6, mul1) + mul10 = forge.op.Multiply("mul10", lrelu4, lrelu5) + + # Layer 7 + lrelu7 = forge.op.LeakyRelu("lrelu7", mul10, alpha=self.alpha) + lrelu8 = forge.op.LeakyRelu("lrelu8", mul8, alpha=self.alpha) + lrelu9 = forge.op.LeakyRelu("lrelu9", mul9, alpha=self.alpha) + + # Layer 8 + mul11 = forge.op.Multiply("mul11", mul7, lrelu7) + mul12 = forge.op.Multiply("mul12", lrelu8, mul6) + mul13 = forge.op.Multiply("mul13", mul5, lrelu9) + + # Layer 9 + lrelu10 = forge.op.LeakyRelu("lrelu10", mul11, alpha=self.alpha) + lrelu11 = forge.op.LeakyRelu("lrelu11", mul12, alpha=self.alpha) + lrelu12 = forge.op.LeakyRelu("lrelu12", mul13, alpha=self.alpha) + + # Layer 10 + mul14 = forge.op.Multiply("mul14", lrelu10, mul8) + mul15 = 
forge.op.Multiply("mul15", lrelu11, mul9) + mul16 = forge.op.Multiply("mul16", lrelu12, lrelu6) + + # Layer 11 + mul17 = forge.op.Multiply("mul17", mul14, lrelu8) + mul18 = forge.op.Multiply("mul18", mul15, mul16) + + # Layer 12 + lrelu13 = forge.op.LeakyRelu("lrelu13", mul18, alpha=self.alpha) + + return mul17, lrelu13 diff --git a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/test_leaky_relu.py b/forge/test/operators/eltwise_unary_attr/leaky_relu/test_leaky_relu.py similarity index 89% rename from pybuda/test/operators/eltwise_unary_attr/leaky_relu/test_leaky_relu.py rename to forge/test/operators/eltwise_unary_attr/leaky_relu/test_leaky_relu.py index b21d44933..e5ad50141 100644 --- a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/test_leaky_relu.py +++ b/forge/test/operators/eltwise_unary_attr/leaky_relu/test_leaky_relu.py @@ -11,13 +11,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . import models -MODELS_PATH = "./pybuda/test/operators/eltwise_unary_attr/leaky_relu/models" +MODELS_PATH = "./forge/test/operators/eltwise_unary_attr/leaky_relu/models" SHAPE_NO = 3 SHAPE_DIM_MIN = 1 @@ -74,8 +74,8 @@ def test_leaky_relu( tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - #Fusing disabled due to tenstorrent/pybuda#784 - pybuda_compile( + #Fusing disabled due to tenstorrent/forge#784 + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/grouped_reduce/__init__.py b/forge/test/operators/grouped_reduce/__init__.py similarity index 100% rename from pybuda/test/operators/grouped_reduce/__init__.py rename to forge/test/operators/grouped_reduce/__init__.py diff --git a/pybuda/test/operators/grouped_reduce/models/__init__.py b/forge/test/operators/grouped_reduce/models/__init__.py similarity index 100% rename from pybuda/test/operators/grouped_reduce/models/__init__.py rename to forge/test/operators/grouped_reduce/models/__init__.py diff --git a/pybuda/test/operators/grouped_reduce/models/model_0.py b/forge/test/operators/grouped_reduce/models/model_0.py similarity index 73% rename from pybuda/test/operators/grouped_reduce/models/model_0.py rename to forge/test/operators/grouped_reduce/models/model_0.py index 327ef94f3..fbc132825 100644 --- a/pybuda/test/operators/grouped_reduce/models/model_0.py +++ b/forge/test/operators/grouped_reduce/models/model_0.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 0 -# Grouped reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Grouped reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 0 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. 
""" @@ -34,12 +34,12 @@ def __init__(self, operator, opname, shape, dim, groups, keep_dims): self.dim = dim self.groups = groups self.keep_dims = keep_dims - self.train_param = pybuda.Parameter(torch.randn(*self.shape), requires_grad=True) + self.train_param = forge.Parameter(torch.randn(*self.shape), requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] def forward(self, x): - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) red = self.operator(self.opname, mul, self.dim, self.groups, self.keep_dims) return red diff --git a/pybuda/test/operators/grouped_reduce/test_grouped_reduce.py b/forge/test/operators/grouped_reduce/test_grouped_reduce.py similarity index 87% rename from pybuda/test/operators/grouped_reduce/test_grouped_reduce.py rename to forge/test/operators/grouped_reduce/test_grouped_reduce.py index a8dbb7bad..d25c235b1 100644 --- a/pybuda/test/operators/grouped_reduce/test_grouped_reduce.py +++ b/forge/test/operators/grouped_reduce/test_grouped_reduce.py @@ -15,15 +15,15 @@ import numpy as np import torch -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig -from pybuda.verify.config import TestKind +from forge.verify.config import TestKind from . import models -MODELS_PATH = "./pybuda/test/operators/grouped_reduce/models/" +MODELS_PATH = "./forge/test/operators/grouped_reduce/models/" SHAPE_NO = 1 SHAPE_SIZE = 4 @@ -73,11 +73,11 @@ def test_grouped_reduce( groups_to_try = [np.random.choice(facs) for _ in range(min(len(facs), 3))] # groups_to_try = [8] for groups in groups_to_try: - architecture = f'models.{model}.BudaReduceTest(operator=pybuda.op.{operation}, opname="{operation}", shape={shape}, dim={dim}, groups={groups}, keep_dims={keep_dims})' + architecture = f'models.{model}.BudaReduceTest(operator=forge.op.{operation}, opname="{operation}", shape={shape}, dim={dim}, groups={groups}, keep_dims={keep_dims})' tt_model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(tt_model) - pybuda_compile( + forge_compile( tt0, tt_model.testname, *tt_model.inputs, diff --git a/pybuda/test/operators/matmul/__init__.py b/forge/test/operators/matmul/__init__.py similarity index 100% rename from pybuda/test/operators/matmul/__init__.py rename to forge/test/operators/matmul/__init__.py diff --git a/pybuda/test/operators/matmul/conftest.py b/forge/test/operators/matmul/conftest.py similarity index 100% rename from pybuda/test/operators/matmul/conftest.py rename to forge/test/operators/matmul/conftest.py diff --git a/pybuda/test/operators/matmul/models/__init__.py b/forge/test/operators/matmul/models/__init__.py similarity index 100% rename from pybuda/test/operators/matmul/models/__init__.py rename to forge/test/operators/matmul/models/__init__.py diff --git a/pybuda/test/operators/matmul/models/custom/model_4.py b/forge/test/operators/matmul/models/custom/model_4.py similarity index 72% rename from pybuda/test/operators/matmul/models/custom/model_4.py rename to forge/test/operators/matmul/models/custom/model_4.py index 61bdf9caa..6677762f1 100644 --- a/pybuda/test/operators/matmul/models/custom/model_4.py +++ b/forge/test/operators/matmul/models/custom/model_4.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 4 -# Matmul operator defined by PyBuda API -# These kinds 
of tests test only single specific operator through different PyBuda architectures +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaMatmulTest(PyBudaModule): +class BudaMatmulTest(ForgeModule): """ Buda Test 4 @@ -39,10 +39,10 @@ def __init__(self): self.shape_train3 = (1, 1, 420, 540) self.shape_train4 = (1, 1, 768, 320) - self.train_param1 = pybuda.Parameter(*self.shape_train1, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape_train2, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape_train3, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape_train4, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape_train1, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape_train2, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape_train3, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape_train4, requires_grad=True) def my_rand(*shape, requires_grad=False): return (torch.rand(*shape, requires_grad=requires_grad) - 0.5).detach() @@ -61,23 +61,23 @@ def my_rand(*shape, requires_grad=False): def forward(self, x1, x2, x3, x4): # Layer 2 - mm1 = pybuda.op.Matmul("mm1", x1, self.train_param1) + mm1 = forge.op.Matmul("mm1", x1, self.train_param1) # (1, 1, 64, 64) x (1, 1, 64, 128) -> (1, 1, 64, 128) - mm2 = pybuda.op.Matmul("mm2", x2, self.train_param2) + mm2 = forge.op.Matmul("mm2", x2, self.train_param2) # (1, 1, 128, 70) x (1, 1, 70, 350) -> (1, 1, 128, 350) - mm3 = pybuda.op.Matmul("mm3", x3, self.train_param3) + mm3 = forge.op.Matmul("mm3", x3, self.train_param3) # (1, 1, 350, 420) x (1, 1, 420, 540) -> (1, 1, 350, 540) - mm4 = pybuda.op.Matmul("mm4", x4, self.train_param4) + mm4 = forge.op.Matmul("mm4", x4, self.train_param4) # (1, 1, 540, 768) x (1, 1, 768, 320) -> (1, 1, 540, 320) # Layer 3 - mm5 = pybuda.op.Matmul("mm5", mm1, mm2) + mm5 = forge.op.Matmul("mm5", mm1, mm2) # (1, 1, 64, 128) x (1, 1, 128, 350) -> (1, 1, 64, 350) - mm6 = pybuda.op.Matmul("mm6", mm3, mm4) + mm6 = forge.op.Matmul("mm6", mm3, mm4) # (1, 1, 350, 540) x (1, 1, 540, 320) -> (1, 1, 350, 320) # Layer 4 - mm7 = pybuda.op.Matmul("mm7", mm5, mm6) + mm7 = forge.op.Matmul("mm7", mm5, mm6) # (1, 1, 64, 350) x (1, 1, 350, 320) -> (1, 1, 64, 320) return mm7 diff --git a/pybuda/test/operators/matmul/models/custom/model_5.py b/forge/test/operators/matmul/models/custom/model_5.py similarity index 70% rename from pybuda/test/operators/matmul/models/custom/model_5.py rename to forge/test/operators/matmul/models/custom/model_5.py index 9b46fe1ce..2789cc981 100644 --- a/pybuda/test/operators/matmul/models/custom/model_5.py +++ b/forge/test/operators/matmul/models/custom/model_5.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 5 -# Matmul operator defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaMatmulTest(PyBudaModule): +class BudaMatmulTest(ForgeModule): """ Buda Test 5 @@ -38,10 +38,10 @@ def __init__(self): self.shape_train3 = (1, 
18, 100, 522) self.shape_train4 = (1, 18, 256, 128) - self.train_param1 = pybuda.Parameter(*self.shape_train1, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape_train2, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape_train3, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape_train4, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape_train1, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape_train2, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape_train3, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape_train4, requires_grad=True) self.inputs = [ Tensor.create_from_torch(torch.rand(*self.shape_input1)), @@ -57,31 +57,31 @@ def __init__(self): def forward(self, x1, x2, x3): # Layer 2 - mm1 = pybuda.op.Matmul("mm1", x1, self.train_param1) + mm1 = forge.op.Matmul("mm1", x1, self.train_param1) # (1, 18, 32, 56) x (1, 18, 56, 128) -> (1, 18, 32, 128) - mm2 = pybuda.op.Matmul("mm2", x2, self.train_param2) + mm2 = forge.op.Matmul("mm2", x2, self.train_param2) # (1, 18, 128, 86) x (1, 18, 86, 256) -> (1, 18, 128, 256) - mm3 = pybuda.op.Matmul("mm3", x3, self.train_param3) + mm3 = forge.op.Matmul("mm3", x3, self.train_param3) # (1, 18, 256, 100) x (1, 18, 100, 522) -> (1, 18, 256, 522) # Layer 3 - mm4 = pybuda.op.Matmul("mm4", mm1, mm2) + mm4 = forge.op.Matmul("mm4", mm1, mm2) # (1, 18, 32, 128) x (1, 18, 128, 256) -> (1, 18, 32, 256) - mm5 = pybuda.op.Matmul("mm5", mm2, mm3) + mm5 = forge.op.Matmul("mm5", mm2, mm3) # (1, 18, 128, 256) x (1, 18, 256, 522) -> (1, 18, 128, 522) # Layer 4 - mm6 = pybuda.op.Matmul("mm6", mm4, x3) + mm6 = forge.op.Matmul("mm6", mm4, x3) # (1, 18, 32, 256) x (1, 18, 256, 100) -> (1, 18, 32, 100) - inter1 = pybuda.op.Matmul("inter1", mm4, self.train_param4) + inter1 = forge.op.Matmul("inter1", mm4, self.train_param4) # (1, 18, 32, 256) x (1, 18, 256, 128) -> (1, 18, 32, 128) - mm7 = pybuda.op.Matmul("mm7", inter1, mm5) + mm7 = forge.op.Matmul("mm7", inter1, mm5) # (1, 18, 32, 128) x (1, 18, 128, 522) -> (1, 18, 32, 522) # Layer 5 - tr1 = pybuda.op.Transpose("tr1", mm6, 3, 2) + tr1 = forge.op.Transpose("tr1", mm6, 3, 2) # (1, 18, 32, 100) -> (1, 18, 100, 32) - mm8 = pybuda.op.Matmul("mm8", tr1, mm7) + mm8 = forge.op.Matmul("mm8", tr1, mm7) # (1, 18, 100, 32) x (1, 18, 32, 522) -> (1, 18, 100, 522) return mm8 diff --git a/pybuda/test/operators/matmul/models/custom/model_9.py b/forge/test/operators/matmul/models/custom/model_9.py similarity index 62% rename from pybuda/test/operators/matmul/models/custom/model_9.py rename to forge/test/operators/matmul/models/custom/model_9.py index e1b6723f0..642d0af32 100644 --- a/pybuda/test/operators/matmul/models/custom/model_9.py +++ b/forge/test/operators/matmul/models/custom/model_9.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 9 -# Matmul operator defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaMatmulTest(PyBudaModule): +class BudaMatmulTest(ForgeModule): """ Buda Test 9 @@ -48,19 +48,19 @@ def __init__(self): self.shape_train12 = (1, 16, 210, 70) self.shape_train13 = (1, 16, 50, 512) - self.train_param1 = 
pybuda.Parameter(*self.shape_train1, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape_train2, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape_train3, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape_train4, requires_grad=True) - self.train_param5 = pybuda.Parameter(*self.shape_train5, requires_grad=True) - self.train_param6 = pybuda.Parameter(*self.shape_train6, requires_grad=True) - self.train_param7 = pybuda.Parameter(*self.shape_train7, requires_grad=True) - self.train_param8 = pybuda.Parameter(*self.shape_train8, requires_grad=True) - self.train_param9 = pybuda.Parameter(*self.shape_train9, requires_grad=True) - self.train_param10 = pybuda.Parameter(*self.shape_train10, requires_grad=True) - self.train_param11 = pybuda.Parameter(*self.shape_train11, requires_grad=True) - self.train_param12 = pybuda.Parameter(*self.shape_train12, requires_grad=True) - self.train_param13 = pybuda.Parameter(*self.shape_train13, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape_train1, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape_train2, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape_train3, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape_train4, requires_grad=True) + self.train_param5 = forge.Parameter(*self.shape_train5, requires_grad=True) + self.train_param6 = forge.Parameter(*self.shape_train6, requires_grad=True) + self.train_param7 = forge.Parameter(*self.shape_train7, requires_grad=True) + self.train_param8 = forge.Parameter(*self.shape_train8, requires_grad=True) + self.train_param9 = forge.Parameter(*self.shape_train9, requires_grad=True) + self.train_param10 = forge.Parameter(*self.shape_train10, requires_grad=True) + self.train_param11 = forge.Parameter(*self.shape_train11, requires_grad=True) + self.train_param12 = forge.Parameter(*self.shape_train12, requires_grad=True) + self.train_param13 = forge.Parameter(*self.shape_train13, requires_grad=True) def my_rand(*shape, requires_grad=False): return (torch.rand(*shape, requires_grad=requires_grad) - 0.5).detach() @@ -88,92 +88,92 @@ def my_rand(*shape, requires_grad=False): def forward(self, x1, x2, x3): # Layer 2 - mm1 = pybuda.op.Matmul("mm1", x1, self.train_param1) + mm1 = forge.op.Matmul("mm1", x1, self.train_param1) # (1, 16, 64, 210) x (1, 16, 210, 78) -> (1, 16, 64, 78) - mm2 = pybuda.op.Matmul("mm2", x2, self.train_param2) + mm2 = forge.op.Matmul("mm2", x2, self.train_param2) # (1, 16, 70, 64) x (1, 16, 64, 36) -> (1, 16, 70, 36) - mm3 = pybuda.op.Matmul("mm3", x3, self.train_param3) + mm3 = forge.op.Matmul("mm3", x3, self.train_param3) # (1, 16, 240, 512) x (1, 16, 512, 64) -> (1, 16, 240, 64) # Layer 3 - tr1 = pybuda.op.Transpose("tr1", self.train_param1, 3, 2) + tr1 = forge.op.Transpose("tr1", self.train_param1, 3, 2) # (1, 16, 210, 78) -> (1, 16, 78, 210) - mm4 = pybuda.op.Matmul("mm4", mm1, tr1) + mm4 = forge.op.Matmul("mm4", mm1, tr1) # (1, 16, 64, 78) x (1, 16, 78, 210) -> (1, 16, 64, 210) - inter1 = pybuda.op.Matmul("iter1", x2, self.train_param7) + inter1 = forge.op.Matmul("iter1", x2, self.train_param7) # (1, 16, 70, 64) x (1, 16, 64, 240) -> (1, 16, 70, 240) - mm5 = pybuda.op.Matmul("mm5", inter1, x3) + mm5 = forge.op.Matmul("mm5", inter1, x3) # (1, 16, 70, 240) x (1, 16, 240, 512) -> (1, 16, 70, 512) - inter2 = pybuda.op.Matmul("inter2", mm2, self.train_param8) + inter2 = forge.op.Matmul("inter2", mm2, self.train_param8) # (1, 16, 70, 36) x (1, 16, 36, 240) -> (1, 16, 70, 
240) - mm6 = pybuda.op.Matmul("mm6", inter2, mm3) + mm6 = forge.op.Matmul("mm6", inter2, mm3) # (1, 16, 70, 240) x (1, 16, 240, 64) -> (1, 16, 70, 64) # Layer 4 - tr2 = pybuda.op.Transpose("tr2", mm4, 3, 2) + tr2 = forge.op.Transpose("tr2", mm4, 3, 2) # (1, 16, 64, 210) -> (1, 16, 210, 64) - tr3 = pybuda.op.Transpose("tr3", x2, 3, 2) + tr3 = forge.op.Transpose("tr3", x2, 3, 2) # (1, 16, 70, 64) -> (1, 16, 64, 70) - mm7 = pybuda.op.Matmul("mm7", tr2, tr3) + mm7 = forge.op.Matmul("mm7", tr2, tr3) # (1, 16, 210, 64) x (1, 16, 64, 70) -> (1, 16, 210, 70) - inter6 = pybuda.op.Matmul("inter6", mm4, self.train_param12) + inter6 = forge.op.Matmul("inter6", mm4, self.train_param12) # (1, 16, 64, 210) x (1, 16, 210, 70) -> (1, 16, 64, 70) - mm8 = pybuda.op.Matmul("mm8", inter6, mm5) + mm8 = forge.op.Matmul("mm8", inter6, mm5) # (1, 16, 64, 70) x (1, 16, 70, 512) -> (1, 16, 64, 512) - mm9 = pybuda.op.Matmul("mm9", mm5, self.train_param4) + mm9 = forge.op.Matmul("mm9", mm5, self.train_param4) # (1, 16, 70, 512) x (1, 16, 512, 64) -> (1, 16, 70, 64) - tr4 = pybuda.op.Transpose("tr4", self.train_param3, 3, 2) + tr4 = forge.op.Transpose("tr4", self.train_param3, 3, 2) # (1, 16, 512, 64) -> (1, 16, 64, 512) - mm10 = pybuda.op.Matmul("mm10", mm6, tr4) + mm10 = forge.op.Matmul("mm10", mm6, tr4) # (1, 16, 70, 64) x (1, 16, 64, 512) -> (1, 16, 70, 512) # Layer 5 - inter3 = pybuda.op.Matmul("inter3", mm7, self.train_param9) + inter3 = forge.op.Matmul("inter3", mm7, self.train_param9) # (1, 16, 210, 70) x (1, 16, 70, 64) -> (1, 16, 210, 64) - mm11 = pybuda.op.Matmul("mm11", inter3, mm8) + mm11 = forge.op.Matmul("mm11", inter3, mm8) # (1, 16, 210, 64) x (1, 16, 64, 512) -> (1, 16, 210, 512) - inter4 = pybuda.op.Matmul("inter4", x1, self.train_param10) + inter4 = forge.op.Matmul("inter4", x1, self.train_param10) # (1, 16, 64, 210) x (1, 16, 210, 240) -> (1, 16, 64, 240) - mm12 = pybuda.op.Matmul("mm12", inter4, x3) + mm12 = forge.op.Matmul("mm12", inter4, x3) # (1, 16, 64, 240) x (1, 16, 240, 512) -> (1, 16, 64, 512) - tr5 = pybuda.op.Transpose("tr5", mm6, 3, 2) + tr5 = forge.op.Transpose("tr5", mm6, 3, 2) # (1, 16, 70, 64) -> (1, 16, 64, 70) - mm13 = pybuda.op.Matmul("mm13", tr5, mm10) + mm13 = forge.op.Matmul("mm13", tr5, mm10) # (1, 16, 64, 70) x (1, 16, 70, 512) -> (1, 16, 64, 512) # Layer 6 - mm14 = pybuda.op.Matmul("mm14", x1, mm11) + mm14 = forge.op.Matmul("mm14", x1, mm11) # (1, 16, 64, 210) x (1, 16, 210, 512) -> (1, 16, 64, 512) - tr6 = pybuda.op.Transpose("tr6", mm12, 3, 2) + tr6 = forge.op.Transpose("tr6", mm12, 3, 2) # (1, 16, 64, 512) -> (1, 16, 512, 64) - mm15 = pybuda.op.Matmul("mm15", tr6, mm13) + mm15 = forge.op.Matmul("mm15", tr6, mm13) # (1, 16, 512, 64) x (1, 16, 64, 512) -> (1, 16, 512, 512) - mm16 = pybuda.op.Matmul("mm16", mm9, self.train_param5) + mm16 = forge.op.Matmul("mm16", mm9, self.train_param5) # (1, 16, 70, 64) x (1, 16, 64, 256) -> (1, 16, 70, 256) # Layer 7 - mm17 = pybuda.op.Matmul("mm17", mm14, mm15) + mm17 = forge.op.Matmul("mm17", mm14, mm15) # (1, 16, 64, 512) x (1, 16, 512, 512) -> (1, 16, 64, 512) - mm18 = pybuda.op.Matmul("mm18", mm15, self.train_param6) + mm18 = forge.op.Matmul("mm18", mm15, self.train_param6) # (1, 16, 512, 512) x (1, 16, 512, 50) -> (1, 16, 512, 50) - tr7 = pybuda.op.Transpose("tr7", mm16, 3, 2) + tr7 = forge.op.Transpose("tr7", mm16, 3, 2) # (1, 16, 70, 256) -> (1, 16, 256, 70) - mm19 = pybuda.op.Matmul("mm19", tr7, mm10) + mm19 = forge.op.Matmul("mm19", tr7, mm10) # (1, 16, 256, 70) x (1, 16, 70, 512) -> (1, 16, 256, 512) # Layer 8 - mm20 = 
pybuda.op.Matmul("mm20", mm17, mm18) + mm20 = forge.op.Matmul("mm20", mm17, mm18) # (1, 16, 64, 512) x (1, 16, 512, 50) -> (1, 16, 64, 50) - inter5 = pybuda.op.Matmul("inter5", mm18, self.train_param11) + inter5 = forge.op.Matmul("inter5", mm18, self.train_param11) # (1, 16, 512, 50) x (1, 16, 50, 256) -> (1, 16, 512, 256) - mm21 = pybuda.op.Matmul("mm21", inter5, mm19) + mm21 = forge.op.Matmul("mm21", inter5, mm19) # (1, 16, 512, 256) x (1, 16, 256, 512) -> (1, 16, 512, 512) # Layer 9 - inter7 = pybuda.op.Matmul("inter7", mm20, self.train_param13) + inter7 = forge.op.Matmul("inter7", mm20, self.train_param13) # (1, 16, 64, 50) x (1, 16, 50, 512) -> (1, 16, 64, 512) - mm22 = pybuda.op.Matmul("mm22", inter7, mm21) + mm22 = forge.op.Matmul("mm22", inter7, mm21) # (1, 16, 64, 512) x (1, 16, 512, 512) -> (1, 16, 64, 512) return mm22 diff --git a/pybuda/test/operators/matmul/models/generic/model_1.py b/forge/test/operators/matmul/models/generic/model_1.py similarity index 69% rename from pybuda/test/operators/matmul/models/generic/model_1.py rename to forge/test/operators/matmul/models/generic/model_1.py index 6be75ec1c..c9879099a 100644 --- a/pybuda/test/operators/matmul/models/generic/model_1.py +++ b/forge/test/operators/matmul/models/generic/model_1.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# Matmul operator defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaMatmulTest(PyBudaModule): +class BudaMatmulTest(ForgeModule): """ Buda Matmul Test 1 @@ -27,14 +27,14 @@ def __init__(self, shape): super().__init__("Buda Matmul Test 1") self.testname = "Operator Matmul Test 1" self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) def forward(self, x): - tr = pybuda.op.Transpose("tr", self.train_param, -1, -2) - return pybuda.op.Matmul("mm", x, tr) + tr = forge.op.Transpose("tr", self.train_param, -1, -2) + return forge.op.Matmul("mm", x, tr) def values(self): return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/forge/test/operators/matmul/models/generic/model_10.py b/forge/test/operators/matmul/models/generic/model_10.py new file mode 100644 index 000000000..16bcca53a --- /dev/null +++ b/forge/test/operators/matmul/models/generic/model_10.py @@ -0,0 +1,101 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 10 +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch + +import forge + +from forge import ForgeModule, Tensor + + +class BudaMatmulTest(ForgeModule): + """ + Buda Test 10 + + In this test we have 22 operations, and 3 input tensors and 9 trainable variables. + One operand represents input and the other one is trainable paramater. 
+ """ + + def __init__(self, shape): + super().__init__("Buda Test 10") + self.testname = "Operator Matmul Test 10" + self.shape = shape + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param5 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param6 = forge.Parameter(*self.shape, requires_grad=True) + + self.train_param7 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param8 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param9 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] + for i in range(9): + self.set_parameter("train_param" + str(i + 1), torch.rand(*self.shape, requires_grad=True)) + + def forward(self, x1, x2, x3): + + # Layer 2 + tr1 = forge.op.Transpose("tr1", self.train_param1, -1, -2) + mm1 = forge.op.Matmul("mm1", x1, tr1) + tr2 = forge.op.Transpose("tr2", self.train_param2, -1, -2) + mm2 = forge.op.Matmul("mm2", x2, tr2) + tr3 = forge.op.Transpose("tr3", self.train_param3, -1, -2) + mm3 = forge.op.Matmul("mm3", x3, tr3) + + # Layer 3 + mm4 = forge.op.Matmul("mm4", mm1, x2) + mm5 = forge.op.Matmul("mm5", mm2, x3) + mm6 = forge.op.Matmul("mm6", mm3, x2) + + # Layer 4 + tr4 = forge.op.Transpose("tr4", self.train_param4, -1, -2) + mm7 = forge.op.Matmul("mm7", mm4, tr4) + tr5 = forge.op.Transpose("tr5", self.train_param5, -1, -2) + mm8 = forge.op.Matmul("mm8", mm5, tr5) + tr6 = forge.op.Transpose("tr6", self.train_param6, -1, -2) + mm9 = forge.op.Matmul("mm9", mm6, tr6) + + # Layer + mm10 = forge.op.Matmul("mm10", mm2, self.train_param4) + + # Layer 6 + mm11 = forge.op.Matmul("mm11", mm1, self.train_param5) + mm12 = forge.op.Matmul("mm12", mm2, self.train_param6) + + # Layer 7 + tr7 = forge.op.Transpose("tr7", mm10, -1, -2) + mm13 = forge.op.Matmul("mm13", mm11, tr7) + mm14 = forge.op.Matmul("mm14", mm7, mm9) + mm15 = forge.op.Matmul("mm15", mm8, mm12) + + # Layer 8 + mm16 = forge.op.Matmul("mm16", mm13, self.train_param9) + mm17 = forge.op.Matmul("mm17", mm14, self.train_param7) + tr8 = forge.op.Transpose("tr8", self.train_param8, -1, -2) + mm18 = forge.op.Matmul("mm18", mm15, tr8) + + # Layer 9 + mm19 = forge.op.Matmul("mm19", mm16, tr1) + mm20 = forge.op.Matmul("mm20", mm17, tr4) + + # Layer 10 + mm21 = forge.op.Matmul("mm21", mm19, mm20) + + # Layer 11 + mm22 = forge.op.Matmul("mm22", mm21, mm18) + + return mm22 + + def values(self): + return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/pybuda/test/operators/matmul/models/generic/model_2.py b/forge/test/operators/matmul/models/generic/model_2.py similarity index 62% rename from pybuda/test/operators/matmul/models/generic/model_2.py rename to forge/test/operators/matmul/models/generic/model_2.py index 0a523dd02..29aaed537 100644 --- a/pybuda/test/operators/matmul/models/generic/model_2.py +++ b/forge/test/operators/matmul/models/generic/model_2.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# Matmul operator defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda 
+import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaMatmulTest(PyBudaModule): +class BudaMatmulTest(ForgeModule): """ Buda Test 2 @@ -27,9 +27,9 @@ def __init__(self, shape): super().__init__("Buda Test 2") self.testname = "Operator Matmul Test 2" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(3): @@ -38,25 +38,25 @@ def __init__(self, shape): def forward(self, x1, x2, x3): # Layer 2 - tr1 = pybuda.op.Transpose("tr1", self.train_param1, -1, -2) + tr1 = forge.op.Transpose("tr1", self.train_param1, -1, -2) # (W, Z, R, C) --> (W, Z, C, R) - mm1 = pybuda.op.Matmul("mm1", x1, tr1) + mm1 = forge.op.Matmul("mm1", x1, tr1) # (W, Z, R, C) x (W, Z, C, R) --> (W, Z, R, R) - tr2 = pybuda.op.Transpose("tr2", self.train_param2, -1, -2) + tr2 = forge.op.Transpose("tr2", self.train_param2, -1, -2) # (W, Z, R, C) --> (W, Z, C, R) - mm2 = pybuda.op.Matmul("mm2", x2, tr2) + mm2 = forge.op.Matmul("mm2", x2, tr2) # (W, Z, R, C) x (W, Z, C, R) --> (W, Z, R, R) - tr3 = pybuda.op.Transpose("tr3", self.train_param3, -1, -2) + tr3 = forge.op.Transpose("tr3", self.train_param3, -1, -2) # (W, Z, R, C) --> (W, Z, C, R) - mm3 = pybuda.op.Matmul("mm3", x3, tr3) + mm3 = forge.op.Matmul("mm3", x3, tr3) # (W, Z, R, C) x (W, Z, C, R) --> (W, Z, R, R) # Layer 3 - mm4 = pybuda.op.Matmul("mm4", mm1, mm2) + mm4 = forge.op.Matmul("mm4", mm1, mm2) # (W, Z, R, R) x (W, Z, R, R) --> (W, Z, R, R) # Layer 4 - mm5 = pybuda.op.Matmul("mm5", mm4, mm3) + mm5 = forge.op.Matmul("mm5", mm4, mm3) # (W, Z, R, R) x (W, Z, R, R) --> (W, Z, R, R) return mm5 diff --git a/forge/test/operators/matmul/models/generic/model_3.py b/forge/test/operators/matmul/models/generic/model_3.py new file mode 100644 index 000000000..f1469d123 --- /dev/null +++ b/forge/test/operators/matmul/models/generic/model_3.py @@ -0,0 +1,67 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 3 +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch + +import forge + +from forge import ForgeModule, Tensor + + +class BudaMatmulTest(ForgeModule): + """ + Buda Test 3 + + In this test we have 10 operations, and three input tensors and three trainable variables. + One operand represents input and the other one is trainable paramater. 
+ """ + + def __init__(self, shape): + super().__init__("Buda Test 3") + self.testname = "Operator Matmul Test 3" + self.shape = shape + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] + for i in range(3): + self.set_parameter("train_param" + str(i + 1), torch.rand(*self.shape, requires_grad=True)) + + def forward(self, x1, x2, x3): + + # Layer 2 + tr1 = forge.op.Transpose("tr1", self.train_param1, -1, -2) + mm1 = forge.op.Matmul("mm1", x1, tr1) + tr2 = forge.op.Transpose("tr2", self.train_param2, -1, -2) + mm2 = forge.op.Matmul("mm2", x2, tr2) + tr3 = forge.op.Transpose("tr3", x3, -1, -2) + mm3 = forge.op.Matmul("mm3", tr3, self.train_param3) + + # Layer 3 + mm4 = forge.op.Matmul("mm4", mm1, x2) + mm5 = forge.op.Matmul("mm5", self.train_param2, mm3) + mm6 = forge.op.Matmul("mm6", mm3, tr3) + + # Layer 4 + mm7 = forge.op.Matmul("mm7", mm2, mm5) + mm8 = forge.op.Matmul("mm8", mm6, x3) + + # Layer 5 + mm9 = forge.op.Matmul("mm9", mm7, mm8) + + # Layer 6 + tr4 = forge.op.Transpose("tr4", mm4, -1, -2) + mm10 = forge.op.Matmul("mm10", tr4, mm9) + + return mm10 + + def values(self): + return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/forge/test/operators/matmul/models/generic/model_6.py b/forge/test/operators/matmul/models/generic/model_6.py new file mode 100644 index 000000000..a060dd501 --- /dev/null +++ b/forge/test/operators/matmul/models/generic/model_6.py @@ -0,0 +1,75 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 6 +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch + +import forge + +from forge import ForgeModule, Tensor + + +class BudaMatmulTest(ForgeModule): + """ + Buda Test 6 + + In this test we have 13 operations, and 4 input tensors and 4 trainable variables. + One operand represents input and the other one is trainable paramater. 
+ """ + + def __init__(self, shape): + super().__init__("Buda Test 6") + self.testname = "Operator Matmul Test 6" + self.shape = shape + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(4)] + for i in range(4): + self.set_parameter("train_param" + str(i + 1), torch.rand(*self.shape, requires_grad=True)) + + def forward(self, x1, x2, x3, x4): + + # Layer 2 + tr1 = forge.op.Transpose("tr1", self.train_param1, -1, -2) + mm1 = forge.op.Matmul("mm1", x1, tr1) + + tr2 = forge.op.Transpose("tr2", self.train_param2, -1, -2) + mm2 = forge.op.Matmul("mm2", x2, tr2) + + tr3 = forge.op.Transpose("tr3", x3, -1, -2) + mm3 = forge.op.Matmul("mm3", tr3, self.train_param3) + + tr4 = forge.op.Transpose("tr4", x4, -1, -2) + mm4 = forge.op.Matmul("mm4", tr4, self.train_param4) + + # Layer 3 + mm5 = forge.op.Matmul("mm5", mm1, mm2) + mm6 = forge.op.Matmul("mm6", x3, mm3) + mm7 = forge.op.Matmul("mm7", mm3, mm4) + + # Layer 4 + mm8 = forge.op.Matmul("mm8", mm5, mm6) + mm9 = forge.op.Matmul("mm9", mm3, mm7) + mm10 = forge.op.Matmul("mm10", mm6, mm7) + + # Layer 5 + mm11 = forge.op.Matmul("mm11", mm8, mm9) + tr5 = forge.op.Transpose("tr5", mm10, -1, -2) + mm12 = forge.op.Matmul("mm12", mm9, tr5) + + # Layer 6 + mm13 = forge.op.Matmul("mm13", mm11, mm12) + + return mm13 + + def values(self): + return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/pybuda/test/operators/matmul/models/generic/model_7.py b/forge/test/operators/matmul/models/generic/model_7.py similarity index 57% rename from pybuda/test/operators/matmul/models/generic/model_7.py rename to forge/test/operators/matmul/models/generic/model_7.py index 532b663a4..140fddecd 100644 --- a/pybuda/test/operators/matmul/models/generic/model_7.py +++ b/forge/test/operators/matmul/models/generic/model_7.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 7 -# Matmul operator defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaMatmulTest(PyBudaModule): +class BudaMatmulTest(ForgeModule): """ Buda Test 7 @@ -27,10 +27,10 @@ def __init__(self, shape): super().__init__("Buda Test 7") self.testname = "Operator Matmul Test 7" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch((torch.rand(*self.shape) - 0.5).detach()) for i in range(4)] for i in range(4): @@ -38,85 +38,85 @@ def __init__(self, shape): def 
forward(self, x1, x2, x3, x4): # Layer 2 - tr1 = pybuda.op.Transpose("tr1", self.train_param1, -1, -2) + tr1 = forge.op.Transpose("tr1", self.train_param1, -1, -2) # (..., R, C) --> (..., C, R) - mm1 = pybuda.op.Matmul("mm1", x1, tr1) + mm1 = forge.op.Matmul("mm1", x1, tr1) # (..., R, C) x (..., C, R) --> (..., R, R) - tr2 = pybuda.op.Transpose("tr2", self.train_param2, -1, -2) + tr2 = forge.op.Transpose("tr2", self.train_param2, -1, -2) # (..., R, C) --> (..., C, R) - mm2 = pybuda.op.Matmul("mm2", x1, tr2) + mm2 = forge.op.Matmul("mm2", x1, tr2) # (..., R, C) x (..., C, R) --> (..., R, R) - tr3 = pybuda.op.Transpose("tr3", x2, -1, -2) + tr3 = forge.op.Transpose("tr3", x2, -1, -2) # (..., R, C) --> (..., C, R) - mm3 = pybuda.op.Matmul("mm3", tr3, self.train_param2) + mm3 = forge.op.Matmul("mm3", tr3, self.train_param2) # (..., C, R) x (..., R, C) --> (..., C, C) - mm4 = pybuda.op.Matmul("mm4", tr3, self.train_param3) + mm4 = forge.op.Matmul("mm4", tr3, self.train_param3) # (..., C, R) x (..., R, C) --> (..., C, C) - tr4 = pybuda.op.Transpose("tr4", self.train_param3, -1, -2) + tr4 = forge.op.Transpose("tr4", self.train_param3, -1, -2) # (..., R, C) --> (..., C, R) - mm5 = pybuda.op.Matmul("mm5", x3, tr4) + mm5 = forge.op.Matmul("mm5", x3, tr4) # (..., R, C) x (..., C, R) --> (..., R, R) - tr5 = pybuda.op.Transpose("tr5", self.train_param4, -1, -2) + tr5 = forge.op.Transpose("tr5", self.train_param4, -1, -2) # (..., R, C) --> (..., C, R) - mm6 = pybuda.op.Matmul("mm6", x4, tr5) + mm6 = forge.op.Matmul("mm6", x4, tr5) # (..., R, C) x (..., C, R) --> (..., R, R) # Layer 3 - mm7 = pybuda.op.Matmul("mm7", mm1, mm2) + mm7 = forge.op.Matmul("mm7", mm1, mm2) # (..., R, R) x (..., R, R) --> (..., R, R) - mm8 = pybuda.op.Matmul("mm8", x2, mm3) + mm8 = forge.op.Matmul("mm8", x2, mm3) # (..., R, C) x (..., C, C) --> (..., R, C) - mm9 = pybuda.op.Matmul("mm9", mm3, mm4) + mm9 = forge.op.Matmul("mm9", mm3, mm4) # (..., C, C) x (..., C, C) --> (..., C, C) - mm10 = pybuda.op.Matmul("mm10", mm1, mm5) + mm10 = forge.op.Matmul("mm10", mm1, mm5) # (..., R, R) x (..., R, R) --> (..., R, R) - mm11 = pybuda.op.Matmul("mm11", tr2, mm6) + mm11 = forge.op.Matmul("mm11", tr2, mm6) # (..., C, R) x (..., R, R) --> (..., C, R) # Layer 4 - mm12 = pybuda.op.Matmul("mm12", mm7, mm8) + mm12 = forge.op.Matmul("mm12", mm7, mm8) # (..., R, R) x (..., R, C) --> (..., R, C) - mm13 = pybuda.op.Matmul("mm13", mm8, mm9) + mm13 = forge.op.Matmul("mm13", mm8, mm9) # (..., R, C) x (..., C, C) --> (..., R, C) - mm14 = pybuda.op.Matmul("mm14", mm10, mm8) + mm14 = forge.op.Matmul("mm14", mm10, mm8) # (..., R, R) x (..., R, C) --> (..., R, C) - mm15 = pybuda.op.Matmul("mm15", mm8, mm11) + mm15 = forge.op.Matmul("mm15", mm8, mm11) # (..., R, C) x (..., C, R) --> (..., R, R) # Layer 5 - tr6 = pybuda.op.Transpose("tr6", mm13, -1, -2) + tr6 = forge.op.Transpose("tr6", mm13, -1, -2) # (..., R, C) --> (..., C, R) - mm16 = pybuda.op.Matmul("mm16", mm12, tr6) + mm16 = forge.op.Matmul("mm16", mm12, tr6) # (..., R, C) x (..., C, R) --> (..., R, R) - mm17 = pybuda.op.Matmul("mm17", mm14, tr6) + mm17 = forge.op.Matmul("mm17", mm14, tr6) # (..., R, C) x (..., C, R) --> (..., R, R) - mm18 = pybuda.op.Matmul("mm18", mm15, mm14) + mm18 = forge.op.Matmul("mm18", mm15, mm14) # (..., R, R) x (..., R, C) --> (..., R, C) - mm19 = pybuda.op.Matmul("mm19", mm15, mm12) + mm19 = forge.op.Matmul("mm19", mm15, mm12) # (..., R, R) x (..., R, C) --> (..., R, C) # Layer 6 - mm20 = pybuda.op.Matmul("mm20", mm16, mm17) + mm20 = forge.op.Matmul("mm20", mm16, mm17) # 
(..., R, R) x (..., R, R) --> (..., R, R) - mm21 = pybuda.op.Matmul("mm21", mm17, mm18) + mm21 = forge.op.Matmul("mm21", mm17, mm18) # (..., R, R) x (..., R, C) --> (..., R, C) - mm22 = pybuda.op.Matmul("mm22", mm16, mm19) + mm22 = forge.op.Matmul("mm22", mm16, mm19) # (..., R, R) x (..., R, C) --> (..., R, C) # Layer 7 - mm23 = pybuda.op.Matmul("mm23", mm20, mm21) + mm23 = forge.op.Matmul("mm23", mm20, mm21) # (..., R, R) x (..., R, C) --> (..., R, C) - tr7 = pybuda.op.Transpose("tr7", mm22, -1, -2) + tr7 = forge.op.Transpose("tr7", mm22, -1, -2) # (..., R, C) --> (..., C, R) - mm24 = pybuda.op.Matmul("mm24", mm21, tr7) + mm24 = forge.op.Matmul("mm24", mm21, tr7) # (..., R, C) x (..., C, R) --> (..., R, R) # Layer 8 - mm25 = pybuda.op.Matmul("mm25", mm24, mm23) + mm25 = forge.op.Matmul("mm25", mm24, mm23) # (..., R, R) x (..., R, C) --> (..., R, C) return mm25 diff --git a/pybuda/test/operators/matmul/models/generic/model_8.py b/forge/test/operators/matmul/models/generic/model_8.py similarity index 55% rename from pybuda/test/operators/matmul/models/generic/model_8.py rename to forge/test/operators/matmul/models/generic/model_8.py index 6146c9bc5..2dcaa3650 100644 --- a/pybuda/test/operators/matmul/models/generic/model_8.py +++ b/forge/test/operators/matmul/models/generic/model_8.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 8 -# Matmul operator defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Matmul operator defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaMatmulTest(PyBudaModule): +class BudaMatmulTest(ForgeModule): """ Buda Test 8 @@ -27,12 +27,12 @@ def __init__(self, shape): super().__init__("Buda Test 8") self.testname = "Operator Matmul Test 8" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param5 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param6 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param5 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param6 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch((torch.rand(*self.shape) - 0.5).detach()) for i in range(3)] for i in range(6): @@ -47,83 +47,83 @@ def forward(self, x1, x2, x3): # (..., H, W) # Layer 2 - tr1 = pybuda.op.Transpose("tr1", self.train_param1, -1, -2) + tr1 = forge.op.Transpose("tr1", self.train_param1, -1, -2) # (..., H, W) -> (..., W, H) - mm1 = pybuda.op.Matmul("mm1", x1, tr1) + mm1 = forge.op.Matmul("mm1", x1, tr1) # (..., H, W) x (..., W, H) -> (..., H, H) - tr2 = pybuda.op.Transpose("tr2", self.train_param2, -1, -2) + tr2 = forge.op.Transpose("tr2", self.train_param2, -1, -2) # (..., H, W) -> (..., W, H) - mm2 = pybuda.op.Matmul("mm2", x2, tr2) + mm2 = forge.op.Matmul("mm2", 
x2, tr2) # (..., H, W) x (..., W, H) -> (..., H, H) - tr3 = pybuda.op.Transpose("tr3", self.train_param3, -1, -2) + tr3 = forge.op.Transpose("tr3", self.train_param3, -1, -2) # (..., H, W) -> (..., W, H) - mm3 = pybuda.op.Matmul("mm3", x3, tr3) + mm3 = forge.op.Matmul("mm3", x3, tr3) # (..., H, W) x (..., W, H) -> (..., H, H) # Layer 3 - mm4 = pybuda.op.Matmul("mm4", mm1, self.train_param1) + mm4 = forge.op.Matmul("mm4", mm1, self.train_param1) # (..., H, H) x (..., H, W) -> (..., H, W) - tr4 = pybuda.op.Transpose("tr4", x2, -1, -2) + tr4 = forge.op.Transpose("tr4", x2, -1, -2) # (..., H, W) -> (..., W, H) - mm5 = pybuda.op.Matmul("mm5", tr4, x3) + mm5 = forge.op.Matmul("mm5", tr4, x3) # (..., W, H) x (..., H, W) -> (..., W, W) - mm6 = pybuda.op.Matmul("mm6", mm2, mm3) + mm6 = forge.op.Matmul("mm6", mm2, mm3) # (..., H, H) x (..., H, H) -> (..., H, H) # Layer 4 - mm7 = pybuda.op.Matmul("mm7", mm4, tr4) + mm7 = forge.op.Matmul("mm7", mm4, tr4) # (..., H, W) x (..., W, H) -> (..., H, H) - mm8 = pybuda.op.Matmul("mm8", mm4, mm5) + mm8 = forge.op.Matmul("mm8", mm4, mm5) # (..., H, W) x (..., W, W) -> (..., H, W) - tr5 = pybuda.op.Transpose("tr5", self.train_param4, -1, -2) + tr5 = forge.op.Transpose("tr5", self.train_param4, -1, -2) # (..., H, W) -> (..., W, H) - mm9 = pybuda.op.Matmul("mm9", mm5, tr5) + mm9 = forge.op.Matmul("mm9", mm5, tr5) # (..., W, W) x (..., W, H) -> (..., W, H) - mm10 = pybuda.op.Matmul("mm10", mm6, self.train_param3) + mm10 = forge.op.Matmul("mm10", mm6, self.train_param3) # (..., H, H) x (..., H, W) -> (..., H, W) # Layer 5 - mm11 = pybuda.op.Matmul("mm11", mm7, mm8) + mm11 = forge.op.Matmul("mm11", mm7, mm8) # (..., H, H) x (..., H, W) -> (..., H, W) - tr6 = pybuda.op.Transpose("tr6", x3, -1, -2) + tr6 = forge.op.Transpose("tr6", x3, -1, -2) # (..., H, W) -> (..., W, H) - mm12 = pybuda.op.Matmul("mm12", self.train_param1, tr6) + mm12 = forge.op.Matmul("mm12", self.train_param1, tr6) # (..., H, W) x (..., W, H) -> (..., H, H) - mm13 = pybuda.op.Matmul("mm13", mm6, mm10) + mm13 = forge.op.Matmul("mm13", mm6, mm10) # (..., H, H) x (..., H, W) -> (..., H, W) # Layer 6 - tr7 = pybuda.op.Transpose("tr7", mm11, -1, -2) + tr7 = forge.op.Transpose("tr7", mm11, -1, -2) # (..., H, W) -> (..., W, H) - mm14 = pybuda.op.Matmul("mm14", x1, tr7) + mm14 = forge.op.Matmul("mm14", x1, tr7) # (..., H, W) x (..., W, H) -> (..., H, H) - mm15 = pybuda.op.Matmul("mm15", mm12, mm13) + mm15 = forge.op.Matmul("mm15", mm12, mm13) # (..., H, H) x (..., H, W) -> (..., H, W) - mm16 = pybuda.op.Matmul("mm16", mm9, self.train_param5) + mm16 = forge.op.Matmul("mm16", mm9, self.train_param5) # (..., W, H) x (..., H, W) -> (..., W, W) # Layer 7 - mm17 = pybuda.op.Matmul("mm17", mm14, mm15) + mm17 = forge.op.Matmul("mm17", mm14, mm15) # (..., H, H) x (..., H, W) -> (..., H, W) - tr9 = pybuda.op.Transpose("tr9", self.train_param6, -1, -2) + tr9 = forge.op.Transpose("tr9", self.train_param6, -1, -2) # (..., H, W) -> (..., W, H) - mm18 = pybuda.op.Matmul("mm18", mm15, tr9) + mm18 = forge.op.Matmul("mm18", mm15, tr9) # (..., H, W) x (..., W, H) -> (..., H, H) - mm19 = pybuda.op.Matmul("mm19", mm10, mm16) + mm19 = forge.op.Matmul("mm19", mm10, mm16) # (..., H, W) x (..., W, W) -> (..., H, W) # Layer 8 - mm20 = pybuda.op.Matmul("mm20", mm18, mm17) + mm20 = forge.op.Matmul("mm20", mm18, mm17) # (..., H, H) x (..., H, W) -> (..., H, W) - mm21 = pybuda.op.Matmul("mm21", mm18, mm19) + mm21 = forge.op.Matmul("mm21", mm18, mm19) # (..., H, H) x (..., H, W) -> (..., H, W) # Layer 9 - tr8 = pybuda.op.Transpose("tr8", 
mm20, -1, -2) + tr8 = forge.op.Transpose("tr8", mm20, -1, -2) # (..., H, W) -> (..., W, H) - mm22 = pybuda.op.Matmul("mm22", tr8, mm21) + mm22 = forge.op.Matmul("mm22", tr8, mm21) # (..., W, H) x (..., H, W) -> (..., W, W) return mm22 diff --git a/pybuda/test/operators/matmul/test_command.sh b/forge/test/operators/matmul/test_command.sh similarity index 100% rename from pybuda/test/operators/matmul/test_command.sh rename to forge/test/operators/matmul/test_command.sh diff --git a/pybuda/test/operators/matmul/test_matmul.py b/forge/test/operators/matmul/test_matmul.py similarity index 91% rename from pybuda/test/operators/matmul/test_matmul.py rename to forge/test/operators/matmul/test_matmul.py index cb2345b0a..7352f9b07 100644 --- a/pybuda/test/operators/matmul/test_matmul.py +++ b/forge/test/operators/matmul/test_matmul.py @@ -11,16 +11,16 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig -from pybuda.verify.config import TestKind +from forge.verify.config import TestKind from .models import generic from .models import custom -MODELS_PATH = "./pybuda/test/operators/matmul/models/" +MODELS_PATH = "./forge/test/operators/matmul/models/" MODELS_GENERIC_PATH = MODELS_PATH + "generic/" MODELS_CUSTOM_PATH = MODELS_PATH + "custom/" @@ -57,7 +57,7 @@ # Generic Shape #@pytest.mark.xfail( -# reason="tenstorrent/pybuda#22" +# reason="tenstorrent/forge#22" #) @pytest.mark.parametrize("shape", shape, ids=[f"shape{'x'.join([str(jtem) for jtem in item])}" for item in shape]) @pytest.mark.parametrize("model", [item.split(".")[0] for item in os.listdir(MODELS_GENERIC_PATH) if "model" in item]) @@ -76,7 +76,7 @@ def test_matmul_generic( tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) print(model.get_parameters()) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, @@ -106,7 +106,7 @@ def test_matmul_custom( model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/matmul/test_matmul_single.py b/forge/test/operators/matmul/test_matmul_single.py similarity index 90% rename from pybuda/test/operators/matmul/test_matmul_single.py rename to forge/test/operators/matmul/test_matmul_single.py index c3dbc1c7d..e65de221e 100644 --- a/pybuda/test/operators/matmul/test_matmul_single.py +++ b/forge/test/operators/matmul/test_matmul_single.py @@ -11,17 +11,17 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from .models import generic -MODELS_PATH = "./pybuda/test/operators/matmul/models/" +MODELS_PATH = "./forge/test/operators/matmul/models/" MODELS_GENERIC_PATH = MODELS_PATH + "generic/" # @pytest.mark.xfail( -# reason="tenstorrent/pybuda#5" +# reason="tenstorrent/forge#5" # ) def test_matmul_generic( mm_train, @@ -67,7 +67,7 @@ def test_matmul_generic( model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/nary/__init__.py 
b/forge/test/operators/nary/__init__.py similarity index 100% rename from pybuda/test/operators/nary/__init__.py rename to forge/test/operators/nary/__init__.py diff --git a/pybuda/test/operators/nary/test_eltwise_nary.py b/forge/test/operators/nary/test_eltwise_nary.py similarity index 71% rename from pybuda/test/operators/nary/test_eltwise_nary.py rename to forge/test/operators/nary/test_eltwise_nary.py index 14b07462a..bd76cf74d 100644 --- a/pybuda/test/operators/nary/test_eltwise_nary.py +++ b/forge/test/operators/nary/test_eltwise_nary.py @@ -5,17 +5,17 @@ # Tests for testing of element-wise nary operators # -import pybuda.tensor +import forge.tensor import pytest import torch import os -import pybuda -import pybuda.op -from pybuda import PyBudaModule, Tensor, VerifyConfig +import forge +import forge.op +from forge import ForgeModule, Tensor, VerifyConfig from test.common import run -from pybuda.verify import TestKind, verify_module +from forge.verify import TestKind, verify_module verify_cfg = VerifyConfig( run_golden=True, run_net2pipe=True @@ -33,14 +33,14 @@ @pytest.mark.parametrize("stride", [1]) @pytest.mark.parametrize("num_operands", [2, 3]) def test_interleave(test_kind, test_device, input_shape, axis, stride, num_operands): - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self, name, axis, stride): super().__init__(name) self.axis = axis self.stride = stride def forward(self, *operands): - x = pybuda.op.Interleave( + x = forge.op.Interleave( "interleave0", *operands, axis=self.axis, stride=self.stride ) return x @@ -67,7 +67,7 @@ def test_concat(test_kind, test_device, dim, aligned): ), ) def simple_concat(a, b): - return pybuda.op.Concatenate("", a, b, axis=dim) + return forge.op.Concatenate("", a, b, axis=dim) if aligned: shapes = { @@ -94,32 +94,32 @@ def simple_concat(a, b): def test_concat_two_kinds_pad(test_device): - class Module(PyBudaModule): + class Module(ForgeModule): def __init__(self, name): super().__init__(name) self.add_parameter( - "w", pybuda.Parameter(*(1, 1, 352, 192), requires_grad=True) + "w", forge.Parameter(*(1, 1, 352, 192), requires_grad=True) ) def forward(self, in0, in1, in2, in3, in4, in5, y): - in0 = pybuda.op.Multiply("m0", in0, in0) - in1 = pybuda.op.Multiply("m1", in1, in2) - in2 = pybuda.op.Multiply("m2", in2, in3) - in3 = pybuda.op.Multiply("m3", in3, in4) - in4 = pybuda.op.Multiply("m4", in4, in4) - in5 = pybuda.op.Multiply("m5", in5, in1) - x = pybuda.op.Concatenate("", in0, in1, in2, in3, in4, in5, axis=-1) - x = pybuda.op.Multiply("m6", x, y) - x = pybuda.op.PadTile("p0", x, -1, 336) - x = pybuda.op.Matmul("mm0", x, self.get_parameter("w")) + in0 = forge.op.Multiply("m0", in0, in0) + in1 = forge.op.Multiply("m1", in1, in2) + in2 = forge.op.Multiply("m2", in2, in3) + in3 = forge.op.Multiply("m3", in3, in4) + in4 = forge.op.Multiply("m4", in4, in4) + in5 = forge.op.Multiply("m5", in5, in1) + x = forge.op.Concatenate("", in0, in1, in2, in3, in4, in5, axis=-1) + x = forge.op.Multiply("m6", x, y) + x = forge.op.PadTile("p0", x, -1, 336) + x = forge.op.Matmul("mm0", x, self.get_parameter("w")) return x - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" # compiler_cfg.place_on_new_epoch("m6_transpose_nop_0") - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{11:12}" - 
os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_PAD_SPARSE_MM"] = "{11:12}" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" # input shape common_len = 3136 @@ -143,7 +143,7 @@ def forward(self, in0, in1, in2, in3, in4, in5, y): ), ) - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{}" + os.environ["FORGE_PAD_SPARSE_MM"] = "{}" diff --git a/pybuda/test/operators/nary/test_where.py b/forge/test/operators/nary/test_where.py similarity index 75% rename from pybuda/test/operators/nary/test_where.py rename to forge/test/operators/nary/test_where.py index f9fce3f27..1fb38249f 100644 --- a/pybuda/test/operators/nary/test_where.py +++ b/forge/test/operators/nary/test_where.py @@ -9,37 +9,37 @@ import torch -import pybuda -import pybuda.op -import pybuda.tensor +import forge +import forge.op +import forge.tensor -from pybuda import PyBudaModule, VerifyConfig -from pybuda.verify import TestKind, verify_module +from forge import ForgeModule, VerifyConfig +from forge.verify import TestKind, verify_module @pytest.mark.skip(reason="This test is failing due to not supporting 'BoolTensor' for a condition") def test_cond_bool_tensor_manual_inputs(test_device): - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, cond, x, y): - return pybuda.op.Where("Where0", cond, x, y) + return forge.op.Where("Where0", cond, x, y) mod = Model("where_test_model") # manual creation of input tensors # contidion_tensor is a boolean tensor what it should be - condition_tensor = pybuda.tensor.Tensor.create_from_torch( + condition_tensor = forge.tensor.Tensor.create_from_torch( torch.tensor([[[1, 0], [1, 0], [1, 0]]], dtype=torch.bool) ) - x_tensor = pybuda.tensor.Tensor.create_from_torch( + x_tensor = forge.tensor.Tensor.create_from_torch( torch.tensor([[[0.1490, 0.3861], [1.4934, 0.4805], [-0.3992, -1.1574]]]) ) - y_tensor = pybuda.tensor.Tensor.create_from_torch( + y_tensor = forge.tensor.Tensor.create_from_torch( torch.tensor([[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]]) @@ -58,26 +58,26 @@ def forward(self, cond, x, y): @pytest.mark.skip(reason="This test is failing when condition_tensor elements have values <> 0.0 or 1.0") def test_cond_non_bool_tensor_manual_inputs(test_device): - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, cond, x, y): - return pybuda.op.Where("Where0", cond, x, y) + return forge.op.Where("Where0", cond, x, y) mod = Model("where_test_model") - condition_tensor = pybuda.tensor.Tensor.create_from_torch( + condition_tensor = forge.tensor.Tensor.create_from_torch( torch.tensor([[[0.2, 1.0], [0.0, 1.0], [1.1, 1.0]]]) ) - x_tensor = pybuda.tensor.Tensor.create_from_torch( + x_tensor = forge.tensor.Tensor.create_from_torch( torch.tensor([[[0.1490, 0.3861], [1.4934, 0.4805], [-0.3992, -1.1574]]]) ) - y_tensor = pybuda.tensor.Tensor.create_from_torch( + y_tensor = forge.tensor.Tensor.create_from_torch( torch.tensor([[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]]) @@ -97,12 +97,12 @@ def forward(self, cond, x, y): @pytest.mark.skip(reason="This test is failing due assertion error - data mismatch detected") @pytest.mark.parametrize("input_shape", [(1, 3, 3)]) def test_where_input_shapes(test_device, input_shape): - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, cond, x, y): - 
return pybuda.op.Where("Where0", cond, x, y) + return forge.op.Where("Where0", cond, x, y) mod = Model("where_test_model") input_shapes = tuple([input_shape for _ in range(3)]) @@ -117,7 +117,7 @@ def forward(self, cond, x, y): ), ) -# Manually test where operator with PyTorch and PyBuda. +# Manually test where operator with PyTorch and Forge. # Results are same for both, but verify_module fails due to different pcc values. # working @@ -133,41 +133,41 @@ def forward(self, cond, x, y): @pytest.mark.skip(reason="This test is failing due to verify_module calculates wrong pcc") @pytest.mark.parametrize("cond_values", [cond_values_1, cond_values_2]) def test_where_verify_module(test_device, cond_values): - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, cond, x, y): - v = pybuda.op.Where("Where0", cond, x, y) - # PyBuda always works as expected: - print(f"\n\nPyBuda output value: {v}\n\n") + v = forge.op.Where("Where0", cond, x, y) + # Forge always works as expected: + print(f"\n\nForge output value: {v}\n\n") return v mod = Model("where_test_model") condition_torch = torch.tensor(cond_values, dtype=torch.bool) # torch works only with bool type - explicit define dtype - condition_buda = pybuda.tensor.Tensor.create_from_torch(torch.tensor(cond_values)) # buda can work also with other types + condition_buda = forge.tensor.Tensor.create_from_torch(torch.tensor(cond_values)) # buda can work also with other types print(f"condition_torch:\n{condition_torch}") # condition is a boolean tensor print(f"condition_buda:\n{condition_buda}") # condition is a float tensor - x_tensor = pybuda.tensor.Tensor.create_from_torch( + x_tensor = forge.tensor.Tensor.create_from_torch( torch.tensor([[[1000., 1000.], [1000., 1000.], [1000., 1000.]]]) ) - y_tensor = pybuda.tensor.Tensor.create_from_torch( + y_tensor = forge.tensor.Tensor.create_from_torch( torch.tensor([[[5.0, 5.0], [5.0, 5.0], [5.0, 5.0]]]) ) - result_torch = torch.where(condition_torch, pybuda.tensor.Tensor.to_pytorch(x_tensor), pybuda.tensor.Tensor.to_pytorch(y_tensor)) + result_torch = torch.where(condition_torch, forge.tensor.Tensor.to_pytorch(x_tensor), forge.tensor.Tensor.to_pytorch(y_tensor)) print(f"result_torch:\n{result_torch}") - result_buda = pybuda.op.Where("Where0", condition_buda, x_tensor, y_tensor) + result_buda = forge.op.Where("Where0", condition_buda, x_tensor, y_tensor) print(f"result_buda:\n{result_buda}") - output_are_the_same = torch.eq(result_torch, pybuda.tensor.Tensor.to_pytorch(result_buda)).all() + output_are_the_same = torch.eq(result_torch, forge.tensor.Tensor.to_pytorch(result_buda)).all() print(f"\nAre results equal: {output_are_the_same}") if not output_are_the_same: # never failing here diff --git a/pybuda/test/operators/reduce/__init__.py b/forge/test/operators/reduce/__init__.py similarity index 100% rename from pybuda/test/operators/reduce/__init__.py rename to forge/test/operators/reduce/__init__.py diff --git a/pybuda/test/operators/reduce/conftest.py b/forge/test/operators/reduce/conftest.py similarity index 100% rename from pybuda/test/operators/reduce/conftest.py rename to forge/test/operators/reduce/conftest.py diff --git a/pybuda/test/operators/reduce/models_4d/__init__.py b/forge/test/operators/reduce/models_4d/__init__.py similarity index 100% rename from pybuda/test/operators/reduce/models_4d/__init__.py rename to forge/test/operators/reduce/models_4d/__init__.py diff --git a/pybuda/test/operators/reduce/models_4d/model_0.py 
b/forge/test/operators/reduce/models_4d/model_0.py similarity index 75% rename from pybuda/test/operators/reduce/models_4d/model_0.py rename to forge/test/operators/reduce/models_4d/model_0.py index adaf92969..55df153d8 100644 --- a/pybuda/test/operators/reduce/models_4d/model_0.py +++ b/forge/test/operators/reduce/models_4d/model_0.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 0 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 0 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. """ @@ -31,19 +31,19 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 0" self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) def forward(self, x): - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) # # (W, Z, R, C) * (W, Z, R, C) --> (W, Z, R, C) red = self.operator(self.opname, mul, 2) # (W, Z, R, C) --> (W, Z, 1, C) # red2 = self.operator(self.opname + "2", mul, 3) # # (W, Z, R, C) --> (W, Z, R, 1) - # mm = pybuda.op.Matmul("mm", red2, red1) + # mm = forge.op.Matmul("mm", red2, red1) # # (W, Z, R, 1) x (W, Z, 1, C) --> (W, Z, R, C) return red diff --git a/pybuda/test/operators/reduce/models_4d/model_1.py b/forge/test/operators/reduce/models_4d/model_1.py similarity index 75% rename from pybuda/test/operators/reduce/models_4d/model_1.py rename to forge/test/operators/reduce/models_4d/model_1.py index 8df3c5764..4b1c63b65 100644 --- a/pybuda/test/operators/reduce/models_4d/model_1.py +++ b/forge/test/operators/reduce/models_4d/model_1.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 1 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. 
""" @@ -31,19 +31,19 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 1" self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) def forward(self, x): - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) # (W, Z, R, C) * (W, Z, R, C) --> (W, Z, R, C) red1 = self.operator(self.opname + "1", mul, 2) # (W, Z, R, C) --> (W, Z, 1, C) red2 = self.operator(self.opname + "2", mul, 3) # (W, Z, R, C) --> (W, Z, R, 1) - mm = pybuda.op.Matmul("mm", red2, red1) + mm = forge.op.Matmul("mm", red2, red1) # (W, Z, R, 1) x (W, Z, 1, C) --> (W, Z, R, C) return red1, red2, mm diff --git a/pybuda/test/operators/reduce/models_4d/model_2.py b/forge/test/operators/reduce/models_4d/model_2.py similarity index 76% rename from pybuda/test/operators/reduce/models_4d/model_2.py rename to forge/test/operators/reduce/models_4d/model_2.py index fa74d1546..60db0dc45 100644 --- a/pybuda/test/operators/reduce/models_4d/model_2.py +++ b/forge/test/operators/reduce/models_4d/model_2.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 2 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. 
""" @@ -31,7 +31,7 @@ def __init__(self, operator, opname, shape): self.opname = opname self.testname = "Operator " + opname + " Test 2" self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -39,11 +39,11 @@ def __init__(self, operator, opname, shape): def forward(self, x): # Layer 2 - tr = pybuda.op.Transpose("tr", self.train_param, 3, 2) + tr = forge.op.Transpose("tr", self.train_param, 3, 2) # (W, Z, R, C) --> (W, Z, C, R) # Layer 3 - mm1 = pybuda.op.Matmul("mm1", x, tr) + mm1 = forge.op.Matmul("mm1", x, tr) # (W, Z, R, C) x (W, Z, C, R) --> (W, Z, R, R) # Layer 4 @@ -52,7 +52,7 @@ def forward(self, x): red2 = self.operator(self.opname + "2", self.train_param, 3) # (W, Z, R, C) --> (W, Z, R, 1) # Layer 5 - mm2 = pybuda.op.Matmul("mm2", red1, red2) + mm2 = forge.op.Matmul("mm2", red1, red2) # (W, Z, 1, R) x (W, Z, R, 1) --> (W, Z, 1, 1) # Layer 6 diff --git a/pybuda/test/operators/reduce/models_4d/model_3.py b/forge/test/operators/reduce/models_4d/model_3.py similarity index 79% rename from pybuda/test/operators/reduce/models_4d/model_3.py rename to forge/test/operators/reduce/models_4d/model_3.py index 412038ea7..723ce163f 100644 --- a/pybuda/test/operators/reduce/models_4d/model_3.py +++ b/forge/test/operators/reduce/models_4d/model_3.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 3 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 3 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. 
""" @@ -32,8 +32,8 @@ def __init__(self, operator, opname, shape): self.testname = "Operator " + opname + " Test 3" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] @@ -47,13 +47,13 @@ def forward(self, x1, x2): # (W, Z, R, C) --> (W, Z, R, 1) red2 = self.operator(self.opname + "2", self.train_param1, 2) # (W, Z, R, C) --> (W, Z, 1, C) - tr = pybuda.op.Transpose("tr", x2, 3, 2) + tr = forge.op.Transpose("tr", x2, 3, 2) # (W, Z, R, C) --> (W, Z, C, R) red3 = self.operator(self.opname + "3", self.train_param2, 3) # (W, Z, R, C) --> (W, Z, R, 1) # Layer 3 - mm1 = pybuda.op.Matmul("mm1", red1, red2) + mm1 = forge.op.Matmul("mm1", red1, red2) # (W, Z, R, 1) x (W, Z, 1, C) --> (W, Z, R, C) red4 = self.operator(self.opname + "4", tr, 2) # (W, Z, C, R) --> (W, Z, 1, R) @@ -63,7 +63,7 @@ def forward(self, x1, x2): # Layer 4 red6 = self.operator(self.opname + "6", mm1, 3) # (W, Z, R, C) --> (W, Z, R, 1) - mm2 = pybuda.op.Matmul("mm2", red6, red4) + mm2 = forge.op.Matmul("mm2", red6, red4) # (W, Z, R, 1) x (W, Z, 1, R) --> (W, Z, R, R) # Layer 5 @@ -71,7 +71,7 @@ def forward(self, x1, x2): # (W, Z, R, R) --> (W, Z, R, 1) # Layer 6 - mm3 = pybuda.op.Matmul("mm3", red7, red5) + mm3 = forge.op.Matmul("mm3", red7, red5) # (W, Z, R, 1) x (W, Z, 1, 1) --> (W, Z, R, 1) # Layer 7 diff --git a/pybuda/test/operators/reduce/models_4d/model_4.py b/forge/test/operators/reduce/models_4d/model_4.py similarity index 70% rename from pybuda/test/operators/reduce/models_4d/model_4.py rename to forge/test/operators/reduce/models_4d/model_4.py index dfbc8338f..8d26be75c 100644 --- a/pybuda/test/operators/reduce/models_4d/model_4.py +++ b/forge/test/operators/reduce/models_4d/model_4.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 4 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 4 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. 
""" @@ -32,8 +32,8 @@ def __init__(self, operator, opname, shape): self.testname = "Operator " + opname + " Test 4" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] @@ -43,15 +43,15 @@ def __init__(self, operator, opname, shape): def forward(self, x1, x2): # Layer 2 - tr1 = pybuda.op.Transpose("tr1", self.train_param1, 3, 2) + tr1 = forge.op.Transpose("tr1", self.train_param1, 3, 2) # (W, Z, R, C) --> (W, Z, C, R) - tr2 = pybuda.op.Transpose("tr2", x2, 3, 2) + tr2 = forge.op.Transpose("tr2", x2, 3, 2) # (W, Z, R, C) --> (W, Z, C, R) # Layer 3 - mm1 = pybuda.op.Matmul("mm1", x1, tr1) + mm1 = forge.op.Matmul("mm1", x1, tr1) # (W, Z, R, C) x (W, Z, C, R) --> (W, Z, R, R) - mm2 = pybuda.op.Matmul("mm2", self.train_param2, tr2) + mm2 = forge.op.Matmul("mm2", self.train_param2, tr2) # (W, Z, R, C) x (W, Z, C, R) --> (W, Z, R, R) # Layer 4 @@ -61,7 +61,7 @@ def forward(self, x1, x2): # (W, Z, R, R) --> (W, Z, 1, R) # Layer 5 - mm3 = pybuda.op.Matmul("mm3", red1, red2) + mm3 = forge.op.Matmul("mm3", red1, red2) # (W, Z, R, 1) x (W, Z, 1, R) --> (W, Z, R, R) # Layer 6 @@ -69,16 +69,16 @@ def forward(self, x1, x2): # (W, Z, R, R) --> (W, Z, 1, R) # Layer 7 - tr3 = pybuda.op.Transpose("tr3", red3, 3, 2) + tr3 = forge.op.Transpose("tr3", red3, 3, 2) # (W, Z, 1, R) --> (W, Z, R, 1) - mm4 = pybuda.op.Matmul("mm4", mm3, tr3) + mm4 = forge.op.Matmul("mm4", mm3, tr3) # (W, Z, R, R) x (W, Z, R, 1) --> (W, Z, R, 1) - mm5 = pybuda.op.Matmul("mm5", red3, self.train_param2) + mm5 = forge.op.Matmul("mm5", red3, self.train_param2) # (W, Z, 1, R) x (W, Z, R, C) --> (W, Z, 1, C) # Layer 8 - mm6 = pybuda.op.Matmul("mm6", mm4, mm5) + mm6 = forge.op.Matmul("mm6", mm4, mm5) # (W, Z, R, 1) x (W, Z, 1, C) --> (W, Z, R, C) # Layer 9 diff --git a/pybuda/test/operators/reduce/models_4d/model_5.py b/forge/test/operators/reduce/models_4d/model_5.py similarity index 71% rename from pybuda/test/operators/reduce/models_4d/model_5.py rename to forge/test/operators/reduce/models_4d/model_5.py index 42d4139ff..616fd369b 100644 --- a/pybuda/test/operators/reduce/models_4d/model_5.py +++ b/forge/test/operators/reduce/models_4d/model_5.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 5 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 5 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. 
""" @@ -32,9 +32,9 @@ def __init__(self, operator, opname, shape): self.testname = "Operator " + opname + " Test 5" self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] @@ -44,21 +44,21 @@ def __init__(self, operator, opname, shape): def forward(self, x1, x2, x3): # Layer 2 - tr1 = pybuda.op.Transpose("tr1", self.train_param1, 3, 2) + tr1 = forge.op.Transpose("tr1", self.train_param1, 3, 2) # (W, Z, R, C) --> (W, Z, C, R) - tr2 = pybuda.op.Transpose("tr2", x2, 3, 2) + tr2 = forge.op.Transpose("tr2", x2, 3, 2) # (W, Z, R, C) --> (W, Z, C, R) - tr3 = pybuda.op.Transpose("tr3", self.train_param3, 3, 2) + tr3 = forge.op.Transpose("tr3", self.train_param3, 3, 2) # (W, Z, R, C) --> (W, Z, C, R) - tr4 = pybuda.op.Transpose("tr4", x3, 3, 2) + tr4 = forge.op.Transpose("tr4", x3, 3, 2) # (W, Z, R, C) --> (W, Z, C, R) # Layer 3 - mm1 = pybuda.op.Matmul("mm1", x1, tr1) + mm1 = forge.op.Matmul("mm1", x1, tr1) # (W, Z, R, C) x (W, Z, C, R) --> (W, Z, R, R) - mm2 = pybuda.op.Matmul("mm2", tr2, self.train_param2) + mm2 = forge.op.Matmul("mm2", tr2, self.train_param2) # (W, Z, C, R) x (W, Z, R, C) --> (W, Z, C, C) - mm3 = pybuda.op.Matmul("mm3", x3, tr3) + mm3 = forge.op.Matmul("mm3", x3, tr3) # (W, Z, R, C) x (W, Z, C, R) --> (W, Z, R, R) # Layer 4 @@ -70,11 +70,11 @@ def forward(self, x1, x2, x3): # (W, Z, R, R) --> (W, Z, 1, R) # Layer 5 - tr5 = pybuda.op.Transpose("tr5", red2, 3, 2) + tr5 = forge.op.Transpose("tr5", red2, 3, 2) # (W, Z, 1, C) --> (W, Z, C, 1) - mm4 = pybuda.op.Matmul("mm4", red1, red2) + mm4 = forge.op.Matmul("mm4", red1, red2) # (W, Z, R, 1) x (W, Z, 1, C) --> (W, Z, R, C) - mm5 = pybuda.op.Matmul("mm5", tr5, red3) + mm5 = forge.op.Matmul("mm5", tr5, red3) # (W, Z, C, 1) x (W, Z, 1, R) --> (W, Z, C, R) # Layer 6 @@ -88,9 +88,9 @@ def forward(self, x1, x2, x3): # (W, Z, C, R) --> (W, Z, 1, R) # Layer 7 - mm6 = pybuda.op.Matmul("mm6", red4, red7) + mm6 = forge.op.Matmul("mm6", red4, red7) # (W, Z, R, 1) x (W, Z, 1, C) --> (W, Z, R, C) - mm7 = pybuda.op.Matmul("mm7", red5, red6) + mm7 = forge.op.Matmul("mm7", red5, red6) # (W, Z, C, 1) x (W, Z, 1, R) --> (W, Z, C, R) # Layer 8 @@ -100,7 +100,7 @@ def forward(self, x1, x2, x3): # (W, Z, C, R) --> (W, Z, 1, R) # Layer 9 - mm8 = pybuda.op.Matmul("mm8", red8, red9) + mm8 = forge.op.Matmul("mm8", red8, red9) # (W, Z, R, 1) x (W, Z, 1, R) --> (W, Z, R, R) return mm8, red8, red9 diff --git a/pybuda/test/operators/reduce/models_nd/__init__.py b/forge/test/operators/reduce/models_nd/__init__.py similarity index 100% rename from pybuda/test/operators/reduce/models_nd/__init__.py rename to forge/test/operators/reduce/models_nd/__init__.py diff --git a/pybuda/test/operators/reduce/models_nd/model_1.py b/forge/test/operators/reduce/models_nd/model_1.py similarity index 77% rename from pybuda/test/operators/reduce/models_nd/model_1.py rename to forge/test/operators/reduce/models_nd/model_1.py index 5694b85de..a1c05468c 100644 --- a/pybuda/test/operators/reduce/models_nd/model_1.py +++ b/forge/test/operators/reduce/models_nd/model_1.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: 
Apache-2.0 # # Test 1 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 1 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. """ @@ -45,7 +45,7 @@ def __init__( self.dim = dim self.keepdim = keepdim - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -53,7 +53,7 @@ def __init__( def forward(self, x): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x, self.train_param) + mul1 = forge.op.Multiply("mul1", x, self.train_param) # Layer 3 red1 = self.operator(self.opname + "1", x, self.dim, self.keepdim) @@ -61,10 +61,10 @@ def forward(self, x): red3 = self.operator(self.opname + "3", self.train_param, self.dim, self.keepdim) # Layer 4 - mul2 = pybuda.op.Multiply("mul2", red1, red2) + mul2 = forge.op.Multiply("mul2", red1, red2) # Layer 5 - mul3 = pybuda.op.Multiply("mul3", mul2, red3) + mul3 = forge.op.Multiply("mul3", mul2, red3) return mul3 diff --git a/pybuda/test/operators/reduce/models_nd/model_2.py b/forge/test/operators/reduce/models_nd/model_2.py similarity index 71% rename from pybuda/test/operators/reduce/models_nd/model_2.py rename to forge/test/operators/reduce/models_nd/model_2.py index f436ca164..66960231e 100644 --- a/pybuda/test/operators/reduce/models_nd/model_2.py +++ b/forge/test/operators/reduce/models_nd/model_2.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import random import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 2 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. 
""" @@ -45,8 +45,8 @@ def __init__( self.dim = dim self.keepdim = keepdim - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] for i in range(1, 3): @@ -55,8 +55,8 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) # Layer 3 operands = [x1, mul1, self.train_param1, x2, mul2, self.train_param2] @@ -66,13 +66,13 @@ def forward(self, x1, x2): self.shape = reds[0].shape # Layer 4 - add1 = pybuda.op.Add("add1", reds[0], reds[1]) - add2 = pybuda.op.Add("add2", reds[2], reds[3]) - add3 = pybuda.op.Add("add3", reds[4], reds[5]) + add1 = forge.op.Add("add1", reds[0], reds[1]) + add2 = forge.op.Add("add2", reds[2], reds[3]) + add3 = forge.op.Add("add3", reds[4], reds[5]) # Layer 5 - mul3 = pybuda.op.Multiply("mul3", add1, add2) - mul4 = pybuda.op.Multiply("mul4", reds[4], add3) + mul3 = forge.op.Multiply("mul3", add1, add2) + mul4 = forge.op.Multiply("mul4", reds[4], add3) if self.keepdim or len(self.shape) > 0: self.dim = random.randint(0, len(self.shape) - 1) diff --git a/pybuda/test/operators/reduce/models_nd/model_3.py b/forge/test/operators/reduce/models_nd/model_3.py similarity index 65% rename from pybuda/test/operators/reduce/models_nd/model_3.py rename to forge/test/operators/reduce/models_nd/model_3.py index f95891191..37a2f8fd4 100644 --- a/pybuda/test/operators/reduce/models_nd/model_3.py +++ b/forge/test/operators/reduce/models_nd/model_3.py @@ -3,26 +3,26 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 3 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import random import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 3 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. 
""" @@ -47,9 +47,9 @@ def __init__( self.dim = dim self.keepdim = keepdim - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(1, 4): @@ -58,9 +58,9 @@ def __init__( def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, x3) - mul3 = pybuda.op.Multiply("mul3", self.train_param2, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, x3) + mul3 = forge.op.Multiply("mul3", self.train_param2, self.train_param3) # Layer 3 operands = [mul1, self.train_param1, mul2, mul3, x3, self.train_param3] @@ -70,9 +70,9 @@ def forward(self, x1, x2, x3): self.shape = reds[0].shape # Layer 4 - mul4 = pybuda.op.Multiply("mul4", reds[0], reds[1]) - mul5 = pybuda.op.Multiply("mul5", reds[2], reds[4]) - mul6 = pybuda.op.Multiply("mul6", reds[3], reds[5]) + mul4 = forge.op.Multiply("mul4", reds[0], reds[1]) + mul5 = forge.op.Multiply("mul5", reds[2], reds[4]) + mul6 = forge.op.Multiply("mul6", reds[3], reds[5]) if self.keepdim or len(self.shape) > 0: self.dim = random.randint(0, len(self.shape) - 1) @@ -83,15 +83,15 @@ def forward(self, x1, x2, x3): for i in range(len(operands)): preds.append(self.operator(self.opname + str(i + 1 + lenop), operands[i], self.dim, self.keepdim)) # Layer 6 - mul7 = pybuda.op.Multiply("mul7", preds[0], preds[1]) - mul8 = pybuda.op.Multiply("mul8", preds[2], preds[3]) + mul7 = forge.op.Multiply("mul7", preds[0], preds[1]) + mul8 = forge.op.Multiply("mul8", preds[2], preds[3]) else: # Layer 6 - mul7 = pybuda.op.Multiply("mul7", mul4, reds[2]) - mul8 = pybuda.op.Multiply("mul8", mul5, mul6) + mul7 = forge.op.Multiply("mul7", mul4, reds[2]) + mul8 = forge.op.Multiply("mul8", mul5, mul6) # Layer 7 - mul9 = pybuda.op.Multiply("mul9", mul7, mul8) + mul9 = forge.op.Multiply("mul9", mul7, mul8) return mul9 diff --git a/pybuda/test/operators/reduce/models_nd/model_4.py b/forge/test/operators/reduce/models_nd/model_4.py similarity index 54% rename from pybuda/test/operators/reduce/models_nd/model_4.py rename to forge/test/operators/reduce/models_nd/model_4.py index 912e0d066..f455a6136 100644 --- a/pybuda/test/operators/reduce/models_nd/model_4.py +++ b/forge/test/operators/reduce/models_nd/model_4.py @@ -3,8 +3,8 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 4 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # @@ -12,18 +12,18 @@ import random import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 4 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). 
This name test uses to generate names of operation nodes in a graph/model. """ @@ -48,9 +48,9 @@ def __init__( self.dim = dim self.keepdim = keepdim - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(1, 4): @@ -60,10 +60,10 @@ def forward(self, x1, x2, x3): pass # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, x2) - mul2 = pybuda.op.Multiply("mul2", self.train_param1, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", self.train_param2, self.train_param3) - mul4 = pybuda.op.Multiply("mul4", x2, x3) + mul1 = forge.op.Multiply("mul1", x1, x2) + mul2 = forge.op.Multiply("mul2", self.train_param1, self.train_param2) + mul3 = forge.op.Multiply("mul3", self.train_param2, self.train_param3) + mul4 = forge.op.Multiply("mul4", x2, x3) # Layer 3 operands = [x1, self.train_param1, mul1, mul2, x2, self.train_param2, mul3, mul4, x3, self.train_param3] @@ -73,25 +73,25 @@ def forward(self, x1, x2, x3): self.shape = reds[0].shape # Layer 4 - mul5 = pybuda.op.Multiply("mul5", reds[0], reds[1]) - mul6 = pybuda.op.Multiply("mul6", reds[1], reds[2]) - mul7 = pybuda.op.Multiply("mul7", reds[4], reds[5]) - mul8 = pybuda.op.Multiply("mul8", reds[7], reds[8]) + mul5 = forge.op.Multiply("mul5", reds[0], reds[1]) + mul6 = forge.op.Multiply("mul6", reds[1], reds[2]) + mul7 = forge.op.Multiply("mul7", reds[4], reds[5]) + mul8 = forge.op.Multiply("mul8", reds[7], reds[8]) # Layer 5 - hvs1 = pybuda.op.Heaviside("hvs1", mul6, reds[3]) - hvs2 = pybuda.op.Heaviside("hvs2", mul7, reds[6]) - hvs3 = pybuda.op.Heaviside("hvs3", mul8, reds[9]) + hvs1 = forge.op.Heaviside("hvs1", mul6, reds[3]) + hvs2 = forge.op.Heaviside("hvs2", mul7, reds[6]) + hvs3 = forge.op.Heaviside("hvs3", mul8, reds[9]) # Layer 6 - max1 = pybuda.op.Max("max1", mul5, hvs1) - max2 = pybuda.op.Multiply("max2", reds[4], hvs2) - max3 = pybuda.op.Multiply("max3", reds[7], hvs3) + max1 = forge.op.Max("max1", mul5, hvs1) + max2 = forge.op.Multiply("max2", reds[4], hvs2) + max3 = forge.op.Multiply("max3", reds[7], hvs3) # Layer 7 - add1 = pybuda.op.Add("add1", reds[1], max1) - add2 = pybuda.op.Add("add2", reds[3], max2) - add3 = pybuda.op.Add("add3", reds[6], max3) + add1 = forge.op.Add("add1", reds[1], max1) + add2 = forge.op.Add("add2", reds[3], max2) + add3 = forge.op.Add("add3", reds[6], max3) if self.keepdim or len(self.shape) > 0: self.dim = random.randint(0, len(self.shape) - 1) @@ -102,20 +102,20 @@ def forward(self, x1, x2, x3): for i in range(len(operands)): preds.append(self.operator(self.opname + str(i + 1 + lenop), operands[i], self.dim, self.keepdim)) # Layer 9 - mul9 = pybuda.op.Multiply("mul9", preds[0], preds[1]) - mul10 = pybuda.op.Multiply("mul10", preds[2], preds[3]) - mul11 = pybuda.op.Multiply("mul11", preds[4], preds[5]) + mul9 = forge.op.Multiply("mul9", preds[0], preds[1]) + mul10 = forge.op.Multiply("mul10", preds[2], preds[3]) + mul11 = forge.op.Multiply("mul11", preds[4], preds[5]) else: # Layer 9 - mul9 = pybuda.op.Multiply("mul9", add1, reds[3]) - mul10 = pybuda.op.Multiply("mul10", add2, reds[6]) - mul11 = pybuda.op.Multiply("mul11", add3, reds[9]) + mul9 = 
forge.op.Multiply("mul9", add1, reds[3]) + mul10 = forge.op.Multiply("mul10", add2, reds[6]) + mul11 = forge.op.Multiply("mul11", add3, reds[9]) # Layer 10 - mul12 = pybuda.op.Multiply("mul12", mul9, mul10) + mul12 = forge.op.Multiply("mul12", mul9, mul10) # Layer 11 - add4 = pybuda.op.Add("add4", mul12, mul11) + add4 = forge.op.Add("add4", mul12, mul11) return add4 diff --git a/pybuda/test/operators/reduce/models_nd/model_5.py b/forge/test/operators/reduce/models_nd/model_5.py similarity index 60% rename from pybuda/test/operators/reduce/models_nd/model_5.py rename to forge/test/operators/reduce/models_nd/model_5.py index 719a0258b..b342deedf 100644 --- a/pybuda/test/operators/reduce/models_nd/model_5.py +++ b/forge/test/operators/reduce/models_nd/model_5.py @@ -3,8 +3,8 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 5 -# Reduce operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reduce operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # @@ -12,18 +12,18 @@ import random import torch -import pybuda +import forge -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReduceTest(PyBudaModule): +class BudaReduceTest(ForgeModule): """ Buda Test 5 Args: - operator (function): PyBuda reduce operator. + operator (function): Forge reduce operator. opname (str): Operation name (e.g. reduce_sum, reduce_avg, ...). This name test uses to generate names of operation nodes in a graph/model. """ @@ -48,9 +48,9 @@ def __init__( self.dim = dim self.keepdim = keepdim - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] for i in range(1, 4): @@ -59,9 +59,9 @@ def __init__( def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) # Layer 3 operands = [mul1, self.train_param1, mul2, self.train_param2, mul3, self.train_param3] @@ -71,11 +71,11 @@ def forward(self, x1, x2, x3): self.shape = reds[0].shape # Layer 4 - add1 = pybuda.op.Add("add1", reds[0], reds[1]) - mul4 = pybuda.op.Multiply("mul4", reds[1], reds[2]) - add2 = pybuda.op.Add("add2", reds[2], reds[3]) - mul5 = pybuda.op.Multiply("mul5", reds[3], reds[4]) - add3 = pybuda.op.Add("add3", reds[4], reds[5]) + add1 = forge.op.Add("add1", reds[0], reds[1]) + mul4 = forge.op.Multiply("mul4", reds[1], reds[2]) + add2 = forge.op.Add("add2", reds[2], reds[3]) + mul5 = forge.op.Multiply("mul5", reds[3], reds[4]) + add3 = forge.op.Add("add3", reds[4], reds[5]) if self.keepdim or len(self.shape) > 0: self.dim = random.randint(0, len(self.shape) - 1) @@ -87,16 +87,16 @@ def forward(self, x1, x2, x3): preds.append(self.operator(self.opname + str(i + 1 + lenop), self.dim, self.keepdim)) 
self.shape = preds[0].shape # Layer 6 - add4 = pybuda.op.Add("add4", preds[0], preds[1]) - sub1 = pybuda.op.Subtract("sub1", preds[2], preds[3]) - max1 = pybuda.op.Max("max1", preds[4], preds[5]) - sub2 = pybuda.op.Subtract("sub2", preds[6], preds[7]) + add4 = forge.op.Add("add4", preds[0], preds[1]) + sub1 = forge.op.Subtract("sub1", preds[2], preds[3]) + max1 = forge.op.Max("max1", preds[4], preds[5]) + sub2 = forge.op.Subtract("sub2", preds[6], preds[7]) else: # Layer 6 - add4 = pybuda.op.Add("add4", add1, reds[1]) - sub1 = pybuda.op.Subtract("sub1", mul4, add2) - max1 = pybuda.op.Max("max1", reds[3], mul5) - sub2 = pybuda.op.Subtract("sub2", add3, reds[5]) + add4 = forge.op.Add("add4", add1, reds[1]) + sub1 = forge.op.Subtract("sub1", mul4, add2) + max1 = forge.op.Max("max1", reds[3], mul5) + sub2 = forge.op.Subtract("sub2", add3, reds[5]) if self.keepdim or len(self.shape) > 0: self.dim = random.randint(0, len(self.shape) - 1) @@ -107,14 +107,14 @@ def forward(self, x1, x2, x3): for i in range(len(operands)): preds.append(self.operator(self.opname + str(i + 1 + lenop), self.dim, self.keepdim)) # Layer 8 - mul6 = pybuda.op.Multiply("mul6", preds[0], preds[1]) - mul7 = pybuda.op.Multiply("mul7", preds[2], preds[3]) - mul8 = pybuda.op.Multiply("mul8", preds[4], preds[5]) + mul6 = forge.op.Multiply("mul6", preds[0], preds[1]) + mul7 = forge.op.Multiply("mul7", preds[2], preds[3]) + mul8 = forge.op.Multiply("mul8", preds[4], preds[5]) else: # Layer 8 - mul6 = pybuda.op.Multiply("mul6", reds[0], add4) - mul7 = pybuda.op.Multiply("mul7", sub1, max1) - mul8 = pybuda.op.Multiply("mul8", reds[4], sub2) + mul6 = forge.op.Multiply("mul6", reds[0], add4) + mul7 = forge.op.Multiply("mul7", sub1, max1) + mul8 = forge.op.Multiply("mul8", reds[4], sub2) return mul6, mul7, mul8 diff --git a/pybuda/test/operators/reduce/test_command.sh b/forge/test/operators/reduce/test_command.sh similarity index 100% rename from pybuda/test/operators/reduce/test_command.sh rename to forge/test/operators/reduce/test_command.sh diff --git a/pybuda/test/operators/reduce/test_reduce_4d.py b/forge/test/operators/reduce/test_reduce_4d.py similarity index 86% rename from pybuda/test/operators/reduce/test_reduce_4d.py rename to forge/test/operators/reduce/test_reduce_4d.py index 9769ba4a9..4cdcae54a 100644 --- a/pybuda/test/operators/reduce/test_reduce_4d.py +++ b/forge/test/operators/reduce/test_reduce_4d.py @@ -14,15 +14,15 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig -from pybuda.verify.config import TestKind +from forge.verify.config import TestKind from . import models_4d -MODELS_PATH = "./pybuda/test/operators/reduce/models_4d/" +MODELS_PATH = "./forge/test/operators/reduce/models_4d/" SHAPE_NO = 1 SHAPE_SIZE = 4 @@ -70,11 +70,11 @@ def test_reduce( pytest.skip("These models return intermediate nodes. That's not supported today." "Autograd is trying to do backward pass twice for the same subpath in the graph and that's not correct. 
") - architecture = f'models_4d.{model}.BudaReduceTest(operator=pybuda.op.{operation}, opname="{operation}", shape={shape})' + architecture = f'models_4d.{model}.BudaReduceTest(operator=forge.op.{operation}, opname="{operation}", shape={shape})' model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/reduce/test_reduce_nd.py b/forge/test/operators/reduce/test_reduce_nd.py similarity index 87% rename from pybuda/test/operators/reduce/test_reduce_nd.py rename to forge/test/operators/reduce/test_reduce_nd.py index bdbc7934b..e043b8b19 100644 --- a/pybuda/test/operators/reduce/test_reduce_nd.py +++ b/forge/test/operators/reduce/test_reduce_nd.py @@ -11,13 +11,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . import models_nd -MODELS_PATH = "./pybuda/test/operators/reduce/models_nd/" +MODELS_PATH = "./forge/test/operators/reduce/models_nd/" SHAPE_NO = 1 SHAPE_SIZE_MIN = 2 @@ -74,11 +74,11 @@ def test_reduce( if not training and recompute: pytest.skip("Inference and recompute is the same as just inference.") - architecture = f'models_nd.{model}.BudaReduceTest(operator=pybuda.op.{operation}, opname="{operation}", shape={shape}, dim={dim}, keepdim={keepdim})' + architecture = f'models_nd.{model}.BudaReduceTest(operator=forge.op.{operation}, opname="{operation}", shape={shape}, dim={dim}, keepdim={keepdim})' model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/reduce/test_reduce_nd_single.py b/forge/test/operators/reduce/test_reduce_nd_single.py similarity index 86% rename from pybuda/test/operators/reduce/test_reduce_nd_single.py rename to forge/test/operators/reduce/test_reduce_nd_single.py index b35190957..c4a9dcded 100644 --- a/pybuda/test/operators/reduce/test_reduce_nd_single.py +++ b/forge/test/operators/reduce/test_reduce_nd_single.py @@ -11,13 +11,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . 
import models_nd -MODELS_PATH = "./pybuda/test/operators/reduce/models_nd/" +MODELS_PATH = "./forge/test/operators/reduce/models_nd/" @pytest.mark.xfail def test_reduce( @@ -69,11 +69,11 @@ def test_reduce( print(f"Keepdim --> {keepdim}") print("\n") - architecture = f'models_nd.{model}.BudaReduceTest(operator=pybuda.op.{operation}, opname="{operation}", shape={shape}, dim={dim}, keepdim={keepdim})' + architecture = f'models_nd.{model}.BudaReduceTest(operator=forge.op.{operation}, opname="{operation}", shape={shape}, dim={dim}, keepdim={keepdim})' model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/tm/__init__.py b/forge/test/operators/tm/__init__.py similarity index 100% rename from pybuda/test/operators/tm/__init__.py rename to forge/test/operators/tm/__init__.py diff --git a/pybuda/test/operators/tm/fuse/__init__.py b/forge/test/operators/tm/fuse/__init__.py similarity index 100% rename from pybuda/test/operators/tm/fuse/__init__.py rename to forge/test/operators/tm/fuse/__init__.py diff --git a/pybuda/test/operators/tm/fuse/test_fuse_tm_sequence.py b/forge/test/operators/tm/fuse/test_fuse_tm_sequence.py similarity index 56% rename from pybuda/test/operators/tm/fuse/test_fuse_tm_sequence.py rename to forge/test/operators/tm/fuse/test_fuse_tm_sequence.py index 77c31cb48..530de31e4 100644 --- a/pybuda/test/operators/tm/fuse/test_fuse_tm_sequence.py +++ b/forge/test/operators/tm/fuse/test_fuse_tm_sequence.py @@ -2,36 +2,36 @@ # SPDX-License-Identifier: Apache-2.0 -import pybuda -import pybuda.op -from pybuda import PyBudaModule +import forge +import forge.op +from forge import ForgeModule import torch import os -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge.verify.config import TestKind -class PtFuseTMMultiUser(PyBudaModule): +class PtFuseTMMultiUser(ForgeModule): def __init__(self, name): super().__init__(name) - self.add_parameter("segformer.encoder.layer_norm.0.weight", pybuda.Parameter(*(32,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("segformer.encoder.layer_norm.0.bias", pybuda.Parameter(*(32,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("segformer.encoder.patch_embeddings.1.proj.weight", pybuda.Parameter(*(64, 32, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("segformer.encoder.patch_embeddings.1.proj.bias", pybuda.Parameter(*(64,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) + self.add_parameter("segformer.encoder.layer_norm.0.weight", forge.Parameter(*(32,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("segformer.encoder.layer_norm.0.bias", forge.Parameter(*(32,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("segformer.encoder.patch_embeddings.1.proj.weight", forge.Parameter(*(64, 32, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("segformer.encoder.patch_embeddings.1.proj.bias", forge.Parameter(*(64,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) def forward(self, input): - layernorm_340 = pybuda.op.Layernorm("", input, 
self.get_parameter("segformer.encoder.layer_norm.0.weight"), self.get_parameter("segformer.encoder.layer_norm.0.bias"), dim=-1, epsilon=1e-05) - reshape_341 = pybuda.op.Reshape("", layernorm_340, shape=(1, 128, 128, 32)) - transpose_342 = pybuda.op.Transpose("", reshape_341, dim0=-3, dim1=-1, z_dim_slice=32, out_dtype=torch.float32) - transpose_343 = pybuda.op.Transpose("", transpose_342, dim0=-2, dim1=-1, out_dtype=torch.float32) - conv2d_344 = pybuda.op.Conv2d("", transpose_343, self.get_parameter("segformer.encoder.patch_embeddings.1.proj.weight"), self.get_parameter("segformer.encoder.patch_embeddings.1.proj.bias"), stride=[2, 2], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - reshape_783 = pybuda.op.Reshape("", transpose_343, shape=(1, 32, 16384)) - transpose_784 = pybuda.op.Transpose("", reshape_783, dim0=-2, dim1=-1, out_dtype=torch.float32) - reshape_785 = pybuda.op.Reshape("", transpose_784, shape=(16384, 32)) + layernorm_340 = forge.op.Layernorm("", input, self.get_parameter("segformer.encoder.layer_norm.0.weight"), self.get_parameter("segformer.encoder.layer_norm.0.bias"), dim=-1, epsilon=1e-05) + reshape_341 = forge.op.Reshape("", layernorm_340, shape=(1, 128, 128, 32)) + transpose_342 = forge.op.Transpose("", reshape_341, dim0=-3, dim1=-1, z_dim_slice=32, out_dtype=torch.float32) + transpose_343 = forge.op.Transpose("", transpose_342, dim0=-2, dim1=-1, out_dtype=torch.float32) + conv2d_344 = forge.op.Conv2d("", transpose_343, self.get_parameter("segformer.encoder.patch_embeddings.1.proj.weight"), self.get_parameter("segformer.encoder.patch_embeddings.1.proj.bias"), stride=[2, 2], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + reshape_783 = forge.op.Reshape("", transpose_343, shape=(1, 32, 16384)) + transpose_784 = forge.op.Transpose("", reshape_783, dim0=-2, dim1=-1, out_dtype=torch.float32) + reshape_785 = forge.op.Reshape("", transpose_784, shape=(16384, 32)) return conv2d_344, reshape_785 def test_fuse_tm_sequence_multi_user(test_device): @@ -92,14 +92,14 @@ def test_fuse_tm_sequence_multi_user(test_device): """ - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"]="1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"]="1" tt_model = PtFuseTMMultiUser("fuse_tm_sequence_multi_user") - pt_tensor = pybuda.Tensor.create_from_torch(torch.rand((1, 16384, 32))) + pt_tensor = forge.Tensor.create_from_torch(torch.rand((1, 16384, 32))) verify_module( tt_model, diff --git a/pybuda/test/operators/tm/hstack_hslice/__init__.py b/forge/test/operators/tm/hstack_hslice/__init__.py similarity index 100% rename from pybuda/test/operators/tm/hstack_hslice/__init__.py rename to forge/test/operators/tm/hstack_hslice/__init__.py diff --git a/pybuda/test/operators/tm/hstack_hslice/conftest.py b/forge/test/operators/tm/hstack_hslice/conftest.py similarity index 100% rename from pybuda/test/operators/tm/hstack_hslice/conftest.py rename to forge/test/operators/tm/hstack_hslice/conftest.py diff --git a/pybuda/test/operators/tm/hstack_hslice/models/__init__.py b/forge/test/operators/tm/hstack_hslice/models/__init__.py similarity index 100% rename from pybuda/test/operators/tm/hstack_hslice/models/__init__.py rename to forge/test/operators/tm/hstack_hslice/models/__init__.py diff --git 
a/pybuda/test/operators/tm/hstack_hslice/models/model_1.py b/forge/test/operators/tm/hstack_hslice/models/model_1.py similarity index 58% rename from pybuda/test/operators/tm/hstack_hslice/models/model_1.py rename to forge/test/operators/tm/hstack_hslice/models/model_1.py index affdfc5b8..41fef6276 100644 --- a/pybuda/test/operators/tm/hstack_hslice/models/model_1.py +++ b/forge/test/operators/tm/hstack_hslice/models/model_1.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# HStack, HSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# HStack, HSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaHStackHSliceTest(PyBudaModule): +class BudaHStackHSliceTest(ForgeModule): """ Buda Test 1 @@ -38,7 +38,7 @@ def __init__( self.shape = shape self.slice = slice - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -46,22 +46,22 @@ def __init__( def forward(self, x): # Layer 2 - hst1 = pybuda.op.HStack("hst1", x, self.slice) - hst2 = pybuda.op.HStack("hst2", self.train_param, self.slice) - mul1 = pybuda.op.Multiply("mul1", x, self.train_param) + hst1 = forge.op.HStack("hst1", x, self.slice) + hst2 = forge.op.HStack("hst2", self.train_param, self.slice) + mul1 = forge.op.Multiply("mul1", x, self.train_param) # Layer 3 - mul2 = pybuda.op.Multiply("mul2", hst1, hst2) - hst3 = pybuda.op.HStack("hst3", mul1, self.slice) + mul2 = forge.op.Multiply("mul2", hst1, hst2) + hst3 = forge.op.HStack("hst3", mul1, self.slice) # Layer 4 - mul3 = pybuda.op.Multiply("mul3", mul2, hst3) + mul3 = forge.op.Multiply("mul3", mul2, hst3) # Layer 5 - hsl1 = pybuda.op.HSlice("hsl1", mul3, self.slice) + hsl1 = forge.op.HSlice("hsl1", mul3, self.slice) # Layer 6 - mul4 = pybuda.op.Multiply("mul4", hsl1, self.train_param) + mul4 = forge.op.Multiply("mul4", hsl1, self.train_param) return mul4 diff --git a/pybuda/test/operators/tm/hstack_hslice/models/model_2.py b/forge/test/operators/tm/hstack_hslice/models/model_2.py similarity index 56% rename from pybuda/test/operators/tm/hstack_hslice/models/model_2.py rename to forge/test/operators/tm/hstack_hslice/models/model_2.py index c11514e44..1008e624e 100644 --- a/pybuda/test/operators/tm/hstack_hslice/models/model_2.py +++ b/forge/test/operators/tm/hstack_hslice/models/model_2.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# HStack, HSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# HStack, HSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaHStackHSliceTest(PyBudaModule): +class BudaHStackHSliceTest(ForgeModule): """ Buda Test 2 @@ 
-38,8 +38,8 @@ def __init__( self.shape = shape self.slice = slice - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] for i in range(1, 3): @@ -48,25 +48,25 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) # Layer 3 - hsl1 = pybuda.op.HSlice("hsl1", mul1, self.slice) - hsl2 = pybuda.op.HSlice("hsl2", mul2, self.slice) + hsl1 = forge.op.HSlice("hsl1", mul1, self.slice) + hsl2 = forge.op.HSlice("hsl2", mul2, self.slice) # Layer 4 - mul3 = pybuda.op.Multiply("mul3", hsl1, hsl2) - mul4 = pybuda.op.Multiply("mul4", self.train_param1, self.train_param2) + mul3 = forge.op.Multiply("mul3", hsl1, hsl2) + mul4 = forge.op.Multiply("mul4", self.train_param1, self.train_param2) # Layer 5 - hst1 = pybuda.op.HStack("hst1", mul3, self.slice) + hst1 = forge.op.HStack("hst1", mul3, self.slice) # Layer 6 - add1 = pybuda.op.Add("add1", hst1, mul4) + add1 = forge.op.Add("add1", hst1, mul4) # Layer 7 - hst2 = pybuda.op.HStack("hst2", add1, self.slice) + hst2 = forge.op.HStack("hst2", add1, self.slice) return hst2 diff --git a/pybuda/test/operators/tm/hstack_hslice/models/model_3.py b/forge/test/operators/tm/hstack_hslice/models/model_3.py similarity index 50% rename from pybuda/test/operators/tm/hstack_hslice/models/model_3.py rename to forge/test/operators/tm/hstack_hslice/models/model_3.py index 478856734..6e7d67b47 100644 --- a/pybuda/test/operators/tm/hstack_hslice/models/model_3.py +++ b/forge/test/operators/tm/hstack_hslice/models/model_3.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 3 -# HStack, HSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# HStack, HSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaHStackHSliceTest(PyBudaModule): +class BudaHStackHSliceTest(ForgeModule): """ Buda Test 3 @@ -43,8 +43,8 @@ def __init__( self.shape[1] *= self.slice self.shape[-1] *= self.slice - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] for i in range(1, 3): @@ -53,63 +53,63 @@ def __init__( def forward(self, x1, x2): # Layer 2 - hst1 = pybuda.op.HStack("hst1", x1, self.slice) + hst1 = forge.op.HStack("hst1", x1, self.slice) # +1 - hst2 = pybuda.op.HStack("hst2", self.train_param1, self.slice) + hst2 = forge.op.HStack("hst2", self.train_param1, self.slice) # +1 - hst3 = pybuda.op.HStack("hst3", x2, self.slice) + hst3 = forge.op.HStack("hst3", 
x2, self.slice) # +1 - hst4 = pybuda.op.HStack("hst4", self.train_param2, self.slice) + hst4 = forge.op.HStack("hst4", self.train_param2, self.slice) # +1 # Layer 3 - mul1 = pybuda.op.Multiply("mul1", hst1, hst2) + mul1 = forge.op.Multiply("mul1", hst1, hst2) # +1 - mul2 = pybuda.op.Multiply("mul2", hst3, hst4) + mul2 = forge.op.Multiply("mul2", hst3, hst4) # +1 # Layer 4 - hsl1 = pybuda.op.HSlice("hsl1", mul1, self.slice) + hsl1 = forge.op.HSlice("hsl1", mul1, self.slice) # 0 - mul3 = pybuda.op.Multiply("mul3", hst2, mul2) + mul3 = forge.op.Multiply("mul3", hst2, mul2) # +1 # Layer 5 - mul4 = pybuda.op.Multiply("mul4", hsl1, x2) + mul4 = forge.op.Multiply("mul4", hsl1, x2) # 0 # Layer 6 - hsl2 = pybuda.op.HSlice("hsl2", mul4, self.slice) + hsl2 = forge.op.HSlice("hsl2", mul4, self.slice) # -1 - hsl3 = pybuda.op.HSlice("hsl3", mul3, self.slice) + hsl3 = forge.op.HSlice("hsl3", mul3, self.slice) # 0 - hst5 = pybuda.op.HStack("hst5", self.train_param1, self.slice) + hst5 = forge.op.HStack("hst5", self.train_param1, self.slice) # +1 - hst6 = pybuda.op.HStack("hst6", self.train_param2, self.slice) + hst6 = forge.op.HStack("hst6", self.train_param2, self.slice) # +1 # Layer 7 - # hst7 = pybuda.op.HStack("hst7", hst6, self.slice) + # hst7 = forge.op.HStack("hst7", hst6, self.slice) # Layer 8 - add1 = pybuda.op.Add("add1", hsl2,pybuda.op.HSlice("hsl5", hsl3, self.slice)) + add1 = forge.op.Add("add1", hsl2,forge.op.HSlice("hsl5", hsl3, self.slice)) # -1 - mul5 = pybuda.op.Multiply("mul5", hst5, hst6) + mul5 = forge.op.Multiply("mul5", hst5, hst6) # +1 # Layer 9 - hst8 = pybuda.op.HStack("hst8", add1, self.slice) + hst8 = forge.op.HStack("hst8", add1, self.slice) # 0 - hst9 = pybuda.op.HStack("hst9", hst8, self.slice) + hst9 = forge.op.HStack("hst9", hst8, self.slice) # +1 - # hst10 = pybuda.op.HStack("hsl10", mul5, self.slice) + # hst10 = forge.op.HStack("hsl10", mul5, self.slice) # Layer 10 - sub1 = pybuda.op.Subtract("sub1", hst9, mul5) + sub1 = forge.op.Subtract("sub1", hst9, mul5) # +1 # Layer 11 - hst10 = pybuda.op.HStack("hst10", sub1, self.slice) + hst10 = forge.op.HStack("hst10", sub1, self.slice) return hst10 diff --git a/pybuda/test/operators/tm/hstack_hslice/models/model_4.py b/forge/test/operators/tm/hstack_hslice/models/model_4.py similarity index 51% rename from pybuda/test/operators/tm/hstack_hslice/models/model_4.py rename to forge/test/operators/tm/hstack_hslice/models/model_4.py index f4f8ac181..eecbdc626 100644 --- a/pybuda/test/operators/tm/hstack_hslice/models/model_4.py +++ b/forge/test/operators/tm/hstack_hslice/models/model_4.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 4 -# HStack, HSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# HStack, HSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaHStackHSliceTest(PyBudaModule): +class BudaHStackHSliceTest(ForgeModule): """ Buda Test 4 @@ -43,8 +43,8 @@ def __init__( self.shape[1] *= self.slice self.shape[-1] *= self.slice - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, 
requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] for i in range(1, 3): @@ -53,36 +53,36 @@ def __init__( def forward(self, x1, x2): # Layer 2 - hsl1 = pybuda.op.HSlice("hsl1", x1, self.slice) - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + hsl1 = forge.op.HSlice("hsl1", x1, self.slice) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) # Layer 3 - hsl2 = pybuda.op.HSlice("hsl2", mul1, self.slice) - hsl3 = pybuda.op.HSlice("hsl3", mul2, self.slice) - hsl4 = pybuda.op.HSlice("hsl4", self.train_param2, self.slice) + hsl2 = forge.op.HSlice("hsl2", mul1, self.slice) + hsl3 = forge.op.HSlice("hsl3", mul2, self.slice) + hsl4 = forge.op.HSlice("hsl4", self.train_param2, self.slice) # Layer 4 - add1 = pybuda.op.Add("add1", hsl1, hsl2) - sub1 = pybuda.op.Subtract("sub1", hsl3, hsl4) + add1 = forge.op.Add("add1", hsl1, hsl2) + sub1 = forge.op.Subtract("sub1", hsl3, hsl4) # Layer 5 - hsl5 = pybuda.op.HSlice("hsl5", add1, self.slice) - hsl6 = pybuda.op.HSlice("hsl6", sub1, self.slice) + hsl5 = forge.op.HSlice("hsl5", add1, self.slice) + hsl6 = forge.op.HSlice("hsl6", sub1, self.slice) # Layer 6 - sub2 = pybuda.op.Subtract("sub2", self.train_param1, self.train_param2) - add2 = pybuda.op.Add("add2", hsl5, hsl6) + sub2 = forge.op.Subtract("sub2", self.train_param1, self.train_param2) + add2 = forge.op.Add("add2", hsl5, hsl6) # Layer 7 - hsl7 = pybuda.op.HSlice("hsl7", sub2, self.slice) - hst1 = pybuda.op.HStack("hst1", add2, self.slice) + hsl7 = forge.op.HSlice("hsl7", sub2, self.slice) + hst1 = forge.op.HStack("hst1", add2, self.slice) # Layer 8 - add3 = pybuda.op.Add("add3", hsl7, hst1) + add3 = forge.op.Add("add3", hsl7, hst1) # Layer 9 - hsl8 = pybuda.op.HSlice("hsl8", add3, self.slice) + hsl8 = forge.op.HSlice("hsl8", add3, self.slice) return hsl8 diff --git a/pybuda/test/operators/tm/hstack_hslice/models/model_5.py b/forge/test/operators/tm/hstack_hslice/models/model_5.py similarity index 68% rename from pybuda/test/operators/tm/hstack_hslice/models/model_5.py rename to forge/test/operators/tm/hstack_hslice/models/model_5.py index b69020330..f56490f09 100644 --- a/pybuda/test/operators/tm/hstack_hslice/models/model_5.py +++ b/forge/test/operators/tm/hstack_hslice/models/model_5.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 5 -# HStack, HSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# HStack, HSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaHStackHSliceTest(PyBudaModule): +class BudaHStackHSliceTest(ForgeModule): """ Buda Test 5 @@ -44,8 +44,8 @@ def __init__( # self.shape[1] *= self.slice # self.shape[-1] *= self.slice - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = 
[Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] for i in range(1, 3): @@ -54,49 +54,49 @@ def __init__( def forward(self, x1, x2): # Layer 2 - hst1 = pybuda.op.HStack("hst1", x1, self.slice) + hst1 = forge.op.HStack("hst1", x1, self.slice) # (W, Z, R, C) --> (W, Z // SLICE, R, C * SLICE) - hst2 = pybuda.op.HStack("hst2", self.train_param1, self.slice) + hst2 = forge.op.HStack("hst2", self.train_param1, self.slice) # (W, Z, R, C) --> (W, Z // SLICE, R, C * SLICE) - hst3 = pybuda.op.HStack("hst3", x2, self.slice) + hst3 = forge.op.HStack("hst3", x2, self.slice) # (W, Z, R, C) --> (W, Z // SLICE, R, C * SLICE) - hst4 = pybuda.op.HStack("hst4", self.train_param2, self.slice) + hst4 = forge.op.HStack("hst4", self.train_param2, self.slice) # (W, Z, R, C) --> (W, Z // SLICE, C * SLICE) # Layer 3 - hst5 = pybuda.op.HStack("hst5", hst1, self.slice) + hst5 = forge.op.HStack("hst5", hst1, self.slice) # (W, Z // SLICE, R, C * SLICE) --> (W, Z // SLICE ** 2, R, C * SLICE ** 2) - hst6 = pybuda.op.HStack("hst6", hst2, self.slice) + hst6 = forge.op.HStack("hst6", hst2, self.slice) # (W, Z // SLICE, R, C * SLICE) --> (W, Z // SLICE ** 2, R, C * SLICE ** 2) - hst7 = pybuda.op.HStack("hst7", self.train_param2, self.slice) + hst7 = forge.op.HStack("hst7", self.train_param2, self.slice) # (W, Z, R, C) --> (W, Z // SLICE, R, C * SLICE) - add1 = pybuda.op.Add("add1", hst3, hst4) + add1 = forge.op.Add("add1", hst3, hst4) # (W, Z // SLICE, R, C * SLICE) + (W, Z // SLICE, R, C * SLICE) --> (W, Z // SLICE, R, C * SLICE) # Layer 4 - add2 = pybuda.op.Add("add2", hst5, hst6) + add2 = forge.op.Add("add2", hst5, hst6) # (W, Z // SLICE ** 2, R, C * SLICE ** 2) + (W, Z // SLICE ** 2, R, C * SLICE ** 2) --> (W, Z // SLICE ** 2, R, C * SLICE ** 2) - hst8 = pybuda.op.HStack("hst8", hst7, self.slice) + hst8 = forge.op.HStack("hst8", hst7, self.slice) # (W, Z // SLICE, R, C * SLICE) --> (W, Z // SLICE ** 2, R, C * SLICE ** 2) - hst9 = pybuda.op.HStack("hst9", hst8, self.slice) + hst9 = forge.op.HStack("hst9", hst8, self.slice) # (W, Z // SLICE ** 2, R, C * SLICE ** 2) --> (W, Z // SLICE ** 3, R, C * SLICE ** 3) - hst10 = pybuda.op.HStack("hst10", add1, self.slice) + hst10 = forge.op.HStack("hst10", add1, self.slice) # (W, Z // SLICE, R, C * SLICE) --> (W, Z // SLICE ** 2, R, C * SLICE ** 2) # Layer 5 - hst11 = pybuda.op.HStack("hst11", add2, self.slice) + hst11 = forge.op.HStack("hst11", add2, self.slice) # (W, Z // SLICE ** 2, R, C * SLICE ** 2) --> (W, Z // SLICE ** 3, R, C * SLICE ** 3) - add3 = pybuda.op.Add("add3", hst11, hst9) + add3 = forge.op.Add("add3", hst11, hst9) # (W, Z // SLICE ** 3, R, C * SLICE ** 3) + (W, Z // SLICE ** 3, R, C * SLICE ** 3) --> (W, Z // SLICE ** 3, R, C * SLICE ** 3) - add4 = pybuda.op.Add("add4", hst10, hst10) + add4 = forge.op.Add("add4", hst10, hst10) # (W, Z // SLICE ** 2, R, C * SLICE ** 2) + (W, Z // SLICE ** 2, R, C * SLICE ** 2) --> (W, Z // SLICE ** 2, R, C * SLICE ** 2) # Layer 6 - hsl1 = pybuda.op.HSlice("hsl1", add3, self.slice) + hsl1 = forge.op.HSlice("hsl1", add3, self.slice) # (W, Z // SLICE ** 3, R, C * SLICE ** 3) --> (W, Z // SLICE ** 2, R, C * SLICE ** 2) # Layer 7 - mul1 = pybuda.op.Multiply("mul1", hsl1, add4) + mul1 = forge.op.Multiply("mul1", hsl1, add4) # (W, Z // SLICE ** 2, R, C * SLICE ** 2) * (W, Z // SLICE ** 2, R, C * SLICE ** 2) --> (W, Z // SLICE ** 2, R, C * SLICE ** 2) return mul1, hst11, hst9 diff --git a/pybuda/test/operators/tm/hstack_hslice/test_command.sh b/forge/test/operators/tm/hstack_hslice/test_command.sh similarity index 100% 
rename from pybuda/test/operators/tm/hstack_hslice/test_command.sh rename to forge/test/operators/tm/hstack_hslice/test_command.sh diff --git a/pybuda/test/operators/tm/hstack_hslice/test_hstack_hslice.py b/forge/test/operators/tm/hstack_hslice/test_hstack_hslice.py similarity index 91% rename from pybuda/test/operators/tm/hstack_hslice/test_hstack_hslice.py rename to forge/test/operators/tm/hstack_hslice/test_hstack_hslice.py index 64dbef931..d8e6ec3a5 100644 --- a/pybuda/test/operators/tm/hstack_hslice/test_hstack_hslice.py +++ b/forge/test/operators/tm/hstack_hslice/test_hstack_hslice.py @@ -11,15 +11,15 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . import models TILE_DIM = 32 -MODELS_PATH = "./pybuda/test/operators/tm/hstack_hslice/models" +MODELS_PATH = "./forge/test/operators/tm/hstack_hslice/models" SHAPE_NO = 1 SHAPE_DIM_MIN = 1 @@ -73,7 +73,7 @@ def test_hstack_hslice( model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/tm/hstack_hslice/test_hstack_hslice_single.py b/forge/test/operators/tm/hstack_hslice/test_hstack_hslice_single.py similarity index 90% rename from pybuda/test/operators/tm/hstack_hslice/test_hstack_hslice_single.py rename to forge/test/operators/tm/hstack_hslice/test_hstack_hslice_single.py index ef1675861..442dcce13 100644 --- a/pybuda/test/operators/tm/hstack_hslice/test_hstack_hslice_single.py +++ b/forge/test/operators/tm/hstack_hslice/test_hstack_hslice_single.py @@ -11,13 +11,13 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . 
import models -MODELS_PATH = "./pybuda/test/operators/tm/hstack_hslice/models" +MODELS_PATH = "./forge/test/operators/tm/hstack_hslice/models" def test_hstack_hslice( tmh_train, @@ -65,7 +65,7 @@ def test_hstack_hslice( model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/tm/pad/__init__.py b/forge/test/operators/tm/pad/__init__.py similarity index 100% rename from pybuda/test/operators/tm/pad/__init__.py rename to forge/test/operators/tm/pad/__init__.py diff --git a/pybuda/test/operators/tm/pad/models/__init__.py b/forge/test/operators/tm/pad/models/__init__.py similarity index 100% rename from pybuda/test/operators/tm/pad/models/__init__.py rename to forge/test/operators/tm/pad/models/__init__.py diff --git a/pybuda/test/operators/tm/pad/models/model_1.py b/forge/test/operators/tm/pad/models/model_1.py similarity index 63% rename from pybuda/test/operators/tm/pad/models/model_1.py rename to forge/test/operators/tm/pad/models/model_1.py index 273ea319c..fd827e27f 100644 --- a/pybuda/test/operators/tm/pad/models/model_1.py +++ b/forge/test/operators/tm/pad/models/model_1.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# Pad operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Pad operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaPadTest(PyBudaModule): +class BudaPadTest(ForgeModule): """ Buda Test 1 @@ -35,7 +35,7 @@ def __init__( self.shape = shape self.pad = pad - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -43,9 +43,9 @@ def __init__( def forward(self, x): # Layer 2 - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) # Layer 3 - pad = pybuda.op.Pad("pad", mul, self.pad) + pad = forge.op.Pad("pad", mul, self.pad) return pad \ No newline at end of file diff --git a/forge/test/operators/tm/pad/models/model_2.py b/forge/test/operators/tm/pad/models/model_2.py new file mode 100644 index 000000000..3d1abbc05 --- /dev/null +++ b/forge/test/operators/tm/pad/models/model_2.py @@ -0,0 +1,72 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 2 +# Pad operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaPadTest(ForgeModule): + """ + Buda Test 2 + + """ + + def __init__( + self, + shape, + pad + ): + super().__init__("Buda Test 2") + + + self.testname = "Operator Pad, Test 2" + self.shape = shape + self.pad = pad + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = 
[Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] + + self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) + self.set_parameter("train_param2", torch.rand(*self.shape, requires_grad=True)) + + def forward(self, x1, x2): + + # Layer 2 + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + + # Layer 3 + pad1 = forge.op.Pad("pad1", mul1, self.pad) + pad2 = forge.op.Pad("pad2", mul2, self.pad) + + # Layer 4 + mul3 = forge.op.Multiply("mul3", pad1, pad2) + pad3 = forge.op.Pad("pad3", x1, self.pad) + pad4 = forge.op.Pad("pad4", self.train_param2, self.pad) + + # Layer 5 + mul4 = forge.op.Multiply("mul4", pad3, mul3) + mul5 = forge.op.Multiply("mul5", mul3, pad4) + + # Layer 6 + pad5 = forge.op.Pad("pad5", mul4, self.pad) + pad6 = forge.op.Pad("pad6", mul5, self.pad) + + # Layer 7 + mul6 = forge.op.Multiply("mul6", pad5, pad6) + + return mul6 \ No newline at end of file diff --git a/forge/test/operators/tm/pad/models/model_3.py b/forge/test/operators/tm/pad/models/model_3.py new file mode 100644 index 000000000..ce7f102c0 --- /dev/null +++ b/forge/test/operators/tm/pad/models/model_3.py @@ -0,0 +1,86 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 3 +# Pad operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaPadTest(ForgeModule): + """ + Buda Test 3 + + """ + + def __init__( + self, + shape, + pad + ): + super().__init__("Buda Test 3") + + + self.testname = "Operator Pad, Test 3" + self.shape = shape + self.pad = pad + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] + + self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) + self.set_parameter("train_param2", torch.rand(*self.shape, requires_grad=True)) + + def forward(self, x1, x2): + + # Layer 2 + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", self.train_param1, x2) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) + + # Layer 3 + pad1 = forge.op.Pad("pad1", mul1, self.pad) + pad2 = forge.op.Pad("pad2", mul2, self.pad) + pad3 = forge.op.Pad("pad3", mul3, self.pad) + + # Layer 4 + mul4 = forge.op.Multiply("mul4", self.train_param1, x2) + add1 = forge.op.Add("add1", x2, self.train_param2) + + # Layer 5 + pad4 = forge.op.Pad("pad4", mul4, self.pad) + pad5 = forge.op.Pad("pad5", add1, self.pad) + + # Layer 6 + mul5 = forge.op.Multiply("mul5", pad1, pad4) + mul6 = forge.op.Multiply("mul6", pad2, pad3) + add2 = forge.op.Add("add2", pad3, pad5) + + # Layer 7 + pad6 = forge.op.Pad("pad6", mul5, self.pad) + pad7 = forge.op.Pad("pad7", mul6, self.pad) + pad8 = forge.op.Pad("pad8", add2, self.pad) + + # Layer 8 + add4 = forge.op.Add("add4", pad6, pad7) + add5 = forge.op.Add("add5", pad6, pad8) + add6 = forge.op.Add("add6", pad7, pad8) + + # Layer 9 + pad9 = forge.op.Pad("pad9", add4, self.pad) + pad10 = forge.op.Pad("pad10", add5, self.pad) + pad11 = forge.op.Pad("pad11", add6, self.pad) + + return pad9, pad10, pad11 \ No newline at end of file diff --git a/forge/test/operators/tm/pad/models/model_4.py 
b/forge/test/operators/tm/pad/models/model_4.py new file mode 100644 index 000000000..4eab2090a --- /dev/null +++ b/forge/test/operators/tm/pad/models/model_4.py @@ -0,0 +1,101 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 4 +# Pad operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaPadTest(ForgeModule): + """ + Buda Test 4 + + """ + + def __init__( + self, + shape, + pad + ): + super().__init__("Buda Test 4") + + + self.testname = "Operator Pad, Test 4" + self.shape = shape + self.pad = pad + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(3)] + + self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) + self.set_parameter("train_param2", torch.rand(*self.shape, requires_grad=True)) + self.set_parameter("train_param3", torch.rand(*self.shape, requires_grad=True)) + + def forward(self, x1, x2, x3): + + # Layer 2 + pad1 = forge.op.Pad("pad1", x1, self.pad) + pad2 = forge.op.Pad("pad2", self.train_param1, self.pad) + pad3 = forge.op.Pad("pad3", x2, self.pad) + pad4 = forge.op.Pad("pad4", self.train_param2, self.pad) + pad5 = forge.op.Pad("pad5", x3, self.pad) + pad6 = forge.op.Pad("pad6", self.train_param3, self.pad) + + # Layer 3 + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) + + # Layer 4 + pad7 = forge.op.Pad("pad7", mul1, self.pad) + pad8 = forge.op.Pad("pad8", mul2, self.pad) + pad9 = forge.op.Pad("pad9", mul3, self.pad) + + # Layer 5 + mul4 = forge.op.Multiply("mul4", pad7, pad1) + mul5 = forge.op.Multiply("mul5", pad2, pad8) + mul6 = forge.op.Multiply("mul6", pad8, pad4) + mul7 = forge.op.Multiply("mul7", pad3, pad9) + mul8 = forge.op.Multiply("mul8", pad5, pad6) + + # Layer 6 + pad10 = forge.op.Pad("pad10", pad7, self.pad) + pad11 = forge.op.Pad("pad11", mul4, self.pad) + pad12 = forge.op.Pad("pad12", mul5, self.pad) + pad13 = forge.op.Pad("pad13", mul6, self.pad) + pad14 = forge.op.Pad("pad14", mul7, self.pad) + pad15 = forge.op.Pad("pad15", mul8, self.pad) + pad16 = forge.op.Pad("pad16", pad6, self.pad) + + # Layer 7 + mul9 = forge.op.Multiply("mul9", pad10, pad12) + mul10 = forge.op.Multiply("mul10", pad11, pad14) + mul11 = forge.op.Multiply("mul11", pad13, pad15) + mul12 = forge.op.Multiply("mul12", pad15, pad16) + + # Layer 8 + pad17 = forge.op.Pad("pad17", mul9, self.pad) + pad18 = forge.op.Pad("pad18", mul10, self.pad) + pad19 = forge.op.Pad("pad19", mul11, self.pad) + pad20 = forge.op.Pad("pad20", mul12, self.pad) + + # Layer 9 + mul13 = forge.op.Multiply("mul13", pad17, pad18) + mul14 = forge.op.Multiply("mul14", pad18, pad19) + mul15 = forge.op.Multiply("mul15", pad19, pad20) + + return mul13, mul14, mul15 \ No newline at end of file diff --git a/forge/test/operators/tm/pad/models/model_5.py b/forge/test/operators/tm/pad/models/model_5.py new file mode 100644 index 000000000..d1618ce03 --- /dev/null +++ b/forge/test/operators/tm/pad/models/model_5.py @@ -0,0 +1,123 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent 
AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 5 +# Pad operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaPadTest(ForgeModule): + """ + Buda Test 5 + + """ + + def __init__( + self, + shape, + pad + ): + super().__init__("Buda Test 5") + + + self.testname = "Operator Pad, Test 5" + self.shape = shape + self.pad = pad + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(3)] + + self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) + self.set_parameter("train_param2", torch.rand(*self.shape, requires_grad=True)) + self.set_parameter("train_param3", torch.rand(*self.shape, requires_grad=True)) + + def forward(self, x1, x2, x3): + + # Layer 2 + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) + + # Layer 3 + pad1 = forge.op.Pad("pad1", x1, self.pad) + pad2 = forge.op.Pad("pad2", mul1, self.pad) + pad3 = forge.op.Pad("pad3", self.train_param1, self.pad) + pad4 = forge.op.Pad("pad4", x2, self.pad) + pad5 = forge.op.Pad("pad5", mul2, self.pad) + pad6 = forge.op.Pad("pad6", self.train_param2, self.pad) + pad7 = forge.op.Pad("pad7", x3, self.pad) + pad8 = forge.op.Pad("pad8", mul3, self.pad) + pad9 = forge.op.Pad("pad9", self.train_param3, self.pad) + + # Layer 4 + pad10 = forge.op.Pad("pad10", x1, self.pad) + mul4 = forge.op.Multiply("mul4", pad1, pad2) + mul5 = forge.op.Multiply("mul5", pad2, pad3) + mul6 = forge.op.Multiply("mul6", pad4, pad5) + mul7 = forge.op.Multiply("mul7", pad5, pad6) + mul8 = forge.op.Multiply("mul8", pad7, pad8) + mul9 = forge.op.Multiply("mul9", pad8, pad9) + + # Layer 5 + mul10 = forge.op.Multiply("mul10", pad10, mul4) + pad11 = forge.op.Pad("pad11", x2, self.pad) + mul11 = forge.op.Multiply("mul11", mul5, pad11) + pad12 = forge.op.Pad("pad12", x3, self.pad) + mul12 = forge.op.Multiply("mul12", mul7, pad12) + pad13 = forge.op.Pad("pad13", self.train_param3, self.pad) + mul13 = forge.op.Multiply("mul13", mul9, pad13) + + # Layer 6 + pad14 = forge.op.Pad("pad14", mul10, self.pad) + pad15 = forge.op.Pad("pad15", mul11, self.pad) + pad16 = forge.op.Pad("pad16", mul6, self.pad) + pad17 = forge.op.Pad("pad17", mul12, self.pad) + pad18 = forge.op.Pad("pad18", mul8, self.pad) + pad19 = forge.op.Pad("pad19", mul13, self.pad) + + # Layer 7 + mul14 = forge.op.Multiply("mul14", pad14, pad15) + mul15 = forge.op.Multiply("mul15", pad16, pad17) + mul16 = forge.op.Multiply("mul16", pad18, pad19) + + # Layer 8 + pad20 = forge.op.Pad("pad20", pad14, self.pad) + pad21 = forge.op.Pad("pad21", mul14, self.pad) + pad22 = forge.op.Pad("pad22", pad16, self.pad) + pad23 = forge.op.Pad("pad23", mul15, self.pad) + pad24 = forge.op.Pad("pad24", pad19, self.pad) + pad25 = forge.op.Pad("pad25", mul16, self.pad) + + # Layer 9 + mul17 = forge.op.Multiply("mul17", pad20, pad23) + mul18 = forge.op.Multiply("mul18", pad22, pad25) + mul19 = forge.op.Multiply("mul19", pad21, pad24) + + # Layer 10 + pad26 = forge.op.Pad("pad26", mul17, self.pad) + pad27 = forge.op.Pad("pad27", mul18, self.pad) + 
pad28 = forge.op.Pad("pad28", mul19, self.pad) + + # Layer 11 + add1 = forge.op.Add("add1", pad26, pad27) + add2 = forge.op.Add("add2", pad27, pad28) + + # Layer 12 + pad29 = forge.op.Pad("pad29", add1, self.pad) + pad30 = forge.op.Pad("pad30", add2, self.pad) + + return pad29, pad30 \ No newline at end of file diff --git a/pybuda/test/operators/tm/pad/test_pad.py b/forge/test/operators/tm/pad/test_pad.py similarity index 90% rename from pybuda/test/operators/tm/pad/test_pad.py rename to forge/test/operators/tm/pad/test_pad.py index 9e06ee61b..0604ee979 100644 --- a/pybuda/test/operators/tm/pad/test_pad.py +++ b/forge/test/operators/tm/pad/test_pad.py @@ -11,14 +11,14 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig -from pybuda.config import CompileDepth, _get_global_compiler_config +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig +from forge.config import CompileDepth, _get_global_compiler_config from . import models -MODELS_PATH = "./pybuda/test/operators/tm/pad/models" +MODELS_PATH = "./forge/test/operators/tm/pad/models" SHAPE_NO = 5 SHAPE_DIM_MIN = 1 @@ -84,7 +84,7 @@ def test_hstack_hslice( model = eval(architecture) tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/tm/reshape/__init__.py b/forge/test/operators/tm/reshape/__init__.py similarity index 100% rename from pybuda/test/operators/tm/reshape/__init__.py rename to forge/test/operators/tm/reshape/__init__.py diff --git a/pybuda/test/operators/tm/reshape/conftest.py b/forge/test/operators/tm/reshape/conftest.py similarity index 100% rename from pybuda/test/operators/tm/reshape/conftest.py rename to forge/test/operators/tm/reshape/conftest.py diff --git a/pybuda/test/operators/tm/reshape/models/__init__.py b/forge/test/operators/tm/reshape/models/__init__.py similarity index 100% rename from pybuda/test/operators/tm/reshape/models/__init__.py rename to forge/test/operators/tm/reshape/models/__init__.py diff --git a/pybuda/test/operators/tm/reshape/models/model_1.py b/forge/test/operators/tm/reshape/models/model_1.py similarity index 63% rename from pybuda/test/operators/tm/reshape/models/model_1.py rename to forge/test/operators/tm/reshape/models/model_1.py index 47cbd917f..8680441b4 100644 --- a/pybuda/test/operators/tm/reshape/models/model_1.py +++ b/forge/test/operators/tm/reshape/models/model_1.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# Reshape operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reshape operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch import numpy as np -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReshapeTest(PyBudaModule): +class BudaReshapeTest(ForgeModule): """ Buda Test 1 @@ -36,7 +36,7 @@ def __init__( self.old_shape = old_shape self.new_shape = new_shape - self.train_param = pybuda.Parameter(*self.old_shape, requires_grad=True) + self.train_param = forge.Parameter(*self.old_shape, requires_grad=True) self.inputs = 
[Tensor.create_from_torch(torch.rand(*self.old_shape))] self.set_parameter("train_param", torch.rand(*self.old_shape, requires_grad=True)) @@ -44,14 +44,14 @@ def __init__( def forward(self, x): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x, self.train_param) + mul1 = forge.op.Multiply("mul1", x, self.train_param) # Layer 3 - rsh1 = pybuda.op.Reshape("rsh1", x, self.new_shape) - rsh2 = pybuda.op.Reshape("rsh2", self.train_param, self.new_shape) + rsh1 = forge.op.Reshape("rsh1", x, self.new_shape) + rsh2 = forge.op.Reshape("rsh2", self.train_param, self.new_shape) # Layer 4 - mul2 = pybuda.op.Multiply("mul2", rsh1, rsh2) + mul2 = forge.op.Multiply("mul2", rsh1, rsh2) return mul1, mul2 diff --git a/pybuda/test/operators/tm/reshape/models/model_2.py b/forge/test/operators/tm/reshape/models/model_2.py similarity index 54% rename from pybuda/test/operators/tm/reshape/models/model_2.py rename to forge/test/operators/tm/reshape/models/model_2.py index 7b1dea6e7..3391a5d19 100644 --- a/pybuda/test/operators/tm/reshape/models/model_2.py +++ b/forge/test/operators/tm/reshape/models/model_2.py @@ -3,22 +3,22 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# Reshape operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# Reshape operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch import numpy as np -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaReshapeTest(PyBudaModule): +class BudaReshapeTest(ForgeModule): """ Buda Test 2 @@ -36,8 +36,8 @@ def __init__( self.old_shape = old_shape self.new_shape = new_shape - self.train_param1 = pybuda.Parameter(*self.old_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.old_shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.old_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.old_shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.old_shape)) for i in range(2)] for i in range(1, 3): @@ -46,22 +46,22 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) # Layer 3 - rsh1 = pybuda.op.Reshape("rsh1", mul1, self.new_shape) - rsh2 = pybuda.op.Reshape("rsh2", mul2, self.new_shape) + rsh1 = forge.op.Reshape("rsh1", mul1, self.new_shape) + rsh2 = forge.op.Reshape("rsh2", mul2, self.new_shape) # Layer 4 - mul3 = pybuda.op.Multiply("mul3", rsh1, rsh2) - mul4 = pybuda.op.Multiply("mul4", self.train_param1, x2) + mul3 = forge.op.Multiply("mul3", rsh1, rsh2) + mul4 = forge.op.Multiply("mul4", self.train_param1, x2) # Layer 5 - rsh3 = pybuda.op.Reshape("rsh3", mul4, self.new_shape) + rsh3 = forge.op.Reshape("rsh3", mul4, self.new_shape) # Layer 6 - mul5 = pybuda.op.Multiply("mul5", mul3, rsh3) + mul5 = forge.op.Multiply("mul5", mul3, rsh3) return mul5 diff --git a/forge/test/operators/tm/reshape/models/model_3.py b/forge/test/operators/tm/reshape/models/model_3.py new file mode 100644 index 000000000..ab2d5d7b1 --- /dev/null +++ b/forge/test/operators/tm/reshape/models/model_3.py @@ -0,0 +1,73 @@ +# SPDX-FileCopyrightText: © 
2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 3 +# Reshape operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch +import numpy as np + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaReshapeTest(ForgeModule): + """ + Buda Test 3 + + """ + + def __init__( + self, + old_shape, + new_shape): + super().__init__("Buda Test 3") + + assert np.prod(old_shape) == np.prod(new_shape), "Size of a tensor should stay the same" + + self.testname = "Operator reshape Test 3" + self.old_shape = old_shape + self.new_shape = new_shape + + self.train_param1 = forge.Parameter(*self.old_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.old_shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.old_shape)) for i in range(2)] + for i in range(1, 3): + self.set_parameter("train_param" + str(i), torch.rand(*self.old_shape, requires_grad=True)) + + def forward(self, x1, x2): + + # Layer 2 + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", self.train_param1, x2) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) + + # Layer 3 + rsh1 = forge.op.Reshape("rsh1", mul1, self.new_shape) + rsh2 = forge.op.Reshape("rsh2", mul2, self.new_shape) + rsh3 = forge.op.Reshape("rsh3", mul3, self.new_shape) + + # Layer 4 + mul4 = forge.op.Multiply("mul4", rsh1, rsh2) + mul5 = forge.op.Multiply("mul5", rsh2, rsh3) + + # Layer 5 + rsh4 = forge.op.Reshape("rsh4", mul4, self.old_shape) + rsh5 = forge.op.Reshape("rsh5", mul5, self.old_shape) + + # Layer 6 + mul6 = forge.op.Multiply("mul6", rsh4, self.train_param1) + mul7 = forge.op.Multiply("mul7", rsh5, self.train_param2) + + return mul6, mul7 + + def values(self): + return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/forge/test/operators/tm/reshape/models/model_4.py b/forge/test/operators/tm/reshape/models/model_4.py new file mode 100644 index 000000000..68cdf86b8 --- /dev/null +++ b/forge/test/operators/tm/reshape/models/model_4.py @@ -0,0 +1,84 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 4 +# Reshape operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch +import numpy as np + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaReshapeTest(ForgeModule): + """ + Buda Test 4 + + """ + + def __init__( + self, + old_shape, + new_shape): + super().__init__("Buda Test 4") + + assert np.prod(old_shape) == np.prod(new_shape), "Size of a tensor should stay the same" + + self.testname = "Operator reshape Test 4" + self.old_shape = old_shape + self.new_shape = new_shape + + self.train_param1 = forge.Parameter(*self.old_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.old_shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.old_shape)) for i in range(2)] + for i in range(1, 3): + self.set_parameter("train_param" + str(i), torch.rand(*self.old_shape, requires_grad=True)) + + def forward(self, x1, x2): + + # Layer 2 + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", mul1, mul2) + + # Layer 3 + rsh1 = 
forge.op.Reshape("rsh1", x1, self.new_shape) + rsh2 = forge.op.Reshape("rsh2", self.train_param1, self.new_shape) + rsh3 = forge.op.Reshape("rsh3", mul3, self.new_shape) + rsh4 = forge.op.Reshape("rsh4", x2, self.new_shape) + rsh5 = forge.op.Reshape("rsh5", self.train_param2, self.new_shape) + + # Layer 4 + mul4 = forge.op.Multiply("mul4", rsh1, rsh2) + mul5 = forge.op.Multiply("mul5", self.train_param1, mul3) + mul6 = forge.op.Multiply("mul6", rsh3, rsh4) + mul7 = forge.op.Multiply("mul7", rsh5, rsh5) + + # Layer 5 + rsh6 = forge.op.Reshape("rsh6", mul4, self.old_shape) + rsh7 = forge.op.Reshape("rsh7", mul5, self.old_shape) + rsh8 = forge.op.Reshape("rsh8", mul6, self.old_shape) + rsh9 = forge.op.Reshape("rsh9", mul7, self.old_shape) + + # Layer 6 + add1 = forge.op.Add("add1", rsh6, rsh7) + + # Layer 7 + add2 = forge.op.Add("add2", add1, rsh8) + + # Layer 8 + add3 = forge.op.Add("add3", add2, rsh9) + + return add3 + + def values(self): + return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/forge/test/operators/tm/reshape/models/model_5.py b/forge/test/operators/tm/reshape/models/model_5.py new file mode 100644 index 000000000..7b329817d --- /dev/null +++ b/forge/test/operators/tm/reshape/models/model_5.py @@ -0,0 +1,72 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 5 +# Reshape operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch +import numpy as np + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaReshapeTest(ForgeModule): + """ + Buda Test 5 + + """ + + def __init__( + self, + old_shape, + new_shape): + super().__init__("Buda Test 5") + + assert np.prod(old_shape) == np.prod(new_shape), "Size of a tensor should stay the same" + + self.testname = "Operator reshape Test 5" + self.old_shape = old_shape + self.new_shape = new_shape + + self.train_param1 = forge.Parameter(*self.old_shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.old_shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.old_shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.old_shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.old_shape)) for i in range(2)] + for i in range(1, 5): + self.set_parameter("train_param" + str(i), torch.rand(*self.old_shape, requires_grad=True)) + + def forward(self, x1, x2): + + # Layer 2 + add1 = forge.op.Add("add1", x1, self.train_param1) + add2 = forge.op.Add("add2", x2, self.train_param2) + + # Layer 3 + mul1 = forge.op.Multiply("mul1", add1, add2) + + # Layer 4 + rsh1 = forge.op.Reshape("rsh1", add1, self.new_shape) + rsh2 = forge.op.Reshape("rsh2", add2, self.new_shape) + + # Layer 5 + mul2 = forge.op.Multiply("mul2", rsh1, rsh2) + + # Layer 6 + mul3 = forge.op.Multiply("mul3", mul1, self.train_param3) + rsh3 = forge.op.Reshape("rsh3", self.train_param4, self.new_shape) + mul4 = forge.op.Multiply("mul4", mul2, rsh3) + + return mul3, mul4 + + def values(self): + return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/pybuda/test/operators/tm/reshape/test_command.sh b/forge/test/operators/tm/reshape/test_command.sh similarity index 100% rename from pybuda/test/operators/tm/reshape/test_command.sh rename to forge/test/operators/tm/reshape/test_command.sh diff --git a/pybuda/test/operators/tm/reshape/test_reshape.py 
b/forge/test/operators/tm/reshape/test_reshape.py similarity index 90% rename from pybuda/test/operators/tm/reshape/test_reshape.py rename to forge/test/operators/tm/reshape/test_reshape.py index 590a7b74f..f1bb09734 100644 --- a/pybuda/test/operators/tm/reshape/test_reshape.py +++ b/forge/test/operators/tm/reshape/test_reshape.py @@ -12,13 +12,13 @@ import pytest import numpy as np -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig -from pybuda.verify.config import TestKind +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig +from forge.verify.config import TestKind from . import models -MODELS_PATH = "./pybuda/test/operators/tm/reshape/models" +MODELS_PATH = "./forge/test/operators/tm/reshape/models" SHAPE_NO = 2 SHAPE_SIZE_MIN = 2 @@ -66,7 +66,7 @@ def test_reshape( model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/tm/reshape/test_reshape_single.py b/forge/test/operators/tm/reshape/test_reshape_single.py similarity index 92% rename from pybuda/test/operators/tm/reshape/test_reshape_single.py rename to forge/test/operators/tm/reshape/test_reshape_single.py index 3f694af92..c9efe5e9f 100644 --- a/pybuda/test/operators/tm/reshape/test_reshape_single.py +++ b/forge/test/operators/tm/reshape/test_reshape_single.py @@ -12,12 +12,12 @@ import pytest import numpy as np -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . import models -MODELS_PATH = "./pybuda/test/operators/tm/reshape/models" +MODELS_PATH = "./forge/test/operators/tm/reshape/models" def test_reshape( resh_train, @@ -64,7 +64,7 @@ def test_reshape( model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/operators/tm/vstack_vslice/__init__.py b/forge/test/operators/tm/vstack_vslice/__init__.py similarity index 100% rename from pybuda/test/operators/tm/vstack_vslice/__init__.py rename to forge/test/operators/tm/vstack_vslice/__init__.py diff --git a/pybuda/test/operators/tm/vstack_vslice/models/__init__.py b/forge/test/operators/tm/vstack_vslice/models/__init__.py similarity index 100% rename from pybuda/test/operators/tm/vstack_vslice/models/__init__.py rename to forge/test/operators/tm/vstack_vslice/models/__init__.py diff --git a/pybuda/test/operators/tm/vstack_vslice/models/model_1.py b/forge/test/operators/tm/vstack_vslice/models/model_1.py similarity index 58% rename from pybuda/test/operators/tm/vstack_vslice/models/model_1.py rename to forge/test/operators/tm/vstack_vslice/models/model_1.py index 366d9f750..23c831af6 100644 --- a/pybuda/test/operators/tm/vstack_vslice/models/model_1.py +++ b/forge/test/operators/tm/vstack_vslice/models/model_1.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 1 -# VStack, VSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures/graphs +# VStack, VSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures/graphs # import torch -import pybuda -import pybuda.op 
-import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaVStackVSliceTest(PyBudaModule): +class BudaVStackVSliceTest(ForgeModule): """ Buda Test 1 """ @@ -40,7 +40,7 @@ def __init__( print(f"SHAPE: {self.shape}") print(f"SLICE: {self.slice}") - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -48,21 +48,21 @@ def __init__( def forward(self, x): # Layer 2 - vst1 = pybuda.op.VStack("vst1", x, self.slice) - vst2 = pybuda.op.VStack("vst2", self.train_param, self.slice) - mul1 = pybuda.op.Multiply("mul1", x, self.train_param) + vst1 = forge.op.VStack("vst1", x, self.slice) + vst2 = forge.op.VStack("vst2", self.train_param, self.slice) + mul1 = forge.op.Multiply("mul1", x, self.train_param) # Layer 3 - mul2 = pybuda.op.Multiply("mul2", vst1, vst2) - vst3 = pybuda.op.VStack("vst3", mul1, self.slice) + mul2 = forge.op.Multiply("mul2", vst1, vst2) + vst3 = forge.op.VStack("vst3", mul1, self.slice) # Layer 4 - mul3 = pybuda.op.Multiply("mul3", mul2, vst3) + mul3 = forge.op.Multiply("mul3", mul2, vst3) # Layer 5 - vsl1 = pybuda.op.VSlice("vsl1", mul3, self.slice) + vsl1 = forge.op.VSlice("vsl1", mul3, self.slice) # Layer 6 - mul4 = pybuda.op.Multiply("mul4", vsl1, self.train_param) + mul4 = forge.op.Multiply("mul4", vsl1, self.train_param) return mul4 \ No newline at end of file diff --git a/pybuda/test/operators/tm/vstack_vslice/models/model_2.py b/forge/test/operators/tm/vstack_vslice/models/model_2.py similarity index 56% rename from pybuda/test/operators/tm/vstack_vslice/models/model_2.py rename to forge/test/operators/tm/vstack_vslice/models/model_2.py index 28fedfdf2..e8b6906db 100644 --- a/pybuda/test/operators/tm/vstack_vslice/models/model_2.py +++ b/forge/test/operators/tm/vstack_vslice/models/model_2.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 2 -# VStack, VSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# VStack, VSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaVStackVSliceTest(PyBudaModule): +class BudaVStackVSliceTest(ForgeModule): """ Buda Test 2 @@ -41,8 +41,8 @@ def __init__( print(f"SHAPE: {self.shape}") print(f"SLICE: {self.slice}") - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] for i in range(1, 3): @@ -51,24 +51,24 @@ def __init__( def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) # 
Layer 3 - vsl1 = pybuda.op.VSlice("vsl1", mul1, self.slice) - vsl2 = pybuda.op.VSlice("vsl2", mul2, self.slice) + vsl1 = forge.op.VSlice("vsl1", mul1, self.slice) + vsl2 = forge.op.VSlice("vsl2", mul2, self.slice) # Layer 4 - mul3 = pybuda.op.Multiply("mul3", vsl1, vsl2) - mul4 = pybuda.op.Multiply("mul4", self.train_param1, self.train_param2) + mul3 = forge.op.Multiply("mul3", vsl1, vsl2) + mul4 = forge.op.Multiply("mul4", self.train_param1, self.train_param2) # Layer 5 - vst1 = pybuda.op.VStack("vst1", mul3, self.slice) + vst1 = forge.op.VStack("vst1", mul3, self.slice) # Layer 6 - add1 = pybuda.op.Add("add1", vst1, mul4) + add1 = forge.op.Add("add1", vst1, mul4) # Layer 7 - vst2 = pybuda.op.VStack("vst2", add1, self.slice) + vst2 = forge.op.VStack("vst2", add1, self.slice) return vst2 \ No newline at end of file diff --git a/forge/test/operators/tm/vstack_vslice/models/model_3.py b/forge/test/operators/tm/vstack_vslice/models/model_3.py new file mode 100644 index 000000000..c1f2ebed9 --- /dev/null +++ b/forge/test/operators/tm/vstack_vslice/models/model_3.py @@ -0,0 +1,95 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +# +# Test 3 +# VStack, VSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures +# + + +import torch + +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, Tensor + + +class BudaVStackVSliceTest(ForgeModule): + """ + Buda Test 3 + + """ + + def __init__( + self, + shape, + slice): + super().__init__("Buda Test 3") + + assert hasattr(shape, '__iter__'), "Shape must be iterable" + assert len(shape) == 4, "Shape must be 4" + assert shape[1] > 1, "Z dimension must be bigger than 1" + assert shape[-2] % slice == 0, "The last dimension must be divisible by slice" + + self.testname = "Operator VStack, VSLice, Test 3" + self.shape = shape + self.slice = slice + + if type(self.shape) == tuple: + self.shape = list(self.shape) + self.shape[1] *= self.slice + self.shape[-2] *= self.slice + + print(f"SHAPE: {self.shape}") + print(f"SLICE: {self.slice}") + + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + + self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] + for i in range(1, 3): + self.set_parameter("train_param" + str(i), torch.rand(*self.shape, requires_grad=True)) + + def forward(self, x1, x2): + + # Layer 2 + vst1 = forge.op.VStack("vst1", x1, self.slice) + vst2 = forge.op.VStack("vst2", self.train_param1, self.slice) + vst3 = forge.op.VStack("vst3", x2, self.slice) + vst4 = forge.op.VStack("vst4", self.train_param2, self.slice) + + # Layer 3 + mul1 = forge.op.Multiply("mul1", vst1, vst2) + mul2 = forge.op.Multiply("mul2", vst3, vst4) + + # Layer 4 + vsl1 = forge.op.VSlice("vsl1", mul1, self.slice) + mul3 = forge.op.Multiply("mul3", vst2, mul2) + + # Layer 5 + mul4 = forge.op.Multiply("mul4", vsl1, x2) + + # Layer 6 + vsl2 = forge.op.VSlice("vsl2", mul4, self.slice) + vsl3 = forge.op.VSlice("vsl3", mul3, self.slice) + vst5 = forge.op.VStack("vst5", self.train_param1, self.slice) + vst6 = forge.op.VStack("vst6", self.train_param2, self.slice) + + # Layer 7 + add1 = forge.op.Add("add1", vsl2, forge.op.VSlice("hsl5", vsl3, self.slice)) + mul5 = forge.op.Multiply("mul5", vst5, vst6) + + # Layer 8 + vst8 = forge.op.VStack("vst8", add1, self.slice) + vst9 = forge.op.VStack("vst9", vst8, self.slice) 
+ + # Layer 9 + sub1 = forge.op.Subtract("sub1", vst9, mul5) + + # Layer 10 + vst10 = forge.op.VStack("vst10", sub1, self.slice) + + return vst10 \ No newline at end of file diff --git a/pybuda/test/operators/tm/vstack_vslice/models/model_4.py b/forge/test/operators/tm/vstack_vslice/models/model_4.py similarity index 51% rename from pybuda/test/operators/tm/vstack_vslice/models/model_4.py rename to forge/test/operators/tm/vstack_vslice/models/model_4.py index 8b86c1bf6..b9a3c868f 100644 --- a/pybuda/test/operators/tm/vstack_vslice/models/model_4.py +++ b/forge/test/operators/tm/vstack_vslice/models/model_4.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 4 -# VStack, VSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# VStack, VSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaVStackVSliceTest(PyBudaModule): +class BudaVStackVSliceTest(ForgeModule): """ Buda Test 4 @@ -46,8 +46,8 @@ def __init__( print(f"SHAPE: {self.shape}") print(f"SLICE: {self.slice}") - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] for i in range(1, 3): @@ -56,35 +56,35 @@ def __init__( def forward(self, x1, x2): # Layer 2 - vsl1 = pybuda.op.VSlice("vsl1", x1, self.slice) - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + vsl1 = forge.op.VSlice("vsl1", x1, self.slice) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) # Layer 3 - vsl2 = pybuda.op.VSlice("vsl2", mul1, self.slice) - vsl3 = pybuda.op.VSlice("vsl3", mul2, self.slice) - vsl4 = pybuda.op.VSlice("vsl4", self.train_param2, self.slice) + vsl2 = forge.op.VSlice("vsl2", mul1, self.slice) + vsl3 = forge.op.VSlice("vsl3", mul2, self.slice) + vsl4 = forge.op.VSlice("vsl4", self.train_param2, self.slice) # Layer 4 - add1 = pybuda.op.Add("add1", vsl1, vsl2) - sub1 = pybuda.op.Subtract("sub1", vsl3, vsl4) + add1 = forge.op.Add("add1", vsl1, vsl2) + sub1 = forge.op.Subtract("sub1", vsl3, vsl4) # Layer 5 - vsl5 = pybuda.op.VSlice("vsl5", add1, self.slice) - vsl6 = pybuda.op.VSlice("vsl6", sub1, self.slice) + vsl5 = forge.op.VSlice("vsl5", add1, self.slice) + vsl6 = forge.op.VSlice("vsl6", sub1, self.slice) # Layer 6 - sub2 = pybuda.op.Subtract("sub2", self.train_param1, self.train_param2) - add2 = pybuda.op.Add("add2", vsl5, vsl6) + sub2 = forge.op.Subtract("sub2", self.train_param1, self.train_param2) + add2 = forge.op.Add("add2", vsl5, vsl6) # Layer 7 - vsl7 = pybuda.op.VSlice("vsl7", sub2, self.slice) - hst1 = pybuda.op.VStack("hst1", add2, self.slice) + vsl7 = forge.op.VSlice("vsl7", sub2, self.slice) + hst1 = forge.op.VStack("hst1", add2, self.slice) # Layer 8 - add3 = pybuda.op.Add("add3", vsl7, hst1) + add3 = forge.op.Add("add3", vsl7, hst1) # Layer 9 - vsl8 = pybuda.op.VSlice("vsl8", add3, self.slice) + vsl8 = 
forge.op.VSlice("vsl8", add3, self.slice) return vsl8 \ No newline at end of file diff --git a/pybuda/test/operators/tm/vstack_vslice/models/model_5.py b/forge/test/operators/tm/vstack_vslice/models/model_5.py similarity index 50% rename from pybuda/test/operators/tm/vstack_vslice/models/model_5.py rename to forge/test/operators/tm/vstack_vslice/models/model_5.py index 7c97b5b63..0ce25f42c 100644 --- a/pybuda/test/operators/tm/vstack_vslice/models/model_5.py +++ b/forge/test/operators/tm/vstack_vslice/models/model_5.py @@ -3,21 +3,21 @@ # SPDX-License-Identifier: Apache-2.0 # # Test 5 -# VStack, VSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures +# VStack, VSlice operators defined by Forge API +# These kinds of tests test only single specific operator through different Forge architectures # import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn +import forge +import forge.op +import forge.op.nn as nn -from pybuda import PyBudaModule, Tensor +from forge import ForgeModule, Tensor -class BudaVStackVSliceTest(PyBudaModule): +class BudaVStackVSliceTest(ForgeModule): """ Buda Test 5 @@ -47,8 +47,8 @@ def __init__( print(f"SHAPE: {self.shape}") print(f"SLICE: {self.slice}") - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] for i in range(1, 3): @@ -57,32 +57,32 @@ def __init__( def forward(self, x1, x2): # Layer 2 - vst1 = pybuda.op.VStack("vst1", x1, self.slice) - vst2 = pybuda.op.VStack("vst2", self.train_param1, self.slice) - vst3 = pybuda.op.VStack("vst3", x2, self.slice) - vst4 = pybuda.op.VStack("vst4", self.train_param2, self.slice) + vst1 = forge.op.VStack("vst1", x1, self.slice) + vst2 = forge.op.VStack("vst2", self.train_param1, self.slice) + vst3 = forge.op.VStack("vst3", x2, self.slice) + vst4 = forge.op.VStack("vst4", self.train_param2, self.slice) # Layer 3 - vst5 = pybuda.op.VStack("vst5", vst1, self.slice) - vst6 = pybuda.op.VStack("vst6", vst2, self.slice) - vst7 = pybuda.op.VStack("vst7", self.train_param2, self.slice) - add1 = pybuda.op.Add("add1", vst3, vst4) + vst5 = forge.op.VStack("vst5", vst1, self.slice) + vst6 = forge.op.VStack("vst6", vst2, self.slice) + vst7 = forge.op.VStack("vst7", self.train_param2, self.slice) + add1 = forge.op.Add("add1", vst3, vst4) # Layer 4 - add2 = pybuda.op.Add("add2", vst5, vst6) - vst8 = pybuda.op.VStack("vst8", vst7, self.slice) - vst9 = pybuda.op.VStack("vst9", vst8, self.slice) - vst10 = pybuda.op.VStack("vst10", add1, self.slice) + add2 = forge.op.Add("add2", vst5, vst6) + vst8 = forge.op.VStack("vst8", vst7, self.slice) + vst9 = forge.op.VStack("vst9", vst8, self.slice) + vst10 = forge.op.VStack("vst10", add1, self.slice) # Layer 5 - vst11 = pybuda.op.VStack("vst11", add2, self.slice) - add3 = pybuda.op.Add("add3", vst11, vst9) - add4 = pybuda.op.Add("add4", vst10, vst10) + vst11 = forge.op.VStack("vst11", add2, self.slice) + add3 = forge.op.Add("add3", vst11, vst9) + add4 = forge.op.Add("add4", vst10, vst10) # Layer 6 - vsl1 = pybuda.op.VSlice("vsl1", add3, self.slice) + vsl1 = forge.op.VSlice("vsl1", add3, self.slice) # Layer 7 - mul1 = pybuda.op.Multiply("mul1", vsl1, add4) + mul1 = forge.op.Multiply("mul1", vsl1, 
add4) return mul1, vst11, vst9 \ No newline at end of file diff --git a/pybuda/test/operators/tm/vstack_vslice/test_vstack_vslice.py b/forge/test/operators/tm/vstack_vslice/test_vstack_vslice.py similarity index 90% rename from pybuda/test/operators/tm/vstack_vslice/test_vstack_vslice.py rename to forge/test/operators/tm/vstack_vslice/test_vstack_vslice.py index dea76799c..85b524850 100644 --- a/pybuda/test/operators/tm/vstack_vslice/test_vstack_vslice.py +++ b/forge/test/operators/tm/vstack_vslice/test_vstack_vslice.py @@ -11,15 +11,15 @@ import pytest import numpy as np -import pybuda -import pybuda.op -from pybuda import TTDevice, BackendType, pybuda_compile, VerifyConfig, CompilerConfig +import forge +import forge.op +from forge import TTDevice, BackendType, forge_compile, VerifyConfig, CompilerConfig from . import models TILE_DIM = 32 -MODELS_PATH = "./pybuda/test/operators/tm/vstack_vslice/models" +MODELS_PATH = "./forge/test/operators/tm/vstack_vslice/models" SHAPE_NO = 5 SHAPE_DIM_MIN = 1 @@ -52,7 +52,7 @@ shape.append(sh) @pytest.mark.xfail( - reason="tenstorrent/pybuda#133" + reason="tenstorrent/forge#133" ) @pytest.mark.parametrize("shape, slice", zip(shape, slices), ids=["shape=" + "x".join([str(item) for item in sh])+ "-slice=" + str(sl) for sh, sl in zip(shape, slices)]) @pytest.mark.parametrize("recompute", (True, False), ids=["Recompute", "NoRecompute"]) @@ -75,7 +75,7 @@ def test_vstack_vslice( model = eval(architecture) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.testname, *model.inputs, diff --git a/pybuda/test/quantized/test_onnx_quantized.py b/forge/test/quantized/test_onnx_quantized.py similarity index 80% rename from pybuda/test/quantized/test_onnx_quantized.py rename to forge/test/quantized/test_onnx_quantized.py index 648eb0991..bbbf39364 100644 --- a/pybuda/test/quantized/test_onnx_quantized.py +++ b/forge/test/quantized/test_onnx_quantized.py @@ -9,29 +9,29 @@ import numpy as np import onnxruntime import torch -import pybuda -from pybuda import ( +import forge +from forge import ( OnnxModule, VerifyConfig, DataFormat, BackendDevice, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import _get_global_compiler_config def test_onnx_quantized_mlp_gelu(test_device): pytest.skip() # Download ONNX model - save_path = "pybuda/test/quantized/simple_models/mlp_gelu-QOperator.onnx" + save_path = "forge/test/quantized/simple_models/mlp_gelu-QOperator.onnx" if not os.path.exists(save_path): raise RuntimeError("Model not found") # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_mlp_gelu", onnx_model, save_path, @@ -50,14 +50,14 @@ def test_onnx_quantized_mlp_gelu(test_device): # Compile and verify verify_module( - pybuda_onnx_model, + forge_onnx_model, input_shape, verify_cfg=VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, ), ) @@ -65,14 +65,14 @@ def test_onnx_quantized_mlp(test_device): pytest.skip() # Download ONNX model - save_path = "pybuda/test/quantized/simple_models/mlp-QOperator.onnx" + save_path = 
"forge/test/quantized/simple_models/mlp-QOperator.onnx" if not os.path.exists(save_path): raise RuntimeError("Model not found") # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_mlp", onnx_model, save_path, @@ -91,14 +91,14 @@ def test_onnx_quantized_mlp(test_device): # Compile and verify verify_module( - pybuda_onnx_model, + forge_onnx_model, input_shape, verify_cfg=VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, ), ) @@ -107,14 +107,14 @@ def test_onnx_quantized_conv(test_device): pytest.skip() # Download ONNX model - save_path = "pybuda/test/quantized/simple_models/conv2d_with_bias-Int8.onnx" + save_path = "forge/test/quantized/simple_models/conv2d_with_bias-Int8.onnx" if not os.path.exists(save_path): raise RuntimeError("Model not found") # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_conv", onnx_model, save_path, @@ -133,35 +133,35 @@ def test_onnx_quantized_conv(test_device): tti_path = "int8_conv_bias.tti" if not os.path.exists(tti_path): - tt_module = pybuda_onnx_model - device = pybuda.TTDevice( - "tt0", module=tt_module,arch=pybuda.BackendDevice.Wormhole_B0, devtype=pybuda.BackendType.Silicon) + tt_module = forge_onnx_model + device = forge.TTDevice( + "tt0", module=tt_module,arch=forge.BackendDevice.Wormhole_B0, devtype=forge.BackendType.Silicon) tti_img = device.compile_to_image( img_path=tti_path, training=False, sample_inputs=[torch.randn(shape) for shape in input_shape], ) - device_img: pybuda.TTDeviceImage = pybuda.TTDeviceImage.load_from_disk(tti_path) - ttdevice = pybuda.TTDevice.load_image(img=device_img) + device_img: forge.TTDeviceImage = forge.TTDeviceImage.load_from_disk(tti_path) + ttdevice = forge.TTDevice.load_image(img=device_img) inputs = [torch.randn(shape) for shape in input_shape] ttdevice.push_to_inputs(*inputs) - output_q = pybuda.run_inference(_sequential=True) + output_q = forge.run_inference(_sequential=True) output = output_q.get()[0].value().detach() - golden_output = pybuda_onnx_model.forward(*inputs) + golden_output = forge_onnx_model.forward(*inputs) assert np.allclose(output, golden_output[0], atol=1e-3, rtol=1e-3) # # Compile and verify # verify_module( - # pybuda_onnx_model, + # forge_onnx_model, # input_shape, # verify_cfg=VerifyConfig( # arch=test_device.arch, # devtype=test_device.devtype, # devmode=test_device.devmode, # test_kind=test_kind, - # verify_pybuda_codegen_vs_framework=True, + # verify_forge_codegen_vs_framework=True, # ), # ) @@ -169,14 +169,14 @@ def test_onnx_quantized_mm_int8_no_bias(test_device): pytest.skip() # Download ONNX model - save_path = "pybuda/test/quantized/simple_models/matmul_no_bias-Int8.onnx" + save_path = "forge/test/quantized/simple_models/matmul_no_bias-Int8.onnx" if not os.path.exists(save_path): raise RuntimeError("Model not found") # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_mm_int8_no_bias", onnx_model, save_path, @@ -195,14 +195,14 @@ def test_onnx_quantized_mm_int8_no_bias(test_device): # Compile and verify verify_module( - pybuda_onnx_model, + forge_onnx_model, input_shape, 
verify_cfg=VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, # verify_all=True, # need to update matmul eval in buda ), ) @@ -211,14 +211,14 @@ def test_onnx_quantized_mm_int8_bias(test_device): pytest.skip() # Download ONNX model - save_path = "pybuda/test/quantized/simple_models/matmul_with_bias-Int8.onnx" + save_path = "forge/test/quantized/simple_models/matmul_with_bias-Int8.onnx" if not os.path.exists(save_path): raise RuntimeError("Model not found") # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_mm_int8_bias", onnx_model, save_path, @@ -237,14 +237,14 @@ def test_onnx_quantized_mm_int8_bias(test_device): # Compile and verify verify_module( - pybuda_onnx_model, + forge_onnx_model, input_shape, verify_cfg=VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, # verify_all=True, ), ) @@ -253,14 +253,14 @@ def test_onnx_quantized_mm_uint8_no_bias(test_device): pytest.skip() # Download ONNX model - save_path = "pybuda/test/quantized/simple_models/matmul_no_bias-UInt8.onnx" + save_path = "forge/test/quantized/simple_models/matmul_no_bias-UInt8.onnx" if not os.path.exists(save_path): raise RuntimeError("Model not found") # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_mm_uint8_no_bias", onnx_model, save_path, @@ -279,14 +279,14 @@ def test_onnx_quantized_mm_uint8_no_bias(test_device): # Compile and verify verify_module( - pybuda_onnx_model, + forge_onnx_model, input_shape, verify_cfg=VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, verify_all=True, ), ) diff --git a/pybuda/test/quantized/test_onnx_quantized_mobilenet.py b/forge/test/quantized/test_onnx_quantized_mobilenet.py similarity index 83% rename from pybuda/test/quantized/test_onnx_quantized_mobilenet.py rename to forge/test/quantized/test_onnx_quantized_mobilenet.py index 8225c3fc5..594e40248 100644 --- a/pybuda/test/quantized/test_onnx_quantized_mobilenet.py +++ b/forge/test/quantized/test_onnx_quantized_mobilenet.py @@ -9,20 +9,20 @@ import numpy as np import onnxruntime import torch -import pybuda -from pybuda import ( +import forge +from forge import ( OnnxModule, VerifyConfig, DataFormat, BackendDevice, BackendType, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import _get_global_compiler_config def test_onnx_quantized_mb_v2_depth(test_device): - # Skip test on blackhole until we have support for quantized models on blackhole pybuda#2700 + # Skip test on blackhole until we have support for quantized models on blackhole forge#2700 if test_device.arch == BackendDevice.Blackhole: pytest.skip("Blackhole does not support quantized models") @@ -34,7 +34,7 @@ def test_onnx_quantized_mb_v2_depth(test_device): # LOAD ONNX model 
onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_mb_v2_depthwise", onnx_model, save_path, @@ -44,7 +44,7 @@ def test_onnx_quantized_mb_v2_depth(test_device): compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_t_streaming = True compiler_cfg.enable_auto_fusing = False - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" if test_device.devtype == BackendType.Silicon: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{80*1024}" @@ -57,7 +57,7 @@ def test_onnx_quantized_mb_v2_depth(test_device): # Compile and verify verify_module( - pybuda_onnx_model, + forge_onnx_model, input_shape, verify_cfg=VerifyConfig( arch=test_device.arch, @@ -65,14 +65,14 @@ def test_onnx_quantized_mb_v2_depth(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, enabled = False if test_device.devtype == BackendType.Silicon else True, - # verify_pybuda_codegen_vs_framework=True, + # verify_forge_codegen_vs_framework=True, # verify_all=True ), ) def test_onnx_quantized_mb_v2(test_device): - # Skip test on blackhole until we have support for quantized models on blackhole pybuda#2700 + # Skip test on blackhole until we have support for quantized models on blackhole forge#2700 if test_device.arch == BackendDevice.Blackhole: pytest.skip("Blackhole does not support quantized models") @@ -84,7 +84,7 @@ def test_onnx_quantized_mb_v2(test_device): # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_mb_v2", onnx_model, save_path, @@ -96,10 +96,10 @@ def test_onnx_quantized_mb_v2(test_device): compiler_cfg.enable_auto_fusing = False compiler_cfg.graph_solver_self_cut_type = "ConsumerOperandDataEdgesFirst" compiler_cfg.place_on_new_epoch("conv2d_118.dc.reshape.0.dc.sparse_matmul.14.lc2") - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{80*1024}" if test_device.devtype == BackendType.Silicon: os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{96*1024}" @@ -113,7 +113,7 @@ def test_onnx_quantized_mb_v2(test_device): # Compile and verify verify_module( - pybuda_onnx_model, + forge_onnx_model, input_shape, verify_cfg=VerifyConfig( arch=test_device.arch, @@ -121,7 +121,7 @@ def test_onnx_quantized_mb_v2(test_device): devmode=test_device.devmode, test_kind=TestKind.INFERENCE, enabled = False if test_device.devtype == BackendType.Silicon else True, - # verify_pybuda_codegen_vs_framework=True, + # verify_forge_codegen_vs_framework=True, # verify_all=True ), ) \ No newline at end of file diff --git a/pybuda/test/quantized/test_onnx_quantized_resnet.py b/forge/test/quantized/test_onnx_quantized_resnet.py similarity index 67% rename from pybuda/test/quantized/test_onnx_quantized_resnet.py rename to forge/test/quantized/test_onnx_quantized_resnet.py index bfbf16c00..29f6d4f26 100644 --- a/pybuda/test/quantized/test_onnx_quantized_resnet.py +++ b/forge/test/quantized/test_onnx_quantized_resnet.py @@ -9,22 +9,22 @@ import numpy as np import onnxruntime import torch 
-import pybuda -from pybuda import ( +import forge +from forge import ( OnnxModule, VerifyConfig, DataFormat, BackendDevice, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import _get_global_compiler_config def test_onnx_quantized_resnet(test_device): - # Skip test on blackhole until we have support for quantized models on blackhole pybuda#2700 + # Skip test on blackhole until we have support for quantized models on blackhole forge#2700 if test_device.arch == BackendDevice.Blackhole: pytest.skip("Blackhole does not support quantized models") @@ -39,7 +39,7 @@ def test_onnx_quantized_resnet(test_device): # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_ResNet50", onnx_model, save_path, @@ -51,11 +51,11 @@ def test_onnx_quantized_resnet(test_device): compiler_cfg.graph_solver_self_cut_type = "FastCut" compiler_cfg.default_df_override = DataFormat.Float32 - # os.environ["PYBUDA_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" - # os.environ["PYBUDA_REPRODUCE_SUBGRAPH"] = "1" - # os.environ["PYBUDA_REPRODUCE_SUBGRAPH_INPUT"] = "quantize_0.dc.buda_quantize.1" - # os.environ["PYBUDA_REPRODUCE_SUBGRAPH_OUTPUT"] = "conv2d_1.dc.matmul.11" + # os.environ["FORGE_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" + # os.environ["FORGE_REPRODUCE_SUBGRAPH"] = "1" + # os.environ["FORGE_REPRODUCE_SUBGRAPH_INPUT"] = "quantize_0.dc.buda_quantize.1" + # os.environ["FORGE_REPRODUCE_SUBGRAPH_OUTPUT"] = "conv2d_1.dc.matmul.11" # Sanity run input_shape = [] @@ -66,9 +66,9 @@ def test_onnx_quantized_resnet(test_device): # tti_path = "onnx_int8_resnet50_epoch_0.tti" # if not os.path.exists(tti_path): - # tt_module = pybuda_onnx_model - # device = pybuda.TTDevice( - # "tt0", module=tt_module,arch=pybuda.BackendDevice.Wormhole_B0, devtype=pybuda.BackendType.Silicon) + # tt_module = forge_onnx_model + # device = forge.TTDevice( + # "tt0", module=tt_module,arch=forge.BackendDevice.Wormhole_B0, devtype=forge.BackendType.Silicon) # tti_img = device.compile_to_image( # img_path=tti_path, # training=False, @@ -76,26 +76,26 @@ def test_onnx_quantized_resnet(test_device): # ) - # device_img: pybuda.TTDeviceImage = pybuda.TTDeviceImage.load_from_disk(tti_path) - # ttdevice = pybuda.TTDevice.load_image(img=device_img) + # device_img: forge.TTDeviceImage = forge.TTDeviceImage.load_from_disk(tti_path) + # ttdevice = forge.TTDevice.load_image(img=device_img) # inputs = [torch.randn(shape) for shape in input_shape] # ttdevice.push_to_inputs(*inputs) - # output_q = pybuda.run_inference(_sequential=True) + # output_q = forge.run_inference(_sequential=True) # output = output_q.get()[0].value().detach() - # golden_output = pybuda_onnx_model.forward(*inputs) + # golden_output = forge_onnx_model.forward(*inputs) # assert np.allclose(output, golden_output[0], atol=1e-3, rtol=1e-3) # Compile and verify verify_module( - pybuda_onnx_model, + forge_onnx_model, input_shape, verify_cfg=VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, devmode=test_device.devmode, test_kind=TestKind.INFERENCE, - # verify_pybuda_codegen_vs_framework=True, + # verify_forge_codegen_vs_framework=True, verify_all=True, ), ) diff --git 
a/pybuda/test/quantized/test_onnx_quantized_vit.py b/forge/test/quantized/test_onnx_quantized_vit.py similarity index 83% rename from pybuda/test/quantized/test_onnx_quantized_vit.py rename to forge/test/quantized/test_onnx_quantized_vit.py index 7601c77d8..ce770873b 100644 --- a/pybuda/test/quantized/test_onnx_quantized_vit.py +++ b/forge/test/quantized/test_onnx_quantized_vit.py @@ -5,19 +5,19 @@ import onnx import pytest -from pybuda import ( +from forge import ( OnnxModule, VerifyConfig, DataFormat, BackendDevice, BackendType, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import _get_global_compiler_config def test_int8_onnx_vit_calibrated(test_device): - # Skip test on blackhole until we have support for quantized models on blackhole pybuda#2700 + # Skip test on blackhole until we have support for quantized models on blackhole forge#2700 if test_device.arch == BackendDevice.Blackhole: pytest.skip("Blackhole does not support quantized models") @@ -32,7 +32,7 @@ def test_int8_onnx_vit_calibrated(test_device): # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "onnx_quantized_vit_calibrated", onnx_model, save_path, @@ -45,8 +45,8 @@ def test_int8_onnx_vit_calibrated(test_device): compiler_cfg.graph_solver_self_cut_type = "FastCut" compiler_cfg.default_df_override = DataFormat.Float32 - # os.environ["PYBUDA_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + # os.environ["FORGE_DISABLE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Sanity run input_shape = [] @@ -59,7 +59,7 @@ def test_int8_onnx_vit_calibrated(test_device): # Compile and verify pcc = 0.97 if test_device.devtype == BackendType.Silicon else 0.99 verify_module( - pybuda_onnx_model, + forge_onnx_model, input_shape, verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/random/__init__.py b/forge/test/random/__init__.py similarity index 100% rename from pybuda/test/random/__init__.py rename to forge/test/random/__init__.py diff --git a/pybuda/test/random/conftest.py b/forge/test/random/conftest.py similarity index 80% rename from pybuda/test/random/conftest.py rename to forge/test/random/conftest.py index 640b16f2b..4cc94ff11 100644 --- a/pybuda/test/random/conftest.py +++ b/forge/test/random/conftest.py @@ -4,25 +4,25 @@ import pytest import random import os -import pybuda +import forge test_rg = random.Random() seeds = [] @pytest.fixture(autouse=True) def run_test(test_index, random_seeds): - pybuda.config.set_configuration_options(balancer_policy="Random", use_interactive_placer=True) - os.environ["PYBUDA_BALANCER_RANDOM_POLICY_SEED"] = str(random_seeds[test_index]) + forge.config.set_configuration_options(balancer_policy="Random", use_interactive_placer=True) + os.environ["FORGE_BALANCER_RANDOM_POLICY_SEED"] = str(random_seeds[test_index]) rng = random.Random(random_seeds[test_index]) # Pick a random data format, bfp8 and up - df = rng.choice([pybuda.DataFormat.Bfp8_b, pybuda.DataFormat.Float16_b, pybuda.DataFormat.Float16, pybuda.DataFormat.Float32]) - pybuda.config.set_configuration_options(default_df_override=df) + df = rng.choice([forge.DataFormat.Bfp8_b, forge.DataFormat.Float16_b, forge.DataFormat.Float16, 
forge.DataFormat.Float32]) + forge.config.set_configuration_options(default_df_override=df) # Enable AMP amp = rng.choice([0, 1, 2]) - pybuda.config.set_configuration_options(amp_level=amp) + forge.config.set_configuration_options(amp_level=amp) yield diff --git a/pybuda/test/random/test_bert.py b/forge/test/random/test_bert.py similarity index 87% rename from pybuda/test/random/test_bert.py rename to forge/test/random/test_bert.py index cba5c4359..ac45b10eb 100644 --- a/pybuda/test/random/test_bert.py +++ b/forge/test/random/test_bert.py @@ -4,10 +4,10 @@ import torch import math -from pybuda.verify import verify_module, VerifyConfig, TestKind +from forge.verify import verify_module, VerifyConfig, TestKind from test.bert.modules import ( - PyBudaBertEncoder, + ForgeBertEncoder, get_bert_parameters ) @@ -20,7 +20,7 @@ def test_encoder(test_index, random_seeds, test_device): params = get_bert_parameters("encoder", hidden_dim=hidden_dim) config = { "num_heads": num_heads, "encoder_index": 0 } - mod = PyBudaBertEncoder("encoder", params, config) + mod = ForgeBertEncoder("encoder", params, config) params["reciprocal_of_sqrt_of_head_size_0"].set_value(torch.full((1, 1, 1, 1), 1/math.sqrt(num_heads))) # Ignore pcc errors, we don't care about them here - with random formats and AMP, it's not going to be particularly accurate diff --git a/pybuda/test/random/test_resnet.py b/forge/test/random/test_resnet.py similarity index 78% rename from pybuda/test/random/test_resnet.py rename to forge/test/random/test_resnet.py index 489b442b6..7980cdc0f 100644 --- a/pybuda/test/random/test_resnet.py +++ b/forge/test/random/test_resnet.py @@ -1,10 +1,10 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda -from pybuda.verify import verify_module, VerifyConfig, TestKind +import forge +from forge.verify import verify_module, VerifyConfig, TestKind -class ResnetBottleneck(pybuda.PyBudaModule): +class ResnetBottleneck(forge.ForgeModule): def __init__( self, name: str, @@ -18,7 +18,7 @@ def __init__( self.use_relu = True # right branch - self.conv_r0 = pybuda.op.nn.Conv2dModule( + self.conv_r0 = forge.op.nn.Conv2dModule( name=name + "_r0", in_channels=ch_hi, out_channels=ch_lo, @@ -29,7 +29,7 @@ def __init__( groups=1, bias=False, ) - self.conv_r1 = pybuda.op.nn.Conv2dModule( + self.conv_r1 = forge.op.nn.Conv2dModule( name=name + "_r1", in_channels=ch_lo, out_channels=ch_lo, @@ -40,7 +40,7 @@ def __init__( groups=1, bias=False, ) - self.conv_r2 = pybuda.op.nn.Conv2dModule( + self.conv_r2 = forge.op.nn.Conv2dModule( name=name + "_r2", in_channels=ch_lo, out_channels=ch_hi, @@ -56,15 +56,15 @@ def set_relu(self, use_relu: bool): self.use_relu = use_relu def forward(self, x): - r = pybuda.op.Relu(f"", self.conv_r0(x)) - r = pybuda.op.Relu(f"", self.conv_r1(r)) + r = forge.op.Relu(f"", self.conv_r0(x)) + r = forge.op.Relu(f"", self.conv_r1(r)) r = self.conv_r2(r) if self.use_skip: - r = pybuda.op.Add(f"", x, r) + r = forge.op.Add(f"", x, r) if self.use_relu: - r = pybuda.op.Relu(f"", r) + r = forge.op.Relu(f"", r) return r diff --git a/pybuda/test/random/test_three_ops.py b/forge/test/random/test_three_ops.py similarity index 93% rename from pybuda/test/random/test_three_ops.py rename to forge/test/random/test_three_ops.py index 2091dc801..58e55a8a0 100644 --- a/pybuda/test/random/test_three_ops.py +++ b/forge/test/random/test_three_ops.py @@ -4,9 +4,9 @@ # Randomize 3 ops in a fork-join setup ( A -> B, C, B -> C ) import torch -import pybuda +import forge import 
random -from pybuda.verify import verify_module, VerifyConfig, TestKind +from forge.verify import verify_module, VerifyConfig, TestKind class ThreeOpModel(torch.nn.Module): def __init__(self, rng, cols1, cols2): @@ -68,6 +68,6 @@ def test_three_ops(test_index, random_seeds, test_device): model = ThreeOpModel(rng, cols1, cols2) input_shape = (microbatch_size, rows, cols1) if model.op1 == "matmul" else (microbatch_size, cols1, rows, 32) - verify_module(pybuda.PyTorchModule(f"three_op_model_{test_index}", model), [input_shape], + verify_module(forge.PyTorchModule(f"three_op_model_{test_index}", model), [input_shape], VerifyConfig(test_kind=TestKind.INFERENCE, devtype=test_device.devtype, arch=test_device.arch)) diff --git a/pybuda/test/santacoder/README.md b/forge/test/santacoder/README.md similarity index 87% rename from pybuda/test/santacoder/README.md rename to forge/test/santacoder/README.md index 49f66bcdd..c4f98ead1 100644 --- a/pybuda/test/santacoder/README.md +++ b/forge/test/santacoder/README.md @@ -4,7 +4,7 @@ pip install -r requirements.txt -These should be the pybuda-head compatible torch and transformers packages, but for reproducibility the versions are also stored here. +These should be the forge-head compatible torch and transformers packages, but for reproducibility the versions are also stored here. ## Usage @@ -14,7 +14,7 @@ These should be the pybuda-head compatible torch and transformers packages, but $ python decode.py --device silicon --fuse --precision fp16 --amp-level 2 --arch wormhole_b0 - + print("Hello World!") @@ -24,11 +24,11 @@ The options: * `--precision fp16`: Santacoder is trained with CUDA AMP FP16, so FP16A is a good match on our hardware. With this setting the example output exactly matches CPU in FP32. * `--amp-level 2`: Uses bfp8_a for all matmuls and buffers and fp16 for fused ops, as used for BERT Large. With this setting the example output exactly matches CPU in FP32. - * `--fuse`: Use op fusion. Without this pybuda adds a lot of weird ops between the attention mask and its consumers. But we want fusion anyway, it's good. + * `--fuse`: Use op fusion. Without this forge adds a lot of weird ops between the attention mask and its consumers. But we want fusion anyway, it's good. 
## Loading and saving TTImages -*Note: this is implemented but doesn't work with current pybuda at time of writing.* +*Note: this is implemented but doesn't work with current forge at time of writing.* You can compile once and save a TTImage to save recompiling the model every run: diff --git a/pybuda/test/santacoder/configuration_gpt2_mq.py b/forge/test/santacoder/configuration_gpt2_mq.py similarity index 100% rename from pybuda/test/santacoder/configuration_gpt2_mq.py rename to forge/test/santacoder/configuration_gpt2_mq.py diff --git a/pybuda/test/santacoder/decode.py b/forge/test/santacoder/decode.py similarity index 91% rename from pybuda/test/santacoder/decode.py rename to forge/test/santacoder/decode.py index 9309566f1..7e54e08da 100644 --- a/pybuda/test/santacoder/decode.py +++ b/forge/test/santacoder/decode.py @@ -10,8 +10,8 @@ #from monkeypatch import monkeypatch import pytest -import pybuda -from pybuda.config import _get_global_compiler_config +import forge +from forge.config import _get_global_compiler_config # Pytest to run santacoder model @pytest.mark.parametrize("tokens", [10, 100]) @@ -22,16 +22,16 @@ @pytest.mark.parametrize("num_chips", ["chip1", "chip2", "chip32"]) @pytest.mark.parametrize("fuse", ["fuse", "no_fuse"]) def test_santacoder(tokens, device, arch, precision, amp_level, num_chips, fuse): - pybuda.config.set_configuration_options(default_df_override=pybuda.DataFormat.Float16) + forge.config.set_configuration_options(default_df_override=forge.DataFormat.Float16) compiler_cfg = _get_global_compiler_config() - compiler_cfg.amp_properties.append(pybuda._C.AMPNodeProperties( + compiler_cfg.amp_properties.append(forge._C.AMPNodeProperties( op_type="splice", - output_df=pybuda._C.DataFormat.Float16, - accumulate_df=pybuda._C.DataFormat.Float16, - math_fidelity=pybuda.MathFidelity.HiFi3, - intermediate_df=pybuda._C.DataFormat.Float16, - input_df= {0: [pybuda._C.DataFormat.Float16, True], 1: [pybuda._C.DataFormat.Float16, True]} + output_df=forge._C.DataFormat.Float16, + accumulate_df=forge._C.DataFormat.Float16, + math_fidelity=forge.MathFidelity.HiFi3, + intermediate_df=forge._C.DataFormat.Float16, + input_df= {0: [forge._C.DataFormat.Float16, True], 1: [forge._C.DataFormat.Float16, True]} )) @@ -59,7 +59,7 @@ def test_santacoder(tokens, device, arch, precision, amp_level, num_chips, fuse) # Construct parameters object parameters = { 'model': 'bigcode/santacoder', - 'kv_cache': 'pybuda/test/santacoder/kv_cache.pt', + 'kv_cache': 'forge/test/santacoder/kv_cache.pt', 'stop': '\n\n', 'num_tokens': tokens, 'output_at_end': False, @@ -152,7 +152,7 @@ def main(parameters): parser.add_argument('-n', '--num-tokens', type=int, default=10, help='Maximum number of tokens to generate') parser.add_argument('--output-at-end', action='store_true', help='Output at the end of generation instead of token by token') - parser.add_argument('-d', '--device', choices=['huggingface', 'pytorch', 'golden', 'silicon'], default='huggingface', help='huggingface: run using HF code only, pytorch: use our shim but run in PyTorch, golden/silicon: run via pybuda') + parser.add_argument('-d', '--device', choices=['huggingface', 'pytorch', 'golden', 'silicon'], default='huggingface', help='huggingface: run using HF code only, pytorch: use our shim but run in PyTorch, golden/silicon: run via forge') parser.add_argument('--arch', choices=['greyskull', 'wormhole_b0'], default='wormhole_b0', help='Architecture to use for silicon') parser.add_argument('--precision', choices=['fp32', 'fp16', 'bf16', 'fp8', 
'fp8b'], default='fp32', help='Precision to use for all silicon tensors') parser.add_argument('--amp-level', type=int, choices=[0, 1, 2], help='Automatic mixed precision level (0=off, 1=mixed b-formats, 2=mixed a-formats)') diff --git a/pybuda/test/santacoder/gpt2_mq.py b/forge/test/santacoder/gpt2_mq.py similarity index 100% rename from pybuda/test/santacoder/gpt2_mq.py rename to forge/test/santacoder/gpt2_mq.py diff --git a/pybuda/test/santacoder/kv_cache.pt b/forge/test/santacoder/kv_cache.pt similarity index 100% rename from pybuda/test/santacoder/kv_cache.pt rename to forge/test/santacoder/kv_cache.pt diff --git a/pybuda/test/santacoder/modeling_gpt2.py b/forge/test/santacoder/modeling_gpt2.py similarity index 99% rename from pybuda/test/santacoder/modeling_gpt2.py rename to forge/test/santacoder/modeling_gpt2.py index a7eb37dd7..aa28745ae 100644 --- a/pybuda/test/santacoder/modeling_gpt2.py +++ b/forge/test/santacoder/modeling_gpt2.py @@ -347,7 +347,7 @@ def __init__(self, intermediate_size, config): self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) assert "gelu" in config.activation_function, f"Only GELU activation function is supported, found {config.activation_function}" - config.activation_function = "gelu_new" # override as pybuda detects and replaces this variant + config.activation_function = "gelu_new" # override as forge detects and replaces this variant self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) diff --git a/pybuda/test/santacoder/prefill.py b/forge/test/santacoder/prefill.py similarity index 100% rename from pybuda/test/santacoder/prefill.py rename to forge/test/santacoder/prefill.py diff --git a/pybuda/test/santacoder/pybudify.py b/forge/test/santacoder/pybudify.py similarity index 62% rename from pybuda/test/santacoder/pybudify.py rename to forge/test/santacoder/pybudify.py index 786018f5f..f2d9e90a8 100644 --- a/pybuda/test/santacoder/pybudify.py +++ b/forge/test/santacoder/pybudify.py @@ -16,65 +16,65 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f self.tti_load = tti_load if device != 'pytorch': - # pybuda workarounds + # forge workarounds os.environ["GOLDEN_WORMHOLE_B0"] = "1" - os.environ["PYBUDA_ENABLE_BROADCAST_SPLITTING"] = "1" - #os.environ["PYBUDA_DISABLE_FORK_JOIN_BUF"] = "1" - os.environ["PYBUDA_DRAM_PICK_CAPACITY"] = "1" + os.environ["FORGE_ENABLE_BROADCAST_SPLITTING"] = "1" + #os.environ["FORGE_DISABLE_FORK_JOIN_BUF"] = "1" + os.environ["FORGE_DRAM_PICK_CAPACITY"] = "1" os.environ["WHA0_DISABLE_RELAY_BUFS"] = "1" - #os.environ["PYBUDA_FUSE_STOP_ON_RECIPROCAL"] = "1" - os.environ["PYBUDA_PLACER_SNAKE"] = "1" + #os.environ["FORGE_FUSE_STOP_ON_RECIPROCAL"] = "1" + os.environ["FORGE_PLACER_SNAKE"] = "1" os.environ["LOGGER_LEVEL"] = log_level os.environ["LOGURU_LEVEL"] = log_level - pybuda = self.pybuda = __import__('pybuda') # let us set log levels before importing pybuda + forge = self.forge = __import__('forge') # let us set log levels before importing forge if device == 'pytorch': pass else: - devtype = { 'golden' : pybuda.BackendType.Golden, - 'silicon': pybuda.BackendType.Silicon, + devtype = { 'golden' : forge.BackendType.Golden, + 'silicon': forge.BackendType.Silicon, }[device] - module = pybuda.PyTorchModule("pybudify_module", self.bound_module) + module = forge.PyTorchModule("pybudify_module", self.bound_module) if precision == 'fp32': - fallback = pybuda.DataFormat.Float32 + fallback = forge.DataFormat.Float32 elif 
precision == 'fp16': - fallback = pybuda.DataFormat.Float16 + fallback = forge.DataFormat.Float16 elif precision == 'bf16': - fallback = pybuda.DataFormat.Float16_b + fallback = forge.DataFormat.Float16_b elif precision == 'fp8': - fallback = pybuda.DataFormat.Bfp8 + fallback = forge.DataFormat.Bfp8 elif precision == 'fp8b': - fallback = pybuda.DataFormat.Bfp8_b + fallback = forge.DataFormat.Bfp8_b else: raise ValueError('Precision "%s" not implemented' % precision) # if manual_placement: -# manual_placer(pybuda.config, manual_placement) +# manual_placer(forge.config, manual_placement) OFFSET = 65 - 7 # = 58 for layer_num in range(24): k = OFFSET * layer_num - #pybuda.config.set_epoch_break([f'add_{17+k}', f'matmul_{16+k}']) - pybuda.config.add_schedule_constraint([f'pybudify_module.output_transpose_{9+k}_tm_nop', f'add_{14+k}_output_nop_0', f'concatenate_{35+k}.dc.concatenate.2']) + #forge.config.set_epoch_break([f'add_{17+k}', f'matmul_{16+k}']) + forge.config.add_schedule_constraint([f'pybudify_module.output_transpose_{9+k}_tm_nop', f'add_{14+k}_output_nop_0', f'concatenate_{35+k}.dc.concatenate.2']) perf_level = { None : None, 'none' : None, - 'light' : pybuda.PerfTraceLevel.LIGHT, - 'verbose': pybuda.PerfTraceLevel.VERBOSE }[perf] - pybuda.set_configuration_options(default_df_override=fallback, accumulate_df=fallback, amp_level=amp_level, enable_auto_fusing=fuse, performance_trace=perf_level, backend_opt_level=3) + 'light' : forge.PerfTraceLevel.LIGHT, + 'verbose': forge.PerfTraceLevel.VERBOSE }[perf] + forge.set_configuration_options(default_df_override=fallback, accumulate_df=fallback, amp_level=amp_level, enable_auto_fusing=fuse, performance_trace=perf_level, backend_opt_level=3) - pybuda_arch = { 'grayskull': pybuda.BackendDevice.Grayskull, - 'wormhole_b0': pybuda.BackendDevice.Wormhole_B0 }[arch] + forge_arch = { 'grayskull': forge.BackendDevice.Grayskull, + 'wormhole_b0': forge.BackendDevice.Wormhole_B0 }[arch] if tti_load is not None: - self.tt0 = pybuda.TTDevice.load_image(img_path=tti_load) + self.tt0 = forge.TTDevice.load_image(img_path=tti_load) else: - self.tt0 = pybuda.TTDevice('tt0', module=module, + self.tt0 = forge.TTDevice('tt0', module=module, fp32_fallback=fallback, - arch=pybuda_arch, + arch=forge_arch, devtype=devtype, chip_ids=list(range(num_chips))) @@ -82,10 +82,10 @@ def __init__(self, pt_module, device='silicon', arch='wormhole_b0', precision='f self.output_q = mp.Queue() if verify: - self.verify_cfg = pybuda.VerifyConfig(verify_all=True, + self.verify_cfg = forge.VerifyConfig(verify_all=True, verify_last=True, - devtype=pybuda.BackendType.Silicon, - arch=pybuda_arch,) + devtype=forge.BackendType.Silicon, + arch=forge_arch,) else: self.verify_cfg = None @@ -107,7 +107,7 @@ def __call__(self, *args, **kwargs): ) print(f'Saved image to {self.tti_save}') sys.exit(0) - self.pybuda.initialize_pipeline(training=False, + self.forge.initialize_pipeline(training=False, sample_inputs=args, output_queue=self.output_q, microbatch_count=self.micro_batch_size, @@ -117,9 +117,9 @@ def __call__(self, *args, **kwargs): self.initialized = True self.tt0.push_to_inputs(*args) - self.pybuda.run_forward(input_count=1, _sequential=True) + self.forge.run_forward(input_count=1, _sequential=True) ys = self.output_q.get() - outputs = tuple([ y.value().float() for y in ys if isinstance(y, self.pybuda.tensor.TensorFromPytorch)]) + outputs = tuple([ y.value().float() for y in ys if isinstance(y, self.forge.tensor.TensorFromPytorch)]) if len(outputs) == 1: outputs = outputs[0] if 
self.verify_cfg: diff --git a/pybuda/test/santacoder/requirements.txt b/forge/test/santacoder/requirements.txt similarity index 100% rename from pybuda/test/santacoder/requirements.txt rename to forge/test/santacoder/requirements.txt diff --git a/pybuda/test/serve/README.md b/forge/test/serve/README.md similarity index 92% rename from pybuda/test/serve/README.md rename to forge/test/serve/README.md index 90d80f734..bde531042 100644 --- a/pybuda/test/serve/README.md +++ b/forge/test/serve/README.md @@ -1,6 +1,6 @@ # Bert QA -This is a simple demo of Bert QA using PyBuda. +This is a simple demo of Bert QA using Forge. ## Env setup diff --git a/pybuda/test/serve/ask.py b/forge/test/serve/ask.py similarity index 100% rename from pybuda/test/serve/ask.py rename to forge/test/serve/ask.py diff --git a/pybuda/test/serve/qa_serve.py b/forge/test/serve/qa_serve.py similarity index 91% rename from pybuda/test/serve/qa_serve.py rename to forge/test/serve/qa_serve.py index 851a2e07f..4d8d20077 100644 --- a/pybuda/test/serve/qa_serve.py +++ b/forge/test/serve/qa_serve.py @@ -4,16 +4,16 @@ import ray from ray import serve import torch -import pybuda +import forge from transformers.pipelines import pipeline -import pybuda +import forge from loguru import logger import json -from pybuda import PyTorchModule +from forge import PyTorchModule from transformers import BertModel, BertConfig, BertForPreTraining, BertTokenizer, BertForQuestionAnswering -from pybuda.verify import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice +from forge.verify import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice from dataclasses import dataclass # Embedding wrapper that extends and passes attention mask through - to run on host @@ -69,8 +69,8 @@ def __init__(self): # Create pipeline, with encoders on TT - self.cpu0 = pybuda.CPUDevice("cpu0", module=PyTorchModule("bert_embeddings", EmbWrapper(model.bert))) - tt1 = pybuda.TTDevice("tt1", + self.cpu0 = forge.CPUDevice("cpu0", module=PyTorchModule("bert_embeddings", EmbWrapper(model.bert))) + tt1 = forge.TTDevice("tt1", devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("encoder", EncoderWrapper(model))) @@ -95,7 +95,7 @@ async def __call__(self, request): input = inputs[0] logger.info("Running on TT") self.cpu0.push_to_inputs(input["data"]) - output_q = pybuda.run_inference(_verify_cfg=VerifyConfig.disabled(), _sequential=True) + output_q = forge.run_inference(_verify_cfg=VerifyConfig.disabled(), _sequential=True) outputs = output_q.get() logits = outputs[0].value().detach() diff --git a/pybuda/test/test_api.py b/forge/test/test_api.py similarity index 79% rename from pybuda/test/test_api.py rename to forge/test/test_api.py index 031a6a12b..59a8dc553 100644 --- a/pybuda/test/test_api.py +++ b/forge/test/test_api.py @@ -9,9 +9,9 @@ import tensorflow as tf -import pybuda -import pybuda.config -from pybuda.tensor import to_buda_tensors, to_pt_tensors +import forge +import forge.config +from forge.tensor import to_buda_tensors, to_pt_tensors def test_torch(): class Add(nn.Module): @@ -27,7 +27,7 @@ def forward(self, x1, x2): golden = model(*inputs) - compiled_model = pybuda.compile(model, sample_inputs=[torch.rand(shape), torch.rand(shape)]) + compiled_model = forge.compile(model, sample_inputs=[torch.rand(shape), torch.rand(shape)]) output = compiled_model(*inputs) @@ -52,7 +52,7 @@ def call(self, x1, x2): golden = model(inputs_tf[0], inputs_tf[1]) golden = torch.tensor(golden.numpy()) - compiled_model = 
pybuda.compile(model, sample_inputs=[torch.rand(shape), torch.rand(shape)]) + compiled_model = forge.compile(model, sample_inputs=[torch.rand(shape), torch.rand(shape)]) output = compiled_model(*inputs) @@ -62,19 +62,19 @@ def call(self, x1, x2): raise ValueError("Output does not match the golden output") def test_forge(): - class ForgeAdd(pybuda.PyBudaModule): + class ForgeAdd(forge.ForgeModule): def __init__(self): - super().__init__("PyBudaTest") + super().__init__("ForgeTest") def forward(self, x, y): - return pybuda.op.Add("", x, y) + return forge.op.Add("", x, y) inputs = to_buda_tensors([torch.rand(1, 32, 32), torch.rand(1, 32, 32)]) model = ForgeAdd() golden = model(*inputs) - compiled_model = pybuda.compile(model, sample_inputs=inputs) + compiled_model = forge.compile(model, sample_inputs=inputs) # Issue #161 : currently, we expect inputs to be torch tensors inputs = to_pt_tensors(inputs) diff --git a/pybuda/test/test_bert.py b/forge/test/test_bert.py similarity index 93% rename from pybuda/test/test_bert.py rename to forge/test/test_bert.py index 18544f987..b9d34435b 100644 --- a/pybuda/test/test_bert.py +++ b/forge/test/test_bert.py @@ -10,22 +10,22 @@ import pytest import torch -from pybuda import ( +from forge import ( TTDevice, BackendType, Tensor, Parameter, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, SGD, ) from .bert.modules import ( - PyBudaBertMHA, - PyBudaFeedForward, - PyBudaBertEncoder, - PyBudaFFNorm, + ForgeBertMHA, + ForgeFeedForward, + ForgeBertEncoder, + ForgeFFNorm, get_bert_parameters ) @@ -41,7 +41,7 @@ def test_mha(training, recompute): "num_heads": 4, "encoder_index": 0, } - mod = PyBudaBertMHA("mha", params, config) + mod = ForgeBertMHA("mha", params, config) sgd_optimizer = SGD(learning_rate=0.5, parameters=mod.get_parameters()) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) @@ -57,7 +57,7 @@ def test_mha(training, recompute): params["reciprocal_of_sqrt_of_head_size_0"].set_value(torch.full((1, 1, 1, 1), 1/math.sqrt(4))) sgd_optimizer.set_optimizer_parameters() - pybuda_compile( + forge_compile( tt0, "bert_mha", encoder_input, @@ -84,7 +84,7 @@ def test_ff(training, recompute): "num_heads": 4, "encoder_index": 0, } - mod = PyBudaFeedForward("ff", params, config) + mod = ForgeFeedForward("ff", params, config) sgd_optimizer = SGD(learning_rate=0.5, parameters=mod.get_parameters()) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) @@ -98,7 +98,7 @@ def test_ff(training, recompute): sgd_optimizer.set_optimizer_parameters() # Adjust atol/rtol due to differences in gelu backwards implementation - pybuda_compile( + forge_compile( tt0, "bert_ff", encoder_input, @@ -121,7 +121,7 @@ def test_ffnorm(training, recompute): "num_heads": 4, "encoder_index": 0, } - mod = PyBudaFFNorm("ffnorm", params, config) + mod = ForgeFFNorm("ffnorm", params, config) sgd_optimizer = SGD(learning_rate=0.5, parameters=mod.get_parameters()) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) @@ -135,7 +135,7 @@ def test_ffnorm(training, recompute): sgd_optimizer.set_optimizer_parameters() # Adjust atol/rtol due to differences in gelu backwards implementation - pybuda_compile( + forge_compile( tt0, "bert_ffnorm", encoder_input, @@ -160,7 +160,7 @@ def test_encoder(training, recompute): "num_heads": 4, "encoder_index": 0, } - mod = PyBudaBertEncoder("encoder", params, config) + mod = ForgeBertEncoder("encoder", params, config) 
sgd_optimizer = SGD(learning_rate=0.5, parameters=mod.get_parameters()) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) @@ -176,7 +176,7 @@ def test_encoder(training, recompute): params["reciprocal_of_sqrt_of_head_size_0"].set_value(torch.full((1, 1, 1, 1), 1/math.sqrt(4))) sgd_optimizer.set_optimizer_parameters() - pybuda_compile( + forge_compile( tt0, "bert_encoder", encoder_input, diff --git a/pybuda/test/test_broadcast_splits.py b/forge/test/test_broadcast_splits.py similarity index 78% rename from pybuda/test/test_broadcast_splits.py rename to forge/test/test_broadcast_splits.py index f3b1f6871..fa842342f 100644 --- a/pybuda/test/test_broadcast_splits.py +++ b/forge/test/test_broadcast_splits.py @@ -8,21 +8,21 @@ import pytest import torch -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, BackendType, Tensor, Parameter, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, SGD, - PyBudaModule, + ForgeModule, ) -class BroadcastSplitModule(PyBudaModule): +class BroadcastSplitModule(ForgeModule): """ BroadcastSplitModule """ @@ -31,11 +31,11 @@ def __init__(self, name): super().__init__(name) def forward(self, op1, op2, op3): - bc1 = pybuda.op.Broadcast("bc1", op2, 3, 32) - bc2 = pybuda.op.Broadcast("bc2", bc1, 2, 32) + bc1 = forge.op.Broadcast("bc1", op2, 3, 32) + bc2 = forge.op.Broadcast("bc2", bc1, 2, 32) # implicit Z broadcast in add - add1 = pybuda.op.Add("add1", op1, bc2) - mm1 = pybuda.op.Matmul("mm1", add1, op3) + add1 = forge.op.Add("add1", op1, bc2) + mm1 = forge.op.Matmul("mm1", add1, op3) return mm1 @@ -54,7 +54,7 @@ def test_broadcast_split(mode): tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "broadcast_split", op1, diff --git a/pybuda/test/test_consteval.py b/forge/test/test_consteval.py similarity index 69% rename from pybuda/test/test_consteval.py rename to forge/test/test_consteval.py index f1d4c0453..c7380285d 100644 --- a/pybuda/test/test_consteval.py +++ b/forge/test/test_consteval.py @@ -8,8 +8,8 @@ import torch -import pybuda -from pybuda import ( +import forge +from forge import ( Tensor, Parameter, CompilerConfig, @@ -37,8 +37,8 @@ def test_consteval_simple(test_kind, test_device, shapes): ), ) def consteval_simple(x, param=None): - param = pybuda.op.Reshape("reshape0", param, shapes[0]) - return pybuda.op.Multiply("mul0", x, param) + param = forge.op.Reshape("reshape0", param, shapes[0]) + return forge.op.Multiply("mul0", x, param) x = Tensor.create_from_torch( torch.rand(*shapes[0], requires_grad=test_kind.is_training()) @@ -59,13 +59,13 @@ def test_consteval_param_chain(test_kind, test_device): ), ) def consteval_param_chain(x, param=None): - param = pybuda.op.Reshape("reshape0", param, (1, 10, 8, 8)) - param = pybuda.op.Transpose("transpose0", param, 1, 3) - param = pybuda.op.Transpose("transpose1", param, 2, 3) - param = pybuda.op.Reshape("reshape1", param, (1, 4, 16, 10)) - param = pybuda.op.Transpose("transpose2", param, 2, 3) - param = pybuda.op.Reshape("reshape2", param, (1, 2, 10, 32)) - return pybuda.op.Multiply("mul0", x, param) + param = forge.op.Reshape("reshape0", param, (1, 10, 8, 8)) + param = forge.op.Transpose("transpose0", param, 1, 3) + param = forge.op.Transpose("transpose1", param, 2, 3) + param = forge.op.Reshape("reshape1", param, (1, 4, 16, 10)) + param = forge.op.Transpose("transpose2", param, 2, 3) + param = forge.op.Reshape("reshape2", param, (1, 2, 10, 32)) + return 
forge.op.Multiply("mul0", x, param) x = Tensor.create_from_torch( torch.rand((1, 2, 10, 32), requires_grad=test_kind.is_training()) @@ -86,9 +86,9 @@ def test_consteval_partial(test_kind, test_device): ), ) def consteval_partial(x, param=None): - param = pybuda.op.Transpose("transpose", param, 2, 3) - param = pybuda.op.Exp("exp", param) - return pybuda.op.Multiply("mul0", x, param) + param = forge.op.Transpose("transpose", param, 2, 3) + param = forge.op.Exp("exp", param) + return forge.op.Multiply("mul0", x, param) x = Tensor.create_from_torch( torch.rand((1, 1, 256, 32), requires_grad=test_kind.is_training()) @@ -109,11 +109,11 @@ def test_consteval_fork(test_kind, test_device): ), ) def consteval_fork(x, const=None): - a = pybuda.op.Transpose("transpose", const, 2, 3) - b = pybuda.op.Exp("exp", a) - c = pybuda.op.Log("log", a) - d = pybuda.op.Multiply("mul0", x, b) - e = pybuda.op.Multiply("mul1", d, c) + a = forge.op.Transpose("transpose", const, 2, 3) + b = forge.op.Exp("exp", a) + c = forge.op.Log("log", a) + d = forge.op.Multiply("mul0", x, b) + e = forge.op.Multiply("mul1", d, c) return e x = Tensor.create_from_torch( @@ -133,13 +133,13 @@ def test_consteval_binary(test_kind, test_device): ), ) def consteval_binary(x, a=None, b=None): - a = pybuda.op.Transpose("ta", a, 2, 3) - a = pybuda.op.Exp("expa", a) - b = pybuda.op.Transpose("tb", b, 2, 3) - b = pybuda.op.Log("logb", b) - c = pybuda.op.Multiply("mulc", a, b) - c = pybuda.op.Transpose("tc", c, 2, 3) - return pybuda.op.Multiply("mul0", x, c) + a = forge.op.Transpose("ta", a, 2, 3) + a = forge.op.Exp("expa", a) + b = forge.op.Transpose("tb", b, 2, 3) + b = forge.op.Log("logb", b) + c = forge.op.Multiply("mulc", a, b) + c = forge.op.Transpose("tc", c, 2, 3) + return forge.op.Multiply("mul0", x, c) x = Tensor.create_from_torch( torch.rand((1, 1, 256, 32), requires_grad=test_kind.is_training()) @@ -163,17 +163,17 @@ def test_consteval_binary_fork(test_kind, test_device): ), ) def consteval_binary_fork(x, a=None, b=None): - a = pybuda.op.Transpose("ta", a, 2, 3) - a = pybuda.op.Exp("expa", a) - b = pybuda.op.Transpose("tb", b, 2, 3) - b = pybuda.op.Log("logb", b) - c = pybuda.op.Multiply("mulc", a, b) - d = pybuda.op.Transpose("tc0", c, 2, 3) - e = pybuda.op.Transpose("tc1", c, 2, 3) - e = pybuda.op.Exp("exp0", e) - f = pybuda.op.Multiply("mul0", x, d) - g = pybuda.op.Multiply("mul1", x, e) - return pybuda.op.Multiply("mul2", f, g) + a = forge.op.Transpose("ta", a, 2, 3) + a = forge.op.Exp("expa", a) + b = forge.op.Transpose("tb", b, 2, 3) + b = forge.op.Log("logb", b) + c = forge.op.Multiply("mulc", a, b) + d = forge.op.Transpose("tc0", c, 2, 3) + e = forge.op.Transpose("tc1", c, 2, 3) + e = forge.op.Exp("exp0", e) + f = forge.op.Multiply("mul0", x, d) + g = forge.op.Multiply("mul1", x, e) + return forge.op.Multiply("mul2", f, g) x = Tensor.create_from_torch( torch.rand((1, 1, 256, 32), requires_grad=test_kind.is_training()) diff --git a/pybuda/test/test_constraints.py b/forge/test/test_constraints.py similarity index 70% rename from pybuda/test/test_constraints.py rename to forge/test/test_constraints.py index b78b990ff..58c4d8f3f 100644 --- a/pybuda/test/test_constraints.py +++ b/forge/test/test_constraints.py @@ -5,8 +5,8 @@ import torch -import pybuda -from pybuda import ( +import forge +from forge import ( Tensor, CompilerConfig, CompileDepth, @@ -20,10 +20,10 @@ def test_max_input_grid_fork(): verify_cfg=VerifyConfig(run_golden=False) ) def max_input_grid_fork(a, b): - b = pybuda.op.Repeat("repeat", b, [1, 1, 3, 6]) - return 
pybuda.op.Add("add0", a, b) + b = forge.op.Repeat("repeat", b, [1, 1, 3, 6]) + return forge.op.Add("add0", a, b) - pybuda.config.override_op_size("add0", (3, 6)) + forge.config.override_op_size("add0", (3, 6)) a = Tensor.create_from_torch(torch.rand((1, 1, 96, 192))) b = Tensor.create_from_torch(torch.rand((1, 1, 32, 32))) @@ -40,7 +40,7 @@ def test_max_input_op_fork(): ) def max_input_op_fork(a, b): for i in range(max_forks + 1): - b = pybuda.op.Add(f"add{i}", a, b) + b = forge.op.Add(f"add{i}", a, b) return b a = Tensor.create_from_torch(torch.rand((1, 1, 96, 192))) @@ -54,12 +54,12 @@ def test_max_prologue_op_fork(): verify_cfg=VerifyConfig(run_golden=False) ) def max_prologue_op_fork(a, const=None): - return pybuda.op.Multiply(f"op0", a, const) + return forge.op.Multiply(f"op0", a, const) rt = 4 ct = 6 assert rt * ct > max_forks - pybuda.config.override_op_size("op0", (rt, ct)) + forge.config.override_op_size("op0", (rt, ct)) a = Tensor.create_from_torch(torch.rand((1, 1, rt*32, ct*32))) c = Tensor.create_from_torch(torch.rand((1, 1, 1, 1)), constant=True) max_prologue_op_fork(a, const=c) @@ -72,13 +72,13 @@ def test_max_output_op_fork(): ) def max_output_op_fork(a, b): outputs = [] - b = pybuda.op.Add(f"add_fork", a, b) + b = forge.op.Add(f"add_fork", a, b) for i in range(max_forks + 1): - outputs.append(pybuda.op.Exp(f"exp{i}", b)) + outputs.append(forge.op.Exp(f"exp{i}", b)) return outputs for i in range(max_forks + 1): - pybuda.config.set_epoch_break(f"exp{i}") + forge.config.set_epoch_break(f"exp{i}") a = Tensor.create_from_torch(torch.rand((1, 1, 96, 192))) b = Tensor.create_from_torch(torch.rand((1, 1, 96, 192))) @@ -90,14 +90,14 @@ def test_max_fork_streams(): verify_cfg=VerifyConfig(run_golden=False) ) def max_fork_streams(a, b): - c = pybuda.op.Add("add0", a, b) - d = pybuda.op.Add("add1", a, b) - e = pybuda.op.Add("add2", c, d) + c = forge.op.Add("add0", a, b) + d = forge.op.Add("add1", a, b) + e = forge.op.Add("add2", c, d) return e - pybuda.config.override_op_size("add0", (1, 1)) - pybuda.config.override_op_size("add1", (2, 4)) - pybuda.config.override_op_size("add2", (4, 8)) + forge.config.override_op_size("add0", (1, 1)) + forge.config.override_op_size("add1", (2, 4)) + forge.config.override_op_size("add2", (4, 8)) a = Tensor.create_from_torch(torch.rand((1, 1, 128, 256))) b = Tensor.create_from_torch(torch.rand((1, 1, 128, 256))) @@ -108,18 +108,18 @@ def max_fork_streams(a, b): def test_stream_stacking_rotate(): - pybuda.config.set_configuration_options(balancer_policy="MaximizeTMinimizeGrid") + forge.config.set_configuration_options(balancer_policy="MaximizeTMinimizeGrid") @compile( verify_cfg=VerifyConfig(run_golden=False, run_net2pipe=True) ) def stream_stacking_rotate(a, b, c): - x = pybuda.op.Matmul("mm0", a, b) + x = forge.op.Matmul("mm0", a, b) - c = pybuda.op.HSlice("h0", c, 12) - x = pybuda.op.Transpose("t1", x, 2, 3) - x = pybuda.op.VSlice("v1", x, 12) - r = pybuda.op.Matmul("mm1", c, x) + c = forge.op.HSlice("h0", c, 12) + x = forge.op.Transpose("t1", x, 2, 3) + x = forge.op.VSlice("v1", x, 12) + r = forge.op.Matmul("mm1", c, x) return r a = Tensor.create_from_torch(torch.rand((1, 1, 384, 768))) @@ -129,16 +129,16 @@ def stream_stacking_rotate(a, b, c): def test_stream_stacking_transpose(): - pybuda.config.set_configuration_options(balancer_policy="MaximizeTMinimizeGrid") + forge.config.set_configuration_options(balancer_policy="MaximizeTMinimizeGrid") @compile( verify_cfg=VerifyConfig(run_golden=False, run_net2pipe=True) ) def stream_stacking_transpose(a, 
b, c): - b = pybuda.op.Matmul("mm0", b, c) - b = pybuda.op.Transpose("transpose0", b, 2, 3) - b = pybuda.op.VStack("vstack0", b, 512) - r = pybuda.op.Matmul("mm1", a, b) + b = forge.op.Matmul("mm0", b, c) + b = forge.op.Transpose("transpose0", b, 2, 3) + b = forge.op.VStack("vstack0", b, 512) + r = forge.op.Matmul("mm1", a, b) return r a = Tensor.create_from_torch(torch.rand((1, 1, 128, 32*512))) @@ -148,15 +148,15 @@ def stream_stacking_transpose(a, b, c): def test_r_stream_mm_rhs(): - pybuda.config.set_configuration_options(balancer_policy="MaximizeTMinimizeGrid") - pybuda.config._get_global_compiler_config().insert_queues = [("exp0", "mm1", 1)] + forge.config.set_configuration_options(balancer_policy="MaximizeTMinimizeGrid") + forge.config._get_global_compiler_config().insert_queues = [("exp0", "mm1", 1)] @compile( verify_cfg=VerifyConfig(run_golden=False, run_net2pipe=True) ) def r_stream_mm_rhs(a, b): - b = pybuda.op.Exp("exp0", b) - r = pybuda.op.Matmul("mm1", a, b) + b = forge.op.Exp("exp0", b) + r = forge.op.Matmul("mm1", a, b) return r a = Tensor.create_from_torch(torch.rand((1, 1, 128, 128))) @@ -167,14 +167,14 @@ def r_stream_mm_rhs(a, b): def test_queue_fork_streams(): @compile() def queue_fork_streams(a, b, w=None): - c = pybuda.op.Add("add0", a, b) - d = pybuda.op.Matmul("mm0", c, w) + c = forge.op.Add("add0", a, b) + d = forge.op.Matmul("mm0", c, w) return d grid = (7, 8) - pybuda.config.override_op_size("add0", grid) - pybuda.config.set_epoch_break("mm0") - pybuda.config.override_op_size("mm0", (1, 1)) + forge.config.override_op_size("add0", grid) + forge.config.set_epoch_break("mm0") + forge.config.override_op_size("mm0", (1, 1)) a = Tensor.create_from_torch(torch.rand((1, 1, 32*grid[0], 32*grid[1]))) b = Tensor.create_from_torch(torch.rand((1, 1, 32*grid[0], 32*grid[1]))) @@ -194,15 +194,15 @@ def test_aggregate_queue_fork_streams(): def aggregate_queue_fork_streams(a, b): outs = [] for i in range(num_adds): - a = pybuda.op.Add(f"add{i}", a, b) + a = forge.op.Add(f"add{i}", a, b) outs.append(a) - return pybuda.op.Concatenate("concat0", *outs, axis=-1) + return forge.op.Concatenate("concat0", *outs, axis=-1) for i in range(num_adds): - pybuda.config.override_op_size(f"add{i}", grid) + forge.config.override_op_size(f"add{i}", grid) - pybuda.config.set_epoch_break("concat0.dc.concatenate.0") - pybuda.config.override_op_size("concat0.dc.concatenate.0", (1, 1)) + forge.config.set_epoch_break("concat0.dc.concatenate.0") + forge.config.override_op_size("concat0.dc.concatenate.0", (1, 1)) a = Tensor.create_from_torch(torch.rand((1, 1, 32*grid[0], 32*grid[1]))) b = Tensor.create_from_torch(torch.rand((1, 1, 32*grid[0], 32*grid[1]))) @@ -220,8 +220,8 @@ def test_epoch_to_epoch_disjoint(): @compile(compiler_cfg=compiler_cfg) def epoch_to_epoch_disjoint(a, b, w=None): - c = pybuda.op.Matmul("mm0", a, w) - return pybuda.op.Multiply("mul0", b, c) + c = forge.op.Matmul("mm0", a, w) + return forge.op.Multiply("mul0", b, c) a = Tensor.create_from_torch(torch.rand((1, 1, 32*grid[0], 32*grid[1]), requires_grad=True)) w = Tensor.create_from_torch(torch.rand((1, 1, 32*grid[1], 32*grid[0]), requires_grad=True)) diff --git a/pybuda/test/test_conv2d.py b/forge/test/test_conv2d.py similarity index 89% rename from pybuda/test/test_conv2d.py rename to forge/test/test_conv2d.py index 8d93cf030..b9ca845d7 100644 --- a/pybuda/test/test_conv2d.py +++ b/forge/test/test_conv2d.py @@ -4,34 +4,34 @@ # # Some basic bring-up tests of tracing functionality # -from pybuda._C.balancer import OpOverride -from 
pybuda.verify.config import TestKind -from pybuda._C import DataFormat, MathFidelity +from forge._C.balancer import OpOverride +from forge.verify.config import TestKind +from forge._C import DataFormat, MathFidelity import pytest import torch -import pybuda +import forge import os import random -from pybuda import ( - PyBudaModule, +from forge import ( + ForgeModule, TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, CompileDepth, VerifyConfig, ) -from pybuda._C.backend_api import BackendType -from pybuda._C.graph import RuntimeTensorTransformType -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.op.eval import compare_tensor_to_golden, does_prestriding_improve_perf -from pybuda.op.nn import Conv2dModule, ConvTranspose2dModule, MaxPool2dModule, AvgPool2dModule -from pybuda.utils import align_up_tile, round_up_div +from forge._C.backend_api import BackendType +from forge._C.graph import RuntimeTensorTransformType +from forge.config import CompileDepth, _get_global_compiler_config +from forge.op.eval import compare_tensor_to_golden, does_prestriding_improve_perf +from forge.op.nn import Conv2dModule, ConvTranspose2dModule, MaxPool2dModule, AvgPool2dModule +from forge.utils import align_up_tile, round_up_div from .common import run from .module_utils import Conv2dTModule -from pybuda.op.eval.sparse_utils import calculate_conv2d_output_dimensions, calculate_conv2d_transpose_output_dimensions, conv2d_padding_to_canonical, calculate_conv2d_prestride_weights_and_padding, can_conv2d_prestride +from forge.op.eval.sparse_utils import calculate_conv2d_output_dimensions, calculate_conv2d_transpose_output_dimensions, conv2d_padding_to_canonical, calculate_conv2d_prestride_weights_and_padding, can_conv2d_prestride # TODO: test grouped convs (not depthwise) @@ -95,16 +95,16 @@ def test_conv2d( relative_atol = 0.3 if test_kind.is_training() and test_device.devtype == BackendType.Silicon else 0.1 pcc = 0.96 if test_device.devtype == BackendType.Silicon else 0.99 - pybuda.config.set_configuration_options(enable_conv_prestride=False) + forge.config.set_configuration_options(enable_conv_prestride=False) try: - pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) except RuntimeError as e: if ( "Compile failed for TTDevice" in str(e) or "Could not satisfy all constraints for edge" in str(e) ): - pytest.xfail("tenstorrent/pybuda#185") + pytest.xfail("tenstorrent/forge#185") raise @@ -170,13 +170,13 @@ def test_convtranspose2d( relative_atol = 0.3 if test_kind.is_training() and test_device.devtype == BackendType.Silicon else 0.1 pcc = 0.96 if test_device.devtype == BackendType.Silicon else 0.99 - pybuda.config.set_configuration_options(enable_conv_prestride=False) + forge.config.set_configuration_options(enable_conv_prestride=False) try: - pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) except RuntimeError as e: if "Compile failed for TTDevice" in str(e): - pytest.xfail("tenstorrent/pybuda#185") + pytest.xfail("tenstorrent/forge#185") raise @@ -185,7 +185,7 
@@ def test_convtranspose2d_data_mismatch_repro(test_device): # Fracturing the conv causes the data mismatch # Forcing the fracturing here, so the mismatch repros with small input - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" return test_convtranspose2d( test_kind=TestKind.INFERENCE, @@ -262,7 +262,7 @@ def test_conv2d_t_streaming( compiler_cfg.balancer_policy = "MaximizeTMinimizeGrid" compiler_cfg.enable_conv_prestride = False - pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) @@ -324,13 +324,13 @@ def test_conv2d_fractured( compiler_cfg.balancer_policy = "MaximizeTMinimizeGrid" compiler_cfg.enable_conv_prestride = False - pybuda.config.override_fracture_factor("conv2d_fractured.conv.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", kernel_size[0]) + forge.config.override_fracture_factor("conv2d_fractured.conv.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", kernel_size[0]) - os.environ["PYBUDA_FORCE_ALLOW_FRACTURING"] = "1" + os.environ["FORGE_FORCE_ALLOW_FRACTURING"] = "1" # TODO: Figure out how to (cleanly) confirm that fracturing happened (need to get grid_shape of conv op) - devices = pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + devices = forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) - del os.environ["PYBUDA_FORCE_ALLOW_FRACTURING"] + del os.environ["FORGE_FORCE_ALLOW_FRACTURING"] @pytest.mark.parametrize("in_channels", [3]) @@ -385,7 +385,7 @@ def test_conv2d_multi_op_fractured( ) # This makes the conv fracture into multiple ops - pybuda.config.override_multi_op_fracture_factor("conv2d_multi_op_fractured.conv", kernel_size[0]) + forge.config.override_multi_op_fracture_factor("conv2d_multi_op_fractured.conv", kernel_size[0]) relative_atol = 0.3 if test_kind.is_training() and test_device.devtype == BackendType.Silicon else 0.1 pcc = 0.96 if test_device.devtype == BackendType.Silicon else 0.99 @@ -394,15 +394,15 @@ def test_conv2d_multi_op_fractured( compiler_cfg.balancer_policy = "MaximizeTMinimizeGrid" compiler_cfg.enable_conv_prestride = False - os.environ["PYBUDA_FORCE_DISALLOW_FRACTURING"] = "1" # Disables "within-op" fracturing + os.environ["FORGE_FORCE_DISALLOW_FRACTURING"] = "1" # Disables "within-op" fracturing - devices = pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + devices = forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) # Confirm conv has fractured into multiple ops assert len(devices[0]._compiled_graph_state.ordered_constant_node_names) == kernel_size[0] * 2, f"Expected {kernel_size[0] * 2} constant nodes (2 per each sparse matmul), got {len(devices[0]._compiled_graph_state.ordered_constant_node_names)}" assert len(devices[0]._compiled_graph_state.ordered_parameter_node_names), f"Expected {kernel_size[0]} parameter nodes (1 per each sparse matmul), got {len(devices[0]._compiled_graph_state.ordered_parameter_node_names)}" - del 
os.environ["PYBUDA_FORCE_DISALLOW_FRACTURING"] + del os.environ["FORGE_FORCE_DISALLOW_FRACTURING"] @pytest.mark.parametrize("in_channels", [3, 4, 6, 7]) @@ -464,9 +464,9 @@ def test_conv2d_prestrided( compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "MaximizeTMinimizeGrid" - os.environ["PYBUDA_FORCE_DISALLOW_FRACTURING"] = "1" + os.environ["FORGE_FORCE_DISALLOW_FRACTURING"] = "1" - devices = pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + devices = forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) # Confirm the conv was prestrided @@ -474,7 +474,7 @@ def test_conv2d_prestrided( transforms = devices[0]._compiled_graph_state.ordered_input_runtime_tensor_transforms assert len(transforms) == 1 assert transforms[0].type == RuntimeTensorTransformType.Prestride - del os.environ["PYBUDA_FORCE_DISALLOW_FRACTURING"] + del os.environ["FORGE_FORCE_DISALLOW_FRACTURING"] @pytest.mark.parametrize("in_channels", [3]) @@ -541,9 +541,9 @@ def test_conv2d_resnet_prestrided_fractured( compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "MaximizeTMinimizeGrid" - pybuda.config.override_fracture_factor("conv2d_resnet_prestrided_fractured.conv.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", 8) + forge.config.override_fracture_factor("conv2d_resnet_prestrided_fractured.conv.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", 8) - devices = pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + devices = forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) # Confirm the conv was prestrided @@ -618,10 +618,10 @@ def test_conv2d_fractured_multi_c( compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "MaximizeTMinimizeGrid" compiler_cfg.enable_conv_prestride = False - # pybuda.config.override_fracture_factor("conv2d_fractured_multi_c.conv.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", fracture_factor) - # pybuda.config.override_op_size("?", (?, fracture_factor * 2)) + # forge.config.override_fracture_factor("conv2d_fractured_multi_c.conv.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", fracture_factor) + # forge.config.override_op_size("?", (?, fracture_factor * 2)) - pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) @@ -664,7 +664,7 @@ def test_simple_convnet( # Can't have the kernel be larger than the input itself pytest.skip() - class ConvNet(PyBudaModule): + class ConvNet(ForgeModule): def __init__(self, name, **kwargs): super().__init__(name) @@ -700,10 +700,10 @@ def forward(self, activations): bias=bias, ) - pybuda.config.set_configuration_options(enable_conv_prestride=False) + forge.config.set_configuration_options(enable_conv_prestride=False) relative_atol = 0.4 if test_kind.is_training() and test_device.devtype == BackendType.Silicon else 0.15 pcc = 0.94 if test_device.devtype == BackendType.Silicon else 0.99 - pybuda.verify.verify_module(mod, [(1, in_channels, 
original_shape[0], original_shape[1])], + forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) @@ -780,7 +780,7 @@ def f(a, b): # relative_atol = 0.9 if test_device.devtype == BackendType.Silicon else 0.1 pcc = 0.90 if test_device.devtype == BackendType.Silicon else 0.99 - pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc, golden_compare_callback=f, fp32_fallback=df), inputs=[activations]) @@ -809,8 +809,8 @@ def test_max_pool2d_stream_through_queue(test_device): stride = 1 dilation = 1 ceil_mode = False - pybuda.config.override_t_stream_shape("max_pool2d.dc.sparse_matmul.5.dc.sparse_matmul.1.lc2", (5, 1)) - pybuda.config.override_t_stream_shape("max_pool2d.dc.reduce_max.6", (1, 1)) + forge.config.override_t_stream_shape("max_pool2d.dc.sparse_matmul.5.dc.sparse_matmul.1.lc2", (5, 1)) + forge.config.override_t_stream_shape("max_pool2d.dc.reduce_max.6", (1, 1)) test_max_pool2d(TestKind.INFERENCE, test_device, in_channels, kernel_size, original_shape, stride, dilation, ceil_mode) @@ -825,9 +825,9 @@ def test_max_pool2d_stream_through_queue_1x1(test_device, producer_stream_factor dilation = 1 ceil_mode = False padding = [0, 0, 0, 0] - os.environ["PYBUDA_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" - pybuda.config.override_t_stream_shape("max_pool2d.dc.sparse_matmul.5.dc.sparse_matmul.1.lc2", (producer_stream_factor, 1)) - pybuda.config.override_t_stream_shape("max_pool2d.dc.reduce_max.6", (2, 1)) + os.environ["FORGE_OVERRIDE_DEVICE_YAML"] = "wormhole_b0_1x1.yaml" + forge.config.override_t_stream_shape("max_pool2d.dc.sparse_matmul.5.dc.sparse_matmul.1.lc2", (producer_stream_factor, 1)) + forge.config.override_t_stream_shape("max_pool2d.dc.reduce_max.6", (2, 1)) test_max_pool2d(TestKind.INFERENCE, test_device, in_channels, kernel_size, original_shape, stride, dilation, ceil_mode, padding=padding) @@ -867,7 +867,7 @@ def test_avg_pool2d( relative_atol = 0.3 if test_kind.is_training() and test_device.devtype == BackendType.Silicon else 0.1 pcc = 0.96 if test_device.devtype == BackendType.Silicon else 0.99 - pybuda.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], + forge.verify.verify_module(mod, [(1, in_channels, original_shape[0], original_shape[1])], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc)) @@ -913,7 +913,7 @@ def test_conv2d_stream_through_queue(test_device): def test_conv2d_vgg_head(test_device): compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "MaximizeTMinimizeGrid" - pybuda.config.override_t_stream_shape("conv2d.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (28, 1)) + forge.config.override_t_stream_shape("conv2d.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (28, 1)) return test_conv2d( TestKind.INFERENCE, test_device, diff --git a/pybuda/test/test_conv2d_perf.py b/forge/test/test_conv2d_perf.py similarity index 57% rename from pybuda/test/test_conv2d_perf.py rename to forge/test/test_conv2d_perf.py index a38c297dc..26a7a8dd5 100644 --- a/pybuda/test/test_conv2d_perf.py +++ b/forge/test/test_conv2d_perf.py @@ -3,9 +3,9 @@ # 
SPDX-License-Identifier: Apache-2.0 import pytest -import pybuda -from pybuda.op.nn import Conv2dModule, MaxPool2dModule -from pybuda.verify import VerifyConfig +import forge +from forge.op.nn import Conv2dModule, MaxPool2dModule +from forge.verify import VerifyConfig def get_relaxed_atol_pcc(test_kind, test_device, microbatch_size = 1): """ @@ -43,16 +43,16 @@ def test_single_conv(test_kind, test_device, kernel, stride, input_dim, input_c, ) relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - pybuda.config._get_global_compiler_config().performance_trace = pybuda.config.PerfTraceLevel.VERBOSE - pybuda.config.override_op_size("conv2d_perf.dc.buffer.12", (1, 9)) - #pybuda.config.override_op_size("conv2d_perf.dc.matmul.9", (8, 1)) - #pybuda.config.override_t_stream_shape("conv2d_perf.dc.matmul.9", (1, 1)) - pybuda.config.override_op_size("conv2d_perf.dc.sparse_matmul.18.lc2", (8, 1)) - pybuda.config.override_t_stream_dir("conv2d_perf.dc.sparse_matmul.18.lc2", "r") - pybuda.config.override_t_stream_shape("conv2d_perf.dc.sparse_matmul.18.lc2", (16, 1)) - #pybuda.config.override_t_stream_shape("conv2d_perf.dc.sparse_matmul.18.lc2", (1, 1)) - pybuda.verify.verify_module(mod, [(16, input_c, input_dim, input_dim)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc, fp32_fallback=pybuda.DataFormat.Bfp8_b)) + forge.config._get_global_compiler_config().performance_trace = forge.config.PerfTraceLevel.VERBOSE + forge.config.override_op_size("conv2d_perf.dc.buffer.12", (1, 9)) + #forge.config.override_op_size("conv2d_perf.dc.matmul.9", (8, 1)) + #forge.config.override_t_stream_shape("conv2d_perf.dc.matmul.9", (1, 1)) + forge.config.override_op_size("conv2d_perf.dc.sparse_matmul.18.lc2", (8, 1)) + forge.config.override_t_stream_dir("conv2d_perf.dc.sparse_matmul.18.lc2", "r") + forge.config.override_t_stream_shape("conv2d_perf.dc.sparse_matmul.18.lc2", (16, 1)) + #forge.config.override_t_stream_shape("conv2d_perf.dc.sparse_matmul.18.lc2", (1, 1)) + forge.verify.verify_module(mod, [(16, input_c, input_dim, input_dim)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc, fp32_fallback=forge.DataFormat.Bfp8_b)) @pytest.mark.parametrize("kernel", [1, 3, 7]) @pytest.mark.parametrize("stride", [1, 2]) @@ -67,13 +67,13 @@ def test_single_maxpool(test_kind, test_device, kernel, stride, input_dim, input ) relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - pybuda.config._get_global_compiler_config().performance_trace = pybuda.config.PerfTraceLevel.VERBOSE - pybuda.config.override_op_size("maxpool_perf.dc.sparse_matmul.5.lc2", (6, 1)) - #pybuda.config.override_t_stream_shape("conv2d_perf.dc.matmul.9", (1, 1)) - #pybuda.config.override_op_size("conv2d_perf.dc.sparse_matmul.18.lc2", (8, 1)) - #pybuda.config.override_t_stream_dir("conv2d_perf.dc.sparse_matmul.18.lc2", "r") - #pybuda.config.override_t_stream_shape("conv2d_perf.dc.sparse_matmul.18.lc2", (16, 1)) - #pybuda.config.override_t_stream_shape("conv2d_perf.dc.sparse_matmul.18.lc2", (1, 1)) - pybuda.verify.verify_module(mod, [(16, input_c, input_dim, input_dim)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc, fp32_fallback=pybuda.DataFormat.Bfp8_b)) + forge.config._get_global_compiler_config().performance_trace = forge.config.PerfTraceLevel.VERBOSE + 
forge.config.override_op_size("maxpool_perf.dc.sparse_matmul.5.lc2", (6, 1)) + #forge.config.override_t_stream_shape("conv2d_perf.dc.matmul.9", (1, 1)) + #forge.config.override_op_size("conv2d_perf.dc.sparse_matmul.18.lc2", (8, 1)) + #forge.config.override_t_stream_dir("conv2d_perf.dc.sparse_matmul.18.lc2", "r") + #forge.config.override_t_stream_shape("conv2d_perf.dc.sparse_matmul.18.lc2", (16, 1)) + #forge.config.override_t_stream_shape("conv2d_perf.dc.sparse_matmul.18.lc2", (1, 1)) + forge.verify.verify_module(mod, [(16, input_c, input_dim, input_dim)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, relative_atol=relative_atol, pcc=pcc, fp32_fallback=forge.DataFormat.Bfp8_b)) diff --git a/pybuda/test/test_cross_entropy_loss.py b/forge/test/test_cross_entropy_loss.py similarity index 80% rename from pybuda/test/test_cross_entropy_loss.py rename to forge/test/test_cross_entropy_loss.py index c78fd4d92..4f8735f3a 100644 --- a/pybuda/test/test_cross_entropy_loss.py +++ b/forge/test/test_cross_entropy_loss.py @@ -5,19 +5,19 @@ import torch import numpy as np -import pybuda -import pybuda.op -from pybuda.op.loss import CrossEntropyLoss +import forge +import forge.op +from forge.op.loss import CrossEntropyLoss -from pybuda import ( - PyBudaModule, +from forge import ( + ForgeModule, TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType +from forge._C.backend_api import BackendType from loguru import logger @@ -74,7 +74,7 @@ def test_bert_cross_entropy_loss_torch(): @pytest.mark.parametrize("recompute", (True, False), ids=["recompute", "no_recompute"]) -def test_bert_cross_entropy_loss_pybuda(training, recompute): +def test_bert_cross_entropy_loss_forge(training, recompute): if not training and recompute: pytest.skip() # inference + recompute is the same as just inference @@ -97,7 +97,7 @@ def test_bert_cross_entropy_loss_pybuda(training, recompute): predictions_tensor = Tensor.create_from_torch(predictions) buda_masked_lm_labels_tensor = Tensor.create_from_torch(buda_masked_lm_labels) - ret = pybuda_compile( + ret = forge_compile( tt0, "cross_entropy_loss", predictions_tensor, @@ -108,14 +108,14 @@ def test_bert_cross_entropy_loss_pybuda(training, recompute): ), verify_cfg=VerifyConfig(), ) - pybuda_loss = torch.sum(ret.golden_outputs[0]) # torch.sum to reduce from tile-size 32,32 + forge_loss = torch.sum(ret.golden_outputs[0]) # torch.sum to reduce from tile-size 32,32 pytorch_loss = torch.nn.CrossEntropyLoss()( predictions.view(-1, TEST_VOCAB_SIZE), pytorch_masked_lm_labels.view(-1) ) - torch.allclose(pybuda_loss, pytorch_loss) + torch.allclose(forge_loss, pytorch_loss) -class PyBudaTwoOutputModule(PyBudaModule): +class ForgeTwoOutputModule(ForgeModule): """ dummy module to mimic conditions like the pretraining head with two outputs we can then feed into the loss module @@ -123,16 +123,16 @@ class PyBudaTwoOutputModule(PyBudaModule): shape = (1, 1, TEST_SEQUENCE_LENGTH, TEST_VOCAB_SIZE) def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1, act2): def mprefix(layer_name: str) -> str: # Prepends name with module-name prefix return self.get_name() + "." 
+ layer_name - m1 = pybuda.op.Matmul(mprefix("matmul1"), act1, self.weights1) - m2 = pybuda.op.Matmul(mprefix("matmul2"), act2, self.weights2) + m1 = forge.op.Matmul(mprefix("matmul1"), act1, self.weights1) + m2 = forge.op.Matmul(mprefix("matmul2"), act2, self.weights2) return m1, m2 @@ -141,15 +141,15 @@ def test_bert_fwd_and_loss_module(training, recompute): if not training and recompute: pytest.skip() # inference + recompute is the same as just inference - forward_module = PyBudaTwoOutputModule("fwd") + forward_module = ForgeTwoOutputModule("fwd") loss_module = CrossEntropyLoss("cross_entropy_loss") tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(forward_module) tt0.place_module(loss_module) - act1 = Tensor.create_from_torch(torch.rand(*PyBudaTwoOutputModule.shape)) - act2 = Tensor.create_from_torch(torch.rand(*PyBudaTwoOutputModule.shape, requires_grad=True)) + act1 = Tensor.create_from_torch(torch.rand(*ForgeTwoOutputModule.shape)) + act2 = Tensor.create_from_torch(torch.rand(*ForgeTwoOutputModule.shape, requires_grad=True)) # TODO(jchu): verify -> probably better to commonize under the Tensor api, modify do_verify(..) since # it currently expects loss as a pytorch tensor @@ -160,10 +160,10 @@ def test_bert_fwd_and_loss_module(training, recompute): else: losses = None - forward_module.set_parameter("weights1", torch.rand(*PyBudaTwoOutputModule.shape, requires_grad=True)) - forward_module.set_parameter("weights2", torch.rand(*PyBudaTwoOutputModule.shape, requires_grad=True)) + forward_module.set_parameter("weights1", torch.rand(*ForgeTwoOutputModule.shape, requires_grad=True)) + forward_module.set_parameter("weights2", torch.rand(*ForgeTwoOutputModule.shape, requires_grad=True)) - pybuda_compile( + forge_compile( tt0, "sanity", act1, diff --git a/pybuda/test/test_error.py b/forge/test/test_error.py similarity index 69% rename from pybuda/test/test_error.py rename to forge/test/test_error.py index 05c220a41..73021c3ff 100644 --- a/pybuda/test/test_error.py +++ b/forge/test/test_error.py @@ -8,32 +8,32 @@ import pytest import torch -import pybuda -from pybuda import CPUDevice, TTDevice, PyTorchModule, set_device_pipeline, PyBudaModule, Tensor -from .test_user import PyBudaTestModule, _safe_read +import forge +from forge import CPUDevice, TTDevice, PyTorchModule, set_device_pipeline, ForgeModule, Tensor +from .test_user import ForgeTestModule, _safe_read -class BudaMatmul(PyBudaModule): +class BudaMatmul(ForgeModule): """ Simple buda module for basic testing """ def __init__(self, name): super().__init__(name) - self.weights = pybuda.Parameter(32, 32, requires_grad=True) + self.weights = forge.Parameter(32, 32, requires_grad=True) def forward(self, act): - return pybuda.op.Matmul("matmul", act, self.weights) + return forge.op.Matmul("matmul", act, self.weights) -def test_pybuda_on_cpu_device(): +def test_forge_on_cpu_device(): - lin = PyBudaModule("lin") + lin = ForgeModule("lin") dev = CPUDevice("gs0") with pytest.raises(RuntimeError): dev.place_module(lin) # only pytorch modules on cpu devices def test_invalid_modules(): - dev0 = TTDevice("gs0", devtype=pybuda.BackendType.Golden) + dev0 = TTDevice("gs0", devtype=forge.BackendType.Golden) dev1 = CPUDevice("gs1") with pytest.raises(RuntimeError): @@ -62,21 +62,21 @@ def test_multiple_modules_with_same_name(): def test_different_batch_inputs(): - dev0 = TTDevice("gs0", devtype=pybuda.BackendType.Golden) + dev0 = TTDevice("gs0", devtype=forge.BackendType.Golden) with pytest.raises(Exception): - 
dev0.place_module(PyBudaTestModule("placed")) + dev0.place_module(ForgeTestModule("placed")) # Compile & initialize the pipeline for inference, with given shapes - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=(torch.rand(4, 32, 32), torch.rand(4, 32, 32))) + output_q = forge.initialize_pipeline(training=False, sample_inputs=(torch.rand(4, 32, 32), torch.rand(4, 32, 32))) input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) dev0.push_to_inputs((input1, input2)) - pybuda.run_forward(input_count=1) + forge.run_forward(input_count=1) print(_safe_read(output_q)) input1 = torch.rand(2, 32, 32) input2 = torch.rand(2, 32, 32) dev0.push_to_inputs((input1, input2)) - pybuda.run_forward(input_count=1) + forge.run_forward(input_count=1) print(_safe_read(output_q)) diff --git a/forge/test/test_fork_join.py b/forge/test/test_fork_join.py new file mode 100644 index 000000000..9cd601463 --- /dev/null +++ b/forge/test/test_fork_join.py @@ -0,0 +1,860 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +from typing import List + +import pytest +import torch + +import forge +from forge.verify import verify_module, VerifyConfig +from forge import DataFormat, ForgeModule + +shape = (128, 768) + +def get_relaxed_atol_pcc(test_kind, test_device): + """ + Figure out reasonable pcc/atol for training on silicon + """ + training_atol = 0.3 + training_pcc = 0.95 + if test_device.is_silicon(): + training_pcc = 0.85 + inference_atol = 0.1 + inference_pcc = 0.95 + relative_atol = training_atol if test_kind.is_training() else inference_atol + if test_device.is_silicon() and test_kind.is_training(): + relative_atol *= 3.5 + pcc = training_pcc if test_kind.is_training() else inference_pcc + + return relative_atol, pcc + +class ForkJoinVariant(forge.ForgeModule): + + def __init__(self, name, input_shape, config): + super().__init__(name) + self.weights1 = forge.Parameter(1, input_shape[1], input_shape[1], requires_grad=True) + self.input_shape = input_shape + self.config = config + + def forward(self, act1): + + # fork + if self.config[0] == "e": + fork = forge.op.Gelu("gelu_fork", act1) + elif self.config[0] == "m": + fork = forge.op.Matmul("matmul_fork", act1, act1) + else: + raise TypeError("Unexpected value in configuration of fork-join test") + + # right + if self.config[1] == "e": + right = forge.op.Add("add_long_path", fork, self.weights1) + elif self.config[1] == "m": + right = forge.op.Matmul("matmul_long_path", fork, self.weights1) + else: + raise TypeError("Unexpected value in configuration of fork-join test") + + # join + if self.config[2] == "e": + join = forge.op.Add("add_join", fork, right) + elif self.config[2] == "m": + join = forge.op.Matmul("matmul_join", fork, right) + else: + raise TypeError("Unexpected value in configuration of fork-join test") + + return join + +@pytest.mark.parametrize("input_shape", [(128,128), (256,256), (512,512)], ids=["128","256","512"]) +@pytest.mark.parametrize("config", ["mem", "mmm", "eme", "emm"], ids=["mem", "mmm", "eme", "emm"]) +def test_fork_join_variant(test_kind, test_device, input_shape, config): + """ + input_shape: input shape of the tensor in the fork-join. + config: string that tells us type of each op in the simple fork-join. first character describes fork node, second describes op on the longer path and third describes join node. + if config is "m" then apropriate node is matmul, and if it is "e", then node is element-wise op. 
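+    For example (mapping of the code in ForkJoinVariant above), config "mem" builds: fork = Matmul(act1, act1), long path = Add(fork, weights1), join = Matmul(fork, long_path_output).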
+ """ + num_in_channels = 1 + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + verify_module(ForkJoinVariant("test_fork_join_variant", input_shape, config), [(1, num_in_channels, input_shape[0], input_shape[1])], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol)) + +class ForkJoin(forge.ForgeModule): + + def __init__(self, name, stack_factor: int = 12): + super().__init__(name) + self.weights1 = forge.Parameter(stack_factor, shape[1] // stack_factor, shape[1] // stack_factor, requires_grad=True) + self.weights2 = forge.Parameter(1, shape[1], shape[1], requires_grad=True) + self.weights3 = forge.Parameter(stack_factor, shape[1] // stack_factor, shape[1] // stack_factor, requires_grad=True) + self.stack_factor = stack_factor + + def forward(self, act1): + + # input slice + sliced = forge.op.HSlice("slice", act1, self.stack_factor) + + # fork, t=stack_factor + fork = forge.op.Gelu("gelu", sliced) + + # right + right = forge.op.Matmul("matmul_1", fork, self.weights1) + right = forge.op.HStack("stack_branch", right) + right = forge.op.Matmul("matmul_2a_t1", right, self.weights2) + right = forge.op.Matmul("matmul_2b_t1", right, self.weights2) + right = forge.op.HSlice("slice_branch", right, self.stack_factor) + right = forge.op.Matmul("matmul_3", right, self.weights3) + + # join + join = forge.op.Add("join", fork, right) + return join + +@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) +def test_fork_join(test_kind, test_device, format): + if test_device.arch == forge.BackendDevice.Blackhole: + pytest.skip("Skip until BudaBackend#2628 is consumed.") + + microbatch_count = 16 + + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + verify_module(ForkJoin("fork_join"), [(microbatch_count, *shape)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, + fp32_fallback=format)) + +class ForkJoinWithBuffQueueLongPath(forge.ForgeModule): + def __init__(self, name, stack_factor: int = 12): + super().__init__(name) + self.in0_mm_1 = forge.Parameter(16, 60 * 32, 60 * 32, requires_grad=False) + self.in1_mm_2 = forge.Parameter(1, 32 * 32, 1 * 32, requires_grad=False) + self.in1_mm_3 = forge.Parameter(1, 1 * 32, 32 * 32, requires_grad=False) + # in original graph in1_mm_3 has dimension 3 equal to 1 * 32. But mm_3 has broadcast on dimension 3 for 32. + # pytorch doesn't allow for broadcast if dimension is greater than 1. So we can't broadcast here. + def forward(self, act1, act2): + # Longer path of fork join contains buffering queue, + # which has to be taken into consideration when buffering fork-join. + # fork, + fork = forge.op.Concatenate("concatenate", act1, act2, axis=2) + # right + right = forge.op.Matmul("matmul_1", self.in0_mm_1, fork) + forge.config._get_global_compiler_config().insert_queues = [("matmul_1", "matmul_2", 0)] + right = forge.op.HStack("hstack", right) + right = forge.op.Matmul("matmul_2", right, self.in1_mm_2) + right = forge.op.Matmul("matmul_3", right, self.in1_mm_3) + right = forge.op.HSlice("vslice", right, 16) + # join + join = forge.op.Subtract("join", fork, right) + return join +# This test will hang on silicon if fork-join is not buffered properly. Longer path of fork join contains buffering queue, +# which has to be taken into consideration when buffering fork-join. 
+@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) +def test_fork_join_with_buff_queue_long_path(test_kind, test_device, format): + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.balancer_policy = "Ribbon" + verify_module(ForkJoinWithBuffQueueLongPath("test_fork_join_with_buff_queue_long_path"), [(1, 16, 40 * 32, 2 * 32), (1, 16, 20 * 32, 2 * 32)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, + fp32_fallback=format)) + +class MultilevelForkJoin(forge.ForgeModule): + def __init__(self, name,): + super().__init__(name) + self.add_parameter("stages.2.blocks.1.conv_mid.0.conv.weight", forge.Parameter(*(192, 768, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.0.bn.weight", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.0.bn.bias", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.1.conv.weight", forge.Parameter(*(192, 192, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.1.bn.weight", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.1.bn.bias", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.2.conv.weight", forge.Parameter(*(192, 192, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.2.bn.weight", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.2.bn.bias", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.3.conv.weight", forge.Parameter(*(192, 192, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.3.bn.weight", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.3.bn.bias", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.4.conv.weight", forge.Parameter(*(192, 192, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.4.bn.weight", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_mid.4.bn.bias", forge.Parameter(*(192,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("stages.2.blocks.1.conv_concat.conv.weight", forge.Parameter(*(768, 1728, 1, 1), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + + self.add_constant("stages.2.blocks.1.conv_mid.0.bn.running_var") + self.add_constant("stages.2.blocks.1.conv_mid.0.bn.running_mean") + self.add_constant("stages.2.blocks.1.conv_mid.1.bn.running_var") + self.add_constant("stages.2.blocks.1.conv_mid.1.bn.running_mean") + 
self.add_constant("stages.2.blocks.1.conv_mid.2.bn.running_var") + self.add_constant("stages.2.blocks.1.conv_mid.2.bn.running_mean") + self.add_constant("stages.2.blocks.1.conv_mid.3.bn.running_var") + self.add_constant("stages.2.blocks.1.conv_mid.3.bn.running_mean") + self.add_constant("stages.2.blocks.1.conv_mid.4.bn.running_var") + self.add_constant("stages.2.blocks.1.conv_mid.4.bn.running_mean") + self.add_constant("const_67322") + self.add_constant("const_68322") + self.add_constant("const_69322") + self.add_constant("const_70322") + self.add_constant("const_71322") + self.add_constant("const_72322") + self.add_constant("const_73322") + self.add_constant("const_74322") + self.add_constant("const_75322") + self.add_constant("const_76322") + self.add_constant("const_77322") + self.add_constant("const_78322") + self.add_constant("const_79322") + self.add_constant("const_80322") + self.add_constant("const_81322") + + self.set_constant("stages.2.blocks.1.conv_mid.0.bn.running_var", torch.rand(1, 192)) + self.set_constant("stages.2.blocks.1.conv_mid.0.bn.running_mean", torch.rand(1, 192)) + self.set_constant("stages.2.blocks.1.conv_mid.1.bn.running_var", torch.rand(1, 192)) + self.set_constant("stages.2.blocks.1.conv_mid.1.bn.running_mean", torch.rand(1, 192)) + self.set_constant("stages.2.blocks.1.conv_mid.2.bn.running_var", torch.rand(1, 192)) + self.set_constant("stages.2.blocks.1.conv_mid.2.bn.running_mean", torch.rand(1, 192)) + self.set_constant("stages.2.blocks.1.conv_mid.3.bn.running_var", torch.rand(1, 192)) + self.set_constant("stages.2.blocks.1.conv_mid.3.bn.running_mean", torch.rand(1, 192)) + self.set_constant("stages.2.blocks.1.conv_mid.4.bn.running_var", torch.rand(1, 192)) + self.set_constant("stages.2.blocks.1.conv_mid.4.bn.running_mean", torch.rand(1, 192)) + self.set_constant("const_67322", torch.rand(1, 1)) + self.set_constant("const_68322", torch.rand(1, 1)) + self.set_constant("const_69322", torch.rand(1, 1)) + self.set_constant("const_70322", torch.rand(1, 1)) + self.set_constant("const_71322", torch.rand(1, 1)) + self.set_constant("const_72322", torch.rand(1, 1)) + self.set_constant("const_73322", torch.rand(1, 1)) + self.set_constant("const_74322", torch.rand(1, 1)) + self.set_constant("const_75322", torch.rand(1, 1)) + self.set_constant("const_76322", torch.rand(1, 1)) + self.set_constant("const_77322", torch.rand(1, 1)) + self.set_constant("const_78322", torch.rand(1, 1)) + self.set_constant("const_79322", torch.rand(1, 1)) + self.set_constant("const_80322", torch.rand(1, 1)) + self.set_constant("const_81322", torch.rand(1, 1)) + + def forward(self, act_0): + + conv2d_586 = forge.op.Conv2d("", act_0, self.get_parameter("stages.2.blocks.1.conv_mid.0.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + add_589 = forge.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.0.bn.running_var"), self.get_constant("const_68322")) + sqrt_590 = forge.op.Sqrt("", add_589) + reciprocal_591 = forge.op.Reciprocal("", sqrt_590) + multiply_592 = forge.op.Multiply("", self.get_constant("const_67322"), reciprocal_591) + multiply_593 = forge.op.Multiply("", multiply_592, self.get_parameter("stages.2.blocks.1.conv_mid.0.bn.weight")) + reshape_594 = forge.op.Reshape("", multiply_593, shape=(192, 1, 1)) + multiply_595 = forge.op.Multiply("", conv2d_586, reshape_594) + multiply_597 = forge.op.Multiply("", self.get_constant("stages.2.blocks.1.conv_mid.0.bn.running_mean"), self.get_constant("const_69322")) + multiply_598 = forge.op.Multiply("", 
multiply_597, multiply_593) + add_599 = forge.op.Add("", multiply_598, self.get_parameter("stages.2.blocks.1.conv_mid.0.bn.bias")) + reshape_600 = forge.op.Reshape("", add_599, shape=(192, 1, 1)) + add_601 = forge.op.Add("", multiply_595, reshape_600) + relu_602 = forge.op.Relu("", add_601) + conv2d_603 = forge.op.Conv2d("", relu_602, self.get_parameter("stages.2.blocks.1.conv_mid.1.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + add_606 = forge.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.1.bn.running_var"), self.get_constant("const_71322")) + sqrt_607 = forge.op.Sqrt("", add_606) + reciprocal_608 = forge.op.Reciprocal("", sqrt_607) + multiply_609 = forge.op.Multiply("", self.get_constant("const_70322"), reciprocal_608) + multiply_610 = forge.op.Multiply("", multiply_609, self.get_parameter("stages.2.blocks.1.conv_mid.1.bn.weight")) + reshape_611 = forge.op.Reshape("", multiply_610, shape=(192, 1, 1)) + multiply_612 = forge.op.Multiply("", conv2d_603, reshape_611) + multiply_614 = forge.op.Multiply("", self.get_constant("stages.2.blocks.1.conv_mid.1.bn.running_mean"), self.get_constant("const_72322")) + multiply_615 = forge.op.Multiply("", multiply_614, multiply_610) + add_616 = forge.op.Add("", multiply_615, self.get_parameter("stages.2.blocks.1.conv_mid.1.bn.bias")) + reshape_617 = forge.op.Reshape("", add_616, shape=(192, 1, 1)) + add_618 = forge.op.Add("", multiply_612, reshape_617) + relu_619 = forge.op.Relu("", add_618) + conv2d_620 = forge.op.Conv2d("", relu_619, self.get_parameter("stages.2.blocks.1.conv_mid.2.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + add_623 = forge.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.2.bn.running_var"), self.get_constant("const_74322")) + sqrt_624 = forge.op.Sqrt("", add_623) + reciprocal_625 = forge.op.Reciprocal("", sqrt_624) + multiply_626 = forge.op.Multiply("", self.get_constant("const_73322"), reciprocal_625) + multiply_627 = forge.op.Multiply("", multiply_626, self.get_parameter("stages.2.blocks.1.conv_mid.2.bn.weight")) + reshape_628 = forge.op.Reshape("", multiply_627, shape=(192, 1, 1)) + multiply_629 = forge.op.Multiply("", conv2d_620, reshape_628) + multiply_631 = forge.op.Multiply("", self.get_constant("stages.2.blocks.1.conv_mid.2.bn.running_mean"), self.get_constant("const_75322")) + multiply_632 = forge.op.Multiply("", multiply_631, multiply_627) + add_633 = forge.op.Add("", multiply_632, self.get_parameter("stages.2.blocks.1.conv_mid.2.bn.bias")) + reshape_634 = forge.op.Reshape("", add_633, shape=(192, 1, 1)) + add_635 = forge.op.Add("", multiply_629, reshape_634) + relu_636 = forge.op.Relu("", add_635) + conv2d_637 = forge.op.Conv2d("", relu_636, self.get_parameter("stages.2.blocks.1.conv_mid.3.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + add_640 = forge.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.3.bn.running_var"), self.get_constant("const_77322")) + sqrt_641 = forge.op.Sqrt("", add_640) + reciprocal_642 = forge.op.Reciprocal("", sqrt_641) + multiply_643 = forge.op.Multiply("", self.get_constant("const_76322"), reciprocal_642) + multiply_644 = forge.op.Multiply("", multiply_643, self.get_parameter("stages.2.blocks.1.conv_mid.3.bn.weight")) + reshape_645 = forge.op.Reshape("", multiply_644, shape=(192, 1, 1)) + multiply_646 = forge.op.Multiply("", conv2d_637, reshape_645) + multiply_648 = forge.op.Multiply("", 
self.get_constant("stages.2.blocks.1.conv_mid.3.bn.running_mean"), self.get_constant("const_78322")) + multiply_649 = forge.op.Multiply("", multiply_648, multiply_644) + add_650 = forge.op.Add("", multiply_649, self.get_parameter("stages.2.blocks.1.conv_mid.3.bn.bias")) + reshape_651 = forge.op.Reshape("", add_650, shape=(192, 1, 1)) + add_652 = forge.op.Add("", multiply_646, reshape_651) + relu_653 = forge.op.Relu("", add_652) + conv2d_654 = forge.op.Conv2d("", relu_653, self.get_parameter("stages.2.blocks.1.conv_mid.4.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + add_657 = forge.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.4.bn.running_var"), self.get_constant("const_80322")) + sqrt_658 = forge.op.Sqrt("", add_657) + reciprocal_659 = forge.op.Reciprocal("", sqrt_658) + multiply_660 = forge.op.Multiply("", self.get_constant("const_79322"), reciprocal_659) + multiply_661 = forge.op.Multiply("", multiply_660, self.get_parameter("stages.2.blocks.1.conv_mid.4.bn.weight")) + reshape_662 = forge.op.Reshape("", multiply_661, shape=(192, 1, 1)) + multiply_663 = forge.op.Multiply("", conv2d_654, reshape_662) + multiply_665 = forge.op.Multiply("", self.get_constant("stages.2.blocks.1.conv_mid.4.bn.running_mean"), self.get_constant("const_81322")) + multiply_666 = forge.op.Multiply("", multiply_665, multiply_661) + add_667 = forge.op.Add("", multiply_666, self.get_parameter("stages.2.blocks.1.conv_mid.4.bn.bias")) + reshape_668 = forge.op.Reshape("", add_667, shape=(192, 1, 1)) + add_669 = forge.op.Add("", multiply_663, reshape_668) + relu_670 = forge.op.Relu("", add_669) + concatenate_671 = forge.op.Concatenate("", act_0, relu_602, relu_619, relu_636, relu_653, relu_670, axis=-3) + conv2d_672 = forge.op.Conv2d("", concatenate_671, self.get_parameter("stages.2.blocks.1.conv_concat.conv.weight"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) + + return conv2d_672 + +# This test will hang on silicon if fork-join is not buffered properly. This test is from vovnet_v2 benchmark. 
+# This test will hang without fork-join multilevel feature fec3b1879941dde87fa7f1d460ba5ff1bbb751f4 +@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) +def test_multilevel_fork_join_vovnet(test_kind, test_device, format): + if test_kind.is_training(): + pytest.skip() + try: + import os + os.environ["FORGE_MAXIMIZE_SPARSE_UBLOCK"] = "1" + os.environ["FORGE_RIBBON2"] = "1" + + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.balancer_policy = "Ribbon" + compiler_cfg.default_df_override = format + # Op overrides + forge.config.override_op_size("conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 4)) + forge.config.override_op_size("conv2d_14.dc.matmul.11", (1, 2)) + forge.config.override_op_size("conv2d_14.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 3)) + forge.config.override_op_size("conv2d_28.dc.matmul.11", (1, 2)) + forge.config.override_op_size("conv2d_28.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 3)) + forge.config.override_op_size("conv2d_42.dc.matmul.11", (1, 2)) + forge.config.override_op_size("conv2d_42.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 3)) + forge.config.override_op_size("conv2d_56.dc.matmul.11", (1, 2)) + forge.config.override_op_size("conv2d_56.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 3)) + forge.config.override_op_size("concatenate_70.dc.concatenate.0", (1, 1)) + + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + verify_module(MultilevelForkJoin("test_multilevel_fork_join_vovnet"),[(1, 768, 14, 14)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, + fp32_fallback=format)) + finally: + # unset env variables + os.environ.pop('FORGE_MAXIMIZE_SPARSE_UBLOCK', None) + os.environ.pop('FORGE_RIBBON2', None) + +class BertGeluFork(forge.ForgeModule): + + def __init__(self, name, seq_len=128, hidden_dim=784): + super().__init__(name) + self.seq_len = seq_len + self.hidden_dim = hidden_dim + self.weights1 = forge.Parameter(hidden_dim, hidden_dim*4); + self.weights2 = forge.Parameter(hidden_dim*4, hidden_dim); + + def forward(self, act): + + # fork + fork = forge.op.Buffer("fork", act) + + # right + right = forge.op.Matmul("ff1", fork, self.weights1) + right = forge.op.Gelu("gelu", right) + right = forge.op.Matmul("ff2", right, self.weights2) + + # join + join = forge.op.Add("join", fork, right) + return join + +@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) +@pytest.mark.skip(reason="too slow for CI") +def test_bert_gelu_fork(test_kind, test_device, format): + microbatch_count = 256 + seq_len = 128 + hidden_dim = 768 + + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + forge.config._get_global_compiler_config().performance_trace = forge.config.PerfTraceLevel.VERBOSE + verify_module(BertGeluFork("bert_gelu_fork", seq_len, hidden_dim), [(microbatch_count, seq_len, hidden_dim)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, + fp32_fallback=format), params_centered_on_zero=True) + +class BertReduceFork(forge.ForgeModule): + + def __init__(self, name, seq_len=128, hidden_dim=784): + super().__init__(name) + self.seq_len = seq_len + self.hidden_dim = hidden_dim + self.weights1 = forge.Parameter(seq_len, hidden_dim); + + def forward(self, act): + + # fork + fork = forge.op.Buffer("fork", act) + + # right + right = forge.op.Add("add", fork, 
self.weights1) + right = forge.op.ReduceAvg("reduce", right, dim=-1) + + # join + join = forge.op.Add("join", fork, right) + return join + +@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) +@pytest.mark.skip(reason="too slow for CI") +def test_bert_reduce_fork(test_kind, test_device, format): + microbatch_count = 256 + seq_len = 384 + hidden_dim = 1024 + + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + forge.config._get_global_compiler_config().performance_trace = forge.config.PerfTraceLevel.VERBOSE + verify_module(BertReduceFork("bert_reduce_fork", seq_len, hidden_dim), [(microbatch_count, seq_len, hidden_dim)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, + fp32_fallback=format), params_centered_on_zero=True) + + +class PipelineStuck(forge.ForgeModule): + + def __init__(self, name): + super().__init__(name) + + def forward(self, act): + + # fork + #act = forge.op.ReduceAvg("reduce", act, dim=-1) + act = forge.op.Sqrt("sqrt", act) + act = forge.op.Exp("exp", act) + act = forge.op.Buffer("nop2", act) + + return act + +@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) +@pytest.mark.skip(reason="too slow for CI") +def test_pipeline_stuck(test_kind, test_device, format): + microbatch_count = 256 + seq_len = 128 + hidden_dim = 768 + + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + forge.config._get_global_compiler_config().performance_trace = forge.config.PerfTraceLevel.VERBOSE + verify_module(PipelineStuck("pipeline_stuck"), [(microbatch_count, seq_len, hidden_dim)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, + fp32_fallback=format), params_centered_on_zero=True) + + +class NestedForks(forge.ForgeModule): + + def __init__(self, name): + super().__init__(name) + + def forward(self, act): + + # main fork + fork = forge.op.Buffer("main_fork", act) + + left_1 = forge.op.Buffer("left_1", fork) + left_2 = forge.op.Buffer("left_2", left_1) + fork_2 = forge.op.Buffer("fork_2", left_2) + right_2_1 = forge.op.Buffer("right_2_1", fork_2) + join_2 = forge.op.Add("join_2", fork_2, right_2_1) + + right_1 = forge.op.Buffer("right_1", fork) + join_3 = forge.op.Add("join_3", right_1, join_2) + + left_4 = forge.op.Buffer("left_4", join_3) + + join = forge.op.Add("join", fork, left_4) + + return join + +def test_nested_forks(test_kind, test_device): + microbatch_count = 1 + seq_len = 128 + hidden_dim = 768 + + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + #forge.config._get_global_compiler_config().performance_trace = forge.config.PerfTraceLevel.VERBOSE + verify_module(NestedForks("netsted_forks"), [(microbatch_count, seq_len, hidden_dim)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol), params_centered_on_zero=True) + +class YoloV3ForkJoin(ForgeModule): + def __init__(self, name): + super().__init__(name) + self.add_parameter("backbone.base.conv.weight", forge.Parameter(*(32, 3, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.base.bn.weight", forge.Parameter(*(32,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.base.bn.bias", forge.Parameter(*(32,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) 
+ self.add_parameter("backbone.darknet_0.0.conv.weight", forge.Parameter(*(64, 32, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.darknet_0.0.bn.weight", forge.Parameter(*(64,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.darknet_0.0.bn.bias", forge.Parameter(*(64,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.darknet_0.1.conv1.conv.weight", forge.Parameter(*(32, 64, 1, 1), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.darknet_0.1.conv1.bn.weight", forge.Parameter(*(32,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.darknet_0.1.conv1.bn.bias", forge.Parameter(*(32,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.darknet_0.1.conv2.conv.weight", forge.Parameter(*(64, 32, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.darknet_0.1.conv2.bn.weight", forge.Parameter(*(64,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("backbone.darknet_0.1.conv2.bn.bias", forge.Parameter(*(64,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_constant("backbone.base.bn.running_var", shape=(32,)) + self.set_constant("backbone.base.bn.running_var", torch.rand(32, )) + self.add_constant("backbone.base.bn.running_mean", shape=(32,)) + self.set_constant("backbone.base.bn.running_mean", torch.rand(32, )) + self.add_constant("backbone.darknet_0.0.bn.running_var", shape=(64,)) + self.set_constant("backbone.darknet_0.0.bn.running_var", torch.rand(64, )) + self.add_constant("backbone.darknet_0.0.bn.running_mean", shape=(64,)) + self.set_constant("backbone.darknet_0.0.bn.running_mean", torch.rand(64, )) + self.add_constant("backbone.darknet_0.1.conv1.bn.running_var", shape=(32,)) + self.set_constant("backbone.darknet_0.1.conv1.bn.running_var", torch.rand(32, )) + self.add_constant("backbone.darknet_0.1.conv1.bn.running_mean", shape=(32,)) + self.set_constant("backbone.darknet_0.1.conv1.bn.running_mean", torch.rand(32, )) + self.add_constant("backbone.darknet_0.1.conv2.bn.running_var", shape=(64,)) + self.set_constant("backbone.darknet_0.1.conv2.bn.running_var", torch.rand(64, )) + self.add_constant("backbone.darknet_0.1.conv2.bn.running_mean", shape=(64,)) + self.set_constant("backbone.darknet_0.1.conv2.bn.running_mean", torch.rand(64, )) + self.add_constant("const_0578", shape=(1, 1)) + self.set_constant("const_0578", torch.rand(1, 1)) + self.add_constant("const_1578", shape=(1, 1)) + self.set_constant("const_1578", torch.rand(1, 1)) + self.add_constant("const_2578", shape=(1, 1)) + self.set_constant("const_2578", torch.rand(1, 1)) + self.add_constant("const_3578", shape=(1, 1)) + self.set_constant("const_3578", torch.rand(1, 1)) + self.add_constant("const_4578", shape=(1, 1)) + self.set_constant("const_4578", torch.rand(1, 1)) + self.add_constant("const_5578", shape=(1, 1)) + self.set_constant("const_5578", torch.rand(1, 1)) + self.add_constant("const_6578", shape=(1, 1)) + self.set_constant("const_6578", torch.rand(1, 1)) + self.add_constant("const_7578", shape=(1, 1)) + self.set_constant("const_7578", torch.rand(1, 1)) + self.add_constant("const_8578", shape=(1, 1)) + self.set_constant("const_8578", torch.rand(1, 1)) + self.add_constant("const_9578", shape=(1, 1)) + self.set_constant("const_9578", 
torch.rand(1, 1)) + self.add_constant("const_10578", shape=(1, 1)) + self.set_constant("const_10578", torch.rand(1, 1)) + self.add_constant("const_11578", shape=(1, 1)) + self.set_constant("const_11578", torch.rand(1, 1)) + + # Input shapes: + # x_1 -> (1, 3, 512, 512) + def forward(self, x_1): + conv2d_367 = forge.op.Conv2d("conv2d_0", x_1, self.get_parameter("backbone.base.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + add_370 = forge.op.Add("add_1", self.get_constant("backbone.base.bn.running_var"), self.get_constant("const_1578")) + sqrt_371 = forge.op.Sqrt("sqrt_2", add_370) + reciprocal_372 = forge.op.Reciprocal("reciprocal_3", sqrt_371) + multiply_373 = forge.op.Multiply("multiply_4", self.get_constant("const_0578"), reciprocal_372) + multiply_374 = forge.op.Multiply("multiply_5", multiply_373, self.get_parameter("backbone.base.bn.weight")) + reshape_375 = forge.op.Reshape("reshape_6", multiply_374, shape=(32, 1, 1)) + multiply_376 = forge.op.Multiply("multiply_7", conv2d_367, reshape_375) + multiply_378 = forge.op.Multiply("multiply_8", self.get_constant("backbone.base.bn.running_mean"), self.get_constant("const_2578")) + multiply_379 = forge.op.Multiply("multiply_9", multiply_378, multiply_374) + add_380 = forge.op.Add("add_10", multiply_379, self.get_parameter("backbone.base.bn.bias")) + reshape_381 = forge.op.Reshape("reshape_11", add_380, shape=(32, 1, 1)) + add_382 = forge.op.Add("add_12", multiply_376, reshape_381) + leaky_relu_383 = forge.op.LeakyRelu("leaky_relu_13", add_382, alpha=0.10000000000000001) + conv2d_384 = forge.op.Conv2d("conv2d_14", leaky_relu_383, self.get_parameter("backbone.darknet_0.0.conv.weight"), stride=[2, 2], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + add_387 = forge.op.Add("add_15", self.get_constant("backbone.darknet_0.0.bn.running_var"), self.get_constant("const_4578")) + sqrt_388 = forge.op.Sqrt("sqrt_16", add_387) + reciprocal_389 = forge.op.Reciprocal("reciprocal_17", sqrt_388) + multiply_390 = forge.op.Multiply("multiply_18", self.get_constant("const_3578"), reciprocal_389) + multiply_391 = forge.op.Multiply("multiply_19", multiply_390, self.get_parameter("backbone.darknet_0.0.bn.weight")) + reshape_392 = forge.op.Reshape("reshape_20", multiply_391, shape=(64, 1, 1)) + multiply_393 = forge.op.Multiply("multiply_21", conv2d_384, reshape_392) + multiply_395 = forge.op.Multiply("multiply_22", self.get_constant("backbone.darknet_0.0.bn.running_mean"), self.get_constant("const_5578")) + multiply_396 = forge.op.Multiply("multiply_23", multiply_395, multiply_391) + add_397 = forge.op.Add("add_24", multiply_396, self.get_parameter("backbone.darknet_0.0.bn.bias")) + reshape_398 = forge.op.Reshape("reshape_25", add_397, shape=(64, 1, 1)) + add_399 = forge.op.Add("add_26", multiply_393, reshape_398) + leaky_relu_400 = forge.op.LeakyRelu("leaky_relu_27", add_399, alpha=0.10000000000000001) + conv2d_401 = forge.op.Conv2d("conv2d_28", leaky_relu_400, self.get_parameter("backbone.darknet_0.1.conv1.conv.weight"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) + add_404 = forge.op.Add("add_29", self.get_constant("backbone.darknet_0.1.conv1.bn.running_var"), self.get_constant("const_7578")) + sqrt_405 = forge.op.Sqrt("sqrt_30", add_404) + reciprocal_406 = forge.op.Reciprocal("reciprocal_31", sqrt_405) + multiply_407 = forge.op.Multiply("multiply_32", self.get_constant("const_6578"), reciprocal_406) + multiply_408 = forge.op.Multiply("multiply_33", multiply_407, 
self.get_parameter("backbone.darknet_0.1.conv1.bn.weight")) + reshape_409 = forge.op.Reshape("reshape_34", multiply_408, shape=(32, 1, 1)) + multiply_410 = forge.op.Multiply("multiply_35", conv2d_401, reshape_409) + multiply_412 = forge.op.Multiply("multiply_36", self.get_constant("backbone.darknet_0.1.conv1.bn.running_mean"), self.get_constant("const_8578")) + multiply_413 = forge.op.Multiply("multiply_37", multiply_412, multiply_408) + add_414 = forge.op.Add("add_38", multiply_413, self.get_parameter("backbone.darknet_0.1.conv1.bn.bias")) + reshape_415 = forge.op.Reshape("reshape_39", add_414, shape=(32, 1, 1)) + add_416 = forge.op.Add("add_40", multiply_410, reshape_415) + leaky_relu_417 = forge.op.LeakyRelu("leaky_relu_41", add_416, alpha=0.10000000000000001) + conv2d_418 = forge.op.Conv2d("conv2d_42", leaky_relu_417, self.get_parameter("backbone.darknet_0.1.conv2.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + add_421 = forge.op.Add("add_43", self.get_constant("backbone.darknet_0.1.conv2.bn.running_var"), self.get_constant("const_10578")) + sqrt_422 = forge.op.Sqrt("sqrt_44", add_421) + reciprocal_423 = forge.op.Reciprocal("reciprocal_45", sqrt_422) + multiply_424 = forge.op.Multiply("multiply_46", self.get_constant("const_9578"), reciprocal_423) + multiply_425 = forge.op.Multiply("multiply_47", multiply_424, self.get_parameter("backbone.darknet_0.1.conv2.bn.weight")) + reshape_426 = forge.op.Reshape("reshape_48", multiply_425, shape=(64, 1, 1)) + multiply_427 = forge.op.Multiply("multiply_49", conv2d_418, reshape_426) + multiply_429 = forge.op.Multiply("multiply_50", self.get_constant("backbone.darknet_0.1.conv2.bn.running_mean"), self.get_constant("const_11578")) + multiply_430 = forge.op.Multiply("multiply_51", multiply_429, multiply_425) + add_431 = forge.op.Add("add_52", multiply_430, self.get_parameter("backbone.darknet_0.1.conv2.bn.bias")) + reshape_432 = forge.op.Reshape("reshape_53", add_431, shape=(64, 1, 1)) + add_433 = forge.op.Add("add_54", multiply_427, reshape_432) + leaky_relu_434 = forge.op.LeakyRelu("leaky_relu_55", add_433, alpha=0.10000000000000001) + add_435 = forge.op.Add("add_56", leaky_relu_434, leaky_relu_400) + reshape_436 = forge.op.Reshape("reshape_final", add_435, shape=(1, 1, 64, 65536)) + return reshape_436 + + @staticmethod + def add_op_overrides(): + forge.config.override_op_size("_fused_op_2", (2, 2)) + forge.config.override_t_stream_shape("_fused_op_2", (128, 1)) + forge.config.override_t_stream_dir("_fused_op_2", "r") + forge.config.override_op_size("conv2d_42.dc.conv2d.1.dc.matmul.11", (2, 2)) + forge.config.override_t_stream_shape("conv2d_42.dc.conv2d.1.dc.matmul.11", (128, 1)) + forge.config.override_t_stream_dir("conv2d_42.dc.conv2d.1.dc.matmul.11", "r") + forge.config.override_u_kt("conv2d_42.dc.conv2d.1.dc.matmul.11", 1) + forge.config.override_op_size("conv2d_42.dc.conv2d.3.dc.matmul.11", (2, 2)) + forge.config.override_t_stream_shape("conv2d_42.dc.conv2d.3.dc.matmul.11", (128, 1)) + forge.config.override_t_stream_dir("conv2d_42.dc.conv2d.3.dc.matmul.11", "r") + forge.config.override_u_kt("conv2d_42.dc.conv2d.3.dc.matmul.11", 1) + forge.config.override_op_size("conv2d_42.dc.conv2d.5.dc.matmul.11", (2, 2)) + forge.config.override_t_stream_shape("conv2d_42.dc.conv2d.5.dc.matmul.11", (128, 1)) + forge.config.override_t_stream_dir("conv2d_42.dc.conv2d.5.dc.matmul.11", "r") + forge.config.override_u_kt("conv2d_42.dc.conv2d.5.dc.matmul.11", 1) + forge.config.override_op_size("_fused_op_1", (2, 2)) + 
forge.config.override_t_stream_shape("_fused_op_1", (128, 1)) + forge.config.override_t_stream_dir("_fused_op_1", "r") + forge.config.override_op_size("conv2d_42.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_t_stream_shape("conv2d_42.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) + forge.config.override_t_stream_dir("conv2d_42.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") + forge.config.override_op_size("conv2d_42.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_t_stream_shape("conv2d_42.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) + forge.config.override_t_stream_dir("conv2d_42.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") + forge.config.override_op_size("conv2d_42.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_t_stream_shape("conv2d_42.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) + forge.config.override_t_stream_dir("conv2d_42.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") + forge.config.override_op_size("conv2d_14.dc.conv2d.1.dc.matmul.11", (2, 2)) + forge.config.override_t_stream_shape("conv2d_14.dc.conv2d.1.dc.matmul.11", (128, 1)) + forge.config.override_t_stream_dir("conv2d_14.dc.conv2d.1.dc.matmul.11", "r") + forge.config.override_u_kt("conv2d_14.dc.conv2d.1.dc.matmul.11", 1) + forge.config.override_op_size("conv2d_14.dc.conv2d.3.dc.matmul.11", (2, 2)) + forge.config.override_t_stream_shape("conv2d_14.dc.conv2d.3.dc.matmul.11", (128, 1)) + forge.config.override_t_stream_dir("conv2d_14.dc.conv2d.3.dc.matmul.11", "r") + forge.config.override_u_kt("conv2d_14.dc.conv2d.3.dc.matmul.11", 1) + forge.config.override_op_size("conv2d_14.dc.conv2d.5.dc.matmul.11", (2, 2)) + forge.config.override_t_stream_shape("conv2d_14.dc.conv2d.5.dc.matmul.11", (128, 1)) + forge.config.override_t_stream_dir("conv2d_14.dc.conv2d.5.dc.matmul.11", "r") + forge.config.override_u_kt("conv2d_14.dc.conv2d.5.dc.matmul.11", 1) + forge.config.override_op_size("leaky_relu_41", (2, 1)) + forge.config.override_t_stream_shape("leaky_relu_41", (128, 1)) + forge.config.override_t_stream_dir("leaky_relu_41", "r") + forge.config.override_op_size("conv2d_14.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_t_stream_shape("conv2d_14.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) + forge.config.override_t_stream_dir("conv2d_14.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") + forge.config.override_op_size("conv2d_14.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_t_stream_shape("conv2d_14.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) + forge.config.override_t_stream_dir("conv2d_14.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") + forge.config.override_op_size("conv2d_14.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_t_stream_shape("conv2d_14.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) + forge.config.override_t_stream_dir("conv2d_14.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") + forge.config.override_op_size("conv2d_28.dc.matmul.8", (2, 1)) + forge.config.override_t_stream_shape("conv2d_28.dc.matmul.8", (128, 1)) + forge.config.override_t_stream_dir("conv2d_28.dc.matmul.8", "r") + forge.config.override_u_kt("conv2d_28.dc.matmul.8", 2) + forge.config.override_op_size("_fused_op_0", 
(2, 1)) + forge.config.override_t_stream_shape("_fused_op_0", (256, 1)) + forge.config.override_t_stream_dir("_fused_op_0", "r") + forge.config.override_op_size("conv2d_0.dc.conv2d.1.dc.matmul.11", (2, 1)) + forge.config.override_t_stream_shape("conv2d_0.dc.conv2d.1.dc.matmul.11", (256, 1)) + forge.config.override_t_stream_dir("conv2d_0.dc.conv2d.1.dc.matmul.11", "r") + forge.config.override_u_kt("conv2d_0.dc.conv2d.1.dc.matmul.11", 1) + forge.config.override_op_size("conv2d_0.dc.conv2d.3.dc.matmul.11", (2, 1)) + forge.config.override_t_stream_shape("conv2d_0.dc.conv2d.3.dc.matmul.11", (256, 1)) + forge.config.override_t_stream_dir("conv2d_0.dc.conv2d.3.dc.matmul.11", "r") + forge.config.override_u_kt("conv2d_0.dc.conv2d.3.dc.matmul.11", 1) + forge.config.override_op_size("conv2d_0.dc.conv2d.5.dc.matmul.11", (2, 1)) + forge.config.override_t_stream_shape("conv2d_0.dc.conv2d.5.dc.matmul.11", (256, 1)) + forge.config.override_t_stream_dir("conv2d_0.dc.conv2d.5.dc.matmul.11", "r") + forge.config.override_u_kt("conv2d_0.dc.conv2d.5.dc.matmul.11", 1) + forge.config.override_op_size("conv2d_0.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_t_stream_shape("conv2d_0.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (256, 1)) + forge.config.override_t_stream_dir("conv2d_0.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") + forge.config.override_op_size("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_t_stream_shape("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (256, 1)) + forge.config.override_t_stream_dir("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") + forge.config.override_op_size("conv2d_0.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) + forge.config.override_t_stream_shape("conv2d_0.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (256, 1)) + forge.config.override_t_stream_dir("conv2d_0.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") + +def test_fork_join_yolo_v3(test_kind, test_device): + """ + This test is extracted from yolo_v3 benchmark model. + + Fork-join which causes hang is the one from _fused_op_1 to _fused_op_2. + FORGE_FORK_JOIN_EXPAND_OUTPUT_BUFFERS=1 fixes the hang. 
+ """ + + if test_kind.is_training(): + pytest.skip("Skipping training due to op overrides.") + + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.balancer_policy = "Ribbon" + compiler_cfg.default_df_override = DataFormat.Float16_b + compiler_cfg.enable_auto_transposing_placement = True + + YoloV3ForkJoin.add_op_overrides() + import os + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" # TODO: Figure out why this is needed, segfaults otherwise: tenstorrent/forge#1935 + os.environ["FORGE_OVERRIDE_INPUT_QUEUE_ENTRIES"] = "32" + os.environ["FORGE_MAXIMIZE_SPARSE_UBLOCK"] = "1" + os.environ["FORGE_DISABLE_CAP_SPARSE_MM_FIDELITY"] = "1" + os.environ["FORGE_DISABLE_EXPLICIT_DRAM_IO"] = "1" + + # Fixes hang + os.environ["FORGE_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" + + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + verify_module(YoloV3ForkJoin("test_fork_join_yolo_v3"), [(32, 3, 512, 512)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol)) + +class HRNetForkJoin(forge.ForgeModule): + + def __init__(self, name): + super().__init__(name) + self.add_parameter("features.init_block.conv1.conv.weight", forge.Parameter(*(64, 3, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("features.init_block.conv1.bn.weight", forge.Parameter(*(64,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("features.init_block.conv1.bn.bias", forge.Parameter(*(64,), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("features.init_block.conv2.conv.weight", forge.Parameter(*(64, 64, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("features.init_block.subblocks.block1.body.conv1.conv.weight", forge.Parameter(*(64, 64, 1, 1), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("features.init_block.subblocks.block1.body.conv2.conv.weight", forge.Parameter(*(64, 64, 3, 3), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("features.init_block.subblocks.block1.body.conv3.conv.weight", forge.Parameter(*(256, 64, 1, 1), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("features.init_block.subblocks.block1.identity_conv.conv.weight", forge.Parameter(*(256, 64, 1, 1), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("features.init_block.subblocks.block2.body.conv1.conv.weight", forge.Parameter(*(64, 256, 1, 1), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("bla", forge.Parameter(*(256, 56, 56), requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("bias1", forge.Parameter(64, requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("bias2", forge.Parameter(64, requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("bias3", forge.Parameter(64, requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("bias4", forge.Parameter(256, requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_parameter("bias5", forge.Parameter(256, requires_grad=True, dev_data_format=forge.DataFormat.Float32)) + self.add_constant("features.init_block.conv1.bn.running_var") + self.add_constant("features.init_block.conv1.bn.running_mean") + 
self.add_constant("const_12602") + self.add_constant("const_02602") + self.add_constant("const_22602") + + self.set_constant("features.init_block.conv1.bn.running_var", torch.rand(1, 1)) + self.set_constant("features.init_block.conv1.bn.running_mean", torch.rand(1, 1)) + self.set_constant("const_12602", torch.rand(1, 1)) + self.set_constant("const_02602", torch.rand(1, 1)) + self.set_constant("const_22602", torch.rand(1, 1)) + + for param in self.get_parameters(): + self.set_parameter(param.get_name(), torch.rand(size = param.shape.get_pytorch_shape())) + + def forward(self, act): + + conv2d_1632 = forge.op.Conv2d("", act, self.get_parameter("features.init_block.conv1.conv.weight"), stride=[2, 2], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + add_1635 = forge.op.Add("", self.get_constant("features.init_block.conv1.bn.running_var"), self.get_constant("const_12602")) + sqrt_1636 = forge.op.Sqrt("", add_1635) + reciprocal_1637 = forge.op.Reciprocal("", sqrt_1636) + multiply_1638 = forge.op.Multiply("", self.get_constant("const_02602"), reciprocal_1637) + multiply_1639 = forge.op.Multiply("", multiply_1638, self.get_parameter("features.init_block.conv1.bn.weight")) + reshape_1640 = forge.op.Reshape("", multiply_1639, shape=(64, 1, 1)) + multiply_1641 = forge.op.Multiply("", conv2d_1632, reshape_1640) + multiply_1643 = forge.op.Multiply("", self.get_constant("features.init_block.conv1.bn.running_mean"), self.get_constant("const_22602")) + multiply_1644 = forge.op.Multiply("", multiply_1643, multiply_1639) + add_1645 = forge.op.Add("", multiply_1644, self.get_parameter("features.init_block.conv1.bn.bias")) + reshape_1646 = forge.op.Reshape("", add_1645, shape=(64, 1, 1)) + add_1647 = forge.op.Add("", multiply_1641, reshape_1646) + relu_1648 = forge.op.Relu("", add_1647) + + conv2d_1649 = forge.op.Conv2d("", relu_1648, self.get_parameter("features.init_block.conv2.conv.weight"), self.get_parameter("bias1"), stride=[2, 2], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + relu_1665 = forge.op.Relu("", conv2d_1649) + conv2d_1666 = forge.op.Conv2d("", relu_1665, self.get_parameter("features.init_block.subblocks.block1.body.conv1.conv.weight"), self.get_parameter("bias2"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) + relu_1682 = forge.op.Relu("", conv2d_1666) + conv2d_1683 = forge.op.Conv2d("", relu_1682, self.get_parameter("features.init_block.subblocks.block1.body.conv2.conv.weight"), self.get_parameter("bias3"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + relu_1699 = forge.op.Relu("", conv2d_1683) + conv2d_1700 = forge.op.Conv2d("", relu_1699, self.get_parameter("features.init_block.subblocks.block1.body.conv3.conv.weight"), self.get_parameter("bias4"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) + + # Left side fork + conv2d_1716 = forge.op.Conv2d("", relu_1665, self.get_parameter("features.init_block.subblocks.block1.identity_conv.conv.weight"), self.get_parameter("bias5"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) + + # Join + add_1732 = forge.op.Add("", conv2d_1700, conv2d_1716) + relu_1733 = forge.op.Relu("", add_1732) + + conv2d_1734 = forge.op.Conv2d("", relu_1733, self.get_parameter("features.init_block.subblocks.block2.body.conv1.conv.weight"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) + + return conv2d_1734 + + @staticmethod + def add_overrides(): + 
forge.config.override_op_size("conv2d_14.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (7, 1)) + forge.config.override_t_stream_shape("conv2d_14.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (7, 1)) + forge.config.override_u_kt("conv2d_14.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", 28) + + # Fork node + forge.config.override_op_size("conv2d_14.dc.matmul.11", (2, 2)) + forge.config.override_t_stream_shape("conv2d_14.dc.matmul.11", (7, 1)) + forge.config.override_u_kt("conv2d_14.dc.matmul.11", 18) + + # Short path + forge.config.override_op_size("conv2d_21.dc.matmul.8", (2, 4)) + forge.config.override_t_stream_shape("conv2d_21.dc.matmul.8", (7, 1)) + forge.config.override_u_kt("conv2d_21.dc.matmul.8", 1) + + # Long path + forge.config.override_op_size("conv2d_16.dc.matmul.8", (2, 1)) + forge.config.override_t_stream_shape("conv2d_16.dc.matmul.8", (7, 1)) + forge.config.override_u_kt("conv2d_16.dc.matmul.8", 1) + forge.config.override_op_size("conv2d_18.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (6, 2)) + forge.config.override_t_stream_shape("conv2d_18.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 1)) + forge.config.override_u_kt("conv2d_18.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", 7) + forge.config.override_op_size("conv2d_18.dc.matmul.11", (1, 1)) + forge.config.override_t_stream_shape("conv2d_18.dc.matmul.11", (1, 1)) + forge.config.override_u_kt("conv2d_18.dc.matmul.11", 1) + forge.config.override_op_size("conv2d_20.dc.matmul.8", (2, 4)) + forge.config.override_t_stream_shape("conv2d_20.dc.matmul.8", (7, 1)) + forge.config.override_u_kt("conv2d_20.dc.matmul.8", 1) + + # Join + forge.config.override_op_size("add_22", (2, 1)) + forge.config.override_t_stream_shape("add_22", (7, 1)) + +def test_fork_join_hrnet(test_kind, test_device): + if test_kind.is_training(): + pytest.skip("Skipping training test") + + if test_device.arch == forge.BackendDevice.Grayskull: + pytest.skip("There is not enough L1 memory on Grayskull to fit some of these ops.") + + channels = 3 + height = 224 + width = 224 + + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.balancer_policy = "Ribbon" + compiler_cfg.default_df_override = DataFormat.Float16_b + + import os + os.environ["FORGE_RIBBON2"] = "1" + + HRNetForkJoin.add_overrides() + + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + verify_module(HRNetForkJoin("test_fork_join_hrnet"), [(1, channels, height, width)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol)) + +class ForkJoinExpandOutputBuffer(forge.ForgeModule): + def __init__(self, name): + super().__init__(name) + self.weights0 = forge.Parameter(1, 64, 128, requires_grad=False) + + def forward(self, act1): + fork = forge.op.Matmul("matmul", act1, self.weights0) + left = forge.op.Exp("exp", fork) + right = forge.op.Buffer("buffer", fork) + join = forge.op.Add("add", left, right) + return join + +# Test implementation of Backend constrains for buf_size_mb. 
+def test_fork_join_expand_output_buffer_constraints(test_kind, test_device): + if test_kind.is_training(): + pytest.skip("Skipping training test") + + forge.config.override_op_size("matmul", (2, 1)) + forge.config.override_op_size("exp", (2, 4)) + forge.config.override_t_stream_shape("matmul", (10, 1)) + forge.config.override_t_stream_shape("exp", (1, 1)) + + relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) + verify_module(ForkJoinExpandOutputBuffer("test_fork_join_expand_output_buffer_constraints"), [(1, 1, 6400, 64)], + VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol)) diff --git a/pybuda/test/test_fracturing.py b/forge/test/test_fracturing.py similarity index 73% rename from pybuda/test/test_fracturing.py rename to forge/test/test_fracturing.py index a0929a404..1f9e53c3d 100644 --- a/pybuda/test/test_fracturing.py +++ b/forge/test/test_fracturing.py @@ -5,8 +5,8 @@ import torch -import pybuda -from pybuda import ( +import forge +from forge import ( Tensor, Parameter, CompilerConfig, @@ -28,12 +28,12 @@ def test_fracture(test_kind, test_device, dim, factor): ) ) def simple_fracture(x, param=None): - return pybuda.op.Matmul("mm", x, param) + return forge.op.Matmul("mm", x, param) x = Tensor.create_from_torch(torch.rand((1, 1, 96, 384))) param = Parameter(torch.rand((1, 1, 384, 768)), name="m0") - pybuda.config.insert_fracture_group([("m0", dim, factor)]) + forge.config.insert_fracture_group([("m0", dim, factor)]) simple_fracture(x, param=param) @@ -41,10 +41,10 @@ def simple_fracture(x, param=None): @pytest.mark.parametrize("dim", [-1, -2]) @pytest.mark.parametrize("factor", [2, 3, 4]) def test_fracture_multichip(test_kind, test_device, dim, factor): - if test_device.arch == pybuda.BackendDevice.Wormhole_B0 or test_device.arch == pybuda.BackendDevice.Blackhole: + if test_device.arch == forge.BackendDevice.Wormhole_B0 or test_device.arch == forge.BackendDevice.Blackhole: pytest.skip("Skip until #736 is solved") - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip() shape = (1, 1, 64, 64) @@ -57,12 +57,12 @@ def test_fracture_multichip(test_kind, test_device, dim, factor): ) ) def simple_fracture(x, param=None): - return pybuda.op.Matmul("mm", x, param) + return forge.op.Matmul("mm", x, param) x = Tensor.create_from_torch(torch.rand((1, 1, 96, 384))) param = Parameter(torch.rand((1, 1, 384, 768)), name="m0") - pybuda.config.insert_fracture_group([("m0", dim, factor), "mm"], chip_ids=list(range(factor))) + forge.config.insert_fracture_group([("m0", dim, factor), "mm"], chip_ids=list(range(factor))) compilation_results = simple_fracture(x, param=param) placer_solution = compilation_results.pass_specific_output_kwargs["placer_solution"] @@ -81,12 +81,12 @@ def test_fracture_2d(test_kind, test_device): ) ) def simple_fracture_2d(x, Win=None): - return pybuda.op.Matmul("e0", x, Win) + return forge.op.Matmul("e0", x, Win) x = Tensor.create_from_torch(torch.rand((1, 1, 128, 384))) Win = Parameter(torch.rand((1, 1, 384, 512)), name="Win") - pybuda.config.insert_fracture_group([("Win", [-2, -1], [3, 2]), "e0"]) + forge.config.insert_fracture_group([("Win", [-2, -1], [3, 2]), "e0"]) simple_fracture_2d(x, Win=Win) @@ -102,16 +102,16 @@ def test_fracture_1d_weight_stationary(test_kind, test_device): ) ) def fracture_1d_weight_stationary(x, Win=None, Wout=None): - x = pybuda.op.Matmul("e0", x, Win) - x = pybuda.op.Gelu("gelu", x) - x = 
pybuda.op.Matmul("e1", x, Wout) + x = forge.op.Matmul("e0", x, Win) + x = forge.op.Gelu("gelu", x) + x = forge.op.Matmul("e1", x, Wout) return x x = Tensor.create_from_torch(torch.rand((1, 1, 128, 384))) Win = Parameter(torch.rand((1, 1, 384, 512)), name="Win") Wout = Parameter(torch.rand((1, 1, 512, 384)), name="Wout") - pybuda.config.insert_fracture_group([("Win", -1, 2), ("Wout", -2, 2)]) + forge.config.insert_fracture_group([("Win", -1, 2), ("Wout", -2, 2)]) fracture_1d_weight_stationary(x, Win=Win, Wout=Wout) @@ -127,16 +127,16 @@ def test_fracture_2d_weight_stationary(test_kind, test_device): ) ) def fracture_2d_weight_stationary(x, Win=None, Wout=None): - x = pybuda.op.Matmul("e0", x, Win) - x = pybuda.op.Gelu("gelu", x) - x = pybuda.op.Matmul("e1", x, Wout) + x = forge.op.Matmul("e0", x, Win) + x = forge.op.Gelu("gelu", x) + x = forge.op.Matmul("e1", x, Wout) return x x = Tensor.create_from_torch(torch.rand((1, 1, 128, 384))) Win = Parameter(torch.rand((1, 1, 384, 512)), name="Win") Wout = Parameter(torch.rand((1, 1, 512, 384)), name="Wout") - pybuda.config.insert_fracture_group([("Win", [-2, -1], [2, 4]), ("Wout", [-2, -1], [4, 3])]) + forge.config.insert_fracture_group([("Win", [-2, -1], [2, 4]), ("Wout", [-2, -1], [4, 3])]) fracture_2d_weight_stationary(x, Win=Win, Wout=Wout) @@ -151,15 +151,15 @@ def test_fracture_slice(test_kind, test_device): ) ) def fracture_slice(x, Win=None): - Win = pybuda.op.HSlice("", Win, 12) - x = pybuda.op.Matmul("e0", x, Win) - x = pybuda.op.Gelu("gelu", x) + Win = forge.op.HSlice("", Win, 12) + x = forge.op.Matmul("e0", x, Win) + x = forge.op.Gelu("gelu", x) return x x = Tensor.create_from_torch(torch.rand((1, 1, 128, 384))) Win = Parameter(torch.rand((1, 1, 384, 128*12)), name="Win") - pybuda.config.insert_fracture_group([("e0", -3, 12), "gelu"]) + forge.config.insert_fracture_group([("e0", -3, 12), "gelu"]) fracture_slice(x, Win=Win) @@ -181,7 +181,7 @@ def fracture_slice(x, Win=None): ([-3, -1, -2], [3, 4, 2]), ]) def test_fracture_bcast(test_kind, test_device, config): - pytest.skip("tenstorrent/pybuda#1903") + pytest.skip("tenstorrent/forge#1903") if test_kind.is_training(): pytest.skip() @@ -191,22 +191,22 @@ def test_fracture_bcast(test_kind, test_device, config): ) ) def fracture_bcast(x, Win=None, bias=None): - x = pybuda.op.Matmul("e0", x, Win) - x = pybuda.op.Add("add", x, bias) - x = pybuda.op.Gelu("gelu", x) + x = forge.op.Matmul("e0", x, Win) + x = forge.op.Add("add", x, bias) + x = forge.op.Gelu("gelu", x) return x x = Tensor.create_from_torch(torch.rand((1, 1, 128, 384))) Win = Parameter(torch.rand((1, 12, 384, 128)), name="Win") bias = Tensor.create_from_torch(torch.rand(1), constant=True) - pybuda.config.insert_fracture_group([("Win",) + config, "gelu"]) + forge.config.insert_fracture_group([("Win",) + config, "gelu"]) fracture_bcast(x, Win=Win, bias=bias) def test_fracture_output(test_kind, test_device): - os.environ["PYBUDA_CONCAT_ON_HOST"] = "1" + os.environ["FORGE_CONCAT_ON_HOST"] = "1" if test_kind.is_training(): pytest.skip() @@ -218,12 +218,12 @@ def test_fracture_output(test_kind, test_device): ) ) def fracture_output(x, Win=None): - x = pybuda.op.Matmul("mm", x, Win) + x = forge.op.Matmul("mm", x, Win) return x x = Tensor.create_from_torch(torch.rand(1, 128, 128)) Win = Parameter(torch.rand(128, 1024), name="Win") - pybuda.config.insert_fracture_group([("mm", -1, 2)]) + forge.config.insert_fracture_group([("mm", -1, 2)]) fracture_output(x, Win=Win) @@ -237,19 +237,19 @@ def test_fracture_fork_join(test_kind, test_device): 
) ) def fracture_fork_join(x, y, Win=None): - x = pybuda.op.Matmul("e0", x, Win) - h = pybuda.op.HSlice("", x, 16) - m = pybuda.op.Multiply("multiply", h, y) - f0 = pybuda.op.Gelu("gelu0", m) - f1 = pybuda.op.Gelu("gelu1", h) - j = pybuda.op.Multiply("join", f0, f1) + x = forge.op.Matmul("e0", x, Win) + h = forge.op.HSlice("", x, 16) + m = forge.op.Multiply("multiply", h, y) + f0 = forge.op.Gelu("gelu0", m) + f1 = forge.op.Gelu("gelu1", h) + j = forge.op.Multiply("join", f0, f1) return j x = Tensor.create_from_torch(torch.rand((1, 1, 128, 384))) y = Tensor.create_from_torch(torch.rand((1, 16, 128, 128))) Win = Parameter(torch.rand((1, 1, 384, 128*16)), name="Win") - pybuda.config.insert_fracture_group([("e0", -2, 2), ("join", -2, 2)]) + forge.config.insert_fracture_group([("e0", -2, 2), ("join", -2, 2)]) fracture_fork_join(x, y, Win=Win) @@ -264,14 +264,14 @@ def test_fracture_fork_input(test_kind, test_device): ) ) def fracture_fork_input(x, Win=None): - f0 = pybuda.op.Gelu("gelu0", x) - f1 = pybuda.op.Gelu("gelu1", x) + f0 = forge.op.Gelu("gelu0", x) + f1 = forge.op.Gelu("gelu1", x) return f0, f1 x = Tensor.create_from_torch(torch.rand((1, 1, 128, 128))) - pybuda.config.insert_fracture_group([("gelu0", -2, 2)]) - pybuda.config.insert_fracture_group([("gelu1", -1, 2)]) + forge.config.insert_fracture_group([("gelu0", -2, 2)]) + forge.config.insert_fracture_group([("gelu1", -1, 2)]) fracture_fork_input(x) @@ -289,18 +289,18 @@ def test_mixed_factors(test_kind, test_device, factor, fork): ) def mixed_factors(x, Win=None): if fork: - f0 = pybuda.op.Gelu("gelu0", x) - f1 = pybuda.op.Gelu("gelu1", x) + f0 = forge.op.Gelu("gelu0", x) + f1 = forge.op.Gelu("gelu1", x) return f0, f1 else: - f0 = pybuda.op.Gelu("gelu0", x) - return pybuda.op.Gelu("gelu1", f0) + f0 = forge.op.Gelu("gelu0", x) + return forge.op.Gelu("gelu1", f0) x = Tensor.create_from_torch(torch.rand((1, 1, 384, 384))) - compile_cfg = pybuda.config._get_global_compiler_config() + compile_cfg = forge.config._get_global_compiler_config() compile_cfg.scheduler_policy = "Topological" - pybuda.config.insert_fracture_group([("gelu0", -1, factor[0]), ("gelu1", -1, factor[1])]) + forge.config.insert_fracture_group([("gelu0", -1, factor[0]), ("gelu1", -1, factor[1])]) mixed_factors(x) @@ -309,13 +309,13 @@ def test_fracture_transpose(test_device): @run(test_device) def fracture_transpose(x, y=None): - f0 = pybuda.op.Gelu("gelu0", x) - f0 = pybuda.op.Transpose("t0", f0, -2, -1) - return pybuda.op.Add("add0", f0, y) + f0 = forge.op.Gelu("gelu0", x) + f0 = forge.op.Transpose("t0", f0, -2, -1) + return forge.op.Add("add0", f0, y) x = Tensor.create_from_torch(torch.rand((1, 1, 384, 384))) y = Parameter(torch.rand((1, 1, 384, 384)), name="y") - pybuda.config.insert_fracture_group([("gelu0", -1, 2), "add0"]) + forge.config.insert_fracture_group([("gelu0", -1, 2), "add0"]) fracture_transpose(x, y=y) diff --git a/pybuda/test/test_fusing.py b/forge/test/test_fusing.py similarity index 68% rename from pybuda/test/test_fusing.py rename to forge/test/test_fusing.py index 37703b4ab..8f417a421 100644 --- a/pybuda/test/test_fusing.py +++ b/forge/test/test_fusing.py @@ -5,9 +5,9 @@ import torch import pytest -import pybuda -from pybuda.verify import verify_module, VerifyConfig, TestKind -from pybuda import BackendDevice +import forge +from forge.verify import verify_module, VerifyConfig, TestKind +from forge import BackendDevice def get_relaxed_atol_pcc(is_training, test_device, size = 1): """ @@ -28,7 +28,7 @@ def get_relaxed_atol_pcc(is_training, 
test_device, size = 1): def check_fused_result(exp_count, *ops): # Check that fused counts are as expected - g = pybuda.pybudaglobal.get_devices()[0]._compile_output.lowered_graph + g = forge.forgeglobal.get_devices()[0]._compile_output.lowered_graph fused_ops = g.get_fused_ops() count = len(fused_ops) assert exp_count == count, f"Fused op count mismatch" @@ -44,7 +44,7 @@ def check_fused_result(exp_count, *ops): assert ops[i]["schedules"][j] == len(s), f"Op count mismatch on op {i}, schedule {j}" -class FuseEltwise(pybuda.PyBudaModule): +class FuseEltwise(forge.ForgeModule): """ Simple module with 2 eltwise ops to be fused """ @@ -53,14 +53,14 @@ class FuseEltwise(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) def forward(self, act1, act2): - a1 = pybuda.op.Matmul("matmul", act1, self.weights1) + a1 = forge.op.Matmul("matmul", act1, self.weights1) # Expecting fusing of a2 and a3 - a2 = pybuda.op.Add("add", act2, a1) - a3 = pybuda.op.Reciprocal("reciprocal", a2) + a2 = forge.op.Add("add", act2, a1) + a3 = forge.op.Reciprocal("reciprocal", a2) return a3 @@ -77,20 +77,20 @@ def test_fuse_eltwise(test_device, test_kind): check_fused_result(1, {"inputs": 2, "schedules": [2]}) def test_dont_fuse(test_device): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.dont_fuse("add") verify_module(FuseEltwise("dont_fuse_eltwise"), [FuseEltwise.shape, FuseEltwise.shape], VerifyConfig(test_kind=TestKind.INFERENCE, devtype=test_device.devtype, arch=test_device.arch)) check_fused_result(0) def test_manual_fuse(test_device): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.manual_fuse(["a.*", "r.*"]) verify_module(FuseEltwise("manual_fuse_eltwise"), [FuseEltwise.shape, FuseEltwise.shape], VerifyConfig(test_kind=TestKind.INFERENCE, devtype=test_device.devtype, arch=test_device.arch)) check_fused_result(1, {"inputs": 2, "schedules": [2]}) -class FuseForkJoin(pybuda.PyBudaModule): +class FuseForkJoin(forge.ForgeModule): """ Simple module with a fork """ @@ -99,16 +99,16 @@ class FuseForkJoin(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) def forward(self, act1, act2): - a1 = pybuda.op.Matmul("matmul", act1, self.weights1) + a1 = forge.op.Matmul("matmul", act1, self.weights1) # Expecting fusing of a2, a3, a4 - a2 = pybuda.op.Add("add", act2, a1) - a3 = pybuda.op.Reciprocal("reciprocal", a2) - a4 = pybuda.op.Sqrt("sqrt", a2) - return pybuda.op.Multiply("mul", a3, a4) + a2 = forge.op.Add("add", act2, a1) + a3 = forge.op.Reciprocal("reciprocal", a2) + a4 = forge.op.Sqrt("sqrt", a2) + return forge.op.Multiply("mul", a3, a4) def test_fuse_fork(test_device, test_kind): @@ -121,7 +121,7 @@ def test_fuse_fork(test_device, test_kind): if test_kind == TestKind.INFERENCE: check_fused_result(1, {"inputs": 2, "schedules": [4]}) -class FuseReduce(pybuda.PyBudaModule): +class FuseReduce(forge.ForgeModule): """ Simple module with a reduce in the middle, or at the end, of a fused sequence """ @@ -130,24 +130,24 @@ class FuseReduce(pybuda.PyBudaModule): def __init__(self, name, 
middle: bool): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) self.middle = middle def forward(self, act1, act2): - a1 = pybuda.op.Matmul("matmul", act1, self.weights1) + a1 = forge.op.Matmul("matmul", act1, self.weights1) # Expecting fusing of ops below - a2 = pybuda.op.Add("add", act2, a1) - a3 = pybuda.op.ReduceAvg("reduce_avg", a2, dim=-1) + a2 = forge.op.Add("add", act2, a1) + a3 = forge.op.ReduceAvg("reduce_avg", a2, dim=-1) if (self.middle): - a3 = pybuda.op.Reciprocal("reciprocal", a3) + a3 = forge.op.Reciprocal("reciprocal", a3) return a3 @pytest.mark.parametrize("middle", (True, False), ids=["middle", "end"]) def test_fuse_reduce(test_device, middle, test_kind): - os.environ["PYBUDA_FUSE_REDUCE"] = "1" - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" try: relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) verify_module(FuseReduce("fuse_reduce_" + ("middle" if middle else "end"), middle), [FuseReduce.shape, FuseReduce.shape], @@ -162,9 +162,9 @@ def test_fuse_reduce(test_device, middle, test_kind): check_fused_result(1, {"inputs": 3, "schedules": [2]}) finally: - del os.environ["PYBUDA_FUSE_REDUCE"] + del os.environ["FORGE_FUSE_REDUCE"] -class FuseReduceRC(pybuda.PyBudaModule): +class FuseReduceRC(forge.ForgeModule): """ Simple module with reduces in both R and C dimensions. """ @@ -174,18 +174,18 @@ class FuseReduceRC(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights = pybuda.Parameter(self.shape1[-1], self.shape1[-1], requires_grad=True) + self.weights = forge.Parameter(self.shape1[-1], self.shape1[-1], requires_grad=True) def forward(self, act1, act2): - a1 = pybuda.op.Matmul("matmul", act1, self.weights) + a1 = forge.op.Matmul("matmul", act1, self.weights) # Expecting ops below to be fused into two fused ops - # because we cannot have reduces with both R and C dims in one fused op. 
- a2 = pybuda.op.ReduceAvg("reduce_avg_r", a1, dim=-2) - a3 = pybuda.op.Add("add", act2, a2) + a2 = forge.op.ReduceAvg("reduce_avg_r", a1, dim=-2) + a3 = forge.op.Add("add", act2, a2) - a4 = pybuda.op.ReduceAvg("reduce_avg_c", a3, dim=-1) - a5 = pybuda.op.Multiply("multiply", a4, act2) + a4 = forge.op.ReduceAvg("reduce_avg_c", a3, dim=-1) + a5 = forge.op.Multiply("multiply", a4, act2) return a5 @@ -194,10 +194,10 @@ def test_fuse_reduce_rc(test_device, test_kind): if test_kind.is_training(): # Training is currently not working, issue: - # tenstorrent/pybuda#1211 + # tenstorrent/forge#1211 pytest.skip() - os.environ["PYBUDA_FUSE_REDUCE"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" try: relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) verify_module(FuseReduceRC("fuse_reduce_RC"), [FuseReduceRC.shape1, FuseReduceRC.shape2], @@ -209,9 +209,9 @@ def test_fuse_reduce_rc(test_device, test_kind): check_fused_result(2, {"inputs": 3, "schedules": [1, 1]}, {"inputs": 3, "schedules": [1, 1]}) finally: - del os.environ["PYBUDA_FUSE_REDUCE"] + del os.environ["FORGE_FUSE_REDUCE"] -class FuseForkBroadcast(pybuda.PyBudaModule): +class FuseForkBroadcast(forge.ForgeModule): """ Module with fork/reduce/broadcast/join, and optional second fused op """ @@ -220,34 +220,34 @@ class FuseForkBroadcast(pybuda.PyBudaModule): def __init__(self, name, second_op: bool): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) self.second_op = second_op if self.second_op: - self.weights2 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights2 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) def forward(self, act1, act2): - a1 = pybuda.op.Matmul("matmul1", act1, self.weights1) + a1 = forge.op.Matmul("matmul1", act1, self.weights1) # Expecting fusing of ops below - a2 = pybuda.op.Add("add", act2, a1) - a3 = pybuda.op.ReduceAvg("reduce_avg", a2, dim=-1) - a4 = pybuda.op.Add("add_join", a2, a3) + a2 = forge.op.Add("add", act2, a1) + a3 = forge.op.ReduceAvg("reduce_avg", a2, dim=-1) + a4 = forge.op.Add("add_join", a2, a3) if not self.second_op: return a4 - a5 = pybuda.op.Matmul("matmul2", a4, self.weights2) + a5 = forge.op.Matmul("matmul2", a4, self.weights2) # Expecting fusing of a different op here - a6 = pybuda.op.Gelu("gelu1", a5) - a7 = pybuda.op.Gelu("gelu2", a6) + a6 = forge.op.Gelu("gelu1", a5) + a7 = forge.op.Gelu("gelu2", a6) return a7 def test_fork_broadcast_join(test_device, test_kind): # Broadcast inputs that are reused - os.environ["PYBUDA_FUSE_REDUCE"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" try: relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) verify_module(FuseForkBroadcast("fuse_fork_broadcast", second_op=False), [FuseForkBroadcast.shape, FuseForkBroadcast.shape], @@ -257,12 +257,12 @@ def test_fork_broadcast_join(test_device, test_kind): if test_kind == TestKind.INFERENCE: check_fused_result(1, {"inputs": 5, "schedules": [2, 2]}) finally: - del os.environ["PYBUDA_FUSE_REDUCE"] + del os.environ["FORGE_FUSE_REDUCE"] def test_two_fuse_ops(test_device, test_kind): # Add second op to the test - os.environ["PYBUDA_FUSE_REDUCE"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" try: relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) verify_module(FuseForkBroadcast("two_fuse_ops", second_op=True), [FuseForkBroadcast.shape, FuseForkBroadcast.shape], @@ -274,10 +274,10 @@ def 
test_two_fuse_ops(test_device, test_kind): {"inputs": 5, "schedules": [2, 2]}, {"inputs": 1, "schedules": [2]}) finally: - del os.environ["PYBUDA_FUSE_REDUCE"] + del os.environ["FORGE_FUSE_REDUCE"] -class FuseTileBroadcast(pybuda.PyBudaModule): +class FuseTileBroadcast(forge.ForgeModule): """ Module with a tile broadcast """ @@ -286,26 +286,26 @@ class FuseTileBroadcast(pybuda.PyBudaModule): def __init__(self, name, dim): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) if dim == "r": - self.bias1 = pybuda.Parameter(1, self.shape[-1], requires_grad=True) + self.bias1 = forge.Parameter(1, self.shape[-1], requires_grad=True) else: - self.bias1 = pybuda.Parameter(self.shape[-2], 1, requires_grad=True) + self.bias1 = forge.Parameter(self.shape[-2], 1, requires_grad=True) def forward(self, act1): - a1 = pybuda.op.Matmul("matmul1", act1, self.weights1) + a1 = forge.op.Matmul("matmul1", act1, self.weights1) # Expecting fusing of op below, which will have a tile broadcast added - a2 = pybuda.op.Add("add", a1, self.bias1) + a2 = forge.op.Add("add", a1, self.bias1) return a2 @pytest.mark.parametrize("dim", ["r", "c"]) def test_tile_broadcast(test_device, test_kind, dim): # Broadcast inputs that are reused - os.environ["PYBUDA_FUSE_REDUCE"] = "1" - os.environ["PYBUDA_NO_FUSE_MATMUL_BIAS"] = "1" - os.environ["PYBUDA_DISABLE_TILE_BROADCAST_CONSTEVAL"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" + os.environ["FORGE_NO_FUSE_MATMUL_BIAS"] = "1" + os.environ["FORGE_DISABLE_TILE_BROADCAST_CONSTEVAL"] = "1" try: relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) verify_module(FuseTileBroadcast("fuse_tile_broadcast", dim), [FuseTileBroadcast.shape], @@ -315,12 +315,12 @@ def test_tile_broadcast(test_device, test_kind, dim): if test_kind == TestKind.INFERENCE: check_fused_result(1, {"inputs": 2, "schedules": [1]}) finally: - del os.environ["PYBUDA_FUSE_REDUCE"] - del os.environ["PYBUDA_NO_FUSE_MATMUL_BIAS"] - del os.environ["PYBUDA_DISABLE_TILE_BROADCAST_CONSTEVAL"] + del os.environ["FORGE_FUSE_REDUCE"] + del os.environ["FORGE_NO_FUSE_MATMUL_BIAS"] + del os.environ["FORGE_DISABLE_TILE_BROADCAST_CONSTEVAL"] -class FuseSoftmax(pybuda.PyBudaModule): +class FuseSoftmax(forge.ForgeModule): """ Module with a softmax """ @@ -329,20 +329,20 @@ class FuseSoftmax(pybuda.PyBudaModule): def __init__(self, name, dim): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) self.dim = dim def forward(self, act1): - a1 = pybuda.op.Matmul("matmul1", act1, self.weights1) + a1 = forge.op.Matmul("matmul1", act1, self.weights1) # Expecting fusing of op below - a2 = pybuda.op.Softmax("softmax", a1, dim=self.dim) + a2 = forge.op.Softmax("softmax", a1, dim=self.dim) return a2 @pytest.mark.parametrize("dim", ["r", "c"]) def test_softmax(test_device, test_kind, dim): - os.environ["PYBUDA_FUSE_REDUCE"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" dim_index = -1 if dim == "c" else -2 try: @@ -354,9 +354,9 @@ def test_softmax(test_device, test_kind, dim): if test_kind == TestKind.INFERENCE: check_fused_result(1, {"inputs": 4, "schedules": [1, 3]}) finally: - del os.environ["PYBUDA_FUSE_REDUCE"] + del os.environ["FORGE_FUSE_REDUCE"] -class FuseLayernorm(pybuda.PyBudaModule): +class FuseLayernorm(forge.ForgeModule): """ Module with a layernorm 
""" @@ -364,15 +364,15 @@ class FuseLayernorm(pybuda.PyBudaModule): def __init__(self, name, shape): super().__init__(name) self.shape = shape - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) - self.ln_weights = pybuda.Parameter(1, self.shape[-1], requires_grad=True) - self.ln_bias = pybuda.Parameter(1, self.shape[-1], requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.ln_weights = forge.Parameter(1, self.shape[-1], requires_grad=True) + self.ln_bias = forge.Parameter(1, self.shape[-1], requires_grad=True) def forward(self, act1): - a1 = pybuda.op.Matmul("matmul1", act1, self.weights1) + a1 = forge.op.Matmul("matmul1", act1, self.weights1) # Expecting fusing of op below - a2 = pybuda.op.Layernorm("layernorm", a1, self.ln_weights, self.ln_bias) + a2 = forge.op.Layernorm("layernorm", a1, self.ln_weights, self.ln_bias) return a2 @pytest.mark.parametrize("fuse_reduce", [True, False], ids=["fuse_reduce", "no_reduce"]) @@ -381,7 +381,7 @@ def test_layernorm(test_device, test_kind, fuse_reduce, rows): shape = (1, 1, 32*rows, 256) if fuse_reduce: - os.environ["PYBUDA_FUSE_REDUCE"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" try: relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) @@ -398,9 +398,9 @@ def test_layernorm(test_device, test_kind, fuse_reduce, rows): finally: if fuse_reduce: - del os.environ["PYBUDA_FUSE_REDUCE"] + del os.environ["FORGE_FUSE_REDUCE"] -class FuseMatmulBias(pybuda.PyBudaModule): +class FuseMatmulBias(forge.ForgeModule): """ Module with matmuls+bias """ @@ -409,15 +409,15 @@ class FuseMatmulBias(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) - self.weights2 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) - self.bias1 = pybuda.Parameter((100.0 * torch.rand(1, self.shape[-1])).detach(), requires_grad=True) - self.bias2 = pybuda.Parameter((100.0 * torch.rand(1, self.shape[-1])).detach(), requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights2 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.bias1 = forge.Parameter((100.0 * torch.rand(1, self.shape[-1])).detach(), requires_grad=True) + self.bias2 = forge.Parameter((100.0 * torch.rand(1, self.shape[-1])).detach(), requires_grad=True) def forward(self, act1): - a1 = pybuda.op.Matmul("matmul1", act1, self.weights1) + self.bias1 - a2 = pybuda.op.Sqrt("sqrt", a1) - a3 = pybuda.op.Matmul("matmul2", a2, self.weights2) + self.bias2 + a1 = forge.op.Matmul("matmul1", act1, self.weights1) + self.bias1 + a2 = forge.op.Sqrt("sqrt", a1) + a3 = forge.op.Matmul("matmul2", a2, self.weights2) + self.bias2 return a3 def test_matmul_bias(test_device, test_kind): @@ -427,7 +427,7 @@ def test_matmul_bias(test_device, test_kind): relative_atol=relative_atol, pcc=pcc)) -class FuseMatmulGelu(pybuda.PyBudaModule): +class FuseMatmulGelu(forge.ForgeModule): """ Module with matmuls+gelu """ @@ -436,17 +436,17 @@ class FuseMatmulGelu(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) - self.weights2 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) - self.weights3 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) - self.bias1 = pybuda.Parameter((100.0 * torch.rand(1, 
self.shape[-1])).detach(), requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights2 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights3 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.bias1 = forge.Parameter((100.0 * torch.rand(1, self.shape[-1])).detach(), requires_grad=True) def forward(self, act1): - a1 = pybuda.op.Matmul("matmul1", act1, self.weights1, self.bias1) - a2 = pybuda.op.Gelu("gelu1", a1) - a3 = pybuda.op.Matmul("matmul2", a2, self.weights2) - a4 = pybuda.op.Gelu("gelu2", a3) - a5 = pybuda.op.Matmul("matmul3", a4, self.weights3) + a1 = forge.op.Matmul("matmul1", act1, self.weights1, self.bias1) + a2 = forge.op.Gelu("gelu1", a1) + a3 = forge.op.Matmul("matmul2", a2, self.weights2) + a4 = forge.op.Gelu("gelu2", a3) + a5 = forge.op.Matmul("matmul3", a4, self.weights3) return a5 def test_matmul_gelu(test_device, test_kind): @@ -456,7 +456,7 @@ def test_matmul_gelu(test_device, test_kind): relative_atol=relative_atol, pcc=pcc)) -class FuseTwoLayernorm(pybuda.PyBudaModule): +class FuseTwoLayernorm(forge.ForgeModule): """ Module with two layernorms """ @@ -465,25 +465,25 @@ class FuseTwoLayernorm(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) - self.weights2 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) - self.ln_weights1 = pybuda.Parameter(1, self.shape[-1], requires_grad=True) - self.ln_bias1 = pybuda.Parameter(1, self.shape[-1], requires_grad=True) - self.ln_weights2 = pybuda.Parameter(1, self.shape[-1], requires_grad=True) - self.ln_bias2 = pybuda.Parameter(1, self.shape[-1], requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights2 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.ln_weights1 = forge.Parameter(1, self.shape[-1], requires_grad=True) + self.ln_bias1 = forge.Parameter(1, self.shape[-1], requires_grad=True) + self.ln_weights2 = forge.Parameter(1, self.shape[-1], requires_grad=True) + self.ln_bias2 = forge.Parameter(1, self.shape[-1], requires_grad=True) def forward(self, act1): - a1 = pybuda.op.Matmul("matmul1", act1, self.weights1) + a1 = forge.op.Matmul("matmul1", act1, self.weights1) # Two layernorms with matmul in between. We want to see the fused op reused twice. 
- a2 = pybuda.op.Layernorm("layernorm1", a1, self.ln_weights1, self.ln_bias1) - a3 = pybuda.op.Matmul("matmul2", a2, self.weights2) - a4 = pybuda.op.Layernorm("layernorm2", a3, self.ln_weights2, self.ln_bias2) + a2 = forge.op.Layernorm("layernorm1", a1, self.ln_weights1, self.ln_bias1) + a3 = forge.op.Matmul("matmul2", a2, self.weights2) + a4 = forge.op.Layernorm("layernorm2", a3, self.ln_weights2, self.ln_bias2) return a4 def test_layernorm_reuse(test_device): - os.environ["PYBUDA_FUSE_REDUCE"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" try: relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) @@ -496,10 +496,10 @@ def test_layernorm_reuse(test_device): #check_fused_result(1, {"inputs": 9, "schedules": [1, 3, 10]}) finally: - del os.environ["PYBUDA_FUSE_REDUCE"] + del os.environ["FORGE_FUSE_REDUCE"] -class FuseReduceMax(pybuda.PyBudaModule): +class FuseReduceMax(forge.ForgeModule): """ Simple module with a reduce_max in the middle, or at the end, of a fused sequence """ @@ -508,24 +508,24 @@ class FuseReduceMax(pybuda.PyBudaModule): def __init__(self, name, middle: bool): super().__init__(name) - self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) self.middle = middle def forward(self, act1, act2): - a1 = pybuda.op.Matmul("matmul", act1, self.weights1) + a1 = forge.op.Matmul("matmul", act1, self.weights1) # Expecting fusing of ops below - a2 = pybuda.op.Add("add", act2, a1) - a3 = pybuda.op.ReduceMax("reduce_max", a2, dim=-1) + a2 = forge.op.Add("add", act2, a1) + a3 = forge.op.ReduceMax("reduce_max", a2, dim=-1) if (self.middle): - a3 = pybuda.op.Reciprocal("reciprocal", a3) + a3 = forge.op.Reciprocal("reciprocal", a3) return a3 @pytest.mark.parametrize("middle", (True, False), ids=["middle", "end"]) @pytest.mark.skip(reason="Reduce max not supported yet in fusing") def test_fuse_reduce_max(test_device, middle, test_kind): - os.environ["PYBUDA_FUSE_REDUCE"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" try: relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) verify_module(FuseReduceMax("fuse_reduce_max_" + ("middle" if middle else "end"), middle), [FuseReduce.shape, FuseReduce.shape], @@ -540,10 +540,10 @@ def test_fuse_reduce_max(test_device, middle, test_kind): check_fused_result(1, {"inputs": 3, "schedules": [2]}) finally: - del os.environ["PYBUDA_FUSE_REDUCE"] + del os.environ["FORGE_FUSE_REDUCE"] -class FuseSelect(pybuda.PyBudaModule): +class FuseSelect(forge.ForgeModule): """ Simple model with one select/splice operation in it. This op should not be fused. """ @@ -558,8 +558,8 @@ def __init__(self, name, dim, index, length): def forward(self, act1, act2): #These two ops shouldn't be fused since select op is not allowed for fusing. - a1 = pybuda.op.Add("add0", act1, act2) - a2 = pybuda.op.Select("select0", a1, self.dim, (self.index, self.length)) + a1 = forge.op.Add("add0", act1, act2) + a2 = forge.op.Select("select0", a1, self.dim, (self.index, self.length)) return a2 def test_fuse_select(test_device, test_kind): @@ -572,7 +572,7 @@ def test_fuse_select(test_device, test_kind): check_fused_result(0) -class FuseBuffer(pybuda.PyBudaModule): +class FuseBuffer(forge.ForgeModule): """ Simple model with buffer operation in it. This op shouln't be fused """ @@ -584,8 +584,8 @@ def __init__(self, name: str): def forward(self, act1, act2): #These two ops shouldn't be fused since buffer op is not allowed for fusing. 
- a1 = pybuda.op.Add("add0", act1, act2) - a2 = pybuda.op.Buffer("buffer0", a1) + a1 = forge.op.Add("add0", act1, act2) + a2 = forge.op.Buffer("buffer0", a1) return a2 def test_fuse_buffer(test_device, test_kind): @@ -598,7 +598,7 @@ def test_fuse_buffer(test_device, test_kind): check_fused_result(0) -class FuseEpochAndChipBreak(pybuda.PyBudaModule): +class FuseEpochAndChipBreak(forge.ForgeModule): """ Simple module with epoch and chip breaks. """ @@ -610,15 +610,15 @@ def __init__(self, name: str): def forward(self, act1, act2): # Make model with chip and epoch breaks - a1 = pybuda.op.Add("add0", act1, act2) - a2 = pybuda.op.Buffer("buffer_a", a1) - a3 = pybuda.op.Buffer("buffer_b", a2) - a4 = pybuda.op.Buffer("buffer_c", a2) - a5 = pybuda.op.Add("add1", a3, a4) + a1 = forge.op.Add("add0", act1, act2) + a2 = forge.op.Buffer("buffer_a", a1) + a3 = forge.op.Buffer("buffer_b", a2) + a4 = forge.op.Buffer("buffer_c", a2) + a5 = forge.op.Add("add1", a3, a4) - pybuda.set_epoch_break("buffer_a") - pybuda.set_chip_break("buffer_b") - pybuda.set_epoch_break("buffer_c") + forge.set_epoch_break("buffer_a") + forge.set_chip_break("buffer_b") + forge.set_epoch_break("buffer_c") return a5 @@ -632,23 +632,23 @@ def test_fuse_epoch_and_chip_break(test_device, test_kind): check_fused_result(0) -class FuseSimpleTileBroadcast(pybuda.PyBudaModule): +class FuseSimpleTileBroadcast(forge.ForgeModule): """ Module with a tile broadcast """ def __init__(self, name): super().__init__(name) - self.bias1 = pybuda.Parameter(1, 1, requires_grad=False) + self.bias1 = forge.Parameter(1, 1, requires_grad=False) def forward(self, act1): - a1 = pybuda.op.Subtract("sub", act1, self.bias1) + a1 = forge.op.Subtract("sub", act1, self.bias1) return a1 def test_simple_tile_broadcast_RC(test_device): shape = (1, 1, 64, 64) relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) - os.environ["PYBUDA_DISABLE_TILE_BROADCAST_CONSTEVAL"] = "1" + os.environ["FORGE_DISABLE_TILE_BROADCAST_CONSTEVAL"] = "1" try: verify_module(FuseSimpleTileBroadcast("fuse_simple_tile_broadcast_RC"), [shape], VerifyConfig(test_kind=TestKind.INFERENCE, skip_shutdown=True, arch=test_device.arch, devtype=test_device.devtype, @@ -660,12 +660,12 @@ def test_simple_tile_broadcast_RC(test_device): # Only tile broadcast ops will be fused together. check_fused_result(1, {"inputs": 3, "schedules": [1, 1]}) finally: - del os.environ["PYBUDA_DISABLE_TILE_BROADCAST_CONSTEVAL"] + del os.environ["FORGE_DISABLE_TILE_BROADCAST_CONSTEVAL"] def test_simple_tile_broadcast_C(test_device): shape = (1, 1, 1, 64) relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) - os.environ["PYBUDA_DISABLE_TILE_BROADCAST_CONSTEVAL"] = "1" + os.environ["FORGE_DISABLE_TILE_BROADCAST_CONSTEVAL"] = "1" try: verify_module(FuseSimpleTileBroadcast("fuse_simple_tile_broadcast_C"), [shape], VerifyConfig(test_kind=TestKind.INFERENCE, skip_shutdown=True, arch=test_device.arch, devtype=test_device.devtype, @@ -676,9 +676,9 @@ def test_simple_tile_broadcast_C(test_device): # Tile broadcast should be merged to sub and sub shoud be fused. 
check_fused_result(1, {"inputs": 2, "schedules": [1]}) finally: - del os.environ["PYBUDA_DISABLE_TILE_BROADCAST_CONSTEVAL"] + del os.environ["FORGE_DISABLE_TILE_BROADCAST_CONSTEVAL"] -class FuseBroadcastAsLHSOfMatmul(pybuda.PyBudaModule): +class FuseBroadcastAsLHSOfMatmul(forge.ForgeModule): """ Module with brodacast C, that forces u_block_order R """ @@ -687,32 +687,32 @@ class FuseBroadcastAsLHSOfMatmul(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) def forward(self, act1, act2): # Expecting fusing of ops below and having broadcast C - a1 = pybuda.op.Add("add", act1, act2) - a2 = pybuda.op.ReduceAvg("reduce_avg", a1, dim=-1) - a3 = pybuda.op.Add("add_join", a1, a2) + a1 = forge.op.Add("add", act1, act2) + a2 = forge.op.ReduceAvg("reduce_avg", a1, dim=-1) + a3 = forge.op.Add("add_join", a1, a2) # Have fused op as LHS argument - a4 = pybuda.op.Matmul("matmul", a3, self.weights) + a4 = forge.op.Matmul("matmul", a3, self.weights) return a4 # Test that fusing broacast C as LHS argument of matmul will work. def test_fuse_broadcast_c_as_lhs_matmul(test_device): - os.environ["PYBUDA_FUSE_REDUCE"] = "1" + os.environ["FORGE_FUSE_REDUCE"] = "1" try: relative_atol, pcc = get_relaxed_atol_pcc(True, test_device) verify_module(FuseBroadcastAsLHSOfMatmul("fuse_broadcast_c_as_lhs_matmul"), [FuseBroadcastAsLHSOfMatmul.shape, FuseBroadcastAsLHSOfMatmul.shape], VerifyConfig(test_kind=TestKind.INFERENCE, skip_shutdown=True, arch=test_device.arch, devtype=test_device.devtype, relative_atol=relative_atol, pcc=pcc)) finally: - del os.environ["PYBUDA_FUSE_REDUCE"] + del os.environ["FORGE_FUSE_REDUCE"] -class FuseBroadcastOutputOp(pybuda.PyBudaModule): +class FuseBroadcastOutputOp(forge.ForgeModule): """ Module with both broadcast and tile broadcast on output op of the fused op. Tests handling of tms in fused op shape calculation and in evaluation of fused ops. @@ -722,21 +722,21 @@ class FuseBroadcastOutputOp(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) def forward(self, act1, act2): - act1_reduced = pybuda.op.ReduceAvg("reduce_avg_0", act1, dim=-1) - act2_reduced = pybuda.op.ReduceAvg("reduce_avg_1", act2, dim=-1) + act1_reduced = forge.op.ReduceAvg("reduce_avg_0", act1, dim=-1) + act2_reduced = forge.op.ReduceAvg("reduce_avg_1", act2, dim=-1) # Inputs to the multiply operation (which will be the output op of the fused op) # will have both the "broadcast" and "tile_broadcast" tms. - a1 = pybuda.op.Add("add", act1_reduced, act2_reduced) - a2 = pybuda.op.Reciprocal("reciprocal", a1) - a3 = pybuda.op.Broadcast("broadcast", a2, -1, self.shape[-1]) - a4 = pybuda.op.Multiply("multiply", a3, act1_reduced) + a1 = forge.op.Add("add", act1_reduced, act2_reduced) + a2 = forge.op.Reciprocal("reciprocal", a1) + a3 = forge.op.Broadcast("broadcast", a2, -1, self.shape[-1]) + a4 = forge.op.Multiply("multiply", a3, act1_reduced) - a5 = pybuda.op.Matmul("matmul", a4, self.weights) + a5 = forge.op.Matmul("matmul", a4, self.weights) return a5 # Test that all broadcast operations inside the fused op will be treated correctly. 
diff --git a/pybuda/test/test_indexing.py b/forge/test/test_indexing.py similarity index 87% rename from pybuda/test/test_indexing.py rename to forge/test/test_indexing.py index 675427946..26271a809 100644 --- a/pybuda/test/test_indexing.py +++ b/forge/test/test_indexing.py @@ -5,14 +5,14 @@ import torch import random -import pybuda -from pybuda import ( +import forge +from forge import ( Tensor, CompilerConfig, VerifyConfig, ) from .common import compile, run -from pybuda.pybudaglobal import TILE_DIM +from forge.forgeglobal import TILE_DIM @pytest.mark.parametrize("mode", ["inference", "training"]) @@ -42,7 +42,7 @@ def test_index( verify_cfg=VerifyConfig(), ) def index(x): - return pybuda.op.Index("index", x, dim, start, stop, stride) + return forge.op.Index("index", x, dim, start, stop, stride) x = Tensor.create_from_torch(torch.rand(*shape, requires_grad=training)) index(x) @@ -74,7 +74,7 @@ def test_index1d( verify_cfg=VerifyConfig(), ) def index1d(x): - return pybuda.op.Index("index", x, dim, start, stop, stride) + return forge.op.Index("index", x, dim, start, stop, stride) x = Tensor.create_from_torch(torch.rand(*shape, requires_grad=training)) index1d(x) @@ -118,8 +118,8 @@ def test_index2d( verify_cfg=VerifyConfig(), ) def index2d(x): - x = pybuda.op.Index("index_r", x, 0, start0, stop0, stride0) - x = pybuda.op.Index("index_c", x, 1, start1, stop1, stride1) + x = forge.op.Index("index_r", x, 0, start0, stop0, stride0) + x = forge.op.Index("index_c", x, 1, start1, stop1, stride1) return x x = Tensor.create_from_torch(torch.rand(*shape, requires_grad=training)) @@ -143,7 +143,7 @@ def test_index3d( training = mode == "training" if training: - pytest.skip("tenstorrent/pybuda#184") + pytest.skip("tenstorrent/forge#184") if type(test) is int: random.seed(test) @@ -176,9 +176,9 @@ def test_index3d( verify_cfg=VerifyConfig(), ) def index3d(x): - x = pybuda.op.Index("index_z", x, has_w + 0, start0, stop0, stride0) - x = pybuda.op.Index("index_r", x, has_w + 1, start1, stop1, stride1) - x = pybuda.op.Index("index_c", x, has_w + 2, start2, stop2, stride2) + x = forge.op.Index("index_z", x, has_w + 0, start0, stop0, stride0) + x = forge.op.Index("index_r", x, has_w + 1, start1, stop1, stride1) + x = forge.op.Index("index_c", x, has_w + 2, start2, stop2, stride2) return x x = Tensor.create_from_torch(torch.rand(*shape, requires_grad=training)) diff --git a/pybuda/test/test_kernel_broadcast.py b/forge/test/test_kernel_broadcast.py similarity index 50% rename from pybuda/test/test_kernel_broadcast.py rename to forge/test/test_kernel_broadcast.py index fdcb7c744..d9901d217 100644 --- a/pybuda/test/test_kernel_broadcast.py +++ b/forge/test/test_kernel_broadcast.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import pytest -import pybuda +import forge import torch import yaml from .common import run @@ -11,37 +11,37 @@ def test_kernel_broadcast_transpose(test_device): @run( - verify_cfg=pybuda.VerifyConfig( - test_kind=pybuda.verify.TestKind.INFERENCE, + verify_cfg=forge.VerifyConfig( + test_kind=forge.verify.TestKind.INFERENCE, devtype=test_device.devtype, arch=test_device.arch, run_net2pipe=True, ), ) def kernel_broadcast_transpose(x, y): - x = pybuda.op.Transpose("transpose", x, -2, -1) - x = pybuda.op.Add("add", y, x) + x = forge.op.Transpose("transpose", x, -2, -1) + x = forge.op.Add("add", y, x) return x - x = pybuda.Tensor.create_from_torch(torch.rand((1, 1, 128, 1))) - y = pybuda.Tensor.create_from_torch(torch.rand((1, 1, 128, 128))) + x = 
forge.Tensor.create_from_torch(torch.rand((1, 1, 128, 1))) + y = forge.Tensor.create_from_torch(torch.rand((1, 1, 128, 128))) kernel_broadcast_transpose(x, y) def test_lhs_matmul_zbroadcast(test_device): - pybuda.config.override_op_size("mm", (2, 2)) + forge.config.override_op_size("mm", (2, 2)) @run( - verify_cfg=pybuda.VerifyConfig( - test_kind=pybuda.verify.TestKind.INFERENCE, + verify_cfg=forge.VerifyConfig( + test_kind=forge.verify.TestKind.INFERENCE, devtype=test_device.devtype, arch=test_device.arch, run_net2pipe=True, ), ) def lhs_matmul_zbroadcast(rhs, lhs=None): - return pybuda.op.Matmul("mm", lhs, rhs) + return forge.op.Matmul("mm", lhs, rhs) - lhs = pybuda.Tensor.create_from_torch(torch.rand((1, 1, 128, 512)), constant=True) - rhs = pybuda.Tensor.create_from_torch(torch.rand((1, 128, 512, 64))) + lhs = forge.Tensor.create_from_torch(torch.rand((1, 1, 128, 512)), constant=True) + rhs = forge.Tensor.create_from_torch(torch.rand((1, 128, 512, 64))) lhs_matmul_zbroadcast(rhs, lhs=lhs) diff --git a/pybuda/test/test_large_parameters.py b/forge/test/test_large_parameters.py similarity index 85% rename from pybuda/test/test_large_parameters.py rename to forge/test/test_large_parameters.py index ae1bd0829..f4ede60df 100644 --- a/pybuda/test/test_large_parameters.py +++ b/forge/test/test_large_parameters.py @@ -6,14 +6,14 @@ import torch from loguru import logger -import pybuda -import pybuda.op -from pybuda import ( - PyBudaModule, +import forge +import forge.op +from forge import ( + ForgeModule, TTDevice, BackendType, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) @@ -67,18 +67,18 @@ -class ElementWiseBinary(PyBudaModule): +class ElementWiseBinary(ForgeModule): def __init__(self, name, weight_shape): super().__init__(name) self.shape = weight_shape - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act): - return pybuda.op.Multiply("mul", act, self.weights1) + return forge.op.Multiply("mul", act, self.weights1) @pytest.mark.xfail( - reason="tenstorrent/pybuda#25" + reason="tenstorrent/forge#25" ) @pytest.mark.parametrize("shape", shape, ids=[f"shape={'x'.join([str(item) for item in sh])}" for sh in shape]) def test_eltwise_binary(shape): @@ -87,4 +87,4 @@ def test_eltwise_binary(shape): tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(model) activations = Tensor.create_from_torch(torch.rand(*shape, requires_grad=True)) - pybuda_compile(tt0, model.name, activations) \ No newline at end of file + forge_compile(tt0, model.name, activations) \ No newline at end of file diff --git a/pybuda/test/test_long_short_path.py b/forge/test/test_long_short_path.py similarity index 67% rename from pybuda/test/test_long_short_path.py rename to forge/test/test_long_short_path.py index 1ab46cf21..2d4dccf8a 100644 --- a/pybuda/test/test_long_short_path.py +++ b/forge/test/test_long_short_path.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 import pytest import torch -import pybuda -import pybuda.op -from pybuda import PyBudaModule, Tensor -from pybuda._C.balancer import BalancerConfig, PolicyType -from pybuda._C import run_pre_placer_buda_passes +import forge +import forge.op +from forge import ForgeModule, Tensor +from forge._C.balancer import BalancerConfig, PolicyType +from forge._C import run_pre_placer_buda_passes from .common import compile, ModuleBuilder -from pybuda.verify import verify_module, VerifyConfig, TestKind -from 
pybuda._C.backend_api import BackendDevice +from forge.verify import verify_module, VerifyConfig, TestKind +from forge._C.backend_api import BackendDevice @pytest.mark.parametrize("mode", ["inference"]) @pytest.mark.parametrize("microbatch_size", (1, 8), ids=("mb1", "mb8")) def test_intra_epoch_relay_queue(mode, microbatch_size): def linked_list(activations): - activations = pybuda.op.Buffer(f"buffer_pre", activations) + activations = forge.op.Buffer(f"buffer_pre", activations) # num_entries=microbatch_size, so if the queue is statically allocated, it still has enough memory - activations = pybuda.op.DRAMQueue(f"buffering_queue", activations, num_entries=microbatch_size) - activations = pybuda.op.Buffer(f"buffer_post", activations) + activations = forge.op.DRAMQueue(f"buffering_queue", activations, num_entries=microbatch_size) + activations = forge.op.Buffer(f"buffer_post", activations) return activations module = ModuleBuilder(linked_list) @@ -33,23 +33,23 @@ def test_sanity(mode): shape = (1, 1, 64, 64) training = mode == "training" parameters = { - "weights1": pybuda.Parameter( + "weights1": forge.Parameter( torch.rand(*shape, requires_grad=True) ), - "weights2": pybuda.Parameter( + "weights2": forge.Parameter( torch.rand(*shape, requires_grad=True) ), } @compile( - compiler_cfg=pybuda.CompilerConfig(enable_training=training), - verify_cfg=pybuda.VerifyConfig(), + compiler_cfg=forge.CompilerConfig(enable_training=training), + verify_cfg=forge.VerifyConfig(), ) def test(act1, *, weights1, weights2): - m1 = pybuda.op.Matmul("matmul1", act1, weights1) - m2 = pybuda.op.Matmul("matmul2", act1, weights2) - m1e = pybuda.op.Exp("exp", m1) - return pybuda.op.Add("add", m1e, m2) + m1 = forge.op.Matmul("matmul1", act1, weights1) + m2 = forge.op.Matmul("matmul2", act1, weights2) + m1e = forge.op.Exp("exp", m1) + return forge.op.Add("add", m1e, m2) act1 = Tensor.create_from_torch(torch.rand(*shape)) outputs = test(act1, weights1=parameters["weights1"], weights2=parameters["weights2"]) @@ -65,20 +65,20 @@ def test_two_branch_fork_join_branch_asymmetry( shape = (1, 1, 64, 64) @compile( - compiler_cfg=pybuda.CompilerConfig(enable_training=training), - verify_cfg=pybuda.VerifyConfig(), + compiler_cfg=forge.CompilerConfig(enable_training=training), + verify_cfg=forge.VerifyConfig(), ) def two_branch_fork_join_branch_asymmetry(act1): left_branch = act1 right_branch = act1 for i in range(num_ops_left_branch): - left_branch = pybuda.op.Buffer(f"buffer_left_{i}", left_branch) + left_branch = forge.op.Buffer(f"buffer_left_{i}", left_branch) for i in range(num_ops_right_branch): - right_branch = pybuda.op.Buffer(f"buffer_right_{i}", right_branch) + right_branch = forge.op.Buffer(f"buffer_right_{i}", right_branch) - return pybuda.op.Add("add", left_branch, right_branch) + return forge.op.Add("add", left_branch, right_branch) act1 = Tensor.create_from_torch(torch.rand(*shape, requires_grad=True)) outputs = two_branch_fork_join_branch_asymmetry(act1) @@ -96,23 +96,23 @@ def test_two_branch_fork_join_branch_asymmetry_with_buffering_queue( shape = (microbatch_size, 1 , 64, 64) @compile( - compiler_cfg=pybuda.CompilerConfig(enable_training=training), - verify_cfg=pybuda.VerifyConfig(), + compiler_cfg=forge.CompilerConfig(enable_training=training), + verify_cfg=forge.VerifyConfig(), ) def two_branch_fork_join_branch_asymmetry(act1): left_branch = act1 right_branch = act1 for i in range(num_ops_left_branch): - left_branch = pybuda.op.Buffer(f"buffer_left_{i}", left_branch) + left_branch = 
forge.op.Buffer(f"buffer_left_{i}", left_branch) # num_entries=microbatch_size, so if the queue is statically allocated, it still has enough memory - left_branch = pybuda.op.DRAMQueue(f"buffering_queue", left_branch, num_entries=microbatch_size) + left_branch = forge.op.DRAMQueue(f"buffering_queue", left_branch, num_entries=microbatch_size) for i in range(num_ops_right_branch): - right_branch = pybuda.op.Buffer(f"buffer_right_{i}", right_branch) + right_branch = forge.op.Buffer(f"buffer_right_{i}", right_branch) - return pybuda.op.Add("add", left_branch, right_branch) + return forge.op.Add("add", left_branch, right_branch) act1 = Tensor.create_from_torch(torch.rand(*shape, requires_grad=True)) outputs = two_branch_fork_join_branch_asymmetry(act1) @@ -132,8 +132,8 @@ def test_three_branch_fork_join_branch_asymmetry( shape = (1, 1, 64, 64) @compile( - compiler_cfg=pybuda.CompilerConfig(enable_training=training), - verify_cfg=pybuda.VerifyConfig(), + compiler_cfg=forge.CompilerConfig(enable_training=training), + verify_cfg=forge.VerifyConfig(), ) def three_branch_fork_join_branch_asymmetry(act1): first_branch = act1 @@ -141,16 +141,16 @@ def three_branch_fork_join_branch_asymmetry(act1): third_branch = act1 for i in range(num_ops_first_branch): - first_branch = pybuda.op.Buffer(f"branch0_buffer_{i}", first_branch) + first_branch = forge.op.Buffer(f"branch0_buffer_{i}", first_branch) for i in range(num_ops_second_branch): - second_branch = pybuda.op.Buffer(f"branch1_buffer_{i}", second_branch) + second_branch = forge.op.Buffer(f"branch1_buffer_{i}", second_branch) for i in range(num_ops_third_branch): - third_branch = pybuda.op.Buffer(f"branch2_buffer_{i}", third_branch) + third_branch = forge.op.Buffer(f"branch2_buffer_{i}", third_branch) - partial_sum = pybuda.op.Add("partial_add", first_branch, second_branch) - sum = pybuda.op.Add("final_add", partial_sum, third_branch) + partial_sum = forge.op.Add("partial_add", first_branch, second_branch) + sum = forge.op.Add("final_add", partial_sum, third_branch) return sum act1 = Tensor.create_from_torch(torch.rand(*shape, requires_grad=True)) @@ -168,8 +168,8 @@ def test_nested_fork( shape = (1, 1, 64, 64) @compile( - compiler_cfg=pybuda.CompilerConfig(enable_training=training), - verify_cfg=pybuda.VerifyConfig(), + compiler_cfg=forge.CompilerConfig(enable_training=training), + verify_cfg=forge.VerifyConfig(), ) def nested_fork(act1): def instantiate_fork_join(nesting_level, fork_node): @@ -180,18 +180,18 @@ def instantiate_fork_join(nesting_level, fork_node): right_branch = fork_node for i in range(num_ops_left_branch): - left_branch = pybuda.op.Buffer( + left_branch = forge.op.Buffer( f"nesting_level_{nesting_level}_buffer_left_{i}", left_branch ) left_branch = instantiate_fork_join(nesting_level - 1, left_branch) for i in range(num_ops_right_branch): - right_branch = pybuda.op.Buffer( + right_branch = forge.op.Buffer( f"nesting_level_{nesting_level}_buffer_right_{i}", right_branch ) - return pybuda.op.Add( + return forge.op.Add( f"nesting_level_{nesting_level}_add", left_branch, right_branch ) diff --git a/pybuda/test/test_multichip.py b/forge/test/test_multichip.py similarity index 75% rename from pybuda/test/test_multichip.py rename to forge/test/test_multichip.py index dbc658323..b8c1c8677 100644 --- a/pybuda/test/test_multichip.py +++ b/forge/test/test_multichip.py @@ -6,16 +6,16 @@ import torch import os -import pybuda -import pybuda.op -from pybuda import ( +import forge +import forge.op +from forge import ( Tensor, CompilerConfig, ) -from 
pybuda.verify import verify_module, VerifyConfig, TestKind -from pybuda._C.backend_api import BackendDevice +from forge.verify import verify_module, VerifyConfig, TestKind +from forge._C.backend_api import BackendDevice from .common import compile, device, ModuleBuilder -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config backend_devices = { "grayskull" : BackendDevice.Grayskull, @@ -37,7 +37,7 @@ def test_multichip_input_queue_forks_to_multiple_remote_chips(): compiler_cfg = CompilerConfig(enable_training=False) compiler_cfg.enable_consteval = False - # tenstorrent/pybuda#480 + # tenstorrent/forge#480 compiler_cfg.use_interactive_placer = False if arch is BackendDevice.Grayskull else True @compile( @@ -46,11 +46,11 @@ def test_multichip_input_queue_forks_to_multiple_remote_chips(): chip_ids=[0, 1, 2, 3] ) def three_branch_input_queue(act): - branch_a = pybuda.op.Gelu(f"branch_a", act) - branch_b = pybuda.op.Gelu(f"branch_b", act) - branch_c = pybuda.op.Gelu(f"branch_c", act) - add_a = pybuda.op.Add("add_a", branch_b, branch_a) - add_b = pybuda.op.Add("add_b", add_a, branch_c) + branch_a = forge.op.Gelu(f"branch_a", act) + branch_b = forge.op.Gelu(f"branch_b", act) + branch_c = forge.op.Gelu(f"branch_c", act) + add_a = forge.op.Add("add_a", branch_b, branch_a) + add_b = forge.op.Add("add_b", add_a, branch_c) return add_b act = Tensor.create_from_torch(torch.rand(*shape, requires_grad=True)) @@ -67,7 +67,7 @@ def test_multichip_producer_forks_to_multiple_remote_chips(manual_placement): compiler_cfg = CompilerConfig(enable_training=False) compiler_cfg.enable_consteval = False - # tenstorrent/pybuda#480 + # tenstorrent/forge#480 compiler_cfg.use_interactive_placer = False if arch is BackendDevice.Grayskull else True if manual_placement: compiler_cfg.place_on_new_chip("branch_a") @@ -80,14 +80,14 @@ def test_multichip_producer_forks_to_multiple_remote_chips(manual_placement): chip_ids=[0, 1, 2, 3] ) def three_branch_fork(act): - nop = pybuda.op.Buffer(f"nop", act) - fork = pybuda.op.Buffer(f"fork", nop) - - branch_a = pybuda.op.Gelu(f"branch_a", fork) - branch_b = pybuda.op.Gelu(f"branch_b", fork) - branch_c = pybuda.op.Gelu(f"branch_c", fork) - add_a = pybuda.op.Add("add_a", branch_b, branch_a) - add_b = pybuda.op.Add("add_b", add_a, branch_c) + nop = forge.op.Buffer(f"nop", act) + fork = forge.op.Buffer(f"fork", nop) + + branch_a = forge.op.Gelu(f"branch_a", fork) + branch_b = forge.op.Gelu(f"branch_b", fork) + branch_c = forge.op.Gelu(f"branch_c", fork) + add_a = forge.op.Add("add_a", branch_b, branch_a) + add_b = forge.op.Add("add_b", add_a, branch_c) return add_b act = Tensor.create_from_torch(torch.rand(*shape, requires_grad=True)) @@ -102,7 +102,7 @@ def test_multichip_constant_forks_to_multiple_remote_chips(): compiler_cfg.place_on_new_chip("constant_consumer_A") compiler_cfg.place_on_new_chip("constant_consumer_B") compiler_cfg.enable_consteval = False - # tenstorrent/pybuda#480 + # tenstorrent/forge#480 compiler_cfg.use_interactive_placer = False @compile( @@ -111,12 +111,12 @@ def test_multichip_constant_forks_to_multiple_remote_chips(): chip_ids=[0, 1, 2] ) def constant_two_branch_fork(act): - constant = pybuda.op.Constant("constant", constant=1.0) + constant = forge.op.Constant("constant", constant=1.0) - left_branch = pybuda.op.Buffer(f"constant_consumer_A", constant) - right_branch = pybuda.op.Buffer(f"constant_consumer_B", constant) - add = pybuda.op.Add("add_consumers", left_branch, right_branch) - final_add = 
pybuda.op.Add("add", act, add) + left_branch = forge.op.Buffer(f"constant_consumer_A", constant) + right_branch = forge.op.Buffer(f"constant_consumer_B", constant) + add = forge.op.Add("add_consumers", left_branch, right_branch) + final_add = forge.op.Add("add", act, add) return final_add act = Tensor.create_from_torch(torch.rand(*shape, requires_grad=True)) @@ -127,8 +127,8 @@ def constant_two_branch_fork(act): def test_multichip_wormhole_sanity(): def linked_list_two_chips(act): - op0 = pybuda.op.Gelu(f"op0", act) - op1 = pybuda.op.Gelu(f"op1", op0) + op0 = forge.op.Gelu(f"op0", act) + op1 = forge.op.Gelu(f"op1", op0) return op1 compiler_cfg = _get_global_compiler_config() @@ -143,13 +143,13 @@ def linked_list_two_chips(act): def test_four_chip_wormhole_sanity(): pytest.skip("Skip until BBE commit 42d9685b1 is consumed") def linked_list_four_chips(act): - op0 = pybuda.op.Gelu(f"op0", act) - op1 = pybuda.op.Gelu(f"op1", op0) + op0 = forge.op.Gelu(f"op0", act) + op1 = forge.op.Gelu(f"op1", op0) return op1 compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_consteval = False - pybuda.set_configuration_options( + forge.set_configuration_options( backend_cluster_descriptor_path="third_party/budabackend/wormhole_2x4_sequential_cluster.yaml" ) @@ -165,7 +165,7 @@ def test_linked_list_multichip_auto_placer(): compiler_cfg = CompilerConfig(enable_training=False) compiler_cfg.enable_consteval = False - # tenstorrent/pybuda#480 + # tenstorrent/forge#480 compiler_cfg.use_interactive_placer = False @compile( @@ -174,10 +174,10 @@ def test_linked_list_multichip_auto_placer(): chip_ids=[0, 1, 2, 3] ) def linked_list(act): - a_out = pybuda.op.Buffer(f"A", act) - b_out = pybuda.op.Buffer(f"B", a_out) - c_out = pybuda.op.Buffer(f"C", b_out) - d_out = pybuda.op.Buffer(f"D", c_out) + a_out = forge.op.Buffer(f"A", act) + b_out = forge.op.Buffer(f"B", a_out) + c_out = forge.op.Buffer(f"C", b_out) + d_out = forge.op.Buffer(f"D", c_out) return d_out act = Tensor.create_from_torch(torch.rand(*shape, requires_grad=True)) @@ -191,7 +191,7 @@ def linked_list(act): -class BudaTrain(pybuda.PyBudaModule): +class BudaTrain(forge.ForgeModule): """ Simple buda module for basic testing, with parameters """ @@ -199,20 +199,20 @@ class BudaTrain(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1, act2): - in1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - in2 = pybuda.op.Matmul("matmul2", act2, self.weights2) - sum_sqrt = pybuda.op.Sqrt("sqrt", in1) - sum = pybuda.op.Add("add", sum_sqrt, in2) + in1 = forge.op.Matmul("matmul1", act1, self.weights1) + in2 = forge.op.Matmul("matmul2", act2, self.weights2) + sum_sqrt = forge.op.Sqrt("sqrt", in1) + sum = forge.op.Add("add", sum_sqrt, in2) return sum def test_training_sanity_multichip_grayskull(test_device): microbatch_size = 1 - pybuda.set_chip_break("sqrt") + forge.set_chip_break("sqrt") compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_consteval = False compiler_cfg.use_interactive_placer = False @@ -225,7 +225,7 @@ def test_training_sanity_multichip_grayskull(test_device): def test_training_sanity_multichip_wormhole(test_device): microbatch_size = 1 - pybuda.set_chip_break("sqrt") + forge.set_chip_break("sqrt") compiler_cfg = 
_get_global_compiler_config() compiler_cfg.enable_consteval = False verify_module(BudaTrain("verify_module"), [(microbatch_size, *BudaTrain.shape), (microbatch_size, *BudaTrain.shape)], diff --git a/pybuda/test/test_nlp_pipeline.py b/forge/test/test_nlp_pipeline.py similarity index 98% rename from pybuda/test/test_nlp_pipeline.py rename to forge/test/test_nlp_pipeline.py index fff69a43a..8514bd7bf 100644 --- a/pybuda/test/test_nlp_pipeline.py +++ b/forge/test/test_nlp_pipeline.py @@ -1,8 +1,8 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda -from pybuda.transformers import pipeline +import forge +from forge.transformers import pipeline from test.utils import download_model def check_results(results, expected, keys): diff --git a/pybuda/test/test_nn.py b/forge/test/test_nn.py similarity index 87% rename from pybuda/test/test_nn.py rename to forge/test/test_nn.py index 721d8b235..598d20aad 100644 --- a/pybuda/test/test_nn.py +++ b/forge/test/test_nn.py @@ -8,22 +8,22 @@ import pytest import torch -import pybuda -import pybuda.op -import pybuda.op.nn as nn -from pybuda import ( - PyBudaModule, +import forge +import forge.op +import forge.op.nn as nn +from forge import ( + ForgeModule, TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType +from forge._C.backend_api import BackendType verify_cfg = VerifyConfig(run_golden=True) # run backend golden on each test -class SoftmaxTest(PyBudaModule): +class SoftmaxTest(ForgeModule): """ Test wrapper for softmax """ @@ -37,15 +37,15 @@ def forward(self, act): return nn.Softmax("softmax", act, dim=self.dim, stable=self.stable) -class LayernormTest(PyBudaModule): +class LayernormTest(ForgeModule): """ Test wrapper for layernorm """ def __init__(self, name, wshape, bshape): super().__init__(name) - self.weights = pybuda.Parameter(*wshape, requires_grad=True) - self.bias = pybuda.Parameter(*bshape, requires_grad=True) + self.weights = forge.Parameter(*wshape, requires_grad=True) + self.bias = forge.Parameter(*bshape, requires_grad=True) def forward(self, act): return nn.Layernorm("layernorm", act, self.weights, self.bias) @@ -76,7 +76,7 @@ def test_softmax( act1.requires_grad = True act1 = Tensor.create_from_torch(act1) - pybuda_compile( + forge_compile( tt0, "softmax", act1, @@ -112,7 +112,7 @@ def test_layernorm( mod.set_parameter("weights", torch.rand(*wshape, requires_grad=True)) mod.set_parameter("bias", torch.rand(*bshape, requires_grad=True)) - pybuda_compile( + forge_compile( tt0, "layernorm", act1, diff --git a/pybuda/test/test_optimizers.py b/forge/test/test_optimizers.py similarity index 86% rename from pybuda/test/test_optimizers.py rename to forge/test/test_optimizers.py index dd1b1412f..8ec2013b9 100644 --- a/pybuda/test/test_optimizers.py +++ b/forge/test/test_optimizers.py @@ -1,17 +1,17 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.module import PyBudaModule +from forge.module import ForgeModule import pytest import math import torch -import pybuda -import pybuda.op -from pybuda import ( +import forge +import forge.op +from forge import ( Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, TTDevice, @@ -20,22 +20,22 @@ LAMB, SGD ) -from pybuda.verify import verify_module, VerifyConfig, TestKind -from pybuda._C.backend_api import BackendDevice, BackendType, get_output +from forge.verify import verify_module, VerifyConfig, 
TestKind +from forge._C.backend_api import BackendDevice, BackendType, get_output from .common import compile, device, ModuleBuilder -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config import numpy as np from loguru import logger from test.bert.modules import ( - PyBudaBertMHA, - PyBudaBertEncoder, - PyBudaFeedForward, - PyBudaPredictionHeadDecoder, - PyBudaPredictionHeadTransform, - PyBudaFFNorm, + ForgeBertMHA, + ForgeBertEncoder, + ForgeFeedForward, + ForgePredictionHeadDecoder, + ForgePredictionHeadTransform, + ForgeFFNorm, get_bert_parameters ) @@ -50,17 +50,17 @@ def test_mm_adam_optimizer(bias_correction, weight_decay): shape = (1, 1, 64, 64) def single_matmul(act, weights=None): assert weights - return pybuda.op.Matmul("matmul1", act, weights) + return forge.op.Matmul("matmul1", act, weights) torch_weights = torch.rand(*shape, requires_grad=True) - weights = pybuda.Parameter.create_from_torch(torch_weights) + weights = forge.Parameter.create_from_torch(torch_weights) module = ModuleBuilder(single_matmul, weights=weights) module.set_parameter("weights", torch_weights) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b if bias_correction: # Requires higher precision to pass - # pybuda.config.set_configuration_options(accumulate_df=pybuda.DataFormat.Float32) + # forge.config.set_configuration_options(accumulate_df=forge.DataFormat.Float32) pytest.skip("Data mismatch issue.") verify_module(module, @@ -90,18 +90,18 @@ def test_mm_adam_consteval_optimizer(bias_correction): shape = (128, 512) def single_matmul(act, weights=None): assert weights - transposed_weights = pybuda.op.Transpose("transposed_weights", weights, dim0=-2, dim1=-1) - return pybuda.op.Matmul("matmul1", act, transposed_weights) + transposed_weights = forge.op.Transpose("transposed_weights", weights, dim0=-2, dim1=-1) + return forge.op.Matmul("matmul1", act, transposed_weights) torch_weights = torch.rand(*shape, requires_grad=True) - weights = pybuda.Parameter.create_from_torch(torch_weights) + weights = forge.Parameter.create_from_torch(torch_weights) module = ModuleBuilder(single_matmul, weights=weights) module.set_parameter("weights", torch_weights) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b if bias_correction: # Requires higher precision to pass - # pybuda.config.set_configuration_options(accumulate_df=pybuda.DataFormat.Float32) + # forge.config.set_configuration_options(accumulate_df=forge.DataFormat.Float32) pytest.skip("Data mismatch issue.") verify_module(module, @@ -129,23 +129,23 @@ def test_mm_double_adam_optimizer(bias_correction, weight_decay): shape = (1, 1, 64, 64) def double_matmul(act, *, weights1=None, weights2=None): - m1 = pybuda.op.Matmul("matmul1", act, weights1) - m2 = pybuda.op.Matmul("matmul2", act, weights2) - return pybuda.op.Add("add", m1, m2) + m1 = forge.op.Matmul("matmul1", act, weights1) + m2 = forge.op.Matmul("matmul2", act, weights2) + return forge.op.Add("add", m1, m2) torch_weights1 = torch.rand(*shape, requires_grad=True) torch_weights2 = torch.rand(*shape, requires_grad=True) - weights1 = pybuda.Parameter.create_from_torch(torch_weights1) - weights2 = pybuda.Parameter.create_from_torch(torch_weights2) + weights1 = forge.Parameter.create_from_torch(torch_weights1) + weights2 = forge.Parameter.create_from_torch(torch_weights2) module = ModuleBuilder(double_matmul, weights1=weights1, weights2=weights2) module.set_parameter("weights1", 
torch_weights1) module.set_parameter("weights2", torch_weights2) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b if bias_correction: # Requires higher precision to pass - # pybuda.config.set_configuration_options(accumulate_df=pybuda.DataFormat.Float32) + # forge.config.set_configuration_options(accumulate_df=forge.DataFormat.Float32) pytest.skip("Data mismatch issue.") verify_module(module, @@ -184,12 +184,12 @@ def test_mha(cfg, weight_decay, bias_correction): params = get_bert_parameters("mha", hidden_dim=hidden_dim) config = { "num_heads": num_heads, "encoder_index": 0 } - mod = PyBudaBertMHA("mha", params, config) + mod = ForgeBertMHA("mha", params, config) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b if bias_correction: # Requires higher precision to pass - # pybuda.config.set_configuration_options(accumulate_df=pybuda.DataFormat.Float32) + # forge.config.set_configuration_options(accumulate_df=forge.DataFormat.Float32) pytest.skip("Data mismatch issue.") verify_module(mod, [(microbatch_size, seq_len, hidden_dim), (microbatch_size, 1, seq_len)], @@ -227,19 +227,19 @@ def test_learning_rate_scheduler_with_linear_warmup_and_decay(cfg, bias_correcti shape = (1, 1, 64, 64) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b def single_matmul(act, weights=None): assert weights - return pybuda.op.Matmul("matmul1", act, weights) + return forge.op.Matmul("matmul1", act, weights) torch_weights = torch.rand(*shape, requires_grad=True) - weights = pybuda.Parameter.create_from_torch(torch_weights) + weights = forge.Parameter.create_from_torch(torch_weights) module = ModuleBuilder(single_matmul, weights=weights) module.set_parameter("weights", torch_weights) # Requires higher precision to pass - pybuda.config.set_configuration_options(accumulate_df=pybuda.DataFormat.Float32) + forge.config.set_configuration_options(accumulate_df=forge.DataFormat.Float32) def scheduler_iterable(): warmup_slope = (max_learning_rate - min_learning_rate) / (learning_rate_warmup * num_steps) @@ -257,7 +257,7 @@ def scheduler_iterable(): cur_lr += decay_slope - class TorchSchedulerWithWarmupAndDecay(pybuda.torch_schedulers.TorchLearningRateScheduler): + class TorchSchedulerWithWarmupAndDecay(forge.torch_schedulers.TorchLearningRateScheduler): def __init__(self, optimizer): super().__init__(optimizer) self.get_lr_iterable = scheduler_iterable() @@ -265,7 +265,7 @@ def __init__(self, optimizer): def get_lr(self): return [next(self.get_lr_iterable)] - class LearningRateSchedulerWithWarmupAndDecay(pybuda.schedulers.LearningRateScheduler): + class LearningRateSchedulerWithWarmupAndDecay(forge.schedulers.LearningRateScheduler): def __init__(self, optimizer): super().__init__(optimizer) self.get_lr_iterable = scheduler_iterable() @@ -326,11 +326,11 @@ def get_pytorch_scheduler(self, optimizer: torch.optim.Optimizer): def matmul(shape): def matmul_inner(activations, weights): - return pybuda.op.Matmul("mm", activations, weights) + return forge.op.Matmul("mm", activations, weights) torch_weights = DISTRIBUTION(DISTRIBUTION_MIN, DISTRIBUTION_MAX).sample(shape) torch_weights.requires_grad = True - weights = pybuda.Parameter.create_from_torch(torch_weights) + weights = forge.Parameter.create_from_torch(torch_weights) module = ModuleBuilder(matmul_inner, weights=weights) module.set_parameter("weights", torch_weights) @@ -341,9 +341,9 @@ def ff_2(shape): def ff_2_inner(activations, weights1, weights2): """ 
Feed-Forward Neural Net, 2 Layers""" - l1 = pybuda.op.Matmul("mm1", activations, weights1) - act1 = pybuda.op.Gelu("gelu1", l1) - l2 = pybuda.op.Matmul("mm2", act1, weights2) + l1 = forge.op.Matmul("mm1", activations, weights1) + act1 = forge.op.Gelu("gelu1", l1) + l2 = forge.op.Matmul("mm2", act1, weights2) return l2 torch_weights1 = DISTRIBUTION(DISTRIBUTION_MIN, DISTRIBUTION_MAX).sample(shape) @@ -351,8 +351,8 @@ def ff_2_inner(activations, weights1, weights2): torch_weights2 = DISTRIBUTION(DISTRIBUTION_MIN, DISTRIBUTION_MAX).sample(shape) torch_weights2.requires_grad = True - weights1 = pybuda.Parameter.create_from_torch(torch_weights1) - weights2 = pybuda.Parameter.create_from_torch(torch_weights2) + weights1 = forge.Parameter.create_from_torch(torch_weights1) + weights2 = forge.Parameter.create_from_torch(torch_weights2) module = ModuleBuilder(ff_2_inner, weights1=weights1, weights2=weights2) module.set_parameter("weights1", torch_weights1) @@ -365,19 +365,19 @@ def ff_5(shape): def ff_5_inner(activations, weights1, weights2, weights3, weights4, weights5): """ Feed-Forward Neural Net, 5 Layers""" - l1 = pybuda.op.Matmul("mm1", activations, weights1) - act1 = pybuda.op.Gelu("gelu1", l1) + l1 = forge.op.Matmul("mm1", activations, weights1) + act1 = forge.op.Gelu("gelu1", l1) - l2 = pybuda.op.Matmul("mm2", act1, weights2) - act2 = pybuda.op.Gelu("gelu2", l2) + l2 = forge.op.Matmul("mm2", act1, weights2) + act2 = forge.op.Gelu("gelu2", l2) - l3 = pybuda.op.Matmul("mm3", act2, weights3) - act3 = pybuda.op.Relu("gelu3", l3) + l3 = forge.op.Matmul("mm3", act2, weights3) + act3 = forge.op.Relu("gelu3", l3) - l4 = pybuda.op.Matmul("mm4", act3, weights4) - act4 = pybuda.op.Gelu("gelu4", l4) + l4 = forge.op.Matmul("mm4", act3, weights4) + act4 = forge.op.Gelu("gelu4", l4) - l5 = pybuda.op.Matmul("mm5", act4, weights5) + l5 = forge.op.Matmul("mm5", act4, weights5) return l5 @@ -392,11 +392,11 @@ def ff_5_inner(activations, weights1, weights2, weights3, weights4, weights5): torch_weights5 = DISTRIBUTION(DISTRIBUTION_MIN, DISTRIBUTION_MAX).sample(shape) torch_weights5.requires_grad = True - weights1 = pybuda.Parameter.create_from_torch(torch_weights1) - weights2 = pybuda.Parameter.create_from_torch(torch_weights2) - weights3 = pybuda.Parameter.create_from_torch(torch_weights3) - weights4 = pybuda.Parameter.create_from_torch(torch_weights4) - weights5 = pybuda.Parameter.create_from_torch(torch_weights5) + weights1 = forge.Parameter.create_from_torch(torch_weights1) + weights2 = forge.Parameter.create_from_torch(torch_weights2) + weights3 = forge.Parameter.create_from_torch(torch_weights3) + weights4 = forge.Parameter.create_from_torch(torch_weights4) + weights5 = forge.Parameter.create_from_torch(torch_weights5) module = ModuleBuilder( ff_5_inner, @@ -480,7 +480,7 @@ def test_adamw_optimizer( module = model(shape) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b verify_module(module, [shape], @@ -582,12 +582,12 @@ def test_lamb_optimizer( accumulation_steps, steps ): - #Fusing disabled due to tenstorrent/pybuda#548 - pybuda.set_configuration_options(enable_auto_fusing=False) + #Fusing disabled due to tenstorrent/forge#548 + forge.set_configuration_options(enable_auto_fusing=False) module = model(shape) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b verify_module(module, [shape], @@ -682,12 +682,12 @@ def test_lars_optimizer( steps ): - #Fusing disabled due to tenstorrent/pybuda#548 - 
pybuda.set_configuration_options(enable_auto_fusing=False) + #Fusing disabled due to tenstorrent/forge#548 + forge.set_configuration_options(enable_auto_fusing=False) module = model(shape) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b verify_module(module, [shape], @@ -737,8 +737,8 @@ def test_mha_adamw( epsilon ): - #Fusing disabled due to tenstorrent/pybuda#548 - pybuda.set_configuration_options(enable_auto_fusing=False) + #Fusing disabled due to tenstorrent/forge#548 + forge.set_configuration_options(enable_auto_fusing=False) hidden_dim = cfg[0] num_heads = cfg[1] @@ -749,9 +749,9 @@ def test_mha_adamw( params = get_bert_parameters("mha", hidden_dim=hidden_dim) config = { "num_heads": num_heads, "encoder_index": 0 } - mod = PyBudaBertMHA("mha", params, config) + mod = ForgeBertMHA("mha", params, config) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b verify_module(mod, [(microbatch_size, seq_len, hidden_dim), (microbatch_size, 1, seq_len)], VerifyConfig( @@ -795,8 +795,8 @@ def test_mha_lamb( epsilon ): - #Fusing disabled due to tenstorrent/pybuda#548 - pybuda.set_configuration_options(enable_auto_fusing=False) + #Fusing disabled due to tenstorrent/forge#548 + forge.set_configuration_options(enable_auto_fusing=False) hidden_dim = cfg[0] num_heads = cfg[1] @@ -807,9 +807,9 @@ def test_mha_lamb( params = get_bert_parameters("mha", hidden_dim=hidden_dim) config = { "num_heads": num_heads, "encoder_index": 0 } - mod = PyBudaBertMHA("mha", params, config) + mod = ForgeBertMHA("mha", params, config) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b verify_module(mod, [(microbatch_size, seq_len, hidden_dim), (microbatch_size, 1, seq_len)], VerifyConfig( @@ -854,8 +854,8 @@ def test_mha_lars( momentum ): - #Fusing disabled due to tenstorrent/pybuda#548 - pybuda.set_configuration_options(enable_auto_fusing=False) + #Fusing disabled due to tenstorrent/forge#548 + forge.set_configuration_options(enable_auto_fusing=False) hidden_dim = cfg[0] num_heads = cfg[1] @@ -866,9 +866,9 @@ def test_mha_lars( params = get_bert_parameters("mha", hidden_dim=hidden_dim) config = { "num_heads": num_heads, "encoder_index": 0 } - mod = PyBudaBertMHA("mha", params, config) + mod = ForgeBertMHA("mha", params, config) - fp32_fallback = pybuda.DataFormat.Float16_b + fp32_fallback = forge.DataFormat.Float16_b verify_module(mod, [(microbatch_size, seq_len, hidden_dim), (microbatch_size, 1, seq_len)], VerifyConfig( diff --git a/pybuda/test/test_padding/__init__.py b/forge/test/test_padding/__init__.py similarity index 100% rename from pybuda/test/test_padding/__init__.py rename to forge/test/test_padding/__init__.py diff --git a/pybuda/test/test_padding/other/__init__.py b/forge/test/test_padding/other/__init__.py similarity index 100% rename from pybuda/test/test_padding/other/__init__.py rename to forge/test/test_padding/other/__init__.py diff --git a/pybuda/test/test_padding/other/test_padding_pass_a.py b/forge/test/test_padding/other/test_padding_pass_a.py similarity index 86% rename from pybuda/test/test_padding/other/test_padding_pass_a.py rename to forge/test/test_padding/other/test_padding_pass_a.py index 65732d980..158020a3e 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_a.py +++ b/forge/test/test_padding/other/test_padding_pass_a.py @@ -9,19 +9,19 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, Tensor, - 
pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType -from pybuda._C import DataFormat +from forge._C.backend_api import BackendType +from forge._C import DataFormat -class TestPaddingPassA(pybuda.PyBudaModule): +class TestPaddingPassA(forge.ForgeModule): # Convolutional Network @@ -54,7 +54,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -68,7 +68,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -77,7 +77,7 @@ def maxpool2d(name, kernel, stride): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -107,29 +107,29 @@ def forward(self, x1, x2): conv2 = self.conv2(x2) maxpool1 = self.maxpool1(conv1) maxpool2 = self.maxpool2(conv2) - add1 = pybuda.op.Add("add1", maxpool1, maxpool2) + add1 = forge.op.Add("add1", maxpool1, maxpool2) # block - relu1 = pybuda.op.Relu("relu1", add1) + relu1 = forge.op.Relu("relu1", add1) conv3 = self.conv3(relu1) - relu2 = pybuda.op.Relu("relu2", conv3) + relu2 = forge.op.Relu("relu2", conv3) conv4 = self.conv4(relu2) - relu3 = pybuda.op.Relu("relu3", conv4) + relu3 = forge.op.Relu("relu3", conv4) conv5 = self.conv5(relu3) - add2 = pybuda.op.Add("add2", relu1, conv5) + add2 = forge.op.Add("add2", relu1, conv5) # tail W, Z, R, C = 1, 1, add2.shape[-3], add2.shape[-1] * add2.shape[-2] - resh = pybuda.op.Reshape("resh", add2, (W, Z, R, C)) - tr = pybuda.op.Transpose("tr", resh, -1, -2) - ra = pybuda.op.ReduceAvg("ra", tr, -2) + resh = forge.op.Reshape("resh", add2, (W, Z, R, C)) + tr = forge.op.Transpose("tr", resh, -1, -2) + ra = forge.op.ReduceAvg("ra", tr, -2) lin = self.linear(ra) - sm = pybuda.op.Softmax("sm", lin, dim=-1, stable=True) + sm = forge.op.Softmax("sm", lin, dim=-1, stable=True) return sm -class TestPaddingPassA_1(pybuda.PyBudaModule): +class TestPaddingPassA_1(forge.ForgeModule): def __init__( self, @@ -160,7 +160,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -174,7 +174,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -183,7 +183,7 @@ def maxpool2d(name, kernel, stride): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( 
name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -208,19 +208,19 @@ def forward(self, x1, x2): conv2 = self.conv2(x2) maxpool1 = self.maxpool1(conv1) maxpool2 = self.maxpool2(conv2) - add1 = pybuda.op.Add("add1", maxpool1, maxpool2) + add1 = forge.op.Add("add1", maxpool1, maxpool2) # tail W, Z, R, C = 1, 1, add1.shape[-3], add1.shape[-1] * add1.shape[-2] - resh = pybuda.op.Reshape("resh", add1, (W, Z, R, C)) - tr = pybuda.op.Transpose("tr", resh, -1, -2) - ra = pybuda.op.ReduceAvg("ra", tr, -2) + resh = forge.op.Reshape("resh", add1, (W, Z, R, C)) + tr = forge.op.Transpose("tr", resh, -1, -2) + ra = forge.op.ReduceAvg("ra", tr, -2) lin = self.linear(ra) - sm = pybuda.op.Softmax("sm", lin, dim=-1, stable=True) + sm = forge.op.Softmax("sm", lin, dim=-1, stable=True) return sm -class TestPaddingPassA_2(pybuda.PyBudaModule): +class TestPaddingPassA_2(forge.ForgeModule): def __init__( self, @@ -251,7 +251,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -265,7 +265,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -274,7 +274,7 @@ def maxpool2d(name, kernel, stride): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -290,7 +290,7 @@ def linear(name): def forward(self, x1, x2): - add = pybuda.op.Add("add", x1, x2) + add = forge.op.Add("add", x1, x2) # head conv1 = self.conv1(add) @@ -298,15 +298,15 @@ def forward(self, x1, x2): # tail W, Z, R, C = 1, 1, maxpool1.shape[-3], maxpool1.shape[-1] * maxpool1.shape[-2] - resh = pybuda.op.Reshape("resh", maxpool1, (W, Z, R, C)) - tr = pybuda.op.Transpose("tr", resh, -1, -2) - ra = pybuda.op.ReduceAvg("ra", tr, -2) + resh = forge.op.Reshape("resh", maxpool1, (W, Z, R, C)) + tr = forge.op.Transpose("tr", resh, -1, -2) + ra = forge.op.ReduceAvg("ra", tr, -2) lin = self.linear(ra) - sm = pybuda.op.Softmax("sm", lin, dim=-1, stable=True) + sm = forge.op.Softmax("sm", lin, dim=-1, stable=True) return sm -class TestPaddingPassA_3(pybuda.PyBudaModule): +class TestPaddingPassA_3(forge.ForgeModule): def __init__( self, @@ -337,7 +337,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -354,7 +354,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): def forward(self, x1, x2): - add = pybuda.op.Add("add", x1, x2) + add = forge.op.Add("add", x1, x2) conv1 = self.conv1(add) return conv1 @@ -400,25 +400,25 @@ def set_environment(): # Environment variable that adds padding pass if TEST_A_DISABLE_PADDING_PASS_FLAG: - 
os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" if TEST_A_PADDING_PASS_LEGACY: - os.environ["PYBUDA_PADDING_PASS_LEGACY"] = "1" + os.environ["FORGE_PADDING_PASS_LEGACY"] = "1" if TEST_A_PADDING_PASS_SPARSE_MATMUL: - os.environ["PYBUDA_PADDING_PASS_SPARSE_MATMUL"] = "1" + os.environ["FORGE_PADDING_PASS_SPARSE_MATMUL"] = "1" # Environment variable that allows printing a graph if TEST_A_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_A_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_A_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Environment variables that describe constraints if TEST_A_RESOURCE_USAGE_FALLBACK_MODE: - os.environ["PYBUDA_RESOURCE_USAGE_FALLBACK_MODE"] = "1" + os.environ["FORGE_RESOURCE_USAGE_FALLBACK_MODE"] = "1" # Include or not environment variables for debugging the stack if TEST_A_LOGGER_LEVEL_TRACE: @@ -428,9 +428,9 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_A_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_A_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" @@ -581,7 +581,7 @@ def test_padding_pass_a( tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *inputs, diff --git a/pybuda/test/test_padding/other/test_padding_pass_b.py b/forge/test/test_padding/other/test_padding_pass_b.py similarity index 83% rename from pybuda/test/test_padding/other/test_padding_pass_b.py rename to forge/test/test_padding/other/test_padding_pass_b.py index 7405eefeb..640ff727d 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_b.py +++ b/forge/test/test_padding/other/test_padding_pass_b.py @@ -9,18 +9,18 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType +from forge._C.backend_api import BackendType -class TestPaddingPassB(pybuda.PyBudaModule): +class TestPaddingPassB(forge.ForgeModule): # Convolutional Network @@ -52,7 +52,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -66,7 +66,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -75,7 +75,7 @@ def maxpool2d(name, kernel, stride): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( 
name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -142,8 +142,8 @@ def forward(self, x1, x2): conv2 = self.conv2(x1) conv3 = self.conv3(x1) - relu1 = pybuda.op.Relu("relu1", conv1) - relu2 = pybuda.op.Relu("relu2", conv3) + relu1 = forge.op.Relu("relu1", conv1) + relu2 = forge.op.Relu("relu2", conv3) # ... head flow 2 ... conv4 = self.conv4(x2) @@ -151,13 +151,13 @@ def forward(self, x1, x2): conv6 = self.conv6(x2) conv7 = self.conv7(x2) - relu3 = pybuda.op.Relu("relu3", conv5) - relu4 = pybuda.op.Relu("relu4", conv7) + relu3 = forge.op.Relu("relu3", conv5) + relu4 = forge.op.Relu("relu4", conv7) # ... head flow 1 & 2 ... - mul1 = pybuda.op.Multiply("mul1", conv2, conv4) - add1 = pybuda.op.Add("add1", relu2, conv6) - mul2 = pybuda.op.Multiply("mul2", relu3, relu4) + mul1 = forge.op.Multiply("mul1", conv2, conv4) + add1 = forge.op.Add("add1", relu2, conv6) + mul2 = forge.op.Multiply("mul2", relu3, relu4) maxpool1 = self.maxpool1(relu1) maxpool2 = self.maxpool2(mul1) @@ -167,78 +167,78 @@ def forward(self, x1, x2): # block # ... block flow 1 ... - add2 = pybuda.op.Add("add2", maxpool1, maxpool2) - relu5 = pybuda.op.Relu("relu5", add2) + add2 = forge.op.Add("add2", maxpool1, maxpool2) + relu5 = forge.op.Relu("relu5", add2) conv8 = self.conv8(relu5) - relu6 = pybuda.op.Relu("relu6", conv8) + relu6 = forge.op.Relu("relu6", conv8) conv9 = self.conv9(relu6) - relu7 = pybuda.op.Relu("relu7", conv9) + relu7 = forge.op.Relu("relu7", conv9) conv10 = self.conv10(relu7) - add5 = pybuda.op.Add("add5", relu5, conv10) + add5 = forge.op.Add("add5", relu5, conv10) # ... block flow 2 ... - add3 = pybuda.op.Add("add3", maxpool3, maxpool4) - mul3 = pybuda.op.Multiply("mul3", relu5, add3) - exp1 = pybuda.op.Exp("exp1", mul3) + add3 = forge.op.Add("add3", maxpool3, maxpool4) + mul3 = forge.op.Multiply("mul3", relu5, add3) + exp1 = forge.op.Exp("exp1", mul3) conv11 = self.conv11(add3) - relu8 = pybuda.op.Relu("relu8", conv11) - exp2 = pybuda.op.Exp("exp2", relu8) + relu8 = forge.op.Relu("relu8", conv11) + exp2 = forge.op.Exp("exp2", relu8) conv12 = self.conv12(exp2) conv13 = self.conv13(conv12) - exp3 = pybuda.op.Exp("exp3", conv13) - add6 = pybuda.op.Add("add6", exp1, exp3) + exp3 = forge.op.Exp("exp3", conv13) + add6 = forge.op.Add("add6", exp1, exp3) # ... block flow 3 ... - add4 = pybuda.op.Add("add4", maxpool4, maxpool5) + add4 = forge.op.Add("add4", maxpool4, maxpool5) conv14 = self.conv14(add4) - relu9 = pybuda.op.Relu("relu9", conv14) + relu9 = forge.op.Relu("relu9", conv14) conv15 = self.conv15(relu9) - exp4 = pybuda.op.Exp("exp4", conv15) + exp4 = forge.op.Exp("exp4", conv15) conv16 = self.conv16(exp4) - mul4 = pybuda.op.Multiply("mul4", add4, conv16) + mul4 = forge.op.Multiply("mul4", add4, conv16) # ... block flow 4 ... - mm1 = pybuda.op.Matmul("mm1", add5, add6) # maybe add operation, add7 - relu10 = pybuda.op.Relu("relu10", mm1) + mm1 = forge.op.Matmul("mm1", add5, add6) # maybe add operation, add7 + relu10 = forge.op.Relu("relu10", mm1) conv17 = self.conv17(relu10) - relu11 = pybuda.op.Relu("relu11", conv17) + relu11 = forge.op.Relu("relu11", conv17) conv18 = self.conv18(relu11) - relu12 = pybuda.op.Relu("relu12", conv18) + relu12 = forge.op.Relu("relu12", conv18) conv19 = self.conv19(relu12) - add7 = pybuda.op.Add("add7", relu10, conv19) + add7 = forge.op.Add("add7", relu10, conv19) # ... block flow 5 ... 
- relu13 = pybuda.op.Relu("relu13", mul4) + relu13 = forge.op.Relu("relu13", mul4) conv20 = self.conv20(relu13) - relu14 = pybuda.op.Relu("relu14", conv20) + relu14 = forge.op.Relu("relu14", conv20) conv21 = self.conv21(relu14) - relu15 = pybuda.op.Relu("relu15", conv21) + relu15 = forge.op.Relu("relu15", conv21) conv22 = self.conv22(relu15) - mm2 = pybuda.op.Matmul("mm2", relu10, relu13) - add8 = pybuda.op.Add("add8", mm2, conv22) + mm2 = forge.op.Matmul("mm2", relu10, relu13) + add8 = forge.op.Add("add8", mm2, conv22) # tail # ... tail flow 1 ... W1, Z1, R1, C1 = 1, 1, add7.shape[-3], add7.shape[-1] * add7.shape[-2] - resh1 = pybuda.op.Reshape("resh1", add7, (W1, Z1, R1, C1)) - tr1 = pybuda.op.Transpose("tr1", resh1, -1, -2) - ra1 = pybuda.op.ReduceAvg("ra1", tr1, -2) + resh1 = forge.op.Reshape("resh1", add7, (W1, Z1, R1, C1)) + tr1 = forge.op.Transpose("tr1", resh1, -1, -2) + ra1 = forge.op.ReduceAvg("ra1", tr1, -2) lin1 = self.linear1(ra1) - sm1 = pybuda.op.Softmax("sm1", lin1, dim=-1, stable=True) + sm1 = forge.op.Softmax("sm1", lin1, dim=-1, stable=True) # ... tail flow 2 ... W2, Z2, R2, C2 = 1, 1, add8.shape[-3], add8.shape[-1] * add8.shape[-2] - resh2 = pybuda.op.Reshape("resh2", add8, (W2, Z2, R2, C2)) - tr2 = pybuda.op.Transpose("tr2", resh2, -1, -2) - ra2 = pybuda.op.ReduceAvg("ra2", tr2, -2) + resh2 = forge.op.Reshape("resh2", add8, (W2, Z2, R2, C2)) + tr2 = forge.op.Transpose("tr2", resh2, -1, -2) + ra2 = forge.op.ReduceAvg("ra2", tr2, -2) lin2 = self.linear2(ra2) - sm2 = pybuda.op.Softmax("sm2", lin2, dim=-1, stable=True) + sm2 = forge.op.Softmax("sm2", lin2, dim=-1, stable=True) return sm1, sm2 -class TestPaddingPassB_1(pybuda.PyBudaModule): +class TestPaddingPassB_1(forge.ForgeModule): def __init__( self, @@ -269,7 +269,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -283,7 +283,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -307,12 +307,12 @@ def forward(self, x1, x2): maxpool2 = self.maxpool2(conv2) # Layer 3 - add = pybuda.op.Add("add", maxpool1, maxpool2) + add = forge.op.Add("add", maxpool1, maxpool2) return add -class TestPaddingPassB_2(pybuda.PyBudaModule): +class TestPaddingPassB_2(forge.ForgeModule): def __init__( self, @@ -343,7 +343,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -357,7 +357,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -378,19 +378,19 @@ def forward(self, x1, x2): conv2 = self.conv2(x2) # Layer 3 - add = pybuda.op.Add("add", conv1, conv2) - mul = 
pybuda.op.Multiply("mul", conv1, conv2) + add = forge.op.Add("add", conv1, conv2) + mul = forge.op.Multiply("mul", conv1, conv2) # Layer 4 maxpool1 = self.maxpool1(add) maxpool2 = self.maxpool2(mul) # Layer 5 - sub = pybuda.op.Subtract("sub", maxpool1, maxpool2) + sub = forge.op.Subtract("sub", maxpool1, maxpool2) return sub -class TestPaddingPassB_3(pybuda.PyBudaModule): +class TestPaddingPassB_3(forge.ForgeModule): def __init__( self, @@ -421,7 +421,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -435,7 +435,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -449,15 +449,15 @@ def maxpool2d(name, kernel, stride): def forward(self, x1, x2): # Layer 2 - sub = pybuda.op.Subtract("sub", x1, x2) + sub = forge.op.Subtract("sub", x1, x2) # Layer 3 conv = self.conv1(sub) # Layer 4 - add = pybuda.op.Add("add", conv, conv) + add = forge.op.Add("add", conv, conv) # Layer 5 maxpool = self.maxpool1(add) # Layer 6 - exp = pybuda.op.Exp("exp", maxpool) + exp = forge.op.Exp("exp", maxpool) return exp @@ -498,17 +498,17 @@ def set_environment(): # Environment variable that adds padding pass if TEST_B_DISABLE_PADDING_PASS_FLAG: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" # Environment variable that allows printing a graph if TEST_B_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_B_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_B_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Include or not environment variables for debugging the stack if TEST_B_LOGGER_LEVEL_TRACE: @@ -518,9 +518,9 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_B_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_B_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" @@ -651,7 +651,7 @@ def test_padding_pass_b( tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *inputs, diff --git a/pybuda/test/test_padding/other/test_padding_pass_c.py b/forge/test/test_padding/other/test_padding_pass_c.py similarity index 80% rename from pybuda/test/test_padding/other/test_padding_pass_c.py rename to forge/test/test_padding/other/test_padding_pass_c.py index d11689aec..f075a02b8 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_c.py +++ b/forge/test/test_padding/other/test_padding_pass_c.py @@ -9,18 +9,18 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( 
TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType -from pybuda._C import DataFormat +from forge._C.backend_api import BackendType +from forge._C import DataFormat -class TestPaddingPassC(pybuda.PyBudaModule): +class TestPaddingPassC(forge.ForgeModule): # Convolutional Network @@ -53,7 +53,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -67,7 +67,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -76,7 +76,7 @@ def maxpool2d(name, kernel, stride): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -181,45 +181,45 @@ def forward(self, x1, x2): conv1 = self.conv1(x1) conv2 = self.conv2(x1) conv3 = self.conv3(x1) - exp1 = pybuda.op.Exp("exp1", conv1) - exp2 = pybuda.op.Exp("exp2", conv2) - exp3 = pybuda.op.Exp("exp3", conv3) + exp1 = forge.op.Exp("exp1", conv1) + exp2 = forge.op.Exp("exp2", conv2) + exp3 = forge.op.Exp("exp3", conv3) # ... head flow 2 ... conv4 = self.conv4(x2) conv5 = self.conv5(x2) conv6 = self.conv6(x2) - exp4 = pybuda.op.Exp("exp4", conv4) - exp5 = pybuda.op.Exp("exp5", conv5) - exp6 = pybuda.op.Exp("exp6", conv6) + exp4 = forge.op.Exp("exp4", conv4) + exp5 = forge.op.Exp("exp5", conv5) + exp6 = forge.op.Exp("exp6", conv6) # block # ... block flow 1 ... - mm1 = pybuda.op.Matmul("mm1", exp1, exp4) + mm1 = forge.op.Matmul("mm1", exp1, exp4) conv7 = self.conv7(mm1) - relu1 = pybuda.op.Relu("relu1", conv7) + relu1 = forge.op.Relu("relu1", conv7) conv8 = self.conv8(relu1) - relu2 = pybuda.op.Relu("relu2", conv8) + relu2 = forge.op.Relu("relu2", conv8) conv9 = self.conv9(relu2) - add1 = pybuda.op.Add("add1", mm1, conv9) + add1 = forge.op.Add("add1", mm1, conv9) # ... block flow 2 ... - mm2 = pybuda.op.Matmul("mm2", exp2, exp6) + mm2 = forge.op.Matmul("mm2", exp2, exp6) conv10 = self.conv10(mm2) - relu3 = pybuda.op.Relu("relu3", conv10) + relu3 = forge.op.Relu("relu3", conv10) conv11 = self.conv11(relu3) - exp8 = pybuda.op.Exp("exp8", conv11) + exp8 = forge.op.Exp("exp8", conv11) conv12 = self.conv12(exp8) - mul1 = pybuda.op.Multiply("mul1", mm2, conv12) + mul1 = forge.op.Multiply("mul1", mm2, conv12) # ... block flow 3 ... - mm3 = pybuda.op.Matmul("mm3", exp3, exp5) + mm3 = forge.op.Matmul("mm3", exp3, exp5) conv13 = self.conv13(mm3) - exp7 = pybuda.op.Exp("exp7", conv13) + exp7 = forge.op.Exp("exp7", conv13) conv14 = self.conv14(exp7) - exp9 = pybuda.op.Exp("exp9", conv14) + exp9 = forge.op.Exp("exp9", conv14) conv15 = self.conv15(exp9) - add2 = pybuda.op.Add("add2", mm3, conv15) + add2 = forge.op.Add("add2", mm3, conv15) # intermediate # ... intermediate flow 1 ... @@ -232,11 +232,11 @@ def forward(self, x1, x2): maxpool3 = self.maxpool3(add2) # ... intermediate flow 1, 2 & 3 ... 
- mm4 = pybuda.op.Matmul("mm4", maxpool1, maxpool2) - mm5 = pybuda.op.Matmul("mm5", maxpool2, maxpool3) - mm6 = pybuda.op.Matmul("mm6", maxpool1, maxpool3) - exp10 = pybuda.op.Exp("exp10", mm4) - exp11 = pybuda.op.Exp("exp11", mm6) + mm4 = forge.op.Matmul("mm4", maxpool1, maxpool2) + mm5 = forge.op.Matmul("mm5", maxpool2, maxpool3) + mm6 = forge.op.Matmul("mm6", maxpool1, maxpool3) + exp10 = forge.op.Exp("exp10", mm4) + exp11 = forge.op.Exp("exp11", mm6) conv16 = self.conv16(mm4) conv17 = self.conv17(mm4) conv18 = self.conv18(exp10) @@ -245,81 +245,81 @@ def forward(self, x1, x2): conv21 = self.conv21(mm6) conv22 = self.conv22(mm6) conv23 = self.conv23(exp11) - mul2 = pybuda.op.Multiply("mul2", conv16, conv17) - add3 = pybuda.op.Add("add3", conv18, conv19) - add4 = pybuda.op.Add("add4", conv20, conv22) - mul3 = pybuda.op.Multiply("mul3", conv21, conv23) + mul2 = forge.op.Multiply("mul2", conv16, conv17) + add3 = forge.op.Add("add3", conv18, conv19) + add4 = forge.op.Add("add4", conv20, conv22) + mul3 = forge.op.Multiply("mul3", conv21, conv23) # block # ... block flow 4 ... maxpool4 = self.maxpool4(mul2) conv24 = self.conv24(maxpool4) - relu4 = pybuda.op.Relu("relu4", conv24) + relu4 = forge.op.Relu("relu4", conv24) conv25 = self.conv25(relu4) - relu5 = pybuda.op.Relu("relu5", conv25) + relu5 = forge.op.Relu("relu5", conv25) conv26 = self.conv26(relu5) - add5 = pybuda.op.Add("add5", maxpool4, conv26) + add5 = forge.op.Add("add5", maxpool4, conv26) # ... block flow 5 ... maxpool5 = self.maxpool5(add3) conv27 = self.conv27(maxpool5) - relu6 = pybuda.op.Relu("relu6", conv27) + relu6 = forge.op.Relu("relu6", conv27) conv28 = self.conv28(relu6) - relu7 = pybuda.op.Relu("relu7", conv28) + relu7 = forge.op.Relu("relu7", conv28) conv29 = self.conv29(relu7) - add6 = pybuda.op.Add("add6", maxpool5, conv29) + add6 = forge.op.Add("add6", maxpool5, conv29) # ... block flow 6 ... maxpool6 = self.maxpool6(add4) conv30 = self.conv30(maxpool6) - relu8 = pybuda.op.Relu("relu8", conv30) + relu8 = forge.op.Relu("relu8", conv30) conv31 = self.conv31(relu8) - relu9 = pybuda.op.Relu("relu9", conv31) + relu9 = forge.op.Relu("relu9", conv31) conv32 = self.conv32(relu9) - add7 = pybuda.op.Add("add7", maxpool6, conv32) + add7 = forge.op.Add("add7", maxpool6, conv32) # ... block flow 7 ... maxpool7 = self.maxpool7(mul3) conv33 = self.conv33(maxpool7) - relu10 = pybuda.op.Relu("relu10", conv33) + relu10 = forge.op.Relu("relu10", conv33) conv34 = self.conv34(relu10) - relu11 = pybuda.op.Relu("relu11", conv34) + relu11 = forge.op.Relu("relu11", conv34) conv35 = self.conv35(relu11) - add8 = pybuda.op.Add("add8", maxpool7, conv35) + add8 = forge.op.Add("add8", maxpool7, conv35) # intermediate # ... intermediate flow 4 ... - mm7 = pybuda.op.Matmul("mm7", add5, add8) + mm7 = forge.op.Matmul("mm7", add5, add8) maxpool8 = self.maxpool8(mm7) # ... intermediate flow 5 ... - mm8 = pybuda.op.Matmul("mm8", add6, add7) + mm8 = forge.op.Matmul("mm8", add6, add7) maxpool9 = self.maxpool9(mm8) # tail # ... tail flow 1 ... 
W1, Z1, R1, C1 = 1, 1, maxpool8.shape[-3], maxpool8.shape[-1] * maxpool8.shape[-2] - resh1 = pybuda.op.Reshape("resh1", maxpool8, (W1, Z1, R1, C1)) - tr1 = pybuda.op.Transpose("tr1", resh1, -1, -2) - ra1 = pybuda.op.ReduceAvg("ra1", tr1, -2) + resh1 = forge.op.Reshape("resh1", maxpool8, (W1, Z1, R1, C1)) + tr1 = forge.op.Transpose("tr1", resh1, -1, -2) + ra1 = forge.op.ReduceAvg("ra1", tr1, -2) lin1 = self.linear1(ra1) - sm1 = pybuda.op.Softmax("sm1", lin1, dim=-1, stable=True) + sm1 = forge.op.Softmax("sm1", lin1, dim=-1, stable=True) # ... tail flow 2 ... W2, Z2, R2, C2 = 1, 1, maxpool9.shape[-3], maxpool9.shape[-1] * maxpool9.shape[-2] - resh2 = pybuda.op.Reshape("resh2", maxpool9, (W2, Z2, R2, C2)) - tr2 = pybuda.op.Transpose("tr2", resh2, -1, -2) - ra2 = pybuda.op.ReduceAvg("ra2", tr2, -2) + resh2 = forge.op.Reshape("resh2", maxpool9, (W2, Z2, R2, C2)) + tr2 = forge.op.Transpose("tr2", resh2, -1, -2) + ra2 = forge.op.ReduceAvg("ra2", tr2, -2) lin2 = self.linear2(ra2) - sm2 = pybuda.op.Softmax("sm2", lin2, dim=-1, stable=True) + sm2 = forge.op.Softmax("sm2", lin2, dim=-1, stable=True) # block # ... block flow 8 ... - add9 = pybuda.op.Add("add9", sm1, sm2) + add9 = forge.op.Add("add9", sm1, sm2) conv36 = self.conv36(add9) - relu12 = pybuda.op.Relu("relu12", conv36) + relu12 = forge.op.Relu("relu12", conv36) conv37 = self.conv37(relu12) - add10 = pybuda.op.Add("add10", add9, conv37) + add10 = forge.op.Add("add10", add9, conv37) return add10 @@ -356,17 +356,17 @@ def set_environment(): # Environment variable that adds padding pass if TEST_C_DISABLE_PADDING_PASS_FLAG: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" # Environment variable that allows printing a graph if TEST_C_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_C_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_C_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Include or not environment variables for debugging the stack if TEST_C_LOGGER_LEVEL_TRACE: @@ -376,9 +376,9 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_C_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_C_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" # The main reason why we use shapes of different sizes is @@ -499,7 +499,7 @@ def test_padding_pass_c( tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *inputs, diff --git a/pybuda/test/test_padding/other/test_padding_pass_d.py b/forge/test/test_padding/other/test_padding_pass_d.py similarity index 76% rename from pybuda/test/test_padding/other/test_padding_pass_d.py rename to forge/test/test_padding/other/test_padding_pass_d.py index 8f8c12c5b..377e744cb 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_d.py +++ b/forge/test/test_padding/other/test_padding_pass_d.py @@ -9,18 +9,18 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, Tensor, - 
pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType +from forge._C.backend_api import BackendType -class TestPaddingPassD(pybuda.PyBudaModule): +class TestPaddingPassD(forge.ForgeModule): # Convolutional Network @@ -51,7 +51,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -65,7 +65,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -74,7 +74,7 @@ def maxpool2d(name, kernel, stride): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -155,117 +155,117 @@ def forward(self, x1, x2, x3): # ... head flow 1 ... conv1 = self.conv1(x1) conv2 = self.conv2(x1) - mm1 = pybuda.op.Matmul("mm1", conv1, conv2) + mm1 = forge.op.Matmul("mm1", conv1, conv2) # ... head flow 2 ... conv3 = self.conv3(x2) conv4 = self.conv4(x2) - mm2 = pybuda.op.Matmul("mm2", conv3, conv4) + mm2 = forge.op.Matmul("mm2", conv3, conv4) # ... head flow 3 ... conv5 = self.conv5(x3) conv6 = self.conv6(x3) - mm3 = pybuda.op.Matmul("mm3", conv5, conv6) + mm3 = forge.op.Matmul("mm3", conv5, conv6) # block # ... block flow 1 ... maxpool1 = self.maxpool1(mm1) conv7 = self.conv7(maxpool1) - exp1 = pybuda.op.Exp("exp1", conv7) + exp1 = forge.op.Exp("exp1", conv7) conv8 = self.conv8(exp1) - exp2 = pybuda.op.Exp("exp2", conv8) + exp2 = forge.op.Exp("exp2", conv8) conv9 = self.conv9(exp2) - add13 = pybuda.op.Add("add13", maxpool1, conv9) + add13 = forge.op.Add("add13", maxpool1, conv9) # ... block flow 2 ... maxpool2 = self.maxpool2(mm2) conv12 = self.conv12(maxpool2) - exp3 = pybuda.op.Exp("exp3", conv12) + exp3 = forge.op.Exp("exp3", conv12) conv13 = self.conv13(exp3) - exp4 = pybuda.op.Exp("exp4", conv13) + exp4 = forge.op.Exp("exp4", conv13) conv14 = self.conv14(exp4) - add4 = pybuda.op.Add("add4", maxpool2, conv14) + add4 = forge.op.Add("add4", maxpool2, conv14) # ... block flow 3 ... maxpool3 = self.maxpool3(mm3) conv17 = self.conv17(maxpool3) - exp5 = pybuda.op.Exp("exp5", conv17) + exp5 = forge.op.Exp("exp5", conv17) conv18 = self.conv18(exp5) - exp6 = pybuda.op.Exp("exp6", conv18) + exp6 = forge.op.Exp("exp6", conv18) conv19 = self.conv19(exp6) - add6 = pybuda.op.Add("add6", maxpool3, conv19) + add6 = forge.op.Add("add6", maxpool3, conv19) # ... block flow 1 & 2 ... 
- mul1 = pybuda.op.Multiply("mul1", conv7, conv12) - sm1 = pybuda.op.Softmax("sm1", mul1, dim=-2, stable=True) + mul1 = forge.op.Multiply("mul1", conv7, conv12) + sm1 = forge.op.Softmax("sm1", mul1, dim=-2, stable=True) conv10 = self.conv10(sm1) - add2_ = pybuda.op.Add("add2_", conv8, conv13) - add2 = pybuda.op.Add("add2", add2_, conv10) + add2_ = forge.op.Add("add2_", conv8, conv13) + add2 = forge.op.Add("add2", add2_, conv10) conv11 = self.conv11(add2) - mul2_ = pybuda.op.Multiply("mul2_", conv9, conv14) - mul2 = pybuda.op.Multiply("mul2", mul2_, conv11) + mul2_ = forge.op.Multiply("mul2_", conv9, conv14) + mul2 = forge.op.Multiply("mul2", mul2_, conv11) # ... block flow 2 & 3 ... - add1 = pybuda.op.Add("add1", conv12, conv17) - sm2 = pybuda.op.Softmax("sm2", add1, dim=-2, stable=True) + add1 = forge.op.Add("add1", conv12, conv17) + sm2 = forge.op.Softmax("sm2", add1, dim=-2, stable=True) conv15 = self.conv15(sm2) - add3_ = pybuda.op.Add("add3_", conv15, conv18) - add3 = pybuda.op.Add("add3", add3_, conv13) + add3_ = forge.op.Add("add3_", conv15, conv18) + add3 = forge.op.Add("add3", add3_, conv13) conv16 = self.conv16(add3) - add5_ = pybuda.op.Add("add5_", conv14, conv16) - add5 = pybuda.op.Add("add5", add5_, conv19) + add5_ = forge.op.Add("add5_", conv14, conv16) + add5 = forge.op.Add("add5", add5_, conv19) # ... block flow 4 ... - add7 = pybuda.op.Add("add7", add13, mul2) + add7 = forge.op.Add("add7", add13, mul2) conv20 = self.conv20(add7) - relu1 = pybuda.op.Relu("relu1", conv20) + relu1 = forge.op.Relu("relu1", conv20) conv21 = self.conv21(relu1) - relu2 = pybuda.op.Relu("relu2", conv21) + relu2 = forge.op.Relu("relu2", conv21) conv22 = self.conv22(relu2) - add10 = pybuda.op.Add("add10", add7, conv22) - exp7 = pybuda.op.Exp("exp7", add10) - red1 = pybuda.op.ReduceSum("red1", exp7, dim=-2) + add10 = forge.op.Add("add10", add7, conv22) + exp7 = forge.op.Exp("exp7", add10) + red1 = forge.op.ReduceSum("red1", exp7, dim=-2) # ... block flow 5 ... - add8 = pybuda.op.Add("add8", add4, add5) + add8 = forge.op.Add("add8", add4, add5) conv23 = self.conv23(add8) - relu3 = pybuda.op.Relu("relu3", conv23) + relu3 = forge.op.Relu("relu3", conv23) conv24 = self.conv24(relu3) - relu4 = pybuda.op.Relu("relu4", conv24) + relu4 = forge.op.Relu("relu4", conv24) conv25 = self.conv25(relu4) - add11 = pybuda.op.Add("add11", add8, conv25) - exp8 = pybuda.op.Exp("exp8", add11) - red2 = pybuda.op.ReduceSum("red2", exp8, dim=-2) + add11 = forge.op.Add("add11", add8, conv25) + exp8 = forge.op.Exp("exp8", add11) + red2 = forge.op.ReduceSum("red2", exp8, dim=-2) # ... block flow 6 ... - add9 = pybuda.op.Add("add9", add5, add6) + add9 = forge.op.Add("add9", add5, add6) conv26 = self.conv26(add9) - relu5 = pybuda.op.Relu("relu5", conv26) + relu5 = forge.op.Relu("relu5", conv26) conv27 = self.conv27(relu5) - relu6 = pybuda.op.Relu("relu6", conv27) + relu6 = forge.op.Relu("relu6", conv27) conv28 = self.conv28(relu6) - add12 = pybuda.op.Add("add12", add9, conv28) - exp9 = pybuda.op.Exp("exp9", add12) - red3 = pybuda.op.ReduceSum("red3", exp9, dim=-2) + add12 = forge.op.Add("add12", add9, conv28) + exp9 = forge.op.Exp("exp9", add12) + red3 = forge.op.ReduceSum("red3", exp9, dim=-2) # tail # ... tail flow 1 ... 
- add15 = pybuda.op.Add("add15", red1, red2) + add15 = forge.op.Add("add15", red1, red2) W1, Z1, R1, C1 = 1, 1, add15.shape[-3], add15.shape[-1] * add15.shape[-2] - resh1 = pybuda.op.Reshape("resh1", add15, (W1, Z1, R1, C1)) - tr1 = pybuda.op.Transpose("tr1", resh1, -2, -1) - ra1 = pybuda.op.ReduceAvg("ra1", tr1, -2) + resh1 = forge.op.Reshape("resh1", add15, (W1, Z1, R1, C1)) + tr1 = forge.op.Transpose("tr1", resh1, -2, -1) + ra1 = forge.op.ReduceAvg("ra1", tr1, -2) lin1 = self.linear1(ra1) - sm3 = pybuda.op.Softmax("sm3", lin1, dim=-1, stable=True) + sm3 = forge.op.Softmax("sm3", lin1, dim=-1, stable=True) # ... tail flow 2 ... - add14 = pybuda.op.Add("add14", red2, red3) + add14 = forge.op.Add("add14", red2, red3) W2, Z2, R2, C2 = 1, 1, add14.shape[-3], add14.shape[-1] * add14.shape[-2] - resh2 = pybuda.op.Reshape("resh2", add14, (W2, Z2, R2, C2)) - tr2 = pybuda.op.Transpose("tr2", resh2, -1, -2) - ra2 = pybuda.op.ReduceAvg("ra2", tr2, -2) + resh2 = forge.op.Reshape("resh2", add14, (W2, Z2, R2, C2)) + tr2 = forge.op.Transpose("tr2", resh2, -1, -2) + ra2 = forge.op.ReduceAvg("ra2", tr2, -2) lin2 = self.linear2(ra2) - sm4 = pybuda.op.Softmax("sm4", lin2, dim=-1, stable=True) + sm4 = forge.op.Softmax("sm4", lin2, dim=-1, stable=True) return sm3, sm4 @@ -301,17 +301,17 @@ def set_environment(): # Environment variable that adds padding pass if TEST_D_DISABLE_PADDING_PASS_FLAG: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" # Environment variable that allows printing a graph if TEST_D_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_D_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_D_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Include or not environment variables for debugging the stack if TEST_D_LOGGER_LEVEL_TRACE: @@ -321,9 +321,9 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_D_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_D_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" # The main reason why we use shapes of different sizes is # because convolutional neural networks can't always work with big shapes. 
@@ -423,7 +423,7 @@ def test_padding_pass_d( tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *inputs, diff --git a/pybuda/test/test_padding/other/test_padding_pass_e.py b/forge/test/test_padding/other/test_padding_pass_e.py similarity index 70% rename from pybuda/test/test_padding/other/test_padding_pass_e.py rename to forge/test/test_padding/other/test_padding_pass_e.py index db6ecf48b..4448d4e69 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_e.py +++ b/forge/test/test_padding/other/test_padding_pass_e.py @@ -9,19 +9,19 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda._C import DataFormat +from forge._C.backend_api import BackendType, BackendDevice +from forge._C import DataFormat -class TestPaddingPassE(pybuda.PyBudaModule): +class TestPaddingPassE(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on element-wise operation. @@ -29,13 +29,13 @@ class TestPaddingPassE(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param5 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param6 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param7 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param5 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param6 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param7 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(3)] for i in range(1, 8): @@ -44,61 +44,61 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2, x3): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) - add1 = pybuda.op.Add("add1", x1, x2) - add2 = pybuda.op.Add("add2", x2, x3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mul3 = forge.op.Multiply("mul3", x3, self.train_param3) + add1 = forge.op.Add("add1", x1, x2) + add2 = forge.op.Add("add2", x2, x3) # Layer 3 - mul4 = pybuda.op.Multiply("mul4", self.train_param1, add1) - mul5 = pybuda.op.Multiply("mul5", self.train_param2, add2) - mul6 = pybuda.op.Multiply("mul6", mul3, self.train_param3) - mul7 = pybuda.op.Multiply("mul7", add2, self.train_param6) - add3 = pybuda.op.Add("add3", mul1, mul4) - add4 = pybuda.op.Add("add4", mul2, mul5) + mul4 = forge.op.Multiply("mul4", self.train_param1, add1) + mul5 = forge.op.Multiply("mul5", self.train_param2, add2) + mul6 = 
forge.op.Multiply("mul6", mul3, self.train_param3) + mul7 = forge.op.Multiply("mul7", add2, self.train_param6) + add3 = forge.op.Add("add3", mul1, mul4) + add4 = forge.op.Add("add4", mul2, mul5) # Layer 4 - relu1 = pybuda.op.Relu("relu1", add3) - relu2 = pybuda.op.Relu("relu2", mul4) - relu3 = pybuda.op.Relu("relu3", add4) - exp1 = pybuda.op.Exp("exp1", mul6) + relu1 = forge.op.Relu("relu1", add3) + relu2 = forge.op.Relu("relu2", mul4) + relu3 = forge.op.Relu("relu3", add4) + exp1 = forge.op.Exp("exp1", mul6) # Layer 5 - add5 = pybuda.op.Add("add5", mul5, mul6) - add6 = pybuda.op.Add("add6", mul2, relu3) - mul8 = pybuda.op.Multiply("mul8", self.train_param4, relu1) - mul9 = pybuda.op.Multiply("mul9", self.train_param5, exp1) - mul10 = pybuda.op.Multiply("mul10", relu1, mul2) - mul11 = pybuda.op.Multiply("mul11", exp1, add4) - mul12 = pybuda.op.Multiply("mul12", relu2, mul3) - mul13 = pybuda.op.Multiply("mul13", add4, self.train_param7) - mul14 = pybuda.op.Multiply("mul14", add6, add2) + add5 = forge.op.Add("add5", mul5, mul6) + add6 = forge.op.Add("add6", mul2, relu3) + mul8 = forge.op.Multiply("mul8", self.train_param4, relu1) + mul9 = forge.op.Multiply("mul9", self.train_param5, exp1) + mul10 = forge.op.Multiply("mul10", relu1, mul2) + mul11 = forge.op.Multiply("mul11", exp1, add4) + mul12 = forge.op.Multiply("mul12", relu2, mul3) + mul13 = forge.op.Multiply("mul13", add4, self.train_param7) + mul14 = forge.op.Multiply("mul14", add6, add2) # Layer 6 - recip1 = pybuda.op.Reciprocal("recip1", mul10) - exp2 = pybuda.op.Exp("exp2", mul11) - exp3 = pybuda.op.Exp("exp3", add5) - relu4 = pybuda.op.Relu("relu4", mul12) - relu5 = pybuda.op.Relu("relu5", mul14) + recip1 = forge.op.Reciprocal("recip1", mul10) + exp2 = forge.op.Exp("exp2", mul11) + exp3 = forge.op.Exp("exp3", add5) + relu4 = forge.op.Relu("relu4", mul12) + relu5 = forge.op.Relu("relu5", mul14) # Layer 7 - mul15 = pybuda.op.Multiply("mul15", add1, relu4) - add7 = pybuda.op.Add("add7", mul9, mul8) - add8 = pybuda.op.Add("add8", recip1, exp2) - add9 = pybuda.op.Add("add9", exp1, exp3) - add10 = pybuda.op.Add("add10", mul7, mul13) + mul15 = forge.op.Multiply("mul15", add1, relu4) + add7 = forge.op.Add("add7", mul9, mul8) + add8 = forge.op.Add("add8", recip1, exp2) + add9 = forge.op.Add("add9", exp1, exp3) + add10 = forge.op.Add("add10", mul7, mul13) # Layer 8 - mul16 = pybuda.op.Multiply("mul16", exp1, add8) - mul17 = pybuda.op.Multiply("mul17", add7, add10) - mul18 = pybuda.op.Multiply("mul18", add9, mul15) - mul19 = pybuda.op.Multiply("mul19", relu2, relu5) + mul16 = forge.op.Multiply("mul16", exp1, add8) + mul17 = forge.op.Multiply("mul17", add7, add10) + mul18 = forge.op.Multiply("mul18", add9, mul15) + mul19 = forge.op.Multiply("mul19", relu2, relu5) return mul16, mul17, mul18, mul19 -class TestPaddingPassE_1(pybuda.PyBudaModule): +class TestPaddingPassE_1(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on element-wise operation. 
@@ -106,8 +106,8 @@ class TestPaddingPassE_1(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) @@ -116,21 +116,21 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param1) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param1) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) # Layer 3 - mul4 = pybuda.op.Multiply("mul4", mul1, mul2) - mul5 = pybuda.op.Multiply("mul5", mul2, mul3) + mul4 = forge.op.Multiply("mul4", mul1, mul2) + mul5 = forge.op.Multiply("mul5", mul2, mul3) # Layer 4 - mul6 = pybuda.op.Multiply("mul6", mul4, mul5) + mul6 = forge.op.Multiply("mul6", mul4, mul5) return mul6 -class TestPaddingPassE_2(pybuda.PyBudaModule): +class TestPaddingPassE_2(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on element-wise operation. @@ -139,7 +139,7 @@ def __init__(self, name: str, shape: list, explicit_padding: bool = False): super().__init__(name) self.shape = shape self.explicit_padding = explicit_padding - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -151,23 +151,23 @@ def forward(self, x): # Layer 2 if explicit_padding: - pad1 = pybuda.op.BudaPad("pad1", x, (4, 4), 0.0) - pad2 = pybuda.op.BudaPad("pad2", self.train_param, (4, 4), 0.0) - mul = pybuda.op.Multiply("mul", pad1, pad2) + pad1 = forge.op.BudaPad("pad1", x, (4, 4), 0.0) + pad2 = forge.op.BudaPad("pad2", self.train_param, (4, 4), 0.0) + mul = forge.op.Multiply("mul", pad1, pad2) else: - mul = pybuda.op.Multiply("mul", x, self.train_param) + mul = forge.op.Multiply("mul", x, self.train_param) # Layer 3 - exp = pybuda.op.Exp("exp", mul) + exp = forge.op.Exp("exp", mul) if explicit_padding: - unpad = pybuda.op.BudaUnpad("unpad", exp, (192, 192), (4, 4)) + unpad = forge.op.BudaUnpad("unpad", exp, (192, 192), (4, 4)) return unpad else: return exp -class TestPaddingPassE_3(pybuda.PyBudaModule): +class TestPaddingPassE_3(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on element-wise operation. 
@@ -176,7 +176,7 @@ def __init__(self, name: str, shape: list, explicit_padding: bool = False): super().__init__(name) self.shape = shape self.explicit_padding = explicit_padding - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -194,25 +194,25 @@ def forward(self, x): pad_rt, pad_ct = 7, 7 else: pad_rt, pad_ct = 0, 0 - pad1 = pybuda.op.BudaPad("pad1", x, (pad_rt, pad_ct), 0.0) - pad2 = pybuda.op.BudaPad("pad2", self.train_param, (pad_rt, pad_ct), 0.0) - mul1 = pybuda.op.Multiply("mul1", pad1, pad2) + pad1 = forge.op.BudaPad("pad1", x, (pad_rt, pad_ct), 0.0) + pad2 = forge.op.BudaPad("pad2", self.train_param, (pad_rt, pad_ct), 0.0) + mul1 = forge.op.Multiply("mul1", pad1, pad2) else: - mul1 = pybuda.op.Multiply("mul1", x, self.train_param) + mul1 = forge.op.Multiply("mul1", x, self.train_param) # Layer 3 if explicit_padding: - mul2 = pybuda.op.Multiply("mul2", pad1, mul1) - mul3 = pybuda.op.Multiply("mul3", mul1, pad2) + mul2 = forge.op.Multiply("mul2", pad1, mul1) + mul3 = forge.op.Multiply("mul3", mul1, pad2) else: - mul2 = pybuda.op.Multiply("mul2", x, mul1) - mul3 = pybuda.op.Multiply("mul3", mul1, self.train_param) + mul2 = forge.op.Multiply("mul2", x, mul1) + mul3 = forge.op.Multiply("mul3", mul1, self.train_param) # Layer 4 - mul4 = pybuda.op.Multiply("mul4", mul1, mul3) + mul4 = forge.op.Multiply("mul4", mul1, mul3) # Layer 5 - mul5 = pybuda.op.Multiply("mul5", mul2, mul4) + mul5 = forge.op.Multiply("mul5", mul2, mul4) if explicit_padding: if self.shape[-2:] == (192, 192): @@ -223,7 +223,7 @@ def forward(self, x): pad_rt, pad_ct = 7, 7 else: pad_rt, pad_ct = 0, 0 - unpad = pybuda.op.BudaUnpad("unpad", mul5, (orig_r, orig_c), (pad_rt, pad_ct)) + unpad = forge.op.BudaUnpad("unpad", mul5, (orig_r, orig_c), (pad_rt, pad_ct)) return unpad else: return mul5 @@ -235,7 +235,7 @@ def forward(self, x): TEST_E_PADDING_PASS_BUFFER_QUEUE_FLAG = False TEST_E_PADDING_PASS_DISABLE_BUDA_OP_FLAG = False -TEST_E_PYBUDA_DISABLE_OP_FUSING = True +TEST_E_FORGE_DISABLE_OP_FUSING = True TEST_E_VERIFY_ALL_FLAG = True TEST_E_PRINT_GRAPH_VIZ_FLAG = False @@ -269,33 +269,33 @@ def set_environment(): # Environment variable that adds padding pass if TEST_E_DISABLE_PADDING_PASS_FLAG: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" if TEST_E_PADDING_PASS_ELEMENT_WISE_FLAG: - os.environ["PYBUDA_PADDING_PASS_ELEMENT_WISE"] = "1" + os.environ["FORGE_PADDING_PASS_ELEMENT_WISE"] = "1" if TEST_E_PADDING_PASS_MATMUL_FLAG: - os.environ["PYBUDA_PADDING_PASS_MATMUL"] = "1" + os.environ["FORGE_PADDING_PASS_MATMUL"] = "1" if TEST_E_PADDING_PASS_BUFFER_QUEUE_FLAG: - os.environ["PYBUDA_PADDING_PASS_BUFFER_QUEUE"] = "1" + os.environ["FORGE_PADDING_PASS_BUFFER_QUEUE"] = "1" if TEST_E_PADDING_PASS_DISABLE_BUDA_OP_FLAG: - os.environ["PYBUDA_PADDING_PASS_DISABLE_BUDA_OP"] = "1" + os.environ["FORGE_PADDING_PASS_DISABLE_BUDA_OP"] = "1" else: - os.environ["PYBUDA_PADDING_PASS_DISABLE_BUDA_OP"] = "0" + os.environ["FORGE_PADDING_PASS_DISABLE_BUDA_OP"] = "0" # Environment variables that controls operation fusing - if TEST_E_PYBUDA_DISABLE_OP_FUSING: - os.environ["PYBUDA_DISABLE_OP_FUSING"] = "1" + if TEST_E_FORGE_DISABLE_OP_FUSING: + os.environ["FORGE_DISABLE_OP_FUSING"] = "1" # Environment variable that allows printing a graph if 
TEST_E_VERIFY_ALL_FLAG: - os.environ["PYBUDA_FORCE_VERIFY_ALL"] = "1" + os.environ["FORGE_FORCE_VERIFY_ALL"] = "1" if TEST_E_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_E_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_E_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Include or not environment variables for debugging the stack if TEST_E_LOGGER_LEVEL_TRACE: @@ -305,9 +305,9 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_E_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_E_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" # The main reason why we use shapes of different sizes is @@ -461,7 +461,7 @@ def test_padding_pass_e( model = test_model[1](name=test_name, shape=original_shape) tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *model.inputs, @@ -511,7 +511,7 @@ def test_padding_pass_e_argument_e2( model = TestPaddingPassE_2(name=test_name, shape=original_shape, explicit_padding=explicit_padding) tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *model.inputs, @@ -532,7 +532,7 @@ def test_padding_pass_e_argument_e3( original_shape, explicit_padding ): - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: pytest.skip("Skip until #731 is solved") if explicit_padding: @@ -565,7 +565,7 @@ def test_padding_pass_e_argument_e3( model = TestPaddingPassE_3(name=test_name, shape=original_shape, explicit_padding=explicit_padding) tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *model.inputs, diff --git a/pybuda/test/test_padding/other/test_padding_pass_f.py b/forge/test/test_padding/other/test_padding_pass_f.py similarity index 71% rename from pybuda/test/test_padding/other/test_padding_pass_f.py rename to forge/test/test_padding/other/test_padding_pass_f.py index 8b3959915..2a2723d68 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_f.py +++ b/forge/test/test_padding/other/test_padding_pass_f.py @@ -9,18 +9,18 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType -from pybuda._C import DataFormat +from forge._C.backend_api import BackendType +from forge._C import DataFormat -class TestPaddingPassF(pybuda.PyBudaModule): +class TestPaddingPassF(forge.ForgeModule): # Testing padding/unpadding pass. 
# This test is combination of convolutional layers, @@ -54,11 +54,11 @@ def __init__( self.bias = False self.stride = 1 - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param5 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param4 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param5 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(5)] for i in range(1, 6): @@ -69,7 +69,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -83,7 +83,7 @@ def conv2d(name, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel=2, stride=1): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -92,7 +92,7 @@ def maxpool2d(name, kernel=2, stride=1): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -139,104 +139,104 @@ def linear(name): def forward(self, x1, x2, x3): # ... head ... - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x1, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param2) - mul4 = pybuda.op.Multiply("mul4", x3, self.train_param2) - mul5 = pybuda.op.Multiply("mul5", x3, self.train_param3) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x1, self.train_param2) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) + mul4 = forge.op.Multiply("mul4", x3, self.train_param2) + mul5 = forge.op.Multiply("mul5", x3, self.train_param3) # ... block ... - add1 = pybuda.op.Add("add1", mul4, mul5) + add1 = forge.op.Add("add1", mul4, mul5) conv4 = self.conv4(mul3) - mul6 = pybuda.op.Multiply("mul6", mul2, conv4) - mul7 = pybuda.op.Multiply("mul7", mul3, add1) - add2 = pybuda.op.Add("add2", mul3, mul6) + mul6 = forge.op.Multiply("mul6", mul2, conv4) + mul7 = forge.op.Multiply("mul7", mul3, add1) + add2 = forge.op.Add("add2", mul3, mul6) # ... block flow 1 ... conv1 = self.conv1(mul1) - relu1 = pybuda.op.Relu("relu1", conv1) + relu1 = forge.op.Relu("relu1", conv1) conv2 = self.conv2(relu1) - relu2 = pybuda.op.Relu("relu2", conv2) + relu2 = forge.op.Relu("relu2", conv2) conv3 = self.conv3(relu2) - add3 = pybuda.op.Add("add3", mul1, conv3) - relu7 = pybuda.op.Relu("relu7", add3) - mul9 = pybuda.op.Multiply("mul9", relu7, mul7) + add3 = forge.op.Add("add3", mul1, conv3) + relu7 = forge.op.Relu("relu7", add3) + mul9 = forge.op.Multiply("mul9", relu7, mul7) # ... block flow 2 ... 
conv5 = self.conv5(mul6) - relu3 = pybuda.op.Relu("relu3", conv5) + relu3 = forge.op.Relu("relu3", conv5) conv6 = self.conv6(relu3) - relu4 = pybuda.op.Relu("relu4", conv6) + relu4 = forge.op.Relu("relu4", conv6) conv7 = self.conv7(relu4) - add4 = pybuda.op.Add("add4", add2, conv7) - relu8 = pybuda.op.Relu("relu8", add4) - add5 = pybuda.op.Add("add5", conv7, mul7) - relu9 = pybuda.op.Relu("relu9", add5) + add4 = forge.op.Add("add4", add2, conv7) + relu8 = forge.op.Relu("relu8", add4) + add5 = forge.op.Add("add5", conv7, mul7) + relu9 = forge.op.Relu("relu9", add5) # ... block flow 3 ... conv8 = self.conv8(mul5) - relu5 = pybuda.op.Relu("relu5", conv8) + relu5 = forge.op.Relu("relu5", conv8) conv9 = self.conv9(relu5) - relu6 = pybuda.op.Relu("relu6", conv9) + relu6 = forge.op.Relu("relu6", conv9) conv10 = self.conv10(relu6) - mul8 = pybuda.op.Multiply("mul8", mul5, conv10) - exp1 = pybuda.op.Exp("exp1", mul8) - add6 = pybuda.op.Add("add6", exp1, mul7) - mul10 = pybuda.op.Multiply("mul10", add5, exp1) - relu10 = pybuda.op.Relu("relu10", mul10) + mul8 = forge.op.Multiply("mul8", mul5, conv10) + exp1 = forge.op.Exp("exp1", mul8) + add6 = forge.op.Add("add6", exp1, mul7) + mul10 = forge.op.Multiply("mul10", add5, exp1) + relu10 = forge.op.Relu("relu10", mul10) # ... intermediate ... - add7 = pybuda.op.Add("add7", mul9, relu10) - add8 = pybuda.op.Add("add8", mul2, relu9) - add9 = pybuda.op.Add("add9", relu8, relu10) - add10 = pybuda.op.Add("add10", relu8, add6) + add7 = forge.op.Add("add7", mul9, relu10) + add8 = forge.op.Add("add8", mul2, relu9) + add9 = forge.op.Add("add9", relu8, relu10) + add10 = forge.op.Add("add10", relu8, add6) maxpool1 = self.maxpool1(add7) maxpool2 = self.maxpool2(add10) - mul11 = pybuda.op.Multiply("mul11", add8, add9) - mul12 = pybuda.op.Multiply("mul12", add7, add10) - mul13 = pybuda.op.Multiply("mul13", maxpool1, maxpool2) + mul11 = forge.op.Multiply("mul11", add8, add9) + mul12 = forge.op.Multiply("mul12", add7, add10) + mul13 = forge.op.Multiply("mul13", maxpool1, maxpool2) # ... block ... # ... block flow 4 ... conv11 = self.conv11(mul11) - relu11 = pybuda.op.Relu("relu11", conv11) + relu11 = forge.op.Relu("relu11", conv11) conv12 = self.conv12(relu11) - relu12 = pybuda.op.Relu("relu12", conv12) + relu12 = forge.op.Relu("relu12", conv12) conv13 = self.conv13(relu12) - add11 = pybuda.op.Add("add11", conv13, mul11) + add11 = forge.op.Add("add11", conv13, mul11) # ... block flow 5 ... conv14 = self.conv14(mul12) - relu13 = pybuda.op.Relu("relu13", conv14) + relu13 = forge.op.Relu("relu13", conv14) conv15 = self.conv15(relu13) - relu14 = pybuda.op.Relu("relu14", conv15) + relu14 = forge.op.Relu("relu14", conv15) conv16 = self.conv16(relu14) - add12 = pybuda.op.Add("add12", conv16, mul12) + add12 = forge.op.Add("add12", conv16, mul12) # ... tail ... # ... tail flow 1 ... W, Z, R, C = 1, 1, add11.shape[-3], add11.shape[-1] * add11.shape[-2] - resh1 = pybuda.op.Reshape("resh1", add11, (W, Z, R, C)) - tr1 = pybuda.op.Transpose("tr1", resh1, -1, -2) - red1 = pybuda.op.ReduceAvg("red1", tr1, -2) + resh1 = forge.op.Reshape("resh1", add11, (W, Z, R, C)) + tr1 = forge.op.Transpose("tr1", resh1, -1, -2) + red1 = forge.op.ReduceAvg("red1", tr1, -2) lin1 = self.lin1(red1) - sm1 = pybuda.op.Softmax("sm1", lin1, dim=-1, stable=True) + sm1 = forge.op.Softmax("sm1", lin1, dim=-1, stable=True) # ... tail flow 2 ... 
W, Z, R, C = 1, 1, add12.shape[-3], add12.shape[-1] * add12.shape[-2] - resh2 = pybuda.op.Reshape("resh2", add12, (W, Z, R, C)) - tr2 = pybuda.op.Transpose("tr2", resh2, -1, -2) - red2 = pybuda.op.ReduceAvg("red2", tr2, -2) + resh2 = forge.op.Reshape("resh2", add12, (W, Z, R, C)) + tr2 = forge.op.Transpose("tr2", resh2, -1, -2) + red2 = forge.op.ReduceAvg("red2", tr2, -2) lin2 = self.lin2(red2) - sm2 = pybuda.op.Softmax("sm2", lin2, dim=-1, stable=True) + sm2 = forge.op.Softmax("sm2", lin2, dim=-1, stable=True) # ... tail flow 3 ... W, Z, R, C = 1, 1, mul13.shape[-3], mul13.shape[-1] * mul13.shape[-2] - resh3 = pybuda.op.Reshape("resh3", mul13, (W, Z, R, C)) - tr3 = pybuda.op.Transpose("tr3", resh3, -1, -2) - red3 = pybuda.op.ReduceAvg("red3", tr3, -2) + resh3 = forge.op.Reshape("resh3", mul13, (W, Z, R, C)) + tr3 = forge.op.Transpose("tr3", resh3, -1, -2) + red3 = forge.op.ReduceAvg("red3", tr3, -2) lin3 = self.lin3(red3) - sm3 = pybuda.op.Softmax("sm3", lin3, dim=-1, stable=True) + sm3 = forge.op.Softmax("sm3", lin3, dim=-1, stable=True) return sm1, sm2, sm3 @@ -273,17 +273,17 @@ def set_environment(): # Environment variable that adds padding pass if TEST_F_DISABLE_PADDING_PASS_FLAG: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" # Environment variable that allows printing a graph if TEST_F_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_F_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_F_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Include or not environment variables for debugging the stack if TEST_F_LOGGER_LEVEL_TRACE: @@ -293,11 +293,11 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_F_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_F_CHIP_PLACEMENT_SELF_CUT_TYPE_FLAG: - os.environ["PYBUDA_GRAPH_SOLVER_SELF_CUT_TYPE"] = "FastCut" + os.environ["FORGE_GRAPH_SOLVER_SELF_CUT_TYPE"] = "FastCut" if TEST_F_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" @@ -437,7 +437,7 @@ def test_padding_pass_f( tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *inputs, diff --git a/pybuda/test/test_padding/other/test_padding_pass_g.py b/forge/test/test_padding/other/test_padding_pass_g.py similarity index 67% rename from pybuda/test/test_padding/other/test_padding_pass_g.py rename to forge/test/test_padding/other/test_padding_pass_g.py index c56cb4ba9..32d64fa2d 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_g.py +++ b/forge/test/test_padding/other/test_padding_pass_g.py @@ -9,19 +9,19 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType -from pybuda._C import DataFormat +from forge._C.backend_api import BackendType +from forge._C import DataFormat -class TestPaddingPassG(pybuda.PyBudaModule): +class 
TestPaddingPassG(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on element-wise operation. @@ -29,8 +29,8 @@ class TestPaddingPassG(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] for i in range(1, 3): @@ -39,45 +39,45 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x1, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x1, self.train_param2) + mul3 = forge.op.Multiply("mul3", x2, self.train_param2) # Layer 3 - add1 = pybuda.op.Add("add1", mul1, mul2) - add2 = pybuda.op.Add("add2", mul2, mul3) - add3 = pybuda.op.Add("add3", mul2, mul3) + add1 = forge.op.Add("add1", mul1, mul2) + add2 = forge.op.Add("add2", mul2, mul3) + add3 = forge.op.Add("add3", mul2, mul3) # Layer 4 - mul4 = pybuda.op.Multiply("mul4", add1, add2) - mul5 = pybuda.op.Multiply("mul5", add1, add2) - mul6 = pybuda.op.Multiply("mul6", add2, add3) + mul4 = forge.op.Multiply("mul4", add1, add2) + mul5 = forge.op.Multiply("mul5", add1, add2) + mul6 = forge.op.Multiply("mul6", add2, add3) # Layer 5 - exp1 = pybuda.op.Exp("exp1", mul4) - exp2 = pybuda.op.Exp("exp2", mul5) - exp3 = pybuda.op.Exp("exp3", mul6) + exp1 = forge.op.Exp("exp1", mul4) + exp2 = forge.op.Exp("exp2", mul5) + exp3 = forge.op.Exp("exp3", mul6) # Layer 6 - relu1 = pybuda.op.Relu("relu1", exp1) - relu2 = pybuda.op.Relu("relu2", exp2) - relu3 = pybuda.op.Relu("relu3", exp3) + relu1 = forge.op.Relu("relu1", exp1) + relu2 = forge.op.Relu("relu2", exp2) + relu3 = forge.op.Relu("relu3", exp3) # Layer 7 - exp4 = pybuda.op.Exp("exp4", relu1) - exp5 = pybuda.op.Exp("exp5", relu2) - exp6 = pybuda.op.Exp("exp6", relu3) + exp4 = forge.op.Exp("exp4", relu1) + exp5 = forge.op.Exp("exp5", relu2) + exp6 = forge.op.Exp("exp6", relu3) return exp4, exp5, exp6 -class TestPaddingPassG_1(pybuda.PyBudaModule): +class TestPaddingPassG_1(forge.ForgeModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] for i in range(1, 3): @@ -86,24 +86,24 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) # Layer 3 - add = pybuda.op.Add("add", mul1, mul2) + add = forge.op.Add("add", mul1, mul2) # Layer 4 - exp = pybuda.op.Exp("exp", add) + exp = forge.op.Exp("exp", add) return exp -class TestPaddingPassG_2(pybuda.PyBudaModule): +class 
TestPaddingPassG_2(forge.ForgeModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] for i in range(1, 3): @@ -112,24 +112,24 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2): # Layer 2 - add = pybuda.op.Add("add", x1, x2) + add = forge.op.Add("add", x1, x2) # Layer 3 - mul1 = pybuda.op.Multiply("mul1", add, self.train_param1) + mul1 = forge.op.Multiply("mul1", add, self.train_param1) # Layer 4 - mul2 = pybuda.op.Multiply("mul2", mul1, self.train_param2) + mul2 = forge.op.Multiply("mul2", mul1, self.train_param2) # Layer 5 - recip = pybuda.op.Reciprocal("recip", mul2) + recip = forge.op.Reciprocal("recip", mul2) return recip -class TestPaddingPassG_3(pybuda.PyBudaModule): +class TestPaddingPassG_3(forge.ForgeModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] for i in range(1, 3): @@ -138,11 +138,11 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2): # Layer 2 - add1 = pybuda.op.Add("add1", x1, x2) - add2 = pybuda.op.Add("add2", self.train_param1, self.train_param2) + add1 = forge.op.Add("add1", x1, x2) + add2 = forge.op.Add("add2", self.train_param1, self.train_param2) # Layer 3 - mul = pybuda.op.Multiply("mul", add1, add2) + mul = forge.op.Multiply("mul", add1, add2) return mul @@ -185,24 +185,24 @@ def set_environment(): # Environment variable that adds padding pass if TEST_G_DISABLE_PADDING_PASS_FLAG: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" if TEST_G_PADDING_PASS_ELEMENT_WISE_FLAG: - os.environ["PYBUDA_PADDING_PASS_ELEMENT_WISE"] = "1" + os.environ["FORGE_PADDING_PASS_ELEMENT_WISE"] = "1" if TEST_G_PADDING_PASS_MATMUL_FLAG: - os.environ["PYBUDA_PADDING_PASS_MATMUL"] = "1" + os.environ["FORGE_PADDING_PASS_MATMUL"] = "1" # Environment variable that allows printing a graph if TEST_G_VERIFY_ALL_FLAG: - os.environ["PYBUDA_FORCE_VERIFY_ALL"] = "1" + os.environ["FORGE_FORCE_VERIFY_ALL"] = "1" # Environment variable that allows printing a graph if TEST_G_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_G_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_G_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Include or not environment variables for debugging the stack if TEST_G_LOGGER_LEVEL_TRACE: @@ -212,9 +212,9 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_G_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - 
os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_G_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" @@ -296,7 +296,7 @@ def test_padding_pass_g( original_shape, test_model ): - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: pytest.skip("Skip until #731 is solved") if test_kind.is_training(): @@ -312,7 +312,7 @@ def test_padding_pass_g( model = test_model[1](name=test_name, shape=original_shape) tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *inputs, diff --git a/pybuda/test/test_padding/other/test_padding_pass_h.py b/forge/test/test_padding/other/test_padding_pass_h.py similarity index 71% rename from pybuda/test/test_padding/other/test_padding_pass_h.py rename to forge/test/test_padding/other/test_padding_pass_h.py index 84a767df2..86b9e2efa 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_h.py +++ b/forge/test/test_padding/other/test_padding_pass_h.py @@ -9,19 +9,19 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.balancer import OpOverride -from pybuda._C import DataFormat +from forge._C.balancer import OpOverride +from forge._C import DataFormat -class TestPaddingPassH(pybuda.PyBudaModule): +class TestPaddingPassH(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on matmul operation and eltwise operations. @@ -29,9 +29,9 @@ class TestPaddingPassH(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(3)] self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) @@ -41,37 +41,37 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2, x3): # Layer 2 - mm1 = pybuda.op.Matmul("mm1", x1, self.train_param1) - mm2 = pybuda.op.Matmul("mm2", x2, self.train_param2) - mm3 = pybuda.op.Matmul("mm3", x3, self.train_param3) + mm1 = forge.op.Matmul("mm1", x1, self.train_param1) + mm2 = forge.op.Matmul("mm2", x2, self.train_param2) + mm3 = forge.op.Matmul("mm3", x3, self.train_param3) # Layer 3 - mul1 = pybuda.op.Multiply("mul1", mm1, x2) - mul2 = pybuda.op.Multiply("mul2", self.train_param1, mm2) - mul3 = pybuda.op.Multiply("mul3", mm2, self.train_param3) - mul4 = pybuda.op.Multiply("mul4", self.train_param2, mm3) + mul1 = forge.op.Multiply("mul1", mm1, x2) + mul2 = forge.op.Multiply("mul2", self.train_param1, mm2) + mul3 = forge.op.Multiply("mul3", mm2, self.train_param3) + mul4 = forge.op.Multiply("mul4", self.train_param2, mm3) # Layer 4 - mm4 = pybuda.op.Matmul("mm4", mul1, mul2) - mm5 = pybuda.op.Matmul("mm5", mul3, mul4) + mm4 = forge.op.Matmul("mm4", mul1, mul2) + mm5 = 
forge.op.Matmul("mm5", mul3, mul4) # Layer 5 - mul5 = pybuda.op.Multiply("mul5", mm4, mm2) - mul6 = pybuda.op.Multiply("mul6", mm2, mm5) + mul5 = forge.op.Multiply("mul5", mm4, mm2) + mul6 = forge.op.Multiply("mul6", mm2, mm5) # Layer 6 - mm6 = pybuda.op.Matmul("mm6", mul1, mul5) - mm7 = pybuda.op.Matmul("mm7", mul6, mul4) - add = pybuda.op.Add("add", mul5, mul6) + mm6 = forge.op.Matmul("mm6", mul1, mul5) + mm7 = forge.op.Matmul("mm7", mul6, mul4) + add = forge.op.Add("add", mul5, mul6) # Layer 7 - mul7 = pybuda.op.Multiply("mul7", mm6, add) - mul8 = pybuda.op.Multiply("mul8", add, mm7) + mul7 = forge.op.Multiply("mul7", mm6, add) + mul8 = forge.op.Multiply("mul8", add, mm7) return mul7, mul8 -class TestPaddingPassH_1(pybuda.PyBudaModule): +class TestPaddingPassH_1(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on matmul operation and eltwise operations. @@ -79,8 +79,8 @@ class TestPaddingPassH_1(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) @@ -89,21 +89,21 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2): # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mm1 = pybuda.op.Matmul("mm1", self.train_param1, x2) + mul1 = forge.op.Multiply("mul1", x1, self.train_param1) + mul2 = forge.op.Multiply("mul2", x2, self.train_param2) + mm1 = forge.op.Matmul("mm1", self.train_param1, x2) # Layer 3 - mm2 = pybuda.op.Matmul("mm2", mul1, mm1) - mm3 = pybuda.op.Matmul("mm3", mm1, mul2) + mm2 = forge.op.Matmul("mm2", mul1, mm1) + mm3 = forge.op.Matmul("mm3", mm1, mul2) # Layer 4 - mm4 = pybuda.op.Matmul("mm4", mm2, mm3) + mm4 = forge.op.Matmul("mm4", mm2, mm3) return mm4 -class TestPaddingPassH_2(pybuda.PyBudaModule): +class TestPaddingPassH_2(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on matmul operation and eltwise operations. @@ -111,7 +111,7 @@ class TestPaddingPassH_2(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -119,15 +119,15 @@ def __init__(self, name: str, shape: list): def forward(self, x): # Layer 2 - mm = pybuda.op.Matmul("mm", x, self.train_param) + mm = forge.op.Matmul("mm", x, self.train_param) # Layer 3 - exp = pybuda.op.Exp("exp", mm) + exp = forge.op.Exp("exp", mm) return exp -class TestPaddingPassH_3(pybuda.PyBudaModule): +class TestPaddingPassH_3(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on matmul operation and eltwise operations. 
@@ -135,7 +135,7 @@ class TestPaddingPassH_3(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -143,20 +143,20 @@ def __init__(self, name: str, shape: list): def forward(self, x): # Layer 2 - mm1 = pybuda.op.Matmul("mm1", x, self.train_param) - mm2 = pybuda.op.Matmul("mm2", x, self.train_param) - mm3 = pybuda.op.Matmul("mm3", x, self.train_param) + mm1 = forge.op.Matmul("mm1", x, self.train_param) + mm2 = forge.op.Matmul("mm2", x, self.train_param) + mm3 = forge.op.Matmul("mm3", x, self.train_param) # Layer 3 - mm4 = pybuda.op.Matmul("mm4", mm1, mm2) - mm5 = pybuda.op.Matmul("mm5", mm2, mm3) + mm4 = forge.op.Matmul("mm4", mm1, mm2) + mm5 = forge.op.Matmul("mm5", mm2, mm3) # Layer 4 - mul1 = pybuda.op.Multiply("mul1", mm1, mm4) - mul2 = pybuda.op.Multiply("mul2", mm5, mm3) + mul1 = forge.op.Multiply("mul1", mm1, mm4) + mul2 = forge.op.Multiply("mul2", mm5, mm3) # Layer 5 - mm6 = pybuda.op.Matmul("mm6", mul1, mul2) + mm6 = forge.op.Matmul("mm6", mul1, mul2) return mm6 @@ -203,26 +203,26 @@ def set_environment(): # Environment variable that adds padding pass if TEST_H_DISABLE_PADDING_PASS_FLAG: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" if TEST_H_PADDING_PASS_ELEMENT_WISE_FLAG: - os.environ["PYBUDA_PADDING_PASS_ELEMENT_WISE"] = "1" + os.environ["FORGE_PADDING_PASS_ELEMENT_WISE"] = "1" if TEST_H_PADDING_PASS_MATMUL_FLAG: - os.environ["PYBUDA_PADDING_PASS_MATMUL"] = "1" + os.environ["FORGE_PADDING_PASS_MATMUL"] = "1" if TEST_H_PADDING_PASS_BUFFER_QUEUE_FLAG: - os.environ["PYBUDA_PADDING_PASS_BUFFER_QUEUE"] = "1" + os.environ["FORGE_PADDING_PASS_BUFFER_QUEUE"] = "1" # Environment variable that allows printing a graph if TEST_H_VERIFY_ALL_FLAG: - os.environ["PYBUDA_FORCE_VERIFY_ALL"] = "1" + os.environ["FORGE_FORCE_VERIFY_ALL"] = "1" # Environment variable that allows printing a graph if TEST_H_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_H_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_H_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Include or not environment variables for debugging the stack if TEST_H_LOGGER_LEVEL_TRACE: @@ -232,15 +232,15 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_H_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_H_CHIP_PLACEMENT_SELF_CUT_TYPE_FLAG: if TEST_H_CHIP_PLACEMENT_SELF_CUT_TYPE_FLAG_CONSUMER: - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" if TEST_H_CHIP_PLACEMENT_SELF_CUT_TYPE_FLAG_PRODUCER: - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ProducerUserDataEdgesFirst" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ProducerUserDataEdgesFirst" if 
TEST_H_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" @@ -314,7 +314,7 @@ def set_environment(): test_model.append(("TestPaddingPassH_3", TestPaddingPassH_3)) -@pytest.mark.xfail(reason="tenstorrent/pybuda#1004#note_216172") +@pytest.mark.xfail(reason="tenstorrent/forge#1004#note_216172") @pytest.mark.parametrize("original_shape", original_shape, ids=original_shape_ids) @pytest.mark.parametrize("test_model", test_model, ids=[item[0] for item in test_model]) def test_padding_pass_h( @@ -344,7 +344,7 @@ def test_padding_pass_h( model = test_model[1](name=test_name, shape=original_shape) tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *model.inputs, @@ -354,14 +354,14 @@ def test_padding_pass_h( verify_cfg=verify_cfg ) -@pytest.mark.xfail(reason="tenstorrent/pybuda#1004#note_216172") +@pytest.mark.xfail(reason="tenstorrent/forge#1004#note_216172") @pytest.mark.parametrize("original_shape", original_shape, ids=original_shape_ids) def test_padding_pass_h_argument( test_kind, test_device, original_shape ): - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip("Wait until #1004 is resolved") if test_kind.is_training(): @@ -398,7 +398,7 @@ def test_padding_pass_h_argument( model = TestPaddingPassH_3(name=test_name, shape=original_shape) tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *model.inputs, diff --git a/pybuda/test/test_padding/other/test_padding_pass_i.py b/forge/test/test_padding/other/test_padding_pass_i.py similarity index 68% rename from pybuda/test/test_padding/other/test_padding_pass_i.py rename to forge/test/test_padding/other/test_padding_pass_i.py index 559798d62..ef95e479e 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_i.py +++ b/forge/test/test_padding/other/test_padding_pass_i.py @@ -9,18 +9,18 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType -from pybuda._C import DataFormat +from forge._C.backend_api import BackendType +from forge._C import DataFormat -class TestPaddingPassI(pybuda.PyBudaModule): +class TestPaddingPassI(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on only matmul operation. 
@@ -28,9 +28,9 @@ class TestPaddingPassI(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param3 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(3)] self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) @@ -40,37 +40,37 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2, x3): # Layer 2 - mm1 = pybuda.op.Matmul("mm1", x1, self.train_param1) - mm2 = pybuda.op.Matmul("mm2", x2, self.train_param2) - mm3 = pybuda.op.Matmul("mm3", x3, self.train_param3) + mm1 = forge.op.Matmul("mm1", x1, self.train_param1) + mm2 = forge.op.Matmul("mm2", x2, self.train_param2) + mm3 = forge.op.Matmul("mm3", x3, self.train_param3) # Layer 3 - mm4 = pybuda.op.Matmul("mm4", mm1, x2) - mm5 = pybuda.op.Matmul("mm5", self.train_param1, mm2) - mm6 = pybuda.op.Matmul("mm6", mm2, self.train_param3) - mm7 = pybuda.op.Matmul("mm7", self.train_param2, mm3) + mm4 = forge.op.Matmul("mm4", mm1, x2) + mm5 = forge.op.Matmul("mm5", self.train_param1, mm2) + mm6 = forge.op.Matmul("mm6", mm2, self.train_param3) + mm7 = forge.op.Matmul("mm7", self.train_param2, mm3) # Layer 4 - mm8 = pybuda.op.Matmul("mm8", mm4, mm5) - mm9 = pybuda.op.Matmul("mm9", mm6, mm7) + mm8 = forge.op.Matmul("mm8", mm4, mm5) + mm9 = forge.op.Matmul("mm9", mm6, mm7) # Layer 5 - mm10 = pybuda.op.Matmul("mm10", mm8, mm2) - mm11 = pybuda.op.Matmul("mm11", mm2, mm9) + mm10 = forge.op.Matmul("mm10", mm8, mm2) + mm11 = forge.op.Matmul("mm11", mm2, mm9) # Layer 6 - mm12 = pybuda.op.Matmul("mm12", mm4, mm10) - mm13 = pybuda.op.Matmul("mm13", mm10, mm11) - mm14 = pybuda.op.Matmul("mm14", mm11, mm7) + mm12 = forge.op.Matmul("mm12", mm4, mm10) + mm13 = forge.op.Matmul("mm13", mm10, mm11) + mm14 = forge.op.Matmul("mm14", mm11, mm7) # Layer 7 - mm15 = pybuda.op.Matmul("mm15", mm12, mm13) - mm16 = pybuda.op.Matmul("mm16", mm13, mm14) + mm15 = forge.op.Matmul("mm15", mm12, mm13) + mm16 = forge.op.Matmul("mm16", mm13, mm14) return mm15, mm16 -class TestPaddingPassI_1(pybuda.PyBudaModule): +class TestPaddingPassI_1(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on matmul operation and eltwise operations. 
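Each test function in these files then drives compilation the same way after the rename. A sketch of that flow, assuming TTDevice, CompilerConfig, VerifyConfig, and forge_compile keep their previous pybuda signatures (compile_padding_model is a hypothetical helper name for illustration):

    import forge
    from forge import TTDevice, CompilerConfig, VerifyConfig, forge_compile
    from forge._C.backend_api import BackendType

    def compile_padding_model(model):
        # Place the renamed module on a golden-backend device, then invoke the
        # renamed compile entry point with the module's pre-built inputs.
        tt0 = TTDevice("tt0", devtype=BackendType.Golden)
        tt0.place_module(model)
        return forge_compile(
            tt0,
            model.name,
            *model.inputs,
            compiler_cfg=CompilerConfig(enable_training=False),
            verify_cfg=VerifyConfig(run_golden=True),
        )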
@@ -78,8 +78,8 @@ class TestPaddingPassI_1(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param1 = forge.Parameter(*self.shape, requires_grad=True) + self.train_param2 = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) @@ -88,16 +88,16 @@ def __init__(self, name: str, shape: list): def forward(self, x1, x2): # Layer 2 - mm1 = pybuda.op.Matmul("mm1", x1, self.train_param1) - mm2 = pybuda.op.Matmul("mm2", x2, self.train_param2) + mm1 = forge.op.Matmul("mm1", x1, self.train_param1) + mm2 = forge.op.Matmul("mm2", x2, self.train_param2) # Layer 3 - mm3 = pybuda.op.Matmul("mm3", mm1, mm2) + mm3 = forge.op.Matmul("mm3", mm1, mm2) return mm3 -class TestPaddingPassI_2(pybuda.PyBudaModule): +class TestPaddingPassI_2(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on matmul operation and eltwise operations. @@ -105,7 +105,7 @@ class TestPaddingPassI_2(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -113,12 +113,12 @@ def __init__(self, name: str, shape: list): def forward(self, x): # Layer 2 - mm = pybuda.op.Matmul("mm", x, self.train_param) + mm = forge.op.Matmul("mm", x, self.train_param) return mm -class TestPaddingPassI_3(pybuda.PyBudaModule): +class TestPaddingPassI_3(forge.ForgeModule): # Testing padding/unpadding pass. # This test is based on matmul operation and eltwise operations. @@ -126,7 +126,7 @@ class TestPaddingPassI_3(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -134,25 +134,25 @@ def __init__(self, name: str, shape: list): def forward(self, x): # Layer 2 - mm1 = pybuda.op.Matmul("mm1", x, self.train_param) - mm2 = pybuda.op.Matmul("mm2", x, self.train_param) - mm3 = pybuda.op.Matmul("mm3", x, self.train_param) + mm1 = forge.op.Matmul("mm1", x, self.train_param) + mm2 = forge.op.Matmul("mm2", x, self.train_param) + mm3 = forge.op.Matmul("mm3", x, self.train_param) # Layer 3 - mm4 = pybuda.op.Matmul("mm4", mm1, mm2) - mm5 = pybuda.op.Matmul("mm5", mm2, mm3) + mm4 = forge.op.Matmul("mm4", mm1, mm2) + mm5 = forge.op.Matmul("mm5", mm2, mm3) # Layer 4 - mm6 = pybuda.op.Matmul("mm6", mm1, mm4) - mm7 = pybuda.op.Matmul("mm7", mm5, mm3) + mm6 = forge.op.Matmul("mm6", mm1, mm4) + mm7 = forge.op.Matmul("mm7", mm5, mm3) # Layer 5 - mm8 = pybuda.op.Matmul("mm8", mm6, mm7) + mm8 = forge.op.Matmul("mm8", mm6, mm7) return mm8 -class TestPaddingPassI_4(pybuda.PyBudaModule): +class TestPaddingPassI_4(forge.ForgeModule): # Testing padding/unpadding pass. 
# This test is based on matmul operation and eltwise operations. @@ -160,7 +160,7 @@ class TestPaddingPassI_4(pybuda.PyBudaModule): def __init__(self, name: str, shape: list): super().__init__(name) self.shape = shape - self.train_param = pybuda.Parameter(*self.shape, requires_grad=True) + self.train_param = forge.Parameter(*self.shape, requires_grad=True) self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape))] self.set_parameter("train_param", torch.rand(*self.shape, requires_grad=True)) @@ -168,10 +168,10 @@ def __init__(self, name: str, shape: list): def forward(self, x): # Layer 2 - mm1 = pybuda.op.Matmul("mm1", x, self.train_param) - mm2 = pybuda.op.Matmul("mm2", x, self.train_param) + mm1 = forge.op.Matmul("mm1", x, self.train_param) + mm2 = forge.op.Matmul("mm2", x, self.train_param) - mm3 = pybuda.op.Matmul("mm3", mm1, mm2) + mm3 = forge.op.Matmul("mm3", mm1, mm2) return mm3 @@ -213,24 +213,24 @@ def set_environment(): # Environment variable that adds padding pass if TEST_I_DISABLE_PADDING_PASS_FLAG: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" if TEST_I_PADDING_PASS_ELEMENT_WISE_FLAG: - os.environ["PYBUDA_PADDING_PASS_ELEMENT_WISE"] = "1" + os.environ["FORGE_PADDING_PASS_ELEMENT_WISE"] = "1" if TEST_I_PADDING_PASS_MATMUL_FLAG: - os.environ["PYBUDA_PADDING_PASS_MATMUL"] = "1" + os.environ["FORGE_PADDING_PASS_MATMUL"] = "1" # Environment variable that allows printing a graph if TEST_I_VERIFY_ALL_FLAG: - os.environ["PYBUDA_FORCE_VERIFY_ALL"] = "1" + os.environ["FORGE_FORCE_VERIFY_ALL"] = "1" # Environment variable that allows printing a graph if TEST_I_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_I_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_I_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Include or not environment variables for debugging the stack if TEST_I_LOGGER_LEVEL_TRACE: @@ -240,11 +240,11 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_I_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_I_CHIP_PLACEMENT_SELF_CUT_TYPE_FLAG: - os.environ["PYBUDA_GRAPH_SOLVER_SELF_CUT_TYPE"] = "FastCut" + os.environ["FORGE_GRAPH_SOLVER_SELF_CUT_TYPE"] = "FastCut" if TEST_I_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" @@ -330,7 +330,7 @@ def test_padding_pass_i( original_shape, test_model ): - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: pytest.skip("Skip until #731 is solved") if test_kind.is_training(): @@ -344,7 +344,7 @@ def test_padding_pass_i( model = test_model[1](name=test_name, shape=original_shape) tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *model.inputs, diff --git a/pybuda/test/test_padding/other/test_padding_pass_k.py b/forge/test/test_padding/other/test_padding_pass_k.py similarity index 87% rename from pybuda/test/test_padding/other/test_padding_pass_k.py rename to 
forge/test/test_padding/other/test_padding_pass_k.py index 83c2dfe8a..d4764f1a9 100644 --- a/pybuda/test/test_padding/other/test_padding_pass_k.py +++ b/forge/test/test_padding/other/test_padding_pass_k.py @@ -9,18 +9,18 @@ import torch import pytest -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) -from pybuda._C.backend_api import BackendType +from forge._C.backend_api import BackendType -class TestPaddingPassK(pybuda.PyBudaModule): +class TestPaddingPassK(forge.ForgeModule): # Convolutional Network @@ -53,7 +53,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -67,7 +67,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -76,7 +76,7 @@ def maxpool2d(name, kernel, stride): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -106,29 +106,29 @@ def forward(self, x1, x2): conv2 = self.conv2(x2) maxpool1 = self.maxpool1(conv1) maxpool2 = self.maxpool2(conv2) - add1 = pybuda.op.Add("add1", maxpool1, maxpool2) + add1 = forge.op.Add("add1", maxpool1, maxpool2) # block - relu1 = pybuda.op.Relu("relu1", add1) + relu1 = forge.op.Relu("relu1", add1) conv3 = self.conv3(relu1) - relu2 = pybuda.op.Relu("relu2", conv3) + relu2 = forge.op.Relu("relu2", conv3) conv4 = self.conv4(relu2) - relu3 = pybuda.op.Relu("relu3", conv4) + relu3 = forge.op.Relu("relu3", conv4) conv5 = self.conv5(relu3) - add2 = pybuda.op.Add("add2", relu1, conv5) + add2 = forge.op.Add("add2", relu1, conv5) # tail W, Z, R, C = 1, 1, add2.shape[-3], add2.shape[-1] * add2.shape[-2] - resh = pybuda.op.Reshape("resh", add2, (W, Z, R, C)) - tr = pybuda.op.Transpose("tr", resh, -1, -2) - ra = pybuda.op.ReduceAvg("ra", tr, -2) + resh = forge.op.Reshape("resh", add2, (W, Z, R, C)) + tr = forge.op.Transpose("tr", resh, -1, -2) + ra = forge.op.ReduceAvg("ra", tr, -2) lin = self.linear(ra) - sm = pybuda.op.Softmax("sm", lin, dim=-1, stable=True) + sm = forge.op.Softmax("sm", lin, dim=-1, stable=True) return sm -class TestPaddingPassK_1(pybuda.PyBudaModule): +class TestPaddingPassK_1(forge.ForgeModule): def __init__( self, @@ -159,7 +159,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -173,7 +173,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, 
stride=stride, @@ -182,7 +182,7 @@ def maxpool2d(name, kernel, stride): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -203,30 +203,30 @@ def linear(name): def forward(self, x): # pre-head - exp = pybuda.op.Exp("exp", x) - relu = pybuda.op.Relu("relu", x) + exp = forge.op.Exp("exp", x) + relu = forge.op.Relu("relu", x) - mul = pybuda.op.Multiply("mul", exp, relu) - sub = pybuda.op.Multiply("sub", exp, relu) + mul = forge.op.Multiply("mul", exp, relu) + sub = forge.op.Multiply("sub", exp, relu) # head conv1 = self.conv1(mul) conv2 = self.conv2(sub) maxpool1 = self.maxpool1(conv1) maxpool2 = self.maxpool2(conv2) - add1 = pybuda.op.Add("add1", maxpool1, maxpool2) + add1 = forge.op.Add("add1", maxpool1, maxpool2) # tail W, Z, R, C = 1, 1, add1.shape[-3], add1.shape[-1] * add1.shape[-2] - resh = pybuda.op.Reshape("resh", add1, (W, Z, R, C)) - tr = pybuda.op.Transpose("tr", resh, -1, -2) - ra = pybuda.op.ReduceAvg("ra", tr, -2) + resh = forge.op.Reshape("resh", add1, (W, Z, R, C)) + tr = forge.op.Transpose("tr", resh, -1, -2) + ra = forge.op.ReduceAvg("ra", tr, -2) lin = self.linear(ra) - sm = pybuda.op.Softmax("sm", lin, dim=-1, stable=True) + sm = forge.op.Softmax("sm", lin, dim=-1, stable=True) return sm -class TestPaddingPassK_2(pybuda.PyBudaModule): +class TestPaddingPassK_2(forge.ForgeModule): def __init__( self, @@ -257,7 +257,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -271,7 +271,7 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): # Auxiliary function that creates maxpool layer def maxpool2d(name, kernel, stride): - return pybuda.op.nn.MaxPool2dModule( + return forge.op.nn.MaxPool2dModule( name=f"{self.name}.{name}", kernel_size=kernel, stride=stride, @@ -280,7 +280,7 @@ def maxpool2d(name, kernel, stride): # Auxiliary function that creates FC layer(Linear) layer # We use this layer at the end of the CNN before Softmax def linear(name): - return pybuda.op.nn.Linear( + return forge.op.nn.Linear( name=f"{self.name}.{name}", in_features=self.in_features, out_features=self.out_features, @@ -302,15 +302,15 @@ def forward(self, x): # tail W, Z, R, C = 1, 1, maxpool1.shape[-3], maxpool1.shape[-1] * maxpool1.shape[-2] - resh = pybuda.op.Reshape("resh", maxpool1, (W, Z, R, C)) - tr = pybuda.op.Transpose("tr", resh, -1, -2) - ra = pybuda.op.ReduceAvg("ra", tr, -2) + resh = forge.op.Reshape("resh", maxpool1, (W, Z, R, C)) + tr = forge.op.Transpose("tr", resh, -1, -2) + ra = forge.op.ReduceAvg("ra", tr, -2) lin = self.linear(ra) - sm = pybuda.op.Softmax("sm", lin, dim=-1, stable=True) + sm = forge.op.Softmax("sm", lin, dim=-1, stable=True) return sm -class TestPaddingPassK_3(pybuda.PyBudaModule): +class TestPaddingPassK_3(forge.ForgeModule): def __init__( self, @@ -341,7 +341,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is 
not None else self.in_channels, out_channels=self.out_channels, @@ -358,12 +358,12 @@ def conv2d(name, kernel, stride, padding=None, in_channels=None): def forward(self, x): - exp = pybuda.op.Exp("exp", x) + exp = forge.op.Exp("exp", x) conv = self.conv1(exp) return conv -class TestPaddingPassK_4(pybuda.PyBudaModule): +class TestPaddingPassK_4(forge.ForgeModule): def __init__( self, @@ -394,7 +394,7 @@ def __init__( # Auxiliary function that creates convolutional layer def conv2d(name, kernel, stride, padding=None, in_channels=None): - return pybuda.op.nn.Conv2dModule( + return forge.op.nn.Conv2dModule( name=f"{self.name}.{name}", in_channels=in_channels if in_channels is not None else self.in_channels, out_channels=self.out_channels, @@ -454,17 +454,17 @@ def set_environment(): # Environment variable that adds padding pass if TEST_K_DISABLE_PADDING_PASS_FLAG: - os.environ["PYBUDA_DISABLE_PADDING_PASS"] = "1" + os.environ["FORGE_DISABLE_PADDING_PASS"] = "1" # Environment variable that allows printing a graph if TEST_K_PRINT_GRAPH_VIZ_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "ALL" if TEST_K_PRINT_GRAPH_AT_FLAG: - os.environ["PYBUDA_PRINT_GRAPH_AT"] = "ALL" + os.environ["FORGE_PRINT_GRAPH_AT"] = "ALL" # Environment variable that allows fracturing if TEST_K_FRACTURING_FLAG: - os.environ["PYBUDA_FRACTURIZATION_DISABLE"] = "1" + os.environ["FORGE_FRACTURIZATION_DISABLE"] = "1" # Include or not environment variables for debugging the stack if TEST_K_LOGGER_LEVEL_TRACE: @@ -474,9 +474,9 @@ def set_environment(): # Include or not environment variables for debugging chip placement module if TEST_K_CHIP_PLACEMENT_LEGALIZER_DETAILED_FLAG: - os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" + os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" if TEST_K_CHIP_PLACEMENT_LEGALIZER_NODE_NAME: - os.environ["PYBUDA_LEGALIZER_DEBUG_NODE_NAME"] = "" + os.environ["FORGE_LEGALIZER_DEBUG_NODE_NAME"] = "" @@ -629,7 +629,7 @@ def test_padding_pass_k( tt0 = TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch) tt0.place_module(model) - pybuda_compile( + forge_compile( tt0, model.name, *inputs, diff --git a/pybuda/test/test_padding/sanity/__init__.py b/forge/test/test_padding/sanity/__init__.py similarity index 100% rename from pybuda/test/test_padding/sanity/__init__.py rename to forge/test/test_padding/sanity/__init__.py diff --git a/pybuda/test/test_padding/sanity/test_padding.py b/forge/test/test_padding/sanity/test_padding.py similarity index 84% rename from pybuda/test/test_padding/sanity/test_padding.py rename to forge/test/test_padding/sanity/test_padding.py index e31bac401..285c73b4b 100644 --- a/pybuda/test/test_padding/sanity/test_padding.py +++ b/forge/test/test_padding/sanity/test_padding.py @@ -12,29 +12,29 @@ import time import os -import pybuda -import pybuda.op -from pybuda import ( - PyBudaModule, +import forge +import forge.op +from forge import ( + ForgeModule, TTDevice, BackendDevice, BackendType, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, CompileDepth, VerifyConfig, PyTorchModule, ) -from pybuda.utils import align_up_tile -from pybuda.pybudaglobal import TILE_DIM +from forge.utils import align_up_tile +from forge.forgeglobal import TILE_DIM -from pybuda.verify import TestKind, verify_module +from forge.verify import TestKind, verify_module -class BudaPadTest1(PyBudaModule): +class BudaPadTest1(ForgeModule): """ Test wrapper for padding pad """ @@ -46,9 +46,9 @@ def __init__(self, 
name, paddings, value): self.value = value def forward(self, act): - return pybuda.op.BudaPad("buda_pad", act, self.paddings, self.value) + return forge.op.BudaPad("buda_pad", act, self.paddings, self.value) -class BudaPadTest2(PyBudaModule): +class BudaPadTest2(ForgeModule): """ Test wrapper for padding pad """ @@ -60,9 +60,9 @@ def __init__(self, name, paddings, value): self.value = value def forward(self, act1, act2): - pad1 = pybuda.op.BudaPad("buda_pad1", act1, self.paddings, self.value) - pad2 = pybuda.op.BudaPad("buda_pad2", act2, self.paddings, self.value) - multiply = pybuda.op.Multiply("multiply", pad1, pad2) + pad1 = forge.op.BudaPad("buda_pad1", act1, self.paddings, self.value) + pad2 = forge.op.BudaPad("buda_pad2", act2, self.paddings, self.value) + multiply = forge.op.Multiply("multiply", pad1, pad2) return multiply @pytest.mark.xfail(reason="Unsupported TM op pad! Found on op buda_pad, type nop, input 0. Backend should be updated.") @@ -131,7 +131,7 @@ def test_buda_pad2( ) ) -class BudaUnpadTest1(PyBudaModule): +class BudaUnpadTest1(ForgeModule): """ Test wrapper for padding unpad """ @@ -143,9 +143,9 @@ def __init__(self, name, original_length, paddings): self.original_length = original_length def forward(self, act): - return pybuda.op.BudaUnpad("buda_unpad", act, self.original_length, self.paddings) + return forge.op.BudaUnpad("buda_unpad", act, self.original_length, self.paddings) -class BudaUnpadTest2(PyBudaModule): +class BudaUnpadTest2(ForgeModule): """ Test wrapper for padding unpad """ @@ -157,9 +157,9 @@ def __init__(self, name, original_length, paddings): self.original_length = original_length def forward(self, act1, act2): - unpad1 = pybuda.op.BudaUnpad("buda_unpad1", act1, self.original_length, self.paddings) - unpad2 = pybuda.op.BudaUnpad("buda_unpad2", act2, self.original_length, self.paddings) - multiply = pybuda.op.Multiply("multiply", unpad1, unpad2) + unpad1 = forge.op.BudaUnpad("buda_unpad1", act1, self.original_length, self.paddings) + unpad2 = forge.op.BudaUnpad("buda_unpad2", act2, self.original_length, self.paddings) + multiply = forge.op.Multiply("multiply", unpad1, unpad2) return multiply @pytest.mark.parametrize("original_shape", ((1, 200, 300), (3, 200, 300), (1, 5, 100, 200)), ids=[f"shape={'x'.join([str(dim) for dim in shape])}" for shape in ((1, 200, 300), (3, 200, 300), (1, 5, 100, 200))]) @@ -232,7 +232,7 @@ def test_buda_unpad2( ) ) -class PaddingTest1(PyBudaModule): +class PaddingTest1(ForgeModule): """ Test wrapper for padding pad and unpad This test contains a both padding pad and unpad @@ -247,18 +247,18 @@ def __init__(self, name, paddings, value, original_length): def forward(self, x1, x2): # pad inputs, x1 and x2 - pad_x1 = pybuda.op.BudaPad("buda_pad1", x1, self.paddings, self.value) - pad_x2 = pybuda.op.BudaPad("buda_pad2", x2, self.paddings, self.value) + pad_x1 = forge.op.BudaPad("buda_pad1", x1, self.paddings, self.value) + pad_x2 = forge.op.BudaPad("buda_pad2", x2, self.paddings, self.value) # multiply padded inputs - multiply = pybuda.op.Multiply("multiply", pad_x1, pad_x2) + multiply = forge.op.Multiply("multiply", pad_x1, pad_x2) # unpad the result of the multiplication - unpad_multiply = pybuda.op.BudaUnpad("buda_unpad", multiply, self.original_length, self.paddings) + unpad_multiply = forge.op.BudaUnpad("buda_unpad", multiply, self.original_length, self.paddings) return unpad_multiply -class PaddingTest2(PyBudaModule): +class PaddingTest2(ForgeModule): """ Test wrapper for padding pad and unpad This test contains a both 
padding pad and unpad and matmul operation @@ -273,25 +273,25 @@ def __init__(self, name, paddings, value, original_length): def forward(self, x1, x2): # pad inputs, x1 and x2 - pad1 = pybuda.op.BudaPad("buda_pad1", x1, self.paddings, self.value) - pad2 = pybuda.op.BudaPad("buda_pad2", x2, self.paddings, self.value) + pad1 = forge.op.BudaPad("buda_pad1", x1, self.paddings, self.value) + pad2 = forge.op.BudaPad("buda_pad2", x2, self.paddings, self.value) # add padded inputs - add = pybuda.op.Add("add", pad1, pad2) + add = forge.op.Add("add", pad1, pad2) # unpad the result of the addition - unpad_add = pybuda.op.BudaUnpad("unpad_add", add, self.original_length, self.paddings) + unpad_add = forge.op.BudaUnpad("unpad_add", add, self.original_length, self.paddings) # pad again the result of the addition - pad3 = pybuda.op.BudaPad("buda_pad3", unpad_add, self.paddings, self.value) + pad3 = forge.op.BudaPad("buda_pad3", unpad_add, self.paddings, self.value) - exp = pybuda.op.Exp("exp", x1) + exp = forge.op.Exp("exp", x1) # matrix multiplication between padded and unpadded inputs - mm = pybuda.op.Matmul("matmul", pad3, exp) + mm = forge.op.Matmul("matmul", pad3, exp) # unpad the result of the matrix multiplication - unpad_mm = pybuda.op.BudaUnpad("unpad_mm", mm, self.original_length, self.paddings) + unpad_mm = forge.op.BudaUnpad("unpad_mm", mm, self.original_length, self.paddings) return unpad_mm diff --git a/pybuda/test/test_padding/tms/__init__.py b/forge/test/test_padding/tms/__init__.py similarity index 100% rename from pybuda/test/test_padding/tms/__init__.py rename to forge/test/test_padding/tms/__init__.py diff --git a/pybuda/test/test_padding/tms/test_padding_tms.py b/forge/test/test_padding/tms/test_padding_tms.py similarity index 76% rename from pybuda/test/test_padding/tms/test_padding_tms.py rename to forge/test/test_padding/tms/test_padding_tms.py index 4ffbbc18d..9db9b7f21 100644 --- a/pybuda/test/test_padding/tms/test_padding_tms.py +++ b/forge/test/test_padding/tms/test_padding_tms.py @@ -12,28 +12,28 @@ import time import os -import pybuda -import pybuda.op -from pybuda import ( - PyBudaModule, +import forge +import forge.op +from forge import ( + ForgeModule, TTDevice, BackendDevice, BackendType, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, CompileDepth, VerifyConfig, PyTorchModule, ) -from pybuda.utils import align_up_tile -from pybuda.pybudaglobal import TILE_DIM +from forge.utils import align_up_tile +from forge.forgeglobal import TILE_DIM -from pybuda.verify import TestKind, verify_module +from forge.verify import TestKind, verify_module -class BudaPadTMsTest1(PyBudaModule): +class BudaPadTMsTest1(ForgeModule): def __init__(self, name, paddings, value, vfactor, hfactor): super().__init__(name) @@ -49,14 +49,14 @@ def __init__(self, name, paddings, value, vfactor, hfactor): def forward(self, act): - vsl = pybuda.op.VSlice("vsl", act, self.vfactor) - hsl = pybuda.op.HSlice("hsl", vsl, self.hfactor) - tr = pybuda.op.Transpose("tr", hsl, self.dim1, self.dim2) - pad = pybuda.op.BudaPad("buda_pad", tr, self.paddings, self.value) + vsl = forge.op.VSlice("vsl", act, self.vfactor) + hsl = forge.op.HSlice("hsl", vsl, self.hfactor) + tr = forge.op.Transpose("tr", hsl, self.dim1, self.dim2) + pad = forge.op.BudaPad("buda_pad", tr, self.paddings, self.value) return pad -class BudaPadTMsTest2(PyBudaModule): +class BudaPadTMsTest2(ForgeModule): def __init__(self, name, paddings, value, vfactor, hfactor): super().__init__(name) @@ -73,19 +73,19 @@ def __init__(self, name, 
paddings, value, vfactor, hfactor): def forward(self, act1, act2): # Flow of the first input - vsl1 = pybuda.op.VSlice("vsl1", act1, self.vfactor) - hsl1 = pybuda.op.HSlice("hsl1", vsl1, self.hfactor) - pad1 = pybuda.op.BudaPad("buda_pad1", hsl1, self.paddings, self.value) + vsl1 = forge.op.VSlice("vsl1", act1, self.vfactor) + hsl1 = forge.op.HSlice("hsl1", vsl1, self.hfactor) + pad1 = forge.op.BudaPad("buda_pad1", hsl1, self.paddings, self.value) # Flow of the second input - hsl2 = pybuda.op.HSlice("hsl2", act2, self.hfactor) - vsl2 = pybuda.op.VSlice("vsl2", hsl2, self.vfactor) - pad2 = pybuda.op.BudaPad("buda_pad2", vsl2, self.paddings, self.value) + hsl2 = forge.op.HSlice("hsl2", act2, self.hfactor) + vsl2 = forge.op.VSlice("vsl2", hsl2, self.vfactor) + pad2 = forge.op.BudaPad("buda_pad2", vsl2, self.paddings, self.value) # Merge the two flows - mul = pybuda.op.Multiply("mul", pad1, pad2) - tr = pybuda.op.Transpose("tr", mul, self.dim1, self.dim2) - vst = pybuda.op.VStack("vst", tr, self.vfactor) + mul = forge.op.Multiply("mul", pad1, pad2) + tr = forge.op.Transpose("tr", mul, self.dim1, self.dim2) + vst = forge.op.VStack("vst", tr, self.vfactor) return vst @@ -179,7 +179,7 @@ def test_buda_pad_tms2( ) ) -class BudaUnpadTMsTest1(PyBudaModule): +class BudaUnpadTMsTest1(ForgeModule): def __init__(self, name, paddings, original_length, vfactor, hfactor): super().__init__(name) @@ -192,15 +192,15 @@ def __init__(self, name, paddings, original_length, vfactor, hfactor): def forward(self, act): - unpad = pybuda.op.BudaUnpad("buda_unpad", act, self.original_length, self.paddings) - vsl = pybuda.op.VSlice("vsl", unpad, self.vfactor) - hsl = pybuda.op.HSlice("hsl", vsl, self.hfactor) - vst = pybuda.op.VStack("vst", hsl, self.vfactor) - hst = pybuda.op.HStack("hst", vst, self.hfactor) + unpad = forge.op.BudaUnpad("buda_unpad", act, self.original_length, self.paddings) + vsl = forge.op.VSlice("vsl", unpad, self.vfactor) + hsl = forge.op.HSlice("hsl", vsl, self.hfactor) + vst = forge.op.VStack("vst", hsl, self.vfactor) + hst = forge.op.HStack("hst", vst, self.hfactor) return hst -class BudaUnpadTMsTest2(PyBudaModule): +class BudaUnpadTMsTest2(ForgeModule): def __init__(self, name, paddings, original_length, vfactor, hfactor): super().__init__(name) @@ -217,19 +217,19 @@ def __init__(self, name, paddings, original_length, vfactor, hfactor): def forward(self, act1, act2): # Flow of the first input - unpad1 = pybuda.op.BudaUnpad("buda_unpad1", act1, self.original_length, self.paddings) - vsl1 = pybuda.op.VSlice("vsl1", unpad1, self.vfactor) - tr1 = pybuda.op.Transpose("tr1", vsl1, self.dim1, self.dim2) + unpad1 = forge.op.BudaUnpad("buda_unpad1", act1, self.original_length, self.paddings) + vsl1 = forge.op.VSlice("vsl1", unpad1, self.vfactor) + tr1 = forge.op.Transpose("tr1", vsl1, self.dim1, self.dim2) # Flow of the second input - unpad2 = pybuda.op.BudaUnpad("buda_unpad2", act2, self.original_length, self.paddings) - vsl2 = pybuda.op.VSlice("vsl2", unpad2, self.vfactor) - tr2 = pybuda.op.Transpose("tr2", vsl2, self.dim1, self.dim2) + unpad2 = forge.op.BudaUnpad("buda_unpad2", act2, self.original_length, self.paddings) + vsl2 = forge.op.VSlice("vsl2", unpad2, self.vfactor) + tr2 = forge.op.Transpose("tr2", vsl2, self.dim1, self.dim2) # Merge the two flows - add = pybuda.op.Add("add", tr1, tr2) - tr3 = pybuda.op.Transpose("tr3", add, self.dim1, self.dim2) - vst = pybuda.op.VStack("vst", tr3) + add = forge.op.Add("add", tr1, tr2) + tr3 = forge.op.Transpose("tr3", add, self.dim1, self.dim2) + vst = 
forge.op.VStack("vst", tr3) return vst @@ -333,7 +333,7 @@ def test_buda_unpad_tms2( ) ) -class PaddingTMsTest1(PyBudaModule): +class PaddingTMsTest1(ForgeModule): def __init__(self, name, paddings, value, original_length, vfactor, hfactor): super().__init__(name) @@ -352,37 +352,37 @@ def forward(self, act1, act2): # Flow of the first input # Padding - pad1 = pybuda.op.BudaPad("pad1", act1, self.paddings, self.value) - exp = pybuda.op.Exp("exp", pad1) - unpad1 = pybuda.op.BudaUnpad("unpad1", exp, self.original_length, self.paddings) + pad1 = forge.op.BudaPad("pad1", act1, self.paddings, self.value) + exp = forge.op.Exp("exp", pad1) + unpad1 = forge.op.BudaUnpad("unpad1", exp, self.original_length, self.paddings) # TMs after padding - vsl1 = pybuda.op.VSlice("vsl1", unpad1, self.vfactor) + vsl1 = forge.op.VSlice("vsl1", unpad1, self.vfactor) # Padding before joining the two flows - pad3 = pybuda.op.BudaPad("pad3", vsl1, self.paddings, self.value) + pad3 = forge.op.BudaPad("pad3", vsl1, self.paddings, self.value) # Flow of the second input # Padding - pad2 = pybuda.op.BudaPad("pad2", act2, self.paddings, self.value) - relu = pybuda.op.Relu("relu", pad2) - unpad2 = pybuda.op.BudaUnpad("unpad2", relu, self.original_length, self.paddings) + pad2 = forge.op.BudaPad("pad2", act2, self.paddings, self.value) + relu = forge.op.Relu("relu", pad2) + unpad2 = forge.op.BudaUnpad("unpad2", relu, self.original_length, self.paddings) # TMs after padding - vsl2 = pybuda.op.VSlice("vsl2", unpad2, self.vfactor) + vsl2 = forge.op.VSlice("vsl2", unpad2, self.vfactor) # Padding before joining the two flows - pad4 = pybuda.op.BudaPad("pad4", vsl2, self.paddings, self.value) + pad4 = forge.op.BudaPad("pad4", vsl2, self.paddings, self.value) original_length_v = list(self.original_length) original_length_v[-2] //= self.vfactor # Merge the two flows - add = pybuda.op.Add("add", pad3, pad4) + add = forge.op.Add("add", pad3, pad4) # Unpad after joining the two flows - unpad3 = pybuda.op.BudaUnpad("unpad3", add, original_length_v, self.paddings) + unpad3 = forge.op.BudaUnpad("unpad3", add, original_length_v, self.paddings) # TMs after unpadding - vst = pybuda.op.VStack("vst", unpad3, self.vfactor) + vst = forge.op.VStack("vst", unpad3, self.vfactor) return vst -class PaddingTMsTest2(PyBudaModule): +class PaddingTMsTest2(ForgeModule): def __init__(self, name, paddings, value, original_length, vfactor, hfactor): super().__init__(name) @@ -401,40 +401,40 @@ def forward(self, act1, act2): # Flow of the first input # TMs before padding - vsl1 = pybuda.op.VSlice("vsl1", act1, self.vfactor) - tr1 = pybuda.op.Transpose("tr1", vsl1, self.dim1, self.dim2) + vsl1 = forge.op.VSlice("vsl1", act1, self.vfactor) + tr1 = forge.op.Transpose("tr1", vsl1, self.dim1, self.dim2) original_length_flow1 = list(self.original_length) original_length_flow1[-1] //= self.vfactor # Padding - pad1 = pybuda.op.BudaPad("pad1", tr1, self.paddings, self.value) - exp = pybuda.op.Exp("exp", pad1) - unpad1 = pybuda.op.BudaUnpad("unpad1", exp, original_length_flow1, self.paddings) + pad1 = forge.op.BudaPad("pad1", tr1, self.paddings, self.value) + exp = forge.op.Exp("exp", pad1) + unpad1 = forge.op.BudaUnpad("unpad1", exp, original_length_flow1, self.paddings) # TMs after padding - tr2 = pybuda.op.Transpose("tr2", unpad1, self.dim1, self.dim2) - vst1 = pybuda.op.VStack("vst1", tr2, self.vfactor) - vsl3 = pybuda.op.VSlice("vsl3", vst1, self.vfactor) - tr3 = pybuda.op.Transpose("tr3", vsl3, self.dim1, self.dim2) + tr2 = forge.op.Transpose("tr2", unpad1, 
self.dim1, self.dim2) + vst1 = forge.op.VStack("vst1", tr2, self.vfactor) + vsl3 = forge.op.VSlice("vsl3", vst1, self.vfactor) + tr3 = forge.op.Transpose("tr3", vsl3, self.dim1, self.dim2) # Flow of the second input # TMs before padding - hst1 = pybuda.op.HStack("hst1", act2, self.hfactor) - vsl2 = pybuda.op.VSlice("vsl2", hst1, self.vfactor) + hst1 = forge.op.HStack("hst1", act2, self.hfactor) + vsl2 = forge.op.VSlice("vsl2", hst1, self.vfactor) original_length_flow2 = list(self.original_length) original_length_flow2[-2] //= self.vfactor original_length_flow2[-1] *= self.hfactor # Padding - pad2 = pybuda.op.BudaPad("pad2", vsl2, self.paddings, self.value) - relu = pybuda.op.Relu("relu", pad2) - unpad2 = pybuda.op.BudaUnpad("unpad2", relu, original_length_flow2, self.paddings) + pad2 = forge.op.BudaPad("pad2", vsl2, self.paddings, self.value) + relu = forge.op.Relu("relu", pad2) + unpad2 = forge.op.BudaUnpad("unpad2", relu, original_length_flow2, self.paddings) # TMs after padding - vst2 = pybuda.op.VStack("vst2", unpad2, self.vfactor) - hsl1 = pybuda.op.HSlice("hsl1", vst2, self.hfactor) - vsl4 = pybuda.op.VSlice("vsl4", hsl1, self.vfactor) - tr4 = pybuda.op.Transpose("tr4", vsl4, self.dim1, self.dim2) + vst2 = forge.op.VStack("vst2", unpad2, self.vfactor) + hsl1 = forge.op.HSlice("hsl1", vst2, self.hfactor) + vsl4 = forge.op.VSlice("vsl4", hsl1, self.vfactor) + tr4 = forge.op.Transpose("tr4", vsl4, self.dim1, self.dim2) # Merge the two flows @@ -442,14 +442,14 @@ def forward(self, act1, act2): original_length_flow_merged[-1] //= self.vfactor # Padding before joining the two flows - pad3 = pybuda.op.BudaPad("pad3", tr3, self.paddings, self.value) - pad4 = pybuda.op.BudaPad("pad4", tr4, self.paddings, self.value) - add = pybuda.op.Add("add", pad3, pad4) + pad3 = forge.op.BudaPad("pad3", tr3, self.paddings, self.value) + pad4 = forge.op.BudaPad("pad4", tr4, self.paddings, self.value) + add = forge.op.Add("add", pad3, pad4) # Unpad after joining the two flows - unpad3 = pybuda.op.BudaUnpad("unpad3", add, original_length_flow_merged, self.paddings) + unpad3 = forge.op.BudaUnpad("unpad3", add, original_length_flow_merged, self.paddings) # TMs after unpadding - hst2 = pybuda.op.HStack("hst2", unpad3, self.vfactor) - tr5 = pybuda.op.Transpose("tr5", hst2, self.dim1, self.dim2) + hst2 = forge.op.HStack("hst2", unpad3, self.vfactor) + tr5 = forge.op.Transpose("tr5", hst2, self.dim1, self.dim2) return tr5 diff --git a/forge/test/test_perf_simulator.py b/forge/test/test_perf_simulator.py new file mode 100644 index 000000000..24ff95ab3 --- /dev/null +++ b/forge/test/test_perf_simulator.py @@ -0,0 +1,132 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +import os + +import forge +from forge.verify import verify_module, VerifyConfig, TestKind +from forge import PyTorchModule +from transformers import BertModel, BertConfig +from test.utils import download_model + + +def test_bert_encoder(): + forge.config._get_global_compiler_config().compile_depth = forge.config.CompileDepth.GENERATE_NETLIST + + model_name = "bert-base-uncased" + seq_len = 128 + + config = download_model(BertConfig.from_pretrained, model_name) + config.num_hidden_layers = 1 + + model = BertModel(config=config) + encoder = PyTorchModule("bert_encoder", model.encoder) + microbatch = 1 + + os.environ["FORGE_PERF_SIMULATOR"] = "1" + try: + verify_module(encoder, [(microbatch, seq_len, config.hidden_size), (microbatch, 1, seq_len, seq_len)], + VerifyConfig(test_kind=TestKind.INFERENCE, 
skip_shutdown=True, fp32_fallback=forge.DataFormat.Bfp8_b)) + + perf_results = forge.forgeglobal.get_devices()[0]._compile_output.perf_model_results + print(perf_results) + + finally: + del os.environ["FORGE_PERF_SIMULATOR"] + +class LayernormFork(forge.ForgeModule): + """ + Module with a layernorm, and some matmuls + """ + + shape = (1, 1, 128, 512) + + def __init__(self, name): + super().__init__(name) + #self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + #self.weights2 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.ln_weights = forge.Parameter(1, self.shape[-1], requires_grad=True) + self.ln_bias = forge.Parameter(1, self.shape[-1], requires_grad=True) + + def forward(self, act1): + #a1 = forge.op.Matmul("matmul1", act1, self.weights1) + a1 = act1 + a2 = forge.op.Layernorm("layernorm", a1, self.ln_weights, self.ln_bias) + #a3 = forge.op.Matmul("matmul2", a2, self.weights2) + a3 = a2 + return a3 + +def test_layernorm_fork(test_device): + #forge.config._get_global_compiler_config().compile_depth = forge.config.CompileDepth.GENERATE_NETLIST + + microbatch = 64 + + os.environ["FORGE_PERF_SIMULATOR"] = "1" + try: + forge.config.set_configuration_options(performance_trace=forge.PerfTraceLevel.VERBOSE) + verify_module(LayernormFork("layernorm_fork"), [(microbatch, LayernormFork.shape[-2], LayernormFork.shape[-1])], + VerifyConfig( + test_kind=TestKind.INFERENCE, + devtype=test_device.devtype, + arch=test_device.arch, + skip_shutdown=True, + fp32_fallback=forge.DataFormat.Bfp8_b)) + + perf_results = forge.forgeglobal.get_devices()[0]._compile_output.perf_model_results + print(perf_results) + + finally: + del os.environ["FORGE_PERF_SIMULATOR"] + +class MHALikeFork(forge.ForgeModule): + """ + Module with a layernorm, and some matmuls + """ + + shape = (1, 1, 128, 768) + + def __init__(self, name): + super().__init__(name) + self.weights1 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights2 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights3 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights4 = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.bias = forge.Parameter(1, self.shape[-1], requires_grad=True) + + def forward(self, act1, act2): + # don't for out of a queue, since that doesn't need buffering + in1 = act1 - act2 + + # fork + a1 = forge.op.Matmul("matmul1", in1, self.weights1) + a2 = forge.op.Matmul("matmul2", in1, self.weights2) + a3 = forge.op.Matmul("matmul3", in1, self.weights3) + + a23 = a2+a3 + a23 = forge.op.Matmul("matmul_a23_1", a23, self.weights4) + a23 = forge.op.Matmul("matmul_a23_2", a23, self.weights4) + a23 = forge.op.Matmul("matmul_a23_3", a23, self.weights4) + + return a1 + a23 # join + +def test_mha_fork(test_device): + + microbatch = 64 + seq_len = MHALikeFork.shape[-2] + hidden_size = MHALikeFork.shape[-1] + os.environ["FORGE_PERF_SIMULATOR"] = "1" + try: + forge.config.set_configuration_options(performance_trace=forge.PerfTraceLevel.VERBOSE) + verify_module(MHALikeFork("mha_like_fork"), [(microbatch, seq_len, hidden_size), (microbatch, seq_len, hidden_size)], + VerifyConfig( + test_kind=TestKind.INFERENCE, + devtype=test_device.devtype, + arch=test_device.arch, + skip_shutdown=True, + fp32_fallback=forge.DataFormat.Bfp8_b)) + + perf_results = forge.forgeglobal.get_devices()[0]._compile_output.perf_model_results + print(perf_results) + + finally: + del 
os.environ["FORGE_PERF_SIMULATOR"] diff --git a/pybuda/test/test_placer_apis.py b/forge/test/test_placer_apis.py similarity index 69% rename from pybuda/test/test_placer_apis.py rename to forge/test/test_placer_apis.py index 2d5b914c2..aee16d2c9 100644 --- a/pybuda/test/test_placer_apis.py +++ b/forge/test/test_placer_apis.py @@ -6,27 +6,27 @@ import torch import os -import pybuda -import pybuda.op -from pybuda import ( - PyBudaModule, +import forge +import forge.op +from forge import ( + ForgeModule, TTDevice, BackendType, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, CompileDepth, ) -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice from .common import compile, device, ModuleBuilder, run -import pybuda.verify as verify -import pybuda.query as query +import forge.verify as verify +import forge.query as query verify_cfg = VerifyConfig(run_golden=True) # Run backend golden check on all tests in here -class BudaTest(PyBudaModule): +class BudaTest(ForgeModule): """ Simple buda module for basic testing """ @@ -35,17 +35,17 @@ class BudaTest(PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = pybuda.op.Matmul("matmul2", act2, self.weights2) - m1e = pybuda.op.Exp("exp", m1) - return pybuda.op.Add("add", m1e, m2) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act2, self.weights2) + m1e = forge.op.Exp("exp", m1) + return forge.op.Add("add", m1e, m2) -class ForkJoinTest(PyBudaModule): +class ForkJoinTest(ForgeModule): """ Simple buda module for basic testing """ @@ -54,23 +54,23 @@ class ForkJoinTest(PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act): - nop = pybuda.op.Buffer("nop", act) - m1 = pybuda.op.Matmul("matmul1", nop, self.weights1) - m2 = pybuda.op.Matmul("matmul2", nop, self.weights2) - m1_nop = pybuda.op.Buffer("matmul1_nop", m1) - m2_nop = pybuda.op.Buffer("matmul2_nop", m2) - return pybuda.op.Add("add", m1_nop, m2_nop) + nop = forge.op.Buffer("nop", act) + m1 = forge.op.Matmul("matmul1", nop, self.weights1) + m2 = forge.op.Matmul("matmul2", nop, self.weights2) + m1_nop = forge.op.Buffer("matmul1_nop", m1) + m2_nop = forge.op.Buffer("matmul2_nop", m2) + return forge.op.Add("add", m1_nop, m2_nop) def test_epoch_break(): training = False mod = BudaTest("test_module") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -88,7 +88,7 @@ def test_epoch_break(): compiler_cfg.place_on_new_epoch("exp") compiler_cfg.place_on_new_epoch("add") - compile_result = pybuda_compile(tt0, "sanity", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) + compile_result = forge_compile(tt0, "sanity", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) 
placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] epochs = set() @@ -101,7 +101,7 @@ def test_chip_break(): training = False mod = BudaTest("test_module") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer, chip_ids=[0, 1, 2]) @@ -117,10 +117,10 @@ def test_chip_break(): compiler_cfg = CompilerConfig(enable_training=training, compile_depth=CompileDepth.BALANCER_PASS) compiler_cfg.place_on_new_chip("exp") compiler_cfg.place_on_new_chip("add") - # tenstorrent/pybuda#480 + # tenstorrent/forge#480 compiler_cfg.use_interactive_placer = False - compile_result = pybuda_compile(tt0, "sanity", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) + compile_result = forge_compile(tt0, "sanity", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] assert placer_solution.epoch_id("exp") - placer_solution.epoch_id("matmul1") == 1 @@ -128,9 +128,9 @@ def test_chip_break(): def test_override_chip_id(test_device): def matmul_buffer_matmul(act, *, ff1_weights, ff2_weights): - op0 = pybuda.op.Matmul(f"ff1", act, ff1_weights) - op1 = pybuda.op.Buffer(f"gelu", op0) - op2 = pybuda.op.Matmul(f"ff2", op1, ff2_weights) + op0 = forge.op.Matmul(f"ff1", act, ff1_weights) + op1 = forge.op.Buffer(f"gelu", op0) + op2 = forge.op.Matmul(f"ff2", op1, ff2_weights) return op2 # interactive_placer multi-chip is only enabled for B0 @@ -142,17 +142,17 @@ def matmul_buffer_matmul(act, *, ff1_weights, ff2_weights): act1 = Tensor.create_from_torch(torch.rand(shape, requires_grad=True)) module = ModuleBuilder( matmul_buffer_matmul, - ff1_weights=pybuda.Tensor.create_from_torch(torch.rand(shape)), - ff2_weights=pybuda.Tensor.create_from_torch(torch.rand(shape)) + ff1_weights=forge.Tensor.create_from_torch(torch.rand(shape)), + ff2_weights=forge.Tensor.create_from_torch(torch.rand(shape)) ) tt0.place_module(module) # apply overrides - pybuda.config.override_op_placement("ff1", chip_id=3) - pybuda.config.override_op_placement("gelu", chip_id=1) - pybuda.config.override_op_placement("ff2", chip_id=2) + forge.config.override_op_placement("ff1", chip_id=3) + forge.config.override_op_placement("gelu", chip_id=1) + forge.config.override_op_placement("ff2", chip_id=2) - compile_result = pybuda_compile(tt0, "test_override_chip_id", act1) + compile_result = forge_compile(tt0, "test_override_chip_id", act1) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] assert placer_solution.name_to_op_placement["ff1"].chip_id == 3 @@ -166,7 +166,7 @@ def test_epoch_break_fork_join(): training = False mod = ForkJoinTest("fork_join_test") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -185,7 +185,7 @@ def test_epoch_break_fork_join(): # so we should expect ["nop", "matmul2"], ["matmul2_nop", "matmul1", "matmul1_nop", "add"] compiler_cfg.place_on_new_epoch(["matmul1_nop", "matmul2_nop"]) - compile_result = pybuda_compile(tt0, "sanity", act1, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) + compile_result = forge_compile(tt0, "sanity", act1, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] #for name, 
op_placement in placer_solution.name_to_op_placement.items(): @@ -200,9 +200,9 @@ def test_epoch_break_fork_join(): def test_change_start_grid_location(test_kind): def matmul_buffer_matmul(act, *, ff1_weights, ff2_weights): - op0 = pybuda.op.Matmul(f"ff1", act, ff1_weights) - op1 = pybuda.op.Buffer(f"gelu", op0) - op2 = pybuda.op.Matmul(f"ff2", op1, ff2_weights) + op0 = forge.op.Matmul(f"ff1", act, ff1_weights) + op1 = forge.op.Buffer(f"gelu", op0) + op2 = forge.op.Matmul(f"ff2", op1, ff2_weights) return op2 tt0 = TTDevice("tt0", devtype=BackendType.Golden) @@ -210,16 +210,16 @@ def matmul_buffer_matmul(act, *, ff1_weights, ff2_weights): act1 = Tensor.create_from_torch(torch.rand(shape, requires_grad=True)) module = ModuleBuilder( matmul_buffer_matmul, - ff1_weights=pybuda.Tensor.create_from_torch(torch.rand(shape)), - ff2_weights=pybuda.Tensor.create_from_torch(torch.rand(shape)) + ff1_weights=forge.Tensor.create_from_torch(torch.rand(shape)), + ff2_weights=forge.Tensor.create_from_torch(torch.rand(shape)) ) tt0.place_module(module) # apply overrides - pybuda.config.override_op_size("ff2", (2,2)) - pybuda.config.override_op_placement("ff2", start=(3,3)) + forge.config.override_op_size("ff2", (2,2)) + forge.config.override_op_placement("ff2", start=(3,3)) - compile_result = pybuda_compile(tt0, "test_change_start_grid_location", act1) + compile_result = forge_compile(tt0, "test_change_start_grid_location", act1) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] placed_core = placer_solution.name_to_op_placement["ff2"].placed_cores @@ -229,15 +229,15 @@ def matmul_buffer_matmul(act, *, ff1_weights, ff2_weights): def test_conflicting_placement_overrides(test_kind): def conflicting_placement_overrides(act, *, ff1_weights, ff2_weights): - op0 = pybuda.op.Matmul(f"ff1", act, ff1_weights) - op1 = pybuda.op.Buffer(f"gelu", op0) - op2 = pybuda.op.Matmul(f"ff2", op1, ff2_weights) + op0 = forge.op.Matmul(f"ff1", act, ff1_weights) + op1 = forge.op.Buffer(f"gelu", op0) + op2 = forge.op.Matmul(f"ff2", op1, ff2_weights) return op2 - pybuda.config.override_op_placement("gelu", start=[2,2]) - pybuda.config.override_op_placement("ff2", start=[2,2]) + forge.config.override_op_placement("gelu", start=[2,2]) + forge.config.override_op_placement("ff2", start=[2,2]) - module = ModuleBuilder(conflicting_placement_overrides, ff1_weights=pybuda.Parameter(1,1,64,64), ff2_weights=pybuda.Parameter(1,1,64,64)) + module = ModuleBuilder(conflicting_placement_overrides, ff1_weights=forge.Parameter(1,1,64,64), ff2_weights=forge.Parameter(1,1,64,64)) verify.verify_module(module, [(1, 1, 64, 64)], VerifyConfig(test_kind=test_kind)) @@ -249,14 +249,14 @@ def test_dram_allocator_api(test_device): VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch), ) def override_dram_allocator(x, weight=None): - mm0 = pybuda.op.Matmul("mm0", x, weight) - gelu = pybuda.op.Gelu("gelu", mm0) + mm0 = forge.op.Matmul("mm0", x, weight) + gelu = forge.op.Gelu("gelu", mm0) return gelu x = Tensor.create_from_torch(torch.randn(shape)) - w = pybuda.Parameter(torch.randn(shape)) - pybuda.config.set_epoch_break("gelu") - pybuda.config.override_dram_queue_placement("e2e_mm0_0", chip_id=0) + w = forge.Parameter(torch.randn(shape)) + forge.config.set_epoch_break("gelu") + forge.config.override_dram_queue_placement("e2e_mm0_0", chip_id=0) override_dram_allocator(x, weight=w) @@ -267,25 +267,25 @@ def test_predicate_overrides(test_device, transpose_op, temporal_epoch_break): num_layers = 3 
chip_ids = [0] - pybuda.config.override_op_placement( + forge.config.override_op_placement( query.name_regex("mm\\d"), transpose_op=transpose_op, temporal_epoch_break=temporal_epoch_break, ) for layer in range(num_layers): - pybuda.config.override_op_size(f"mm{layer}", (1, 2)) + forge.config.override_op_size(f"mm{layer}", (1, 2)) @compile(chip_ids=chip_ids) def predicate_override(x, weight=None): out = x for layer in range(num_layers): - out = pybuda.op.Matmul(f"mm{layer}", out, weight) - out = pybuda.op.Exp(f"exp{layer}", out) + out = forge.op.Matmul(f"mm{layer}", out, weight) + out = forge.op.Exp(f"exp{layer}", out) return out x = Tensor.create_from_torch(torch.randn(shape)) - w = pybuda.Parameter(torch.randn(shape)) + w = forge.Parameter(torch.randn(shape)) compile_result = predicate_override(x, weight=w) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] diff --git a/pybuda/test/test_recompile.py b/forge/test/test_recompile.py similarity index 56% rename from pybuda/test/test_recompile.py rename to forge/test/test_recompile.py index 79b7b1e4b..bf0d59700 100644 --- a/pybuda/test/test_recompile.py +++ b/forge/test/test_recompile.py @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import os -import pybuda +import forge import pytest -from pybuda.verify import verify_module, VerifyConfig, TestKind +from forge.verify import verify_module, VerifyConfig, TestKind -class FusingStreamLimitsStress(pybuda.PyBudaModule): +class FusingStreamLimitsStress(forge.ForgeModule): """ Module which tests recompile when fused op doesn't satisfy stream contraints. """ @@ -15,20 +15,20 @@ class FusingStreamLimitsStress(pybuda.PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) + self.weights = forge.Parameter(self.shape[-1], self.shape[-1], requires_grad=True) def forward(self, act1, act2): matmuls = [] for i in range(10): - matmuls.append(pybuda.op.Matmul(f"matmul_{i}", act1, self.weights)) + matmuls.append(forge.op.Matmul(f"matmul_{i}", act1, self.weights)) for i in range(10): - matmuls.append(pybuda.op.Matmul(f"matmul_{i+10}", act2, self.weights)) + matmuls.append(forge.op.Matmul(f"matmul_{i+10}", act2, self.weights)) # Expecting fusing of ops below - add = pybuda.op.Add("", matmuls[0], matmuls[1]) + add = forge.op.Add("", matmuls[0], matmuls[1]) for i in range(2, 20): - add = pybuda.op.Add("", add, matmuls[i]) + add = forge.op.Add("", add, matmuls[i]) return add @@ -36,17 +36,17 @@ def test_recompile_fuse_stream_limits(test_device): pytest.skip() # Setting target cycles to 0 causes us to hit stream constraints on fused op. - os.environ["PYBUDA_RIBBON_TARGET_CYCLES"] = "0" - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_TEMP_BALANCER_MODEL_PCIE_BW"] = "0" - os.environ["PYBUDA_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" + os.environ["FORGE_RIBBON_TARGET_CYCLES"] = "0" + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_TEMP_BALANCER_MODEL_PCIE_BW"] = "0" + os.environ["FORGE_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1" # Enable recompilation to recover from net2pipe failure. 
- os.environ["PYBUDA_AUTO_RECOMPILE"] = "1" + os.environ["FORGE_AUTO_RECOMPILE"] = "1" run_net2pipe = not test_device.is_silicon() - pybuda.config.set_configuration_options(balancer_policy="Ribbon") + forge.config.set_configuration_options(balancer_policy="Ribbon") verify_module(FusingStreamLimitsStress("recompile_fuse_stream_limits"), [FusingStreamLimitsStress.shape, FusingStreamLimitsStress.shape], VerifyConfig(test_kind=TestKind.INFERENCE, arch=test_device.arch, devtype=test_device.devtype, run_net2pipe=run_net2pipe)) diff --git a/pybuda/test/test_sanity.py b/forge/test/test_sanity.py similarity index 74% rename from pybuda/test/test_sanity.py rename to forge/test/test_sanity.py index f948b07bb..c81f3eccc 100644 --- a/pybuda/test/test_sanity.py +++ b/forge/test/test_sanity.py @@ -14,30 +14,30 @@ from collections import defaultdict from typing import Dict, List -import pybuda -import pybuda.op -from pybuda import ( - PyBudaModule, +import forge +import forge.op +from forge import ( + ForgeModule, TTDevice, BackendDevice, BackendType, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, CompileDepth, VerifyConfig, PyTorchModule, ci ) -from pybuda.ttdevice import get_device_config -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.utils import align_up_tile -from pybuda.pybudaglobal import TILE_DIM -from pybuda.op.eval import compare_tensor_to_golden +from forge.ttdevice import get_device_config +from forge.config import CompileDepth, _get_global_compiler_config +from forge.utils import align_up_tile +from forge.forgeglobal import TILE_DIM +from forge.op.eval import compare_tensor_to_golden from .common import compile, device, run, run_torch, ModuleBuilder -import pybuda.verify as verify -from pybuda.verify import TestKind, verify_module -from test.bert.modules import PyBudaBertMHA, get_bert_parameters +import forge.verify as verify +from forge.verify import TestKind, verify_module +from test.bert.modules import ForgeBertMHA, get_bert_parameters verify_cfg = VerifyConfig(run_golden=True, run_net2pipe=True) # Run backend golden check on all tests in here @@ -47,7 +47,7 @@ "blackhole": BackendDevice.Blackhole } -class BudaTestAdd(PyBudaModule): +class BudaTestAdd(ForgeModule): """ Simple buda module for basic testing """ @@ -56,16 +56,16 @@ class BudaTestAdd(PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1, act2): - a1 = pybuda.op.Add("add1", act1, self.weights1) - a2 = pybuda.op.Add("add2", act2, self.weights2) - a3 = pybuda.op.Add("add3", a1, a2) + a1 = forge.op.Add("add1", act1, self.weights1) + a2 = forge.op.Add("add2", act2, self.weights2) + a3 = forge.op.Add("add3", a1, a2) return a3 -class BudaTest(PyBudaModule): +class BudaTest(ForgeModule): """ Simple buda module for basic testing """ @@ -74,17 +74,17 @@ class BudaTest(PyBudaModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = 
pybuda.op.Matmul("matmul2", act2, self.weights2) - m1e = pybuda.op.Exp("exp", m1) - return pybuda.op.Add("add", m1e, m2) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act2, self.weights2) + m1e = forge.op.Exp("exp", m1) + return forge.op.Add("add", m1e, m2) -class MatmulModule(PyBudaModule): +class MatmulModule(ForgeModule): """ Single Matmul module for basic testing """ @@ -93,54 +93,54 @@ class MatmulModule(PyBudaModule): def __init__(self, name, bias): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) self.bias = bias if bias: bias_shape = (1, 1, 1, self.shape[-1]) - self.bias1 = pybuda.Parameter(*bias_shape, requires_grad=True) + self.bias1 = forge.Parameter(*bias_shape, requires_grad=True) def forward(self, act1): if self.bias: - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) + self.bias1 + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + self.bias1 else: - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) return m1 -class SimpleLinear(PyBudaModule): +class SimpleLinear(ForgeModule): """ Linear module for basic testing """ def __init__(self, name, in_features, out_features, bias=True, relu=True): super().__init__(name) - self.weights = pybuda.Parameter(1, 1, in_features, out_features, requires_grad=True) + self.weights = forge.Parameter(1, 1, in_features, out_features, requires_grad=True) sqrt_k = (1.0 / in_features) ** 0.5 weights_value = torch.empty(*self.weights.shape.get_pytorch_shape(), requires_grad=True) torch.nn.init.uniform_(weights_value, -sqrt_k, sqrt_k) self.set_parameter("weights", weights_value) self.bias = None if bias: - self.bias = pybuda.Parameter(out_features, requires_grad=True) + self.bias = forge.Parameter(out_features, requires_grad=True) bias_value = torch.empty(*self.bias.shape.get_pytorch_shape(), requires_grad=True) torch.nn.init.uniform_(bias_value, -sqrt_k, sqrt_k) self.set_parameter("bias", bias_value) self.relu = relu def forward(self, activations): - x = pybuda.op.Matmul(self.name + ".matmul", activations, self.weights) + x = forge.op.Matmul(self.name + ".matmul", activations, self.weights) if self.bias is not None: - x = pybuda.op.Add(self.name + ".bias", x, self.bias) + x = forge.op.Add(self.name + ".bias", x, self.bias) if self.relu: - x = pybuda.op.Relu(self.name + ".relu", x) + x = forge.op.Relu(self.name + ".relu", x) # TODO: fixme, problems if relu is the final graph output - x = pybuda.op.Identity(self.name + ".ident", x) + x = forge.op.Identity(self.name + ".ident", x) return x def test_trace(training): mod = BudaTest("test_module") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -153,13 +153,13 @@ def test_trace(training): mod.set_parameter("weights2", torch.rand(*BudaTest.shape, requires_grad=True)) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "sanity", act1, act2, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "sanity", act1, act2, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) def test_trace_add_params(training): mod = BudaTest("test_module") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, 
parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -172,7 +172,7 @@ def test_trace_add_params(training): mod.set_parameter("weights2", torch.rand(*BudaTest.shape, requires_grad=True)) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "add_params", act1, act2, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "add_params", act1, act2, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) @pytest.mark.parametrize("bias", (True, False), ids=["bias", "no_bias"]) def test_trace_matmul(training, bias): @@ -181,7 +181,7 @@ def test_trace_matmul(training, bias): pytest.skip() # golden random fail in CI, to be figured out mod = MatmulModule("test_module", bias=bias) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -196,25 +196,25 @@ def test_trace_matmul(training, bias): sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "trace_matmul", act1, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "trace_matmul", act1, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) def test_trace_log(test_kind): - class BudaLogModule(PyBudaModule): + class BudaLogModule(ForgeModule): shape = (1, 1, 64, 64) def __init__(self, name): super().__init__(name) def forward(self, act1): - return pybuda.op.Log("log", act1) + return forge.op.Log("log", act1) verify.verify_module(BudaLogModule("log_module"), [(1, 1, 64, 64)], VerifyConfig( graph_name="log", test_kind=test_kind, devtype=BackendType.NoBackend)) """ mod = BudaLogModule("log_module") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -224,31 +224,31 @@ def forward(self, act1): torch.rand(*BudaLogModule.shape, requires_grad=True) ) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "log", act1, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "log", act1, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) """ def test_trace_add(): - class BudaAddModule(PyBudaModule): + class BudaAddModule(ForgeModule): shape = (1, 1, 128, 128) def __init__(self, name): super().__init__(name) def forward(self, act1, act2): - return pybuda.op.Add("add", act1, act2) + return forge.op.Add("add", act1, act2) verify.verify_module(BudaAddModule("add_module"), [(1, 1, 128, 128), (1, 1, 128, 128)], VerifyConfig()) def test_trace_constant(): - class BudaAddModule(PyBudaModule): + class BudaAddModule(ForgeModule): shape = (1, 1, 128, 128) def __init__(self, name): super().__init__(name) def forward(self, act1): - constant = pybuda.op.Constant("constant", constant=2.0) - return pybuda.op.Add("add", act1, constant) + constant = forge.op.Constant("constant", constant=2.0) + return forge.op.Add("add", act1, constant) mod = BudaAddModule("add_module") tt0 = TTDevice("tt0", devtype=BackendType.Golden) @@ -259,7 +259,7 @@ def forward(self, act1): ) vcfg = VerifyConfig(run_golden=False) # segfaults, need to skip running golden - pybuda_compile( + forge_compile( tt0, "constant", act1, @@ -273,7 +273,7 @@ def test_trace_linear_relu(mode): training = (mode == 
"training" or mode == "optimizer") optimizer = (mode == "optimizer") mod = SimpleLinear("simple_linear", 64, 32, bias=False, relu=True) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) if optimizer else None tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -283,7 +283,7 @@ def test_trace_linear_relu(mode): sgd_optimizer.set_optimizer_parameters() verify_cfg=VerifyConfig(run_golden=False) # relu not supported yet - pybuda_compile( + forge_compile( tt0, "linear_relu", activations, @@ -304,9 +304,9 @@ def test_reshape(mode, shapes): verify_cfg=VerifyConfig(run_golden=False), # reshape not supported by backend ) def simple_reshape(x): - x = pybuda.op.Identity("id0", x) - x = pybuda.op.Reshape("reshape0", x, shapes[1]) - return pybuda.op.Identity("id1", x) + x = forge.op.Identity("id0", x) + x = forge.op.Reshape("reshape0", x, shapes[1]) + return forge.op.Identity("id1", x) x = Tensor.create_from_torch(torch.rand(*shapes[0], requires_grad=training)) simple_reshape(x) @@ -327,9 +327,9 @@ def test_reduce(training, shapes, type, dim, test_device): ) def simple_reduce(x): if type == "Avg": - x = pybuda.op.ReduceAvg("reduce", x, dim) + x = forge.op.ReduceAvg("reduce", x, dim) else: - x = pybuda.op.ReduceSum("reduce", x, dim) + x = forge.op.ReduceSum("reduce", x, dim) return x x = Tensor.create_from_torch(torch.rand(*shapes[0], requires_grad=training)) @@ -354,9 +354,9 @@ def test_select(test_kind, test_device, shape, dim_index_length): VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, verify_all=True), ) def simple_select(x): - x0 = pybuda.op.Select("select0", x, dim, (index, length), stride=stride) - x1 = pybuda.op.Select("select1", x, dim, (index, length), stride=stride) - return pybuda.op.Multiply("mul0", x0, x1) + x0 = forge.op.Select("select0", x, dim, (index, length), stride=stride) + x1 = forge.op.Select("select1", x, dim, (index, length), stride=stride) + return forge.op.Multiply("mul0", x0, x1) x = Tensor.create_from_torch(torch.rand(*shape, requires_grad=test_kind.is_training())) simple_select(x) @@ -374,7 +374,7 @@ def test_single_select(test_kind, test_device, shape, dim_index_length): compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_t_streaming = True compiler_cfg.manual_t_streaming = True - # pybuda.config.override_t_stream_shape("index.dc.select.0", (9, 1)) + # forge.config.override_t_stream_shape("index.dc.select.0", (9, 1)) @compile( compiler_cfg = CompilerConfig(enable_t_streaming=True, manual_t_streaming = True), @@ -382,7 +382,7 @@ def test_single_select(test_kind, test_device, shape, dim_index_length): ) def simple_select(x): - ret = pybuda.op.Select("select0", x, dim, (index, length), stride=stride) + ret = forge.op.Select("select0", x, dim, (index, length), stride=stride) return ret x = Tensor.create_from_torch(torch.rand(*shape, requires_grad=test_kind.is_training())) @@ -395,8 +395,8 @@ def simple_select(x): def test_slice_stack(mode, shape, factor, direction): training = mode == "training" - slice_op = {"h": pybuda.op.HSlice, "v": pybuda.op.VSlice}[direction] - stack_op = {"h": pybuda.op.HStack, "v": pybuda.op.VStack}[direction] + slice_op = {"h": forge.op.HSlice, "v": forge.op.VSlice}[direction] + stack_op = {"h": forge.op.HStack, "v": forge.op.VStack}[direction] verify_cfg=VerifyConfig(run_golden=False) # select not supported yet @compile( @@ -414,10 +414,10 @@ def simple_slice_stack(x): def 
test_trace_add_sub_rsub(): - class BudaAddSubRSubModule(PyBudaModule): + class BudaAddSubRSubModule(ForgeModule): def __init__(self, name): super().__init__(name) - self.one = pybuda.Parameter(1, requires_grad=True) + self.one = forge.Parameter(1, requires_grad=True) self.set_parameter("one", torch.tensor((1.0,), requires_grad=False)) def forward(self, act1, act2): @@ -456,7 +456,7 @@ def forward(self, act1, act2): act2 = torch.rand(*shape) vcfg = VerifyConfig() - ret = pybuda_compile( + ret = forge_compile( tt0, "add_sub_rsub", Tensor.create_from_torch(act1), @@ -478,7 +478,7 @@ def test_argmax(dim): verify_cfg=verify_cfg, ) def simple_argmax(x): - return pybuda.op.Argmax("argmax0", x, dim=dim) + return forge.op.Argmax("argmax0", x, dim=dim) x = Tensor.create_from_torch(torch.rand((1, 2, 384, 384), requires_grad=False)) simple_argmax(x) @@ -499,7 +499,7 @@ def test_argmax_multiple_maximums(dim, input_shape, max_value): verify_cfg=verify_cfg, ) def simple_argmax(x): - return pybuda.op.Argmax("argmax0", x, dim=dim) + return forge.op.Argmax("argmax0", x, dim=dim) simple_argmax(x) @@ -522,7 +522,7 @@ def test_max(test_kind, test_device, dim): input_params=[{"data_format": torch.bfloat16}], ) def simple_max(x): - return pybuda.op.ReduceMax("max0", x, dim=dim) + return forge.op.ReduceMax("max0", x, dim=dim) x = Tensor.create_from_torch(torch.randn((1, 4, 128, 128), requires_grad=test_kind.is_training())) simple_max(x) @@ -530,33 +530,33 @@ def simple_max(x): @pytest.mark.parametrize("dim", [2, -1]) def test_reduce_tile_broadcast(test_kind, test_device, dim): - pytest.skip("tenstorrent/pybuda#131") + pytest.skip("tenstorrent/forge#131") @run( VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch), uniform_inputs=True, inputs_centered_on_zero=True, ) def simple_reduce_tile_broadcast(a, b): - a = pybuda.op.ReduceMax("", a, dim=dim) - return pybuda.op.Add("", a, b) + a = forge.op.ReduceMax("", a, dim=dim) + return forge.op.Add("", a, b) a = Tensor.create_from_torch(torch.randn((1, 4, 4, 4), requires_grad=test_kind.is_training())) b = Tensor.create_from_torch(torch.randn((1, 4, 4, 4), requires_grad=test_kind.is_training())) simple_reduce_tile_broadcast(a, b) -class MultiEpochModule(pybuda.PyBudaModule): +class MultiEpochModule(forge.ForgeModule): def __init__(self, name: str, num_matmuls: int): super().__init__(name) self.num_matmuls = num_matmuls - self.weights = [pybuda.Parameter(64, 64, name = f"weights_{i}") for i in range(self.num_matmuls)] + self.weights = [forge.Parameter(64, 64, name = f"weights_{i}") for i in range(self.num_matmuls)] def forward(self, act): val = act for i in range(self.num_matmuls): - val = pybuda.op.Matmul(f"matmul_{i}", val, self.weights[i]) - val = pybuda.op.Gelu(f"gelu_{i}", val) - val = pybuda.op.Matmul(f"second_matmul_{i}", val, self.weights[i]) + val = forge.op.Matmul(f"matmul_{i}", val, self.weights[i]) + val = forge.op.Gelu(f"gelu_{i}", val) + val = forge.op.Matmul(f"second_matmul_{i}", val, self.weights[i]) return val @@ -573,7 +573,7 @@ def test_recompute(test_device): @pytest.mark.parametrize("config", ["3x3conv", "data_mismatch", "c_stream", "in_out_stream"]) def test_sparse_matmul(test_device, config): - from pybuda.op.eval.sparse_utils import create_conv2d_sparse_picker_matrix + from forge.op.eval.sparse_utils import create_conv2d_sparse_picker_matrix compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "MaximizeTMinimizeGrid" @@ -610,8 +610,8 @@ def test_sparse_matmul(test_device, config): elif config == 
"c_stream": pytest.skip() # tenstorrent/budabackend#1543 - pybuda.config.override_t_stream_dir("sparse0.lc2", "C") - pybuda.config.override_t_stream_shape("sparse0.lc2", (1, 32)) + forge.config.override_t_stream_dir("sparse0.lc2", "C") + forge.config.override_t_stream_shape("sparse0.lc2", (1, 32)) iH, iW = (64, 64) inC = 1024 kH, kW = (1, 1) @@ -631,10 +631,10 @@ def test_sparse_matmul(test_device, config): pickers.append(picker) sparse = Tensor.create_from_torch(torch.stack(pickers).unsqueeze(0), constant=True) elif config == "in_out_stream": - pybuda.config.override_t_stream_dir("buf0", "R") - pybuda.config.override_t_stream_shape("buf0", (2, 1)) - pybuda.config.override_t_stream_dir("sparse0.lc2", "R") - pybuda.config.override_t_stream_shape("sparse0.lc2", (3, 1)) + forge.config.override_t_stream_dir("buf0", "R") + forge.config.override_t_stream_shape("buf0", (2, 1)) + forge.config.override_t_stream_dir("sparse0.lc2", "R") + forge.config.override_t_stream_shape("sparse0.lc2", (3, 1)) iH, iW = (32, 32) inC = 32 @@ -665,8 +665,8 @@ def test_sparse_matmul(test_device, config): ) def simple_sparse_matmul(act, sparse=None): if config == "in_out_stream": - act = pybuda.op.Buffer("buf0", act) - return pybuda.op.SparseMatmul("sparse0", sparse, act) + act = forge.op.Buffer("buf0", act) + return forge.op.SparseMatmul("sparse0", sparse, act) simple_sparse_matmul(act, sparse=sparse) @@ -682,7 +682,7 @@ def test_simple_clip(test_device): arch=test_device.arch), ) def simple_clip(act): - return pybuda.op.Clip("clip0", act, 0.3, 0.7) + return forge.op.Clip("clip0", act, 0.3, 0.7) act = Tensor.create_from_torch(torch.rand(1, 1, 32, 32)) simple_clip(act) @@ -702,7 +702,7 @@ def test_deterministic_netlist(scheduler_policy): param.set_value(t) config = { "num_heads": num_heads, "encoder_index": 0 } - mod = PyBudaBertMHA("mha", params, config) + mod = ForgeBertMHA("mha", params, config) tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(mod) @@ -712,7 +712,7 @@ def test_deterministic_netlist(scheduler_policy): ) act1 = Tensor.create_from_torch(torch.rand((microbatch_size, seq_len, hidden_dim))) act2 = Tensor.create_from_torch(torch.rand((microbatch_size, 1, seq_len))) - ret = pybuda_compile(tt0, f"mha_{i}", act1, act2, compiler_cfg=CompilerConfig(enable_training=True, scheduler_policy=scheduler_policy), verify_cfg=verify_cfg) + ret = forge_compile(tt0, f"mha_{i}", act1, act2, compiler_cfg=CompilerConfig(enable_training=True, scheduler_policy=scheduler_policy), verify_cfg=verify_cfg) with open(ret.netlist_filename) as fd: netlist = yaml.safe_load(fd) @@ -721,7 +721,7 @@ def test_deterministic_netlist(scheduler_policy): reference_netlist = netlist -class PadTest(PyBudaModule): +class PadTest(ForgeModule): """ Test wrapper for pad """ @@ -733,7 +733,7 @@ def __init__(self, name, pad, mode, channel_last): self.mode = mode def forward(self, act): - return pybuda.op.Pad("pad", act, self.pad, self.mode, self.channel_last) + return forge.op.Pad("pad", act, self.pad, self.mode, self.channel_last) @pytest.mark.parametrize("shape", ([1, 1, 64, 64], [128, 768]), ids=["shape1x1x64x64", "shape128x768"]) @@ -769,7 +769,7 @@ def test_pad( act1 = Tensor.create_from_torch(torch.rand(shape, requires_grad=True)) - pybuda_compile( + forge_compile( tt0, "pad", act1, @@ -804,7 +804,7 @@ def validate_dropout(golden, result): golden_compare_callback=validate_dropout), ) def simple_dropout(x): - return pybuda.op.Dropout("dropout0", x, p=p, training=test_kind.is_training()) + return forge.op.Dropout("dropout0", x, 
p=p, training=test_kind.is_training()) simple_dropout(x) @@ -817,24 +817,24 @@ def test_matmul_gradient_t(test_kind, test_device): VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc), ) def simple_matmul_gradient_t(x, weight=None): - return pybuda.op.Matmul("mm0", x, weight) + return forge.op.Matmul("mm0", x, weight) x = Tensor.create_from_torch(torch.randn(shape, requires_grad=test_kind.is_training())) - w = pybuda.Parameter(*shape, requires_grad=test_kind.is_training()) + w = forge.Parameter(*shape, requires_grad=test_kind.is_training()) simple_matmul_gradient_t(x, weight=w) -class ComparisonTest(PyBudaModule): +class ComparisonTest(ForgeModule): """ Test wrapper for comparison operators """ op_map = { - "eq": pybuda.op.Equal, - "ne": pybuda.op.NotEqual, - "gt": pybuda.op.Greater, - "lt": pybuda.op.Less, - "ge": pybuda.op.GreaterEqual, - "le": pybuda.op.LessEqual + "eq": forge.op.Equal, + "ne": forge.op.NotEqual, + "gt": forge.op.Greater, + "lt": forge.op.Less, + "ge": forge.op.GreaterEqual, + "le": forge.op.LessEqual } def __init__(self, name, op_type): @@ -857,7 +857,7 @@ def test_comparison( op_type ): - verify_cfg.run_net2pipe=False #tenstorrent/pybuda#1078 + verify_cfg.run_net2pipe=False #tenstorrent/forge#1078 if training: pytest.skip("Comparison operators shouldn't have derivative, and backward.") @@ -874,7 +874,7 @@ def test_comparison( act1 = Tensor.create_from_torch(act1) act2 = Tensor.create_from_torch(act2) - pybuda_compile( + forge_compile( tt0, "comparison", act1, @@ -887,7 +887,7 @@ def test_comparison( ) -class ClipTest(PyBudaModule): +class ClipTest(ForgeModule): """ Test wrapper for clip """ @@ -898,7 +898,7 @@ def __init__(self, name, min_value, max_value): self.max_value = max_value def forward(self, act): - return pybuda.op.Clip("clip", act, self.min_value, self.max_value) + return forge.op.Clip("clip", act, self.min_value, self.max_value) @pytest.mark.parametrize("shape", ([128, 768], [1, 70, 90]), ids=["shape=128x768", "shape=1x1x70x90"]) @@ -914,7 +914,7 @@ def test_clip( shape ): if test_device.is_grayskull(): - verify_cfg.run_net2pipe=False #tenstorrent/pybuda#1078 + verify_cfg.run_net2pipe=False #tenstorrent/forge#1078 if not training and recompute: pytest.skip() # inference + recompute is the same as just inference @@ -928,7 +928,7 @@ def test_clip( act1.requires_grad=True act1 = Tensor.create_from_torch(act1) - pybuda_compile( + forge_compile( tt0, "clip", act1, @@ -940,7 +940,7 @@ def test_clip( ) -class HeavisideTest(PyBudaModule): +class HeavisideTest(ForgeModule): """ Test wrapper for heaviside operator """ @@ -949,7 +949,7 @@ def __init__(self, name): super().__init__(name) def forward(self, act1, act2): - return pybuda.op.Heaviside("heaviside", act1, act2) + return forge.op.Heaviside("heaviside", act1, act2) @pytest.mark.parametrize("shape", ([1, 1, 64, 64], [128, 768], [1, 340, 180]), ids=["shape1x1x64x64", "shape128x768", "shape=1x340x180"]) @@ -960,7 +960,7 @@ def test_heaviside( recompute, shape, ): - verify_cfg.run_net2pipe=False #tenstorrent/pybuda#1078 + verify_cfg.run_net2pipe=False #tenstorrent/forge#1078 if training: pytest.skip("Heaviside shouldn't have derivative, and backward.") @@ -977,7 +977,7 @@ def test_heaviside( act2 = Tensor.create_from_torch(act2) - pybuda_compile( + forge_compile( tt0, "heaviside", act1, @@ -991,41 +991,41 @@ def test_heaviside( def test_matmul_relu(test_kind): def matmul_relu(act, *, weights): - op0 = pybuda.op.Matmul(f"op0", act, weights) - op1 = pybuda.op.Relu(f"op1", 
op0) + op0 = forge.op.Matmul(f"op0", act, weights) + op1 = forge.op.Relu(f"op1", op0) return op1 - module = ModuleBuilder(matmul_relu, weights=pybuda.Parameter(1,1,64,64)) + module = ModuleBuilder(matmul_relu, weights=forge.Parameter(1,1,64,64)) verify_module(module, [(1, 1, 64, 64)], VerifyConfig(test_kind=test_kind)) def test_matmul_gelu_matmul(test_kind): def matmul_gelu(act, *, ff1_weights, ff2_weights): - op0 = pybuda.op.Matmul(f"ff1", act, ff1_weights) - op1 = pybuda.op.Gelu(f"gelu", op0) - op2 = pybuda.op.Matmul(f"ff2", op1, ff2_weights) + op0 = forge.op.Matmul(f"ff1", act, ff1_weights) + op1 = forge.op.Gelu(f"gelu", op0) + op2 = forge.op.Matmul(f"ff2", op1, ff2_weights) return op2 - module = ModuleBuilder(matmul_gelu, ff1_weights=pybuda.Parameter(1,1,64,64), ff2_weights=pybuda.Parameter(1,1,64,64)) + module = ModuleBuilder(matmul_gelu, ff1_weights=forge.Parameter(1,1,64,64), ff2_weights=forge.Parameter(1,1,64,64)) verify_module(module, [(1, 1, 64, 64)], VerifyConfig(test_kind=test_kind, optimizer=None)) def test_consumer_ops_belonging_to_different_epochs(test_kind): def consumer_ops_belonging_to_different_epochs(act, *, weights): - op0 = pybuda.op.Matmul(f"op0", act, weights) - op1 = pybuda.op.Buffer(f"buffer_a", op0) - op2 = pybuda.op.Buffer(f"buffer_b", op1) - op3 = pybuda.op.Buffer(f"buffer_c", op1) - op3 = pybuda.op.Add(f"add", op2, op3) + op0 = forge.op.Matmul(f"op0", act, weights) + op1 = forge.op.Buffer(f"buffer_a", op0) + op2 = forge.op.Buffer(f"buffer_b", op1) + op3 = forge.op.Buffer(f"buffer_c", op1) + op3 = forge.op.Add(f"add", op2, op3) return op3 - pybuda.set_epoch_break("buffer_a") - pybuda.set_epoch_break("buffer_b") - pybuda.set_epoch_break("buffer_c") + forge.set_epoch_break("buffer_a") + forge.set_epoch_break("buffer_b") + forge.set_epoch_break("buffer_c") - module = ModuleBuilder(consumer_ops_belonging_to_different_epochs, weights=pybuda.Parameter(1,1,64,64)) + module = ModuleBuilder(consumer_ops_belonging_to_different_epochs, weights=forge.Parameter(1,1,64,64)) verify_module(module, [(1, 1, 64, 64)], VerifyConfig(test_kind=test_kind)) @@ -1044,16 +1044,16 @@ def test_consumer_ops_belonging_to_different_chips(test_kind): There should only be a single e2e queue generated in this situation, rather than two. """ def consumer_ops_belonging_to_different_chips(act, *, weights): - op0 = pybuda.op.Matmul(f"op0", act, weights) - op1 = pybuda.op.Buffer(f"buffer_a", op0) - op2 = pybuda.op.Buffer(f"buffer_b", op1) - op3 = pybuda.op.Buffer(f"buffer_c", op1) - op3 = pybuda.op.Add(f"add", op2, op3) + op0 = forge.op.Matmul(f"op0", act, weights) + op1 = forge.op.Buffer(f"buffer_a", op0) + op2 = forge.op.Buffer(f"buffer_b", op1) + op3 = forge.op.Buffer(f"buffer_c", op1) + op3 = forge.op.Add(f"add", op2, op3) return op3 - pybuda.set_epoch_break("buffer_a") - pybuda.set_chip_break("buffer_b") - pybuda.set_epoch_break("buffer_c") + forge.set_epoch_break("buffer_a") + forge.set_chip_break("buffer_b") + forge.set_epoch_break("buffer_c") arch = backend_devices[os.environ.get("BACKEND_ARCH_NAME", "grayskull")] @@ -1061,32 +1061,32 @@ def consumer_ops_belonging_to_different_chips(act, *, weights): pytest.skip("Blackhole doesn't support chip breaks. 
Skipping until BudaBackend#2650 is fixed.") compiler_cfg = _get_global_compiler_config() - # tenstorrent/pybuda#480 + # tenstorrent/forge#480 compiler_cfg.use_interactive_placer = False if arch is BackendDevice.Grayskull else True - module = ModuleBuilder(consumer_ops_belonging_to_different_chips, weights=pybuda.Parameter(1,1,64,64)) + module = ModuleBuilder(consumer_ops_belonging_to_different_chips, weights=forge.Parameter(1,1,64,64)) verify_module(module, [(1, 1, 64, 64)], VerifyConfig(test_kind=test_kind, arch=arch, chip_ids=list(range(2)))) def test_matmul_buffer_matmul(test_kind): def matmul_buffer_matmul(act, *, ff1_weights, ff2_weights): - op0 = pybuda.op.Matmul(f"ff1", act, ff1_weights) - op1 = pybuda.op.Buffer(f"gelu", op0) - op2 = pybuda.op.Matmul(f"ff2", op1, ff2_weights) + op0 = forge.op.Matmul(f"ff1", act, ff1_weights) + op1 = forge.op.Buffer(f"gelu", op0) + op2 = forge.op.Matmul(f"ff2", op1, ff2_weights) return op2 - pybuda.set_epoch_break("gelu") - pybuda.set_epoch_break("ff2") + forge.set_epoch_break("gelu") + forge.set_epoch_break("ff2") - module = ModuleBuilder(matmul_buffer_matmul, ff1_weights=pybuda.Parameter(1,1,64,64), ff2_weights=pybuda.Parameter(1,1,64,64)) + module = ModuleBuilder(matmul_buffer_matmul, ff1_weights=forge.Parameter(1,1,64,64), ff2_weights=forge.Parameter(1,1,64,64)) verify_module(module, [(1, 1, 64, 64)], VerifyConfig(test_kind=test_kind)) def test_z_sparse_matmul(test_device): input_shape = (1, 64, 128, 128) - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self): super().__init__(name="sparsematmul_test") rows = torch.arange(0, 128).tolist() @@ -1095,16 +1095,16 @@ def __init__(self): sparse = torch.stack([sparse]*64, -3) sparse = torch.unsqueeze(sparse, 0) self.add_constant("sparse") - self.set_constant("sparse", pybuda.Tensor.create_from_torch(sparse, constant=True)) + self.set_constant("sparse", forge.Tensor.create_from_torch(sparse, constant=True)) def forward(self, x): - out = pybuda.op.SparseMatmul("", self.get_constant("sparse"), x) + out = forge.op.SparseMatmul("", self.get_constant("sparse"), x) return out compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "MaximizeTMinimizeGrid" - pybuda.verify.verify_module( + forge.verify.verify_module( Model(), (input_shape,), verify_cfg=VerifyConfig( @@ -1116,13 +1116,13 @@ def forward(self, x): -class PowTest(pybuda.PyBudaModule): +class PowTest(forge.ForgeModule): def __init__(self, name, exp_val): super().__init__(name) self.exp_val = exp_val def forward(self, act1): - p1 = pybuda.op.Pow("pow", act1, self.exp_val) + p1 = forge.op.Pow("pow", act1, self.exp_val) return p1 @@ -1136,7 +1136,7 @@ def test_pow(test_device, test_kind, is_exp_fp): exp_val = int(2) x = Tensor.create_from_torch(torch.rand((1, 1, 64, 64), requires_grad=True)) - pybuda.verify.verify_module( + forge.verify.verify_module( PowTest("pow-test", exp_val), ([1,1,64,64]), verify_cfg=VerifyConfig( @@ -1149,19 +1149,19 @@ def test_pow(test_device, test_kind, is_exp_fp): ) -class PowerBinaryTest(pybuda.PyBudaModule): +class PowerBinaryTest(forge.ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, act1, act2): - p1 = pybuda.op.Power("power-binary", act1, act2) + p1 = forge.op.Power("power-binary", act1, act2) return p1 def test_power_binary(test_device, test_kind): x = Tensor.create_from_torch(torch.rand((1, 1, 64, 64), requires_grad=True)) y = Tensor.create_from_torch(torch.rand((1, 1, 64, 64), requires_grad=True)) - pybuda.verify.verify_module( + 
forge.verify.verify_module( PowerBinaryTest("power-binary-test"), ([1,1,64,64]), verify_cfg=VerifyConfig( @@ -1174,14 +1174,14 @@ def test_power_binary(test_device, test_kind): ) -class ReluTest(pybuda.PyBudaModule): +class ReluTest(forge.ForgeModule): def __init__(self, name, _threshold, _mode): super().__init__(name) self.threshold = _threshold self.mode = _mode def forward(self, act1): - p1 = pybuda.op.Relu("relu", act1, self.threshold, self.mode) + p1 = forge.op.Relu("relu", act1, self.threshold, self.mode) return p1 @@ -1192,7 +1192,7 @@ def test_relu(test_device, test_kind, threshold, mode): pytest.skip("inv-relu is not supposed to be called with the default threshold") x = Tensor.create_from_torch(torch.randn((1, 1, 64, 64), requires_grad=True)) - pybuda.verify.verify_module( + forge.verify.verify_module( ReluTest("relu-test", threshold, mode), ([1,1,64,64]), verify_cfg=VerifyConfig( @@ -1204,28 +1204,28 @@ def test_relu(test_device, test_kind, threshold, mode): ) -class BinaryTest(pybuda.PyBudaModule): +class BinaryTest(forge.ForgeModule): def __init__(self, name, _mode): super().__init__(name) self.mode = _mode def forward(self, act1, act2): if self.mode == "less": - p1 = pybuda.op.Less("less", act1, act2) + p1 = forge.op.Less("less", act1, act2) elif self.mode == "gteq": - p1 = pybuda.op.GreaterEqual("gteq", act1, act2) + p1 = forge.op.GreaterEqual("gteq", act1, act2) elif self.mode == "heaviside": - p1 = pybuda.op.Heaviside("heaviside", act1, act2) + p1 = forge.op.Heaviside("heaviside", act1, act2) elif self.mode == "lteq": - p1 = pybuda.op.LessEqual("lteq", act1, act2) + p1 = forge.op.LessEqual("lteq", act1, act2) elif self.mode == "greater": - p1 = pybuda.op.Greater("greater", act1, act2) + p1 = forge.op.Greater("greater", act1, act2) elif self.mode == "ne": - p1 = pybuda.op.NotEqual("ne", act1, act2) + p1 = forge.op.NotEqual("ne", act1, act2) elif self.mode == "maximum": - p1 = pybuda.op.Max("maximum", act1, act2) + p1 = forge.op.Max("maximum", act1, act2) else: - p1 = pybuda.op.Equal("eq", act1, act2) + p1 = forge.op.Equal("eq", act1, act2) return p1 @@ -1233,7 +1233,7 @@ def forward(self, act1, act2): def test_binary(test_device, mode): x = Tensor.create_from_torch(torch.randn((1, 1, 64, 64), requires_grad=True)) y = Tensor.create_from_torch(torch.randn((1, 1, 64, 64), requires_grad=True)) - pybuda.verify.verify_module( + forge.verify.verify_module( BinaryTest(f"binary-{mode}-test", mode), ([1,1,64,64]), verify_cfg=VerifyConfig( @@ -1256,9 +1256,9 @@ def test_large_reshape(shape): verify_cfg=VerifyConfig(run_golden=True), # reshape not supported by backend ) def simple_large_reshape(x, y): - x = pybuda.op.Multiply("mult0", x, x) - x = pybuda.op.Reshape("reshape0", x, (1,outer,num_blocks,block_size)) - y = pybuda.op.Multiply("mult1", x, y) + x = forge.op.Multiply("mult0", x, x) + x = forge.op.Reshape("reshape0", x, (1,outer,num_blocks,block_size)) + y = forge.op.Multiply("mult1", x, y) return y x = Tensor.create_from_torch(torch.rand((outer, num_blocks*block_size))) @@ -1267,17 +1267,17 @@ def simple_large_reshape(x, y): def test_invalid_vstack_candidate(test_kind, test_device): - class Model(pybuda.PyBudaModule): + class Model(forge.ForgeModule): def __init__(self, name): super().__init__(name) self.add_constant("c") self.set_constant("c", torch.ones((1, 256, 1, 1))) - self.add_parameter("b", pybuda.Parameter(*(324,), requires_grad=True)) - self.add_parameter("w", pybuda.Parameter(*(324, 256, 3, 3), requires_grad=True)) + self.add_parameter("b", forge.Parameter(*(324,), 
requires_grad=True)) + self.add_parameter("w", forge.Parameter(*(324, 256, 3, 3), requires_grad=True)) def forward(self, x): - x = pybuda.op.Add("", x, self.get_constant("c")) - x = pybuda.op.Conv2d("", x, self.get_parameter("w"), self.get_parameter("b"), 1, (1, 1, 1, 1), 1, 1, 0) + x = forge.op.Add("", x, self.get_constant("c")) + x = forge.op.Conv2d("", x, self.get_parameter("w"), self.get_parameter("b"), 1, (1, 1, 1, 1), 1, 1, 0) return x module = Model("invalid_vstack_candidate") @@ -1295,36 +1295,36 @@ def test_intermediate_verification(test_kind): if test_kind.is_training(): pytest.skip() - class InterVer(PyBudaModule): + class InterVer(ForgeModule): def __init__(self, name): super().__init__(name) const0 = torch.ones((1,1)) self.add_constant("const0") - self.set_constant("const0", pybuda.Tensor.create_from_torch(const0, constant=True)) + self.set_constant("const0", forge.Tensor.create_from_torch(const0, constant=True)) const1 = torch.ones((1,1)) self.add_constant("const1") - self.set_constant("const1", pybuda.Tensor.create_from_torch(const1, constant=True)) + self.set_constant("const1", forge.Tensor.create_from_torch(const1, constant=True)) const2 = torch.ones((1,1,8,1)) self.add_constant("const2") - self.set_constant("const2", pybuda.Tensor.create_from_torch(const2, constant=True)) + self.set_constant("const2", forge.Tensor.create_from_torch(const2, constant=True)) def forward(self, inp): - index = pybuda.op.Index("index", inp, -1, 0, 512, 1) - reshape0 = pybuda.op.Reshape("Reshape0", index, [1, 64, 8, 64]) - mult = pybuda.op.Multiply("Mul", reshape0, reshape0) - reduce_sum = pybuda.op.ReduceSum("Sum", mult, -1) - reshape1 = pybuda.op.Reshape("Reshape1", reduce_sum, [1, 64, 8, 1]) - sqrt = pybuda.op.Sqrt("Sqrt", reshape1) - max = pybuda.op.Max("max", sqrt, self.get_constant("const0")) - min = pybuda.op.Min("min", max, self.get_constant("const1")) - sub = pybuda.op.Subtract("Sub", sqrt, min,) - add = pybuda.op.Add("Add", sqrt, sub) - - recip = pybuda.op.Reciprocal("Recip", add) - mult2 = pybuda.op.Multiply("mul2", recip, reshape0) - mult3 = pybuda.op.Multiply("mul3", mult2, self.get_constant("const2")) - trans = pybuda.op.Transpose("transpose", mult3, -3, -2, 8) + index = forge.op.Index("index", inp, -1, 0, 512, 1) + reshape0 = forge.op.Reshape("Reshape0", index, [1, 64, 8, 64]) + mult = forge.op.Multiply("Mul", reshape0, reshape0) + reduce_sum = forge.op.ReduceSum("Sum", mult, -1) + reshape1 = forge.op.Reshape("Reshape1", reduce_sum, [1, 64, 8, 1]) + sqrt = forge.op.Sqrt("Sqrt", reshape1) + max = forge.op.Max("max", sqrt, self.get_constant("const0")) + min = forge.op.Min("min", max, self.get_constant("const1")) + sub = forge.op.Subtract("Sub", sqrt, min,) + add = forge.op.Add("Add", sqrt, sub) + + recip = forge.op.Reciprocal("Recip", add) + mult2 = forge.op.Multiply("mul2", recip, reshape0) + mult3 = forge.op.Multiply("mul3", mult2, self.get_constant("const2")) + trans = forge.op.Transpose("transpose", mult3, -3, -2, 8) return trans compiler_config = _get_global_compiler_config() @@ -1335,31 +1335,31 @@ def test_channel_fuse_concat_select(test_kind): if test_kind.is_training(): pytest.skip() - class channel_select_fusion(PyBudaModule): + class channel_select_fusion(ForgeModule): def __init__(self, name): super().__init__(name) const0 = torch.ones((1,1)) self.add_constant("const0") - self.set_constant("const0", pybuda.Tensor.create_from_torch(const0, constant=True)) + self.set_constant("const0", forge.Tensor.create_from_torch(const0, constant=True)) const1 = torch.ones((1,1)) 
self.add_constant("const1") - self.set_constant("const1", pybuda.Tensor.create_from_torch(const1, constant=True)) + self.set_constant("const1", forge.Tensor.create_from_torch(const1, constant=True)) const2 = torch.ones((1,1)) self.add_constant("const2") - self.set_constant("const2", pybuda.Tensor.create_from_torch(const2, constant=True)) + self.set_constant("const2", forge.Tensor.create_from_torch(const2, constant=True)) def forward(self, inp): - index0 = pybuda.op.Index("index0", inp, -3, 0, 1, 1) - index1 = pybuda.op.Index("index1", inp, -3, 1, 2, 1) - index2 = pybuda.op.Index("index2", inp, -3, 2, 3, 1) + index0 = forge.op.Index("index0", inp, -3, 0, 1, 1) + index1 = forge.op.Index("index1", inp, -3, 1, 2, 1) + index2 = forge.op.Index("index2", inp, -3, 2, 3, 1) - mult0 = pybuda.op.Multiply("Mul0", index0, self.get_constant("const0")) - mult1 = pybuda.op.Multiply("Mul1", index1, self.get_constant("const1")) - add2 = pybuda.op.Add("Add2", mult1, self.get_constant("const2")) - concat = pybuda.op.Concatenate("Concat", mult0, add2, index2, axis=-3) - m1 = pybuda.op.Matmul("matmul1", concat, concat) + mult0 = forge.op.Multiply("Mul0", index0, self.get_constant("const0")) + mult1 = forge.op.Multiply("Mul1", index1, self.get_constant("const1")) + add2 = forge.op.Add("Add2", mult1, self.get_constant("const2")) + concat = forge.op.Concatenate("Concat", mult0, add2, index2, axis=-3) + m1 = forge.op.Matmul("matmul1", concat, concat) return m1 @@ -1373,21 +1373,21 @@ def test_erase_consecutive_reshape_binary(test_kind): param_shape = (1, 16, 1, 1) out_shape = (1, 1, 4, 5888) - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self, name, inter_shape, out_shape, param_shape): super().__init__(name) self.inter_shape = inter_shape self.out_shape = out_shape self.param_shape = param_shape - self.param = pybuda.Parameter(*self.param_shape, requires_grad=True) + self.param = forge.Parameter(*self.param_shape, requires_grad=True) def forward(self, x, y): - x = pybuda.op.Multiply("mult0", x, x) - x = pybuda.op.Transpose("t0", x, -2, -1) - x = pybuda.op.Reshape("reshape0", x, self.inter_shape) - x = pybuda.op.Add("add0", x, self.param) - x = pybuda.op.Reshape("reshape1", x, self.out_shape) - x = pybuda.op.Multiply("multiply", x, y) + x = forge.op.Multiply("mult0", x, x) + x = forge.op.Transpose("t0", x, -2, -1) + x = forge.op.Reshape("reshape0", x, self.inter_shape) + x = forge.op.Add("add0", x, self.param) + x = forge.op.Reshape("reshape1", x, self.out_shape) + x = forge.op.Multiply("multiply", x, y) return x mod = Model("consecutive_reshape_binary", inter_shape, out_shape, param_shape) @@ -1402,17 +1402,17 @@ def forward(self, x, y): def test_dual_reduce(test_kind): input_shape = (1, 1, 3, 1024) - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self): super().__init__("dual_reduce") def forward(self, x): - x = pybuda.op.Softmax("", x, dim=-1) - x = pybuda.op.Reshape("", x, (1, 3, 32, 32)) - x = pybuda.op.ReduceSum("", x, dim=-2) - x = pybuda.op.ReduceSum("", x, dim=-1) - x = pybuda.op.Reshape("", x, (1, 1, 3, 1)) - x = pybuda.op.Softmax("", x, dim=-1) + x = forge.op.Softmax("", x, dim=-1) + x = forge.op.Reshape("", x, (1, 3, 32, 32)) + x = forge.op.ReduceSum("", x, dim=-2) + x = forge.op.ReduceSum("", x, dim=-1) + x = forge.op.Reshape("", x, (1, 1, 3, 1)) + x = forge.op.Softmax("", x, dim=-1) return x mod = Model() @@ -1438,23 +1438,23 @@ def test_embedding(test_device, seq_len, grid_r, grid_c): arch=test_device.arch), ) def simple_embedding(x, table=None): - x = 
pybuda.op.Embedding("embedding", table, x) + x = forge.op.Embedding("embedding", table, x) return x compiler_config = _get_global_compiler_config() compiler_config.enable_tvm_cpu_fallback = False - pybuda.config.override_op_size("embedding", (grid_r, grid_c)) + forge.config.override_op_size("embedding", (grid_r, grid_c)) dictionary_size = 64 hidden_dim = 128 x = Tensor.create_from_torch(torch.randint(dictionary_size, (1, seq_len), dtype=torch.int)) - table = pybuda.Parameter.create_from_torch(torch.nn.Parameter(torch.randn((dictionary_size, hidden_dim)))) + table = forge.Parameter.create_from_torch(torch.nn.Parameter(torch.randn((dictionary_size, hidden_dim)))) simple_embedding(x, table=table) @pytest.mark.parametrize("mode", ["hslice", "hstack", "vslice", "vstack"]) def test_slice_stack_non_tile_aligned(test_kind, test_device, mode): - class SliceStackModule(PyBudaModule): + class SliceStackModule(ForgeModule): def __init__(self, name, factor, mode): super().__init__(name) self.factor = factor @@ -1462,13 +1462,13 @@ def __init__(self, name, factor, mode): def forward(self, activations): if mode == "hslice": - ret = pybuda.op.HSlice("hslice0", activations, self.factor) + ret = forge.op.HSlice("hslice0", activations, self.factor) elif mode == "hstack": - ret = pybuda.op.HStack("hstack0", activations, self.factor) + ret = forge.op.HStack("hstack0", activations, self.factor) elif mode == "vslice": - ret = pybuda.op.VSlice("vslice0", activations, self.factor) + ret = forge.op.VSlice("vslice0", activations, self.factor) else: - ret = pybuda.op.VStack("vstack0", activations, self.factor) + ret = forge.op.VStack("vstack0", activations, self.factor) return ret # input shape @@ -1496,8 +1496,8 @@ def forward(self, activations): ) def test_negative_reduce_max(test_device): - df = pybuda.config.DataFormat.Float16 - pybuda.config.set_configuration_options(default_df_override=df, accumulate_df=df) + df = forge.config.DataFormat.Float16 + forge.config.set_configuration_options(default_df_override=df, accumulate_df=df) def f(a, b): mae = torch.mean(torch.abs(a - b)) @@ -1512,7 +1512,7 @@ def f(a, b): golden_compare_callback=f), ) def negative_reduce_max(a): - return pybuda.op.ReduceMax("reduce", a, dim=-1) + return forge.op.ReduceMax("reduce", a, dim=-1) a = Tensor.create_from_torch(torch.randn(1, 1, 32, 32) - 100.0) negative_reduce_max(a) @@ -1526,8 +1526,8 @@ def test_unary_transpose(test_device, dims): arch=test_device.arch), ) def eltwise_unary_transpose(x): - opA = pybuda.op.Transpose(name='',operandA=x, dim0=-2, dim1=-1) - return pybuda.op.Exp('', opA) + opA = forge.op.Transpose(name='',operandA=x, dim0=-2, dim1=-1) + return forge.op.Exp('', opA) compiler_config = _get_global_compiler_config() compiler_config.enable_tvm_cpu_fallback = False @@ -1545,9 +1545,9 @@ def test_binary_transpose(test_device, dims, trans_both): arch=test_device.arch), ) def eltwise_binary_transpose(x, y): - opA = pybuda.op.Transpose(name='transA',operandA=x, dim0=-2, dim1=-1) if trans_both else x #axb - opB = pybuda.op.Transpose(name='transB',operandA=y, dim0=-2, dim1=-1) - return pybuda.op.Add('', opA, opB) + opA = forge.op.Transpose(name='transA',operandA=x, dim0=-2, dim1=-1) if trans_both else x #axb + opB = forge.op.Transpose(name='transB',operandA=y, dim0=-2, dim1=-1) + return forge.op.Add('', opA, opB) compiler_config = _get_global_compiler_config() compiler_config.enable_tvm_cpu_fallback = False @@ -1572,7 +1572,7 @@ def test_grad_eltwise_op(test_device): shape = (1, 1, 512, 512) test_kind = TestKind.TRAINING - if 
test_device.arch == pybuda.BackendDevice.Blackhole: + if test_device.arch == forge.BackendDevice.Blackhole: pytest.skip("Skip until BudaBackend#2628 is consumed.") @run( @@ -1582,35 +1582,35 @@ def test_grad_eltwise_op(test_device): arch=test_device.arch), ) def forked_op(x, weight=None): - prod = pybuda.op.Matmul("", x, weight) - op = pybuda.op.Add("", prod, weight) + prod = forge.op.Matmul("", x, weight) + op = forge.op.Add("", prod, weight) return op compiler_config = _get_global_compiler_config() compiler_config.enable_tvm_cpu_fallback = False x = Tensor.create_from_torch(torch.randn(shape, requires_grad=test_kind.is_training(), dtype=torch.bfloat16)) - w = pybuda.Parameter(torch.randn(shape, requires_grad=test_kind.is_training(), dtype=torch.bfloat16)) + w = forge.Parameter(torch.randn(shape, requires_grad=test_kind.is_training(), dtype=torch.bfloat16)) forked_op(x, weight=w) def test_3d_mm(test_device): - class Module(PyBudaModule): + class Module(ForgeModule): def __init__(self, name): super().__init__(name) - self.add_parameter("param", pybuda.Parameter(*(1, 1), requires_grad=True)) + self.add_parameter("param", forge.Parameter(*(1, 1), requires_grad=True)) def forward(self, x): - y = pybuda.op.Multiply("", x, self.get_parameter("param")) - y = pybuda.op.HSlice("", y, 8) + y = forge.op.Multiply("", x, self.get_parameter("param")) + y = forge.op.HSlice("", y, 8) - x = pybuda.op.HSlice("", x, 8) - x = pybuda.op.Transpose("", x, 2, 3) - x = pybuda.op.Matmul("", y, x) + x = forge.op.HSlice("", x, 8) + x = forge.op.Transpose("", x, 2, 3) + x = forge.op.Matmul("", y, x) return x - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object #compiler_cfg.balancer_op_override("multiply_0", "t_stream_shape", (1,1)) #compiler_cfg.balancer_op_override("matmul_4", "t_stream_shape", (2,1)) #compiler_cfg.balancer_op_override("matmul_4", "t_stream_dir", "r") @@ -1638,12 +1638,12 @@ def test_multipliers_overrides(test_device): VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch), ) def simple_matmul_buffer_overrides(x, weight=None): - return pybuda.op.Matmul("mm0", x, weight) + return forge.op.Matmul("mm0", x, weight) x = Tensor.create_from_torch(torch.randn(shape, requires_grad=test_kind.is_training())) - w = pybuda.Parameter(torch.randn(shape, requires_grad=test_kind.is_training())) - pybuda.config.override_input_buffer_multiplier("mm0", 0, multiplier=4) - pybuda.config.internal_override_output_buffer_multiplier("mm0", multiplier=4) + w = forge.Parameter(torch.randn(shape, requires_grad=test_kind.is_training())) + forge.config.override_input_buffer_multiplier("mm0", 0, multiplier=4) + forge.config.internal_override_output_buffer_multiplier("mm0", multiplier=4) simple_matmul_buffer_overrides(x, weight=w) @@ -1652,48 +1652,48 @@ def test_broadcast_transpose(test_device): @run(test_device) def broadcast_transpose(x): - x = pybuda.op.Broadcast("", x, -2, 64) - return pybuda.op.Transpose("", x, -2, -1) + x = forge.op.Broadcast("", x, -2, 64) + return forge.op.Transpose("", x, -2, -1) broadcast_transpose(Tensor.create_from_torch(torch.randn(1, 1, 1, 128))) def test_scalar_matmul_bias(test_device): - pybuda.set_configuration_options(backend_output_dir=f"tt_build/test_scalar_matmul_bias") + forge.set_configuration_options(backend_output_dir=f"tt_build/test_scalar_matmul_bias") @run(test_device) def scalar_matmul_bias(a, w=None, b=None): - x 
= pybuda.op.Matmul("", a, w) - x = pybuda.op.Add("", x, b) + x = forge.op.Matmul("", a, w) + x = forge.op.Add("", x, b) return x x = Tensor.create_from_torch(torch.randn(1, 1, 32, 32)) - w = pybuda.Parameter.create_from_torch(torch.randn(1, 1, 32, 128)) + w = forge.Parameter.create_from_torch(torch.randn(1, 1, 32, 128)) tmp = torch.zeros(1, 1, 1, 1) tmp[0, 0, 0, 0] = 1000.0 - b = pybuda.Parameter.create_from_torch(tmp) + b = forge.Parameter.create_from_torch(tmp) scalar_matmul_bias(x, w=w, b=b) def test_mismatch_repro(test_device): pytest.xfail() - class Module(PyBudaModule): + class Module(ForgeModule): def __init__(self, name): super().__init__(name) - self.add_parameter("features.7.weight", pybuda.Parameter(*(64, 32, 3, 3), requires_grad=True)) - self.add_parameter("features.7.bias", pybuda.Parameter(*(64,), requires_grad=True)) + self.add_parameter("features.7.weight", forge.Parameter(*(64, 32, 3, 3), requires_grad=True)) + self.add_parameter("features.7.bias", forge.Parameter(*(64,), requires_grad=True)) def forward(self, x): - x = pybuda.op.Conv2d("", x, self.get_parameter("features.7.weight"), self.get_parameter("features.7.bias"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) + x = forge.op.Conv2d("", x, self.get_parameter("features.7.weight"), self.get_parameter("features.7.bias"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) return x - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.balancer_op_override("conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "t_stream_shape", (2,1)) import os - os.environ["PYBUDA_REPRODUCE_SUBGRAPH"] = "1" - os.environ["PYBUDA_REPRODUCE_SUBGRAPH_INPUT"] = "conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2" - os.environ["PYBUDA_REPRODUCE_SUBGRAPH_OUTPUT"] = "conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2" + os.environ["FORGE_REPRODUCE_SUBGRAPH"] = "1" + os.environ["FORGE_REPRODUCE_SUBGRAPH_INPUT"] = "conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2" + os.environ["FORGE_REPRODUCE_SUBGRAPH_OUTPUT"] = "conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2" input_shapes = ((1, 32, 16, 16),) @@ -1713,7 +1713,7 @@ def forward(self, x): def test_mismatch_repro_smm(test_device): pytest.xfail() - class Module(PyBudaModule): + class Module(ForgeModule): def __init__(self, name): super().__init__(name) idx = torch.arange(256).tolist() @@ -1721,15 +1721,15 @@ def __init__(self, name): sparse = torch.stack([sparse]*9, -3) sparse = torch.unsqueeze(sparse, 0) self.add_constant("sparse") - self.set_constant("sparse", pybuda.Tensor.create_from_torch(sparse, constant=True)) + self.set_constant("sparse", forge.Tensor.create_from_torch(sparse, constant=True)) def forward(self, x): - x = pybuda.op.Transpose("", x, -1, -2) - x = pybuda.op.SparseMatmul("", self.get_constant("sparse"), x) - x = pybuda.op.VStack("", x, 9) + x = forge.op.Transpose("", x, -1, -2) + x = forge.op.SparseMatmul("", self.get_constant("sparse"), x) + x = forge.op.VStack("", x, 9) return x - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.balancer_op_override("sparse_matmul_1.dc.sparse_matmul.1.lc2", "t_stream_shape", (2,1)) 
compiler_cfg.balancer_op_override("sparse_matmul_1.dc.sparse_matmul.1.lc2", "grid_shape", (2,1)) @@ -1752,8 +1752,8 @@ def forward(self, x): def test_multi_repeat(test_device): @run(test_device) def multi_repeat(x): - x = pybuda.op.Repeat("", x, [1, 1, 1, 2]); - x = pybuda.op.Repeat("", x, [1, 1, 1, 2]); + x = forge.op.Repeat("", x, [1, 1, 1, 2]); + x = forge.op.Repeat("", x, [1, 1, 1, 2]); return x x = Tensor.create_from_torch(torch.rand(1, 1, 32, 32)) @@ -1762,7 +1762,7 @@ def multi_repeat(x): def get_device_intermediates(op_intermediates: List[str]) -> Dict[str, List[torch.Tensor]]: device_intermediates: Dict[str, List[torch.Tensor]] = defaultdict(list) - intermediates_queue = pybuda.get_intermediates_queue() + intermediates_queue = forge.get_intermediates_queue() while not intermediates_queue.empty(): intermediate_tensors = intermediates_queue.get() @@ -1777,8 +1777,8 @@ def test_read_back_intermediates(test_kind, test_device): else: op_intermediates = ["matmul_intermediate"] - os.environ["PYBUDA_DISABLE_STREAM_OUTPUT"] = "1" #issue #2657 - pybuda.set_configuration_options(op_intermediates_to_save=op_intermediates) + os.environ["FORGE_DISABLE_STREAM_OUTPUT"] = "1" #issue #2657 + forge.set_configuration_options(op_intermediates_to_save=op_intermediates) num_inputs = 4 @run( @@ -1792,15 +1792,15 @@ def test_read_back_intermediates(test_kind, test_device): num_inputs=num_inputs, ) def fetch_intermediates(x0, x1, x2): - intermediate = pybuda.op.Matmul("matmul_intermediate", x0, x1) - return pybuda.op.Matmul("matmul_output", intermediate, x2) + intermediate = forge.op.Matmul("matmul_intermediate", x0, x1) + return forge.op.Matmul("matmul_output", intermediate, x2) x = Tensor.create_from_torch(torch.randn(1, 1, 63, 63, requires_grad=test_kind.is_training())) y = Tensor.create_from_torch(torch.randn(1, 1, 63, 63, requires_grad=test_kind.is_training())) z = Tensor.create_from_torch(torch.randn(1, 1, 63, 63, requires_grad=test_kind.is_training())) fetch_intermediates(x, y, z) - device = pybuda.get_tenstorrent_device() + device = forge.get_tenstorrent_device() compiled_results = device.get_compiled_results() golden_intermediates: Dict[str, torch.Tensor ] = compiled_results.golden_intermediates # golden replicated @@ -1825,7 +1825,7 @@ def daisy_chain_2d(x): outputs = [] for i in range(rows): for j in range(columns): - op = pybuda.op.Gelu(f"gelu_{i}_{j}", x) + op = forge.op.Gelu(f"gelu_{i}_{j}", x) outputs.append(op) input = "inputs" @@ -1833,11 +1833,11 @@ def daisy_chain_2d(x): # insert daisy-chain along each column for j in range(columns): gelu_rows = [f"gelu_{i}_{j}" for i in range(rows)] - pybuda.insert_nop(input, gelu_rows, daisy_chain=True) + forge.insert_nop(input, gelu_rows, daisy_chain=True) # insert daisy-chain across first row gelu_first_row = [f"buffer_0_inputs_gelu_{0}_{j}" for j in range(columns)] - pybuda.insert_nop(input, gelu_first_row, daisy_chain=True) + forge.insert_nop(input, gelu_first_row, daisy_chain=True) return outputs @@ -1848,11 +1848,11 @@ def daisy_chain_2d(x): def test_forked_dram_inputs(test_device): @run(test_device) def forked_dram_inputs(x): - op_gelu1 = pybuda.op.Gelu(f"", x) - op_gelu2 = pybuda.op.Gelu(f"", x) - op_output = pybuda.op.Add(f"", op_gelu1, op_gelu2) + op_gelu1 = forge.op.Gelu(f"", x) + op_gelu2 = forge.op.Gelu(f"", x) + op_output = forge.op.Add(f"", op_gelu1, op_gelu2) return op_output - pybuda.config.set_configuration_options(enable_auto_fusing=False) + forge.config.set_configuration_options(enable_auto_fusing=False) x = 
Tensor.create_from_torch(torch.rand(1, 1, 32, 32)) forked_dram_inputs(x) @@ -1864,16 +1864,16 @@ def test_conv3d(test_device): padding = 0 dilation = 1 - class Module(PyBudaModule): + class Module(ForgeModule): def __init__(self, name): super().__init__(name) - self.add_parameter("weight", pybuda.Parameter(*(outC, inC, kD, kH, kW), requires_grad=True)) + self.add_parameter("weight", forge.Parameter(*(outC, inC, kD, kH, kW), requires_grad=True)) def forward(self, x): - x = pybuda.op.Conv3d("", x, self.get_parameter("weight"), None, stride=[stride, stride, stride], padding=[padding, padding, padding, padding, padding, padding], dilation=dilation, groups=1, channel_last=0) + x = forge.op.Conv3d("", x, self.get_parameter("weight"), None, stride=[stride, stride, stride], padding=[padding, padding, padding, padding, padding, padding], dilation=dilation, groups=1, channel_last=0) return x - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" input_shapes = ((1, inC, inD, inH, inW),) @@ -1898,15 +1898,15 @@ def test_maxpool3d(test_device): padding = 0 dilation = 1 - class Module(PyBudaModule): + class Module(ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, x): - x = pybuda.op.MaxPool3d("", x, (kD, kH, kW), stride=stride, padding=padding, dilation=dilation, channel_last=0) + x = forge.op.MaxPool3d("", x, (kD, kH, kW), stride=stride, padding=padding, dilation=dilation, channel_last=0) return x - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" input_shapes = ((1, inC, inD, inH, inW),) @@ -1927,15 +1927,15 @@ def test_resize3d(test_device): inD, inH, inW = (8, 32, 32) outD, outH, outW = (16, 64, 64) - class Module(PyBudaModule): + class Module(ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, x): - x = pybuda.op.Resize3d("", x, (outD, outH, outW), channel_last=0) + x = forge.op.Resize3d("", x, (outD, outH, outW), channel_last=0) return x - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "Ribbon" input_shapes = ((1, 3, inD, inH, inW),) @@ -1953,16 +1953,16 @@ def forward(self, x): ) def test_emulate_harvested(test_device): - os.environ["PYBUDA_FORCE_EMULATE_HARVESTED"] = "1" - class Module(PyBudaModule): + os.environ["FORGE_FORCE_EMULATE_HARVESTED"] = "1" + class Module(ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, x): - x = pybuda.op.Add("", x, x) + x = forge.op.Add("", x, x) return x - compiler_cfg = pybuda.config._get_global_compiler_config() # load global compiler config object + compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object input_shapes = ((1, 3, 32, 32),) @@ -1979,13 +1979,13 @@ def forward(self, x): ) def test_blackhole_golden_sanity(): - class Module(PyBudaModule): + class Module(ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, a, b, c): - x = pybuda.op.Add("add0", a, b) - x = pybuda.op.Matmul("matmul0", x, c) + x = forge.op.Add("add0", a, b) + x = forge.op.Matmul("matmul0", x, c) return 
x input_shapes = ((1, 3, 64, 64),(1, 3, 64, 64), (1, 3, 64, 64)) @@ -2013,17 +2013,17 @@ def __init__(self, in_channel,out_channel,kernel_size,stride,padding,groups): def forward(self, input): return self.model(input) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" # Different in_channel and out_channel model = Conv2d_transpose_model(in_channel=256,out_channel=512,kernel_size=(4, 4),stride=(2, 2),padding=(1, 1),groups=1) model.eval() - tt_model = pybuda.PyTorchModule("conv2d_transpose", model) + tt_model = forge.PyTorchModule("conv2d_transpose", model) input_shape = (1, 256, 12, 40) verify_module( @@ -2048,16 +2048,16 @@ def __init__(self, in_channel,out_channel,kernel_size,stride,padding,groups): def forward(self, input): return self.model(input) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b - os.environ["PYBUDA_RIBBON2"] = "1" + compiler_cfg.default_df_override = forge.DataFormat.Float16_b + os.environ["FORGE_RIBBON2"] = "1" # Same in_channel and out_channel, but different groups model = Conv2d_transpose_model(in_channel=256,out_channel=256,kernel_size=(4, 4),stride=(2, 2),padding=(1, 1),groups=256) model.eval() - tt_model = pybuda.PyTorchModule("conv2d_transpose", model) + tt_model = forge.PyTorchModule("conv2d_transpose", model) input_shape = (1, 256, 12, 40) verify_module( diff --git a/pybuda/test/test_shapes.py b/forge/test/test_shapes.py similarity index 76% rename from pybuda/test/test_shapes.py rename to forge/test/test_shapes.py index ffc1443d1..1f6ccbc39 100644 --- a/pybuda/test/test_shapes.py +++ b/forge/test/test_shapes.py @@ -9,14 +9,14 @@ import torch from loguru import logger -import pybuda -import pybuda.op -from pybuda import ( - PyBudaModule, +import forge +import forge.op +from forge import ( + ForgeModule, TTDevice, BackendType, Tensor, - pybuda_compile, + forge_compile, CompilerConfig, VerifyConfig, ) @@ -101,23 +101,23 @@ ) -class EltwiseBinary(PyBudaModule): +class EltwiseBinary(ForgeModule): def __init__(self, name, weight_shape): super().__init__(name) self.shape = weight_shape - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act): - return pybuda.op.Add("add", act, self.weights1) + return forge.op.Add("add", act, self.weights1) -class Matmul(PyBudaModule): +class Matmul(ForgeModule): def __init__(self, name, weight_shape): super().__init__(name) self.shape = weight_shape - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act): - return pybuda.op.Matmul("matmul", act, self.weights1) + return forge.op.Matmul("matmul", act, self.weights1) @pytest.mark.parametrize("shape", single_shapes) @pytest.mark.parametrize("model", (EltwiseBinary,)) @@ -126,7 +126,7 @@ def test_eltwise_binary_same_shape(shape, training, model): logger.info(f"Testing shape {shape}") mod = 
model("test_module", shape) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -137,7 +137,7 @@ def test_eltwise_binary_same_shape(shape, training, model): mod.set_parameter("weights1", torch.rand(*shape, requires_grad=True)) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "shapes", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "shapes", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) @pytest.mark.parametrize("shapes", broadcast_shapes) @pytest.mark.parametrize("model", (EltwiseBinary,)) @@ -145,7 +145,7 @@ def test_eltwise_binary_broadcast_shapes(shapes, training, model): logger.info(f"Testing shapes {shapes}") mod = model("test_module", shapes[1]) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -156,7 +156,7 @@ def test_eltwise_binary_broadcast_shapes(shapes, training, model): mod.set_parameter("weights1", torch.rand(*shapes[1], requires_grad=True)) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "brcst_shapes", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "brcst_shapes", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) @pytest.mark.parametrize("shapes", matmul_shapes) @@ -165,7 +165,7 @@ def test_matmul_shapes(shapes, training, model): logger.info(f"Testing shapes {shapes}") mod = model("test_module", shapes[1]) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -176,27 +176,27 @@ def test_matmul_shapes(shapes, training, model): mod.set_parameter("weights1", torch.rand(*shapes[1], requires_grad=True)) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "matmul_brcst_shapes", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "matmul_brcst_shapes", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) def test_tile_broadcast(training): # Test simple situation of eltwise add where broadcast within a tile is needed - class ScalarBroadcast(PyBudaModule): + class ScalarBroadcast(ForgeModule): def __init__(self, name): super().__init__(name) - self.eltwise_param = pybuda.Parameter(64, 64, requires_grad=True) + self.eltwise_param = forge.Parameter(64, 64, requires_grad=True) def forward(self, act): # (1, 1) + (64, 64) - need to scalar-broadcast act to get correct result - add = pybuda.op.Add("add", act, self.eltwise_param) + add = forge.op.Add("add", act, self.eltwise_param) return add mod = ScalarBroadcast("tile_broadcast") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=50.0, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -206,32 +206,32 @@ def forward(self, act): mod.set_parameter("eltwise_param", torch.rand((64, 64), requires_grad=True)) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "tile_broadcast", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + 
forge_compile(tt0, "tile_broadcast", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) def test_tile_fork(training): # Test a situation where one side needs a scalar broadcast and the other doesn't (or, in fact, can't have it or would get wrong data) - class ScalarFork(PyBudaModule): + class ScalarFork(ForgeModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(1, 1, requires_grad=True) - self.eltwise_param = pybuda.Parameter(64, 64, requires_grad=True) + self.weights1 = forge.Parameter(1, 1, requires_grad=True) + self.eltwise_param = forge.Parameter(64, 64, requires_grad=True) def forward(self, act): # (1, 1) x (1, 1) - can't broadcast both inputs or data will be wrong - mat = pybuda.op.Matmul("matmul", act, self.weights1) + mat = forge.op.Matmul("matmul", act, self.weights1) # (1, 1) + (64, 64) - need to scalar-broadcast act to get correct result - add = pybuda.op.Add("add", act, self.eltwise_param) + add = forge.op.Add("add", act, self.eltwise_param) # (1, 1) + (64, 64) - need to scalar broadcast matmul output to get the right result - return pybuda.op.Add("final_add", mat, add) + return forge.op.Add("final_add", mat, add) mod = ScalarFork("tile_fork") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=50.0, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -243,30 +243,30 @@ def forward(self, act): mod.set_parameter("eltwise_param", torch.rand((64, 64), requires_grad=True)) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "tile_fork", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "tile_fork", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) def test_reduce_folding(training): # Test the scenario where tile broadcast folds into a reduce - class ReduceFolding(PyBudaModule): + class ReduceFolding(ForgeModule): def __init__(self, name): super().__init__(name) - self.eltwise_param = pybuda.Parameter(64, 64, requires_grad=True) + self.eltwise_param = forge.Parameter(64, 64, requires_grad=True) def forward(self, act): # (64, 64) -> (64, 1) - red = pybuda.op.ReduceSum("reduce", act, dim=-1) + red = forge.op.ReduceSum("reduce", act, dim=-1) # (64, 1) + (64, 64) - need to scalar-broadcast act to get correct result - add = pybuda.op.Add("add", red, self.eltwise_param) + add = forge.op.Add("add", red, self.eltwise_param) return add mod = ReduceFolding("reduce_folding") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=50.0, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -276,26 +276,26 @@ def forward(self, act): mod.set_parameter("eltwise_param", torch.rand((64, 64), requires_grad=True)) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "reduce_folding", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "reduce_folding", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) def test_input_folding(training): # Test the scenario where tile broadcast folds into an input node - class InputFolding(PyBudaModule): + class InputFolding(ForgeModule): def __init__(self, name): super().__init__(name) - self.eltwise_param = pybuda.Parameter(64, 64, requires_grad=True) + self.eltwise_param = forge.Parameter(64, 64, 
requires_grad=True) def forward(self, act): # (1, 1) + (64, 64) - need to scalar-broadcast act to get correct result - add = pybuda.op.Add("add", act, self.eltwise_param) + add = forge.op.Add("add", act, self.eltwise_param) return add mod = InputFolding("input_folding") - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=50.0, parameters=mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -305,5 +305,5 @@ def forward(self, act): mod.set_parameter("eltwise_param", torch.rand((64, 64), requires_grad=True)) sgd_optimizer.set_optimizer_parameters() - pybuda_compile(tt0, "input_folding", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) + forge_compile(tt0, "input_folding", act, compiler_cfg=CompilerConfig(enable_training=training), verify_cfg=verify_cfg) diff --git a/pybuda/test/test_splice.py b/forge/test/test_splice.py similarity index 95% rename from pybuda/test/test_splice.py rename to forge/test/test_splice.py index a6879a53f..30e40b88b 100644 --- a/pybuda/test/test_splice.py +++ b/forge/test/test_splice.py @@ -9,11 +9,11 @@ import time import os -import pybuda -from pybuda.op.eval.buda.splice import Splice -from pybuda.op.eval.buda.tm import eval as tm_eval -from pybuda._C.balancer import OpShape -from pybuda.pybudaglobal import TILE_DIM +import forge +from forge.op.eval.buda.splice import Splice +from forge.op.eval.buda.tm import eval as tm_eval +from forge._C.balancer import OpShape +from forge.forgeglobal import TILE_DIM def factorize(n): diff --git a/forge/test/test_streaming.py b/forge/test/test_streaming.py new file mode 100644 index 000000000..c0a88f7d1 --- /dev/null +++ b/forge/test/test_streaming.py @@ -0,0 +1,137 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 +import forge +import pytest +import torch + +from forge.config import CompileDepth, _get_global_compiler_config +from .common import run + + +def test_stream_transpose(test_kind, test_device): + if test_kind.is_training(): + pytest.skip() + + @run( + forge.VerifyConfig( + test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch + ), + ) + def stream_transpose(a, b, param=None): + x = forge.op.Add("add0", a, b) + x = forge.op.Transpose("transpose0", x, 2, 3) + x = forge.op.Matmul("mm0", x, param) + return x + + compiler_cfg = forge.config._get_global_compiler_config() + + forge.config.override_op_size("add0", (1, 1)) + forge.config.override_op_size("transpose0", (1, 1)) + forge.config.override_op_size("mm0", (1, 1)) + + shape = (1, 1, 32, 16384) + a = forge.Tensor.create_from_torch( + torch.rand(*shape, requires_grad=test_kind.is_training()) + ) + b = forge.Tensor.create_from_torch( + torch.rand(*shape, requires_grad=test_kind.is_training()) + ) + c = forge.Tensor.create_from_torch(torch.rand(1, 1, 32, 32), constant=True) + stream_transpose(a, b, param=c) + + +def test_stream_to_slice(test_kind, test_device): + if test_kind.is_training(): + pytest.skip() + + @run( + forge.VerifyConfig( + test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch + ), + ) + def stream_to_slice(x): + x = forge.op.Buffer("buf0", x) + x = forge.op.VSlice("vslice0", x, 512) + x = forge.op.Buffer("buf1", x) + return x + + compiler_cfg = forge.config._get_global_compiler_config() + + forge.config.override_op_size("buf0", (1, 1)) + forge.config.override_op_size("buf1", (1, 1)) + + shape = (1, 1, 16384, 32) + a = forge.Tensor.create_from_torch( + 
torch.rand(*shape, requires_grad=test_kind.is_training()) + ) + stream_to_slice(a) + + +@pytest.mark.parametrize("mode", ["producer_streaming", "consumer_streaming", "both_streaming"]) +def test_stream_slice_transpose(test_kind, test_device, mode): + if test_kind.is_training(): + pytest.skip() + + @run( + forge.VerifyConfig( + test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch + ), + ) + def stream_slice_transpose(x): + x = forge.op.Buffer("producer", x) + x = forge.op.VSlice("vslice", x, 2) + x = forge.op.Transpose("consumer", x, 2, 3) + return x + + compiler_cfg = forge.config._get_global_compiler_config() + + if mode == "producer_streaming": + forge.config.override_t_stream_shape("producer", (2, 1)) + forge.config.override_t_stream_shape("consumer", (1, 1)) + elif mode == "consumer_streaming": + forge.config.override_t_stream_shape("producer", (1, 1)) + forge.config.override_t_stream_shape("consumer", (1, 2)) + elif mode == "both_streaming": + forge.config.override_t_stream_shape("producer", (2, 1)) + forge.config.override_t_stream_shape("consumer", (1, 2)) + + shape = (1, 1, 128, 32) + a = forge.Tensor.create_from_torch( + torch.rand(*shape, requires_grad=test_kind.is_training()) + ) + stream_slice_transpose(a) + + +@pytest.mark.parametrize("dir", ["r", "c"]) +def test_stream_interleave(test_device, dir): + forge.config.set_configuration_options(balancer_policy="MaximizeTMinimizeGrid") + forge.config.override_t_stream_dir("interleave", dir) + + @run(test_device) + def stream_interleave(a, b): + return forge.op.Interleave("interleave", a, b, axis=-3, stride=1) + + shape = (1, 4, 512, 512) + a = forge.Tensor.create_from_torch(torch.randn(*shape)) + b = forge.Tensor.create_from_torch(torch.randn(*shape)) + stream_interleave(a, b) + + +def test_manual_streaming(test_device): + + @run(test_device) + def manual_stream(x): + x = forge.op.Buffer("buf0", x) + x = forge.op.Buffer("buf1", x) + x = forge.op.Buffer("buf2", x) + return x + + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.manual_t_streaming = True + + forge.config.override_t_stream_shape("buf1", (4, 1)) + + shape = (1, 1, 128, 128) + a = forge.Tensor.create_from_torch(torch.rand(*shape)) + manual_stream(a) diff --git a/pybuda/test/test_transpose_ops_placement.py b/forge/test/test_transpose_ops_placement.py similarity index 66% rename from pybuda/test/test_transpose_ops_placement.py rename to forge/test/test_transpose_ops_placement.py index aa4d210e3..fa3792353 100644 --- a/pybuda/test/test_transpose_ops_placement.py +++ b/forge/test/test_transpose_ops_placement.py @@ -7,15 +7,15 @@ import os import math -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, PyTorchModule, TTDevice, Tensor, Parameter, pybuda_compile, CompilerConfig, SGD -from pybuda.verify import verify_module, VerifyConfig, TestKind -from pybuda.ttdevice import get_device_config -from pybuda.config import _get_global_compiler_config -from pybuda._C.backend_api import BackendType, BackendDevice +import forge +import forge.op +import forge.op.nn as nn + +from forge import ForgeModule, PyTorchModule, TTDevice, Tensor, Parameter, forge_compile, CompilerConfig, SGD +from forge.verify import verify_module, VerifyConfig, TestKind +from forge.ttdevice import get_device_config +from forge.config import _get_global_compiler_config +from forge._C.backend_api import BackendType, BackendDevice from transformers import BertModel, BertConfig verify_cfg = VerifyConfig(run_golden=True) # Run 
backend golden check on all tests in here @@ -41,79 +41,79 @@ def get_relaxed_atol_pcc_bert_encoder(test_kind, test_device, microbatch_size = return relative_atol, pcc -class ExpModule(pybuda.PyBudaModule): +class ExpModule(forge.ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, act1): - return pybuda.op.Exp("exp", act1) + return forge.op.Exp("exp", act1) -class TwoOpsModule(pybuda.PyBudaModule): +class TwoOpsModule(forge.ForgeModule): def __init__(self, name, r, c): super().__init__(name) - self.weights1 = pybuda.Parameter(torch.rand(r*32, c*32), requires_grad=True) + self.weights1 = forge.Parameter(torch.rand(r*32, c*32), requires_grad=True) def forward(self, act1, act2): - e1 = pybuda.op.Exp("exp", act1) - m1 = pybuda.op.Matmul("matmul", e1, self.weights1) - t1 = pybuda.op.Transpose("transpose", m1, 1,2) - a1 = pybuda.op.Add("add", t1, act2) + e1 = forge.op.Exp("exp", act1) + m1 = forge.op.Matmul("matmul", e1, self.weights1) + t1 = forge.op.Transpose("transpose", m1, 1,2) + a1 = forge.op.Add("add", t1, act2) return a1 -class ThreeOpsModule(pybuda.PyBudaModule): +class ThreeOpsModule(forge.ForgeModule): def __init__(self, name, r, c): super().__init__(name) - self.weights1 = pybuda.Parameter(torch.rand(r*32, c*32), requires_grad=True) + self.weights1 = forge.Parameter(torch.rand(r*32, c*32), requires_grad=True) def forward(self, act1, act2, act3): - e1 = pybuda.op.Exp("exp1", act1) - a1 = pybuda.op.Add("add1", e1, act2) - e2 = pybuda.op.Exp("exp2", act3) - c1 = pybuda.op.Concatenate("concat", a1,a1, axis=1) - m1 = pybuda.op.Matmul("matmul", e2, self.weights1) - a2 = pybuda.op.Add("add2", c1, m1) + e1 = forge.op.Exp("exp1", act1) + a1 = forge.op.Add("add1", e1, act2) + e2 = forge.op.Exp("exp2", act3) + c1 = forge.op.Concatenate("concat", a1,a1, axis=1) + m1 = forge.op.Matmul("matmul", e2, self.weights1) + a2 = forge.op.Add("add2", c1, m1) return a2 -class FourOpsModule(pybuda.PyBudaModule): +class FourOpsModule(forge.ForgeModule): def __init__(self, name, r, c): super().__init__(name) - self.weights1 = pybuda.Parameter(torch.rand(r*32, c*32), requires_grad=True) + self.weights1 = forge.Parameter(torch.rand(r*32, c*32), requires_grad=True) def forward(self, act1, act2, act3): - e1 = pybuda.op.Exp("exp1", act1) - e2 = pybuda.op.Exp("exp2", act2) - a1 = pybuda.op.Add("add1", e1, e1) - c1 = pybuda.op.Concatenate("concat", a1, e2, axis=1) - m1 = pybuda.op.Matmul("matmul", c1, self.weights1) - a2 = pybuda.op.Add("add2", act3, m1) + e1 = forge.op.Exp("exp1", act1) + e2 = forge.op.Exp("exp2", act2) + a1 = forge.op.Add("add1", e1, e1) + c1 = forge.op.Concatenate("concat", a1, e2, axis=1) + m1 = forge.op.Matmul("matmul", c1, self.weights1) + a2 = forge.op.Add("add2", act3, m1) return a2 -class TwoOpsNoTModule(pybuda.PyBudaModule): +class TwoOpsNoTModule(forge.ForgeModule): def __init__(self, name, r, c): super().__init__(name) - self.weights1 = pybuda.Parameter(torch.rand(r*32, c*32), requires_grad=True) + self.weights1 = forge.Parameter(torch.rand(r*32, c*32), requires_grad=True) def forward(self, act1, act2): - e1 = pybuda.op.Exp("exp", act1) - m1 = pybuda.op.Matmul("matmul", e1, self.weights1) - a1 = pybuda.op.Add("add", m1, act2) + e1 = forge.op.Exp("exp", act1) + m1 = forge.op.Matmul("matmul", e1, self.weights1) + a1 = forge.op.Add("add", m1, act2) return a1 -class TwoOpsModulev2(pybuda.PyBudaModule): +class TwoOpsModulev2(forge.ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, act1, act2): - e1 = pybuda.op.Exp("exp1", act1) - e2 = 
pybuda.op.Exp("exp2", act2) - m1 = pybuda.op.Matmul("matmul", e1, e2) + e1 = forge.op.Exp("exp1", act1) + e2 = forge.op.Exp("exp2", act2) + m1 = forge.op.Matmul("matmul", e1, e2) return m1 @pytest.mark.parametrize("r", [x+1 for x in range(10)]) @pytest.mark.parametrize("c", [x+1 for x in range(10)]) def test_manual_op_transpose(test_device, r, c): - if (test_device.arch == pybuda.BackendDevice.Wormhole_B0 or test_device.arch == pybuda.BackendDevice.Blackhole) and (r > 8 or c > 8): + if (test_device.arch == forge.BackendDevice.Wormhole_B0 or test_device.arch == forge.BackendDevice.Blackhole) and (r > 8 or c > 8): pytest.skip(f"{test_device.arch.to_string()} has 8 columns, skip the op-test with c = 9 or 10") compiler_cfg = _get_global_compiler_config() @@ -121,8 +121,8 @@ def test_manual_op_transpose(test_device, r, c): if r == c or c > dev_cfg.grid_size.r or r > dev_cfg.grid_size.c: pytest.skip("op's r and c are the same, or invalid op-shape considering grid-size") - pybuda.config.override_op_size("exp", (r, c)) - pybuda.config.override_op_placement("exp", transpose_op=True) + forge.config.override_op_size("exp", (r, c)) + forge.config.override_op_placement("exp", transpose_op=True) mod = ExpModule("test_manual_T_module") tt0 = TTDevice("tt0", devtype=BackendType.Golden, arch=test_device.arch) @@ -130,7 +130,7 @@ def test_manual_op_transpose(test_device, r, c): act1 = Tensor.create_from_torch(torch.rand((1, 1, r*32, c*32), requires_grad=True)) - compile_result = pybuda_compile(tt0, "sanity-manual_T", act1, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) + compile_result = forge_compile(tt0, "sanity-manual_T", act1, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] placed_core = placer_solution.name_to_op_placement["exp"].placed_cores @@ -143,9 +143,9 @@ def test_manual_op_transpose(test_device, r, c): def test_auto_op_transpose_case1(test_device): compiler_cfg = _get_global_compiler_config() - pybuda.config.override_op_size("exp", (1, 3)) - pybuda.config.override_op_size("add", (2, 1)) - pybuda.set_configuration_options(enable_auto_transposing_placement=True) + forge.config.override_op_size("exp", (1, 3)) + forge.config.override_op_size("add", (2, 1)) + forge.set_configuration_options(enable_auto_transposing_placement=True) mod = TwoOpsModule("test_auto_T1", 3, 2) tt0 = TTDevice("tt0", devtype=BackendType.Golden, arch=test_device.arch) @@ -154,7 +154,7 @@ def test_auto_op_transpose_case1(test_device): act1 = Tensor.create_from_torch(torch.rand((1, 32, 96), requires_grad=True)) act2 = Tensor.create_from_torch(torch.rand((1, 64, 32), requires_grad=True)) - compile_result = pybuda_compile(tt0, "sanity-auto_T1", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) + compile_result = forge_compile(tt0, "sanity-auto_T1", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] placed_core = placer_solution.name_to_op_placement["add"].placed_cores @@ -173,9 +173,9 @@ def test_auto_op_transpose_case1(test_device): def test_auto_op_transpose_case2(test_device): compiler_cfg = _get_global_compiler_config() - pybuda.config.override_op_size("exp", (10, 1)) - pybuda.config.override_op_size("add", (10, 1)) - pybuda.set_configuration_options(enable_auto_transposing_placement=True) + forge.config.override_op_size("exp", (10, 1)) + forge.config.override_op_size("add", (10, 1)) + 
forge.set_configuration_options(enable_auto_transposing_placement=True) mod = TwoOpsNoTModule("test_auto_T2", 1, 1) tt0 = TTDevice("tt0", devtype=BackendType.Golden, arch=test_device.arch) @@ -184,7 +184,7 @@ def test_auto_op_transpose_case2(test_device): act1 = Tensor.create_from_torch(torch.rand((1, 10*32, 32), requires_grad=True)) act2 = Tensor.create_from_torch(torch.rand((1, 10*32, 32), requires_grad=True)) - compile_result = pybuda_compile(tt0, "sanity-auto_T2", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) + compile_result = forge_compile(tt0, "sanity-auto_T2", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] placed_core = placer_solution.name_to_op_placement["add"].placed_cores @@ -201,10 +201,10 @@ def test_auto_op_transpose_case2(test_device): def test_auto_op_transpose_case3(test_device): compiler_cfg = _get_global_compiler_config() - pybuda.config.override_op_size("exp1", (2, 7)) - pybuda.config.override_op_size("add1", (2, 7)) - pybuda.config.override_op_size("exp2", (4, 1)) - pybuda.set_configuration_options(enable_auto_transposing_placement=True) + forge.config.override_op_size("exp1", (2, 7)) + forge.config.override_op_size("add1", (2, 7)) + forge.config.override_op_size("exp2", (4, 1)) + forge.set_configuration_options(enable_auto_transposing_placement=True) mod = ThreeOpsModule("test_auto_T3", 1, 7) tt0 = TTDevice("tt0", devtype=BackendType.Golden, arch=test_device.arch) @@ -214,7 +214,7 @@ def test_auto_op_transpose_case3(test_device): act2 = Tensor.create_from_torch(torch.rand((1, 2*32, 7*32), requires_grad=True)) act3 = Tensor.create_from_torch(torch.rand((1, 4*32, 32), requires_grad=True)) - compile_result = pybuda_compile(tt0, "sanity-auto_T3", act1, act2, act3, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) + compile_result = forge_compile(tt0, "sanity-auto_T3", act1, act2, act3, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] placed_core = placer_solution.name_to_op_placement["exp2"].placed_cores @@ -227,16 +227,16 @@ def test_auto_op_transpose_case3(test_device): def test_auto_op_transpose_multi_rows1(test_device): - if test_device.arch != pybuda.BackendDevice.Grayskull: + if test_device.arch != forge.BackendDevice.Grayskull: pytest.skip("Targetting grid-size of GS only") compiler_cfg = _get_global_compiler_config() - pybuda.config.override_op_size("exp1", (4, 5)) - pybuda.config.override_op_size("exp2", (1, 5)) - pybuda.config.override_op_size("add1", (4, 5)) - pybuda.config.override_op_size("add2", (5, 2)) - pybuda.set_configuration_options(enable_auto_transposing_placement=True) + forge.config.override_op_size("exp1", (4, 5)) + forge.config.override_op_size("exp2", (1, 5)) + forge.config.override_op_size("add1", (4, 5)) + forge.config.override_op_size("add2", (5, 2)) + forge.set_configuration_options(enable_auto_transposing_placement=True) mod = FourOpsModule("test_auto_T4", 5, 2) tt0 = TTDevice("tt0", devtype=BackendType.Golden, arch=test_device.arch) @@ -246,7 +246,7 @@ def test_auto_op_transpose_multi_rows1(test_device): act2 = Tensor.create_from_torch(torch.rand((1, 1*32, 5*32), requires_grad=True)) act3 = Tensor.create_from_torch(torch.rand((1, 5*32, 2*32), requires_grad=True)) - compile_result = pybuda_compile(tt0, "sanity-auto_T4", act1, act2, act3, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) + compile_result = forge_compile(tt0, "sanity-auto_T4", act1, 
act2, act3, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] placed_core = placer_solution.name_to_op_placement["add2"].placed_cores @@ -259,14 +259,14 @@ def test_auto_op_transpose_multi_rows1(test_device): def test_auto_op_transpose_multi_rows2(test_device): - if test_device.arch != pybuda.BackendDevice.Grayskull: + if test_device.arch != forge.BackendDevice.Grayskull: pytest.skip("Targetting grid-size of GS only") compiler_cfg = _get_global_compiler_config() - pybuda.config.override_op_size("exp1", (3, 8)) - pybuda.config.override_op_size("exp2", (8, 1)) - pybuda.set_configuration_options(enable_auto_transposing_placement=True) + forge.config.override_op_size("exp1", (3, 8)) + forge.config.override_op_size("exp2", (8, 1)) + forge.set_configuration_options(enable_auto_transposing_placement=True) mod = TwoOpsModulev2("test_auto_T5") tt0 = TTDevice("tt0", devtype=BackendType.Golden, arch=test_device.arch) @@ -275,7 +275,7 @@ def test_auto_op_transpose_multi_rows2(test_device): act1 = Tensor.create_from_torch(torch.rand((1, 3*32, 8*32), requires_grad=True)) act2 = Tensor.create_from_torch(torch.rand((1, 8*32, 1*32), requires_grad=True)) - compile_result = pybuda_compile(tt0, "sanity-auto_T5", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) + compile_result = forge_compile(tt0, "sanity-auto_T5", act1, act2, compiler_cfg=compiler_cfg, verify_cfg=verify_cfg) placer_solution = compile_result.pass_specific_output_kwargs["placer_solution"] placed_core = placer_solution.name_to_op_placement["exp2"].placed_cores diff --git a/pybuda/test/test_user.py b/forge/test/test_user.py similarity index 65% rename from pybuda/test/test_user.py rename to forge/test/test_user.py index 72122742a..380ec88c6 100644 --- a/pybuda/test/test_user.py +++ b/forge/test/test_user.py @@ -8,16 +8,16 @@ # There's also no verification of correctness of data, as that's not the point of these tests. # # All of these tests will run on silicon, in concurrent mode, by default. However, setting -# PYBUDA_DEVMODE=1 env variable will drop them into Golden+sequential mode. +# FORGE_DEVMODE=1 env variable will drop them into Golden+sequential mode. 
import queue import torch -import pybuda +import forge import pytest -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config -from pybuda.schedulers import LearningRateScheduler -from pybuda.pybudaglobal import pybuda_reset +from forge.schedulers import LearningRateScheduler +from forge.forgeglobal import forge_reset from test.utils import download_model # https://github.com/pytorch/pytorch/wiki/Autograd-and-Fork @@ -32,75 +32,75 @@ def _safe_read(q): data = q.get(timeout = 0.5) return data except queue.Empty as _: - if pybuda.error_raised(): - raise RuntimeError("Error raised in pybuda") + if forge.error_raised(): + raise RuntimeError("Error raised in forge") except KeyboardInterrupt: return None -# Sample PyBuda module -class PyBudaTestModule(pybuda.PyBudaModule): +# Sample Forge module +class ForgeTestModule(forge.ForgeModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(torch.rand(32, 32), requires_grad=True) - self.weights2 = pybuda.Parameter(torch.rand(32, 32), requires_grad=True) + self.weights1 = forge.Parameter(torch.rand(32, 32), requires_grad=True) + self.weights2 = forge.Parameter(torch.rand(32, 32), requires_grad=True) def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = pybuda.op.Matmul("matmul2", act2, self.weights2) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act2, self.weights2) return m1 + m2, m2 -# Sample PyBuda module -class PyBudaTestModuleOneOut(pybuda.PyBudaModule): +# Sample Forge module +class ForgeTestModuleOneOut(forge.ForgeModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(torch.rand(32, 32), requires_grad=True) - self.weights2 = pybuda.Parameter(torch.rand(32, 32), requires_grad=True) + self.weights1 = forge.Parameter(torch.rand(32, 32), requires_grad=True) + self.weights2 = forge.Parameter(torch.rand(32, 32), requires_grad=True) def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = pybuda.op.Matmul("matmul2", act2, self.weights2) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m2 = forge.op.Matmul("matmul2", act2, self.weights2) return m1 + m2 -# Sample PyBuda module -class PyBudaTestQueryKeyModule(pybuda.PyBudaModule): +# Sample Forge module +class ForgeTestQueryKeyModule(forge.ForgeModule): def __init__(self, name, hidden_dim = 128, num_heads = 4): super().__init__(name) self.hidden_dim = hidden_dim self.num_heads = num_heads - self.key_weights = pybuda.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) - self.query_weights = pybuda.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) - self.value_weights = pybuda.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) + self.key_weights = forge.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) + self.query_weights = forge.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) + self.value_weights = forge.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) def forward(self, encoder_input): - query = pybuda.op.Matmul(f"mha_query", encoder_input, self.query_weights) - query = pybuda.op.HSlice(f"mha_query_slice", query, self.num_heads) + query = forge.op.Matmul(f"mha_query", encoder_input, self.query_weights) + query = forge.op.HSlice(f"mha_query_slice", query, self.num_heads) - key = pybuda.op.Matmul(f"mha_key", encoder_input, 
self.key_weights) - key = pybuda.op.HSlice(f"mha_key_slice", key, self.num_heads) - key = pybuda.op.Transpose(f"mha_key_transpose", key, 2, 3) + key = forge.op.Matmul(f"mha_key", encoder_input, self.key_weights) + key = forge.op.HSlice(f"mha_key_slice", key, self.num_heads) + key = forge.op.Transpose(f"mha_key_transpose", key, 2, 3) - attention_scores = pybuda.op.Matmul(f"mha_as", query, key) + attention_scores = forge.op.Matmul(f"mha_as", query, key) return attention_scores -class PyBudaTestForkWithThreeUsers(pybuda.PyBudaModule): +class ForgeTestForkWithThreeUsers(forge.ForgeModule): def __init__(self, name, hidden_dim = 128, num_heads = 4): super().__init__(name) self.hidden_dim = hidden_dim self.num_heads = num_heads - self.mm_a_weights = pybuda.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) - self.mm_b_weights = pybuda.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) - self.mm_c_weights = pybuda.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) + self.mm_a_weights = forge.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) + self.mm_b_weights = forge.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) + self.mm_c_weights = forge.Parameter(torch.rand(1, 1, hidden_dim, hidden_dim), requires_grad=True) def forward(self, encoder_input): - a = pybuda.op.Matmul(f"mm_a", encoder_input, self.mm_a_weights) - b = pybuda.op.Matmul(f"mm_b", encoder_input, self.mm_b_weights) - c = pybuda.op.Matmul(f"mm_c", encoder_input, self.mm_c_weights) + a = forge.op.Matmul(f"mm_a", encoder_input, self.mm_a_weights) + b = forge.op.Matmul(f"mm_b", encoder_input, self.mm_b_weights) + c = forge.op.Matmul(f"mm_c", encoder_input, self.mm_c_weights) - add_a_b = pybuda.op.Add(f"add_a_b", a, b) - add_a_b_c = pybuda.op.Add(f"add_a_b_c", add_a_b, c) + add_a_b = forge.op.Add(f"add_a_b", a, b) + add_a_b_c = forge.op.Add(f"add_a_b_c", add_a_b, c) return add_a_b_c @@ -145,32 +145,32 @@ def forward(self, input): # # Run inference on module directly # -def test_module_direct_pybuda(): +def test_module_direct_forge(): input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) - # Run single inference pass on a PyBuda module directly - output = PyBudaTestModule("direct").run(input1, input2) + # Run single inference pass on a Forge module directly + output = ForgeTestModule("direct").run(input1, input2) print(output) def test_module_direct_pytorch(): input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) - # Run single inference pass on a PyTorch module, using a wrapper to convert to PyBuda first - output = pybuda.PyTorchModule("direct_pt", PyTorchTestModule()).run(input1, input2) + # Run single inference pass on a PyTorch module, using a wrapper to convert to Forge first + output = forge.PyTorchModule("direct_pt", PyTorchTestModule()).run(input1, input2) print(output) # # Run inference through run_inference without placing on device # -def test_run_inference_direct_pybuda(): +def test_run_inference_direct_forge(): input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) - # Run inference on a PyBuda module, with given inputs + # Run inference on a Forge module, with given inputs inputs = {"act2" : input2, "act1" : input1} - output_q = pybuda.run_inference(PyBudaTestModule("run_direct"), inputs=[inputs]) + output_q = forge.run_inference(ForgeTestModule("run_direct"), inputs=[inputs]) output = _safe_read(output_q) print(output) @@ -178,9 +178,9 @@ def test_run_inference_direct_pytorch(): input1 = 
torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) - # Run inference, using a wrapper to convert PyTorch module to PyBuda, and with given inputs + # Run inference, using a wrapper to convert PyTorch module to Forge, and with given inputs inputs = {"act2" : input2, "act1" : input1} - output_q = pybuda.run_inference(pybuda.PyTorchModule("run_direct_pt", PyTorchTestModule()), inputs=[inputs]) + output_q = forge.run_inference(forge.PyTorchModule("run_direct_pt", PyTorchTestModule()), inputs=[inputs]) output = _safe_read(output_q) print(output) @@ -188,21 +188,21 @@ def test_run_inference_direct_pytorch(): # # Run inference by placing on device first # -def test_run_inference_placed_pybuda(): +def test_run_inference_placed_forge(): input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) # Create a TT device - tt0 = pybuda.TTDevice("tt0") + tt0 = forge.TTDevice("tt0") # Place a module on the device - tt0.place_module(PyBudaTestModule("placed")) + tt0.place_module(ForgeTestModule("placed")) # Push intputs to the device tt0.push_to_inputs((input1, input2)) # Run pipeline, and read the outputs - output_q = pybuda.run_inference() + output_q = forge.run_inference() output = _safe_read(output_q) print(output) @@ -211,16 +211,16 @@ def test_run_inference_placed_pytorch(): input2 = torch.rand(4, 32, 32) # Create a TT device - tt0 = pybuda.TTDevice("tt0") + tt0 = forge.TTDevice("tt0") - # Place a module on the device, using a wrapper to convert PyTorch module to PyBuda - tt0.place_module(pybuda.PyTorchModule("placed_pt", PyTorchTestModule())) + # Place a module on the device, using a wrapper to convert PyTorch module to Forge + tt0.place_module(forge.PyTorchModule("placed_pt", PyTorchTestModule())) # Push intputs to the device tt0.push_to_inputs((input1, input2)) # Run pipeline, and read the outputs - output_q = pybuda.run_inference() + output_q = forge.run_inference() output = _safe_read(output_q) print(output) @@ -228,7 +228,7 @@ def test_run_inference_placed_pytorch(): # Repeated calls to run inference on the same module # def test_module_direct_repeated(): - module = PyBudaTestModule("direct") + module = ForgeTestModule("direct") # Run on given inputs input1 = torch.rand(4, 32, 32) @@ -251,12 +251,12 @@ def test_module_direct_repeated(): def test_run_inference_placed_repeated(): input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) - tt0 = pybuda.TTDevice("tt0") - tt0.place_module(PyBudaTestModule("placed")) + tt0 = forge.TTDevice("tt0") + tt0.place_module(ForgeTestModule("placed")) # Push one input and run tt0.push_to_inputs((input1, input2)) - output_q = pybuda.run_inference() + output_q = forge.run_inference() output = _safe_read(output_q) print(output) @@ -267,7 +267,7 @@ def test_run_inference_placed_repeated(): input2 = torch.rand(4, 32, 32) tt0.push_to_inputs((input1, input2)) - pybuda.run_inference(input_count=2) + forge.run_inference(input_count=2) for _ in range(2): output = _safe_read(output_q) @@ -278,18 +278,18 @@ def test_run_inference_placed_repeated(): # Run inference through setup + run_forward calls # def test_setup_forward_calls(): - tt0 = pybuda.TTDevice("tt0") - tt0.place_module(PyBudaTestModule("placed")) + tt0 = forge.TTDevice("tt0") + tt0.place_module(ForgeTestModule("placed")) # Compile & initialize the pipeline for inference, with given shapes - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=(torch.rand(4, 32, 32), torch.rand(4, 32, 32))) + output_q = forge.initialize_pipeline(training=False, sample_inputs=(torch.rand(4, 32, 32), 
torch.rand(4, 32, 32))) # Push & run_forward manually for _ in range(2): input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) tt0.push_to_inputs((input1, input2)) - pybuda.run_forward(input_count=1) + forge.run_forward(input_count=1) print(_safe_read(output_q)) @@ -301,19 +301,19 @@ def test_run_inference_delayed_push(): #### Skip the test on golden import os - if "PYBUDA_DEVMODE" in os.environ: + if "FORGE_DEVMODE" in os.environ: pytest.skip() #### - tt0 = pybuda.TTDevice("tt0") - tt0.place_module(PyBudaTestModule("placed")) + tt0 = forge.TTDevice("tt0") + tt0.place_module(ForgeTestModule("placed")) input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) tt0.push_to_inputs((input1, input2)) # Run with input count 3, although only one is pushed - output_q = pybuda.run_inference(input_count=3) + output_q = forge.run_inference(input_count=3) # Read one output that should've been produced output = _safe_read(output_q) @@ -335,37 +335,37 @@ def test_run_inference_delayed_push(): # def test_cpu_tt_pipeline(): - cpu0 = pybuda.CPUDevice("cpu0") - cpu0.place_module(pybuda.PyTorchModule("stage0", PyTorchTestModule())) - tt1 = pybuda.TTDevice("tt1") - tt1.place_module(PyBudaTestModule("stage1")) + cpu0 = forge.CPUDevice("cpu0") + cpu0.place_module(forge.PyTorchModule("stage0", PyTorchTestModule())) + tt1 = forge.TTDevice("tt1") + tt1.place_module(ForgeTestModule("stage1")) input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) cpu0.push_to_inputs((input1, input2)) - output_q = pybuda.run_inference() + output_q = forge.run_inference() print(_safe_read(output_q)) def test_cpu_tt_pipeline_compact(): - cpu0 = pybuda.CPUDevice("cpu0", module=pybuda.PyTorchModule("stage0", PyTorchTestModule())) - tt1 = pybuda.TTDevice("tt1", module=PyBudaTestModule("stage1")) + cpu0 = forge.CPUDevice("cpu0", module=forge.PyTorchModule("stage0", PyTorchTestModule())) + tt1 = forge.TTDevice("tt1", module=ForgeTestModule("stage1")) input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) cpu0.push_to_inputs((input1, input2)) - output_q = pybuda.run_inference() + output_q = forge.run_inference() print(_safe_read(output_q)) # Run training, read back checkpoints and loss def test_training_read_back(): - pybuda.config.set_configuration_options( - default_df_override=pybuda.DataFormat.Float16_b, + forge.config.set_configuration_options( + default_df_override=forge.DataFormat.Float16_b, ) - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModuleOneOut("module")) - tt0.place_loss_module(pybuda.op.loss.L1Loss("l1_loss")) + tt0 = forge.TTDevice("tt0", module=ForgeTestModuleOneOut("module")) + tt0.place_loss_module(forge.op.loss.L1Loss("l1_loss")) loss_q = mp_context.Queue() checkpoint_q = mp_context.Queue() @@ -375,7 +375,7 @@ def test_training_read_back(): tt0.push_to_inputs((input1, input2)) tt0.push_to_target_inputs(torch.rand(4, 32, 32)) - pybuda.run_training(checkpoint_queue = checkpoint_q, loss_queue=loss_q) + forge.run_training(checkpoint_queue = checkpoint_q, loss_queue=loss_q) print("checkpoint: ", _safe_read(checkpoint_q)) print("loss: ", _safe_read(loss_q)) @@ -383,9 +383,9 @@ def test_training_read_back(): # Run training pipeline, with loss on CPU, read back checkpoints and loss #@pytest.mark.skip(reason="Intermittent hangs on silicon") def test_training_pipeline_read_back(): - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule("stage0")) - cpu1 = pybuda.CPUDevice("cpu1", module=pybuda.PyTorchModule("stage1", PyTorchTestModuleOneOut())) - cpu1.place_loss_module(pybuda.PyTorchModule("l1loss", 
torch.nn.L1Loss())) + tt0 = forge.TTDevice("tt0", module=ForgeTestModule("stage0")) + cpu1 = forge.CPUDevice("cpu1", module=forge.PyTorchModule("stage1", PyTorchTestModuleOneOut())) + cpu1.place_loss_module(forge.PyTorchModule("l1loss", torch.nn.L1Loss())) loss_q = mp_context.Queue() checkpoint_q = mp_context.Queue() @@ -396,7 +396,7 @@ def test_training_pipeline_read_back(): cpu1.push_to_target_inputs(torch.rand(4, 32, 32)) - pybuda.run_training(checkpoint_queue = checkpoint_q, loss_queue=loss_q) + forge.run_training(checkpoint_queue = checkpoint_q, loss_queue=loss_q) print("checkpoint: ", _safe_read(checkpoint_q)) print("loss: ", _safe_read(loss_q)) @@ -414,11 +414,11 @@ def test_transformers_pipeline_inference(): input_tokens = tokenizer.encode(input_sentence, max_length=128, pad_to_max_length=True) model = download_model(BertModel.from_pretrained, "prajjwal1/bert-tiny", torchscript=False, add_pooling_layer=False) - cpu0 = pybuda.CPUDevice("cpu0", module=pybuda.PyTorchModule("bert_embeddings", model.embeddings)) - tt0 = pybuda.TTDevice("tt1", module=pybuda.PyTorchModule("bert_encoder", model.encoder)) + cpu0 = forge.CPUDevice("cpu0", module=forge.PyTorchModule("bert_embeddings", model.embeddings)) + tt0 = forge.TTDevice("tt1", module=forge.PyTorchModule("bert_encoder", model.encoder)) cpu0.push_to_inputs(torch.Tensor(input_tokens).int().unsqueeze(0)) - output_q = pybuda.run_inference() + output_q = forge.run_inference() print(_safe_read(output_q)) @@ -429,18 +429,18 @@ def test_transformers_pipeline_fallback_inference(): from transformers import BertModel, BertTokenizer - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() tokenizer = download_model(BertTokenizer.from_pretrained, "prajjwal1/bert-tiny") input_sentence = "BERT is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives: Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input then run the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence." 
input_tokens = tokenizer.encode(input_sentence, max_length=128, pad_to_max_length=True) model = download_model(BertModel.from_pretrained, "prajjwal1/bert-tiny", torchscript=False, add_pooling_layer=False) - tt0 = pybuda.TTDevice("tt0", module=pybuda.PyTorchModule("bert", model)) + tt0 = forge.TTDevice("tt0", module=forge.PyTorchModule("bert", model)) for i in range(5): tt0.push_to_inputs(torch.Tensor(input_tokens).int().unsqueeze(0)) - output_q = pybuda.run_inference() + output_q = forge.run_inference() print(_safe_read(output_q)) # @@ -451,13 +451,13 @@ def test_training_manual_loop_with_cpu_fallback(): config = download_model(BertConfig.from_pretrained, "prajjwal1/bert-tiny") model = BertForMaskedLM(config) - tt0 = pybuda.TTDevice("tt0", module=pybuda.PyTorchModule("bert", model), optimizer=pybuda.optimizers.SGD(learning_rate=0.1, device_params=True)) - tt0.place_loss_module(pybuda.PyTorchModule("CEL", torch.nn.CrossEntropyLoss())) + tt0 = forge.TTDevice("tt0", module=forge.PyTorchModule("bert", model), optimizer=forge.optimizers.SGD(learning_rate=0.1, device_params=True)) + tt0.place_loss_module(forge.PyTorchModule("CEL", torch.nn.CrossEntropyLoss())) sample_inputs = (torch.randint(config.vocab_size, (1,128)) ,) sample_targets = (torch.rand(1, config.vocab_size) ,) - checkpoint_q = pybuda.initialize_pipeline( + checkpoint_q = forge.initialize_pipeline( training=True, sample_inputs=sample_inputs, sample_targets=sample_targets) @@ -467,10 +467,10 @@ def test_training_manual_loop_with_cpu_fallback(): for acc_step in range(2): tt0.push_to_inputs(torch.randint(config.vocab_size, (1,128))) tt0.push_to_target_inputs(torch.rand(1, config.vocab_size).long()) - pybuda.run_forward(input_count = 1) - pybuda.run_backward(input_count = 1, zero_grad = (acc_step == 0)) + forge.run_forward(input_count = 1) + forge.run_backward(input_count = 1, zero_grad = (acc_step == 0)) - pybuda.run_optimizer(checkpoint=True) + forge.run_optimizer(checkpoint=True) # Run training through run_training without placing on device # Run training by placing on device first @@ -484,15 +484,15 @@ def test_training_manual_loop_with_cpu_fallback(): # def test_training_manual_loop(): - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule("stage0"), optimizer=pybuda.optimizers.SGD(learning_rate=0.1, device_params=True)) - cpu1 = pybuda.CPUDevice("cpu1", module=pybuda.PyTorchModule("stage1", PyTorchTestModuleOneOut()), + tt0 = forge.TTDevice("tt0", module=ForgeTestModule("stage0"), optimizer=forge.optimizers.SGD(learning_rate=0.1, device_params=True)) + cpu1 = forge.CPUDevice("cpu1", module=forge.PyTorchModule("stage1", PyTorchTestModuleOneOut()), optimizer_f = lambda m: torch.optim.SGD(m.parameters(), lr=0.5)) - cpu1.place_loss_module(pybuda.PyTorchModule("l1loss", torch.nn.L1Loss())) + cpu1.place_loss_module(forge.PyTorchModule("l1loss", torch.nn.L1Loss())) # Compile & initialize the pipeline for training, with given shapes input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) - checkpoint_q = pybuda.initialize_pipeline( + checkpoint_q = forge.initialize_pipeline( training=True, sample_inputs=(input1, input2), sample_targets=(torch.rand(4, 32, 32),)) @@ -503,10 +503,10 @@ def test_training_manual_loop(): tt0.push_to_inputs((input1, input2)) cpu1.push_to_target_inputs(torch.rand(4, 32, 32)) - pybuda.run_forward(input_count = 1) - pybuda.run_backward(input_count = 1, zero_grad = (acc_step == 0)) + forge.run_forward(input_count = 1) + forge.run_backward(input_count = 1, zero_grad = (acc_step == 0)) - 
pybuda.run_optimizer(checkpoint=True) + forge.run_optimizer(checkpoint=True) print("Checkpoint: ", _safe_read(checkpoint_q)) @@ -517,16 +517,16 @@ def test_training_manual_loop_no_opt(): #### Skip the test on golden. It should work, need to debug why it doesn't. import os - if "PYBUDA_DEVMODE" in os.environ: + if "FORGE_DEVMODE" in os.environ: pytest.skip() #### - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule("stage0")) - cpu1 = pybuda.CPUDevice("cpu1", module=pybuda.PyTorchModule("stage1", PyTorchTestModuleOneOut())) - cpu1.place_loss_module(pybuda.PyTorchModule("l1loss", torch.nn.L1Loss())) + tt0 = forge.TTDevice("tt0", module=ForgeTestModule("stage0")) + cpu1 = forge.CPUDevice("cpu1", module=forge.PyTorchModule("stage1", PyTorchTestModuleOneOut())) + cpu1.place_loss_module(forge.PyTorchModule("l1loss", torch.nn.L1Loss())) # Compile & initialize the pipeline for training, with given shapes - pybuda.initialize_pipeline( + forge.initialize_pipeline( training=True, sample_inputs=(torch.rand(4, 32, 32), torch.rand(4, 32, 32)), sample_targets=(torch.rand(4, 32, 32),)) @@ -542,10 +542,10 @@ def test_training_manual_loop_no_opt(): cpu1.push_to_target_inputs(torch.rand(4, 32, 32)) - pybuda.run_forward(input_count = 1) - pybuda.run_backward(input_count = 1, zero_grad = (acc_step == 0)) + forge.run_forward(input_count = 1) + forge.run_backward(input_count = 1, zero_grad = (acc_step == 0)) - print("Gradients on step ", step, ": ", pybuda.get_parameter_gradients()) + print("Gradients on step ", step, ": ", forge.get_parameter_gradients()) # # Run training and upload new weights from host @@ -554,16 +554,16 @@ def test_training_weight_update_on_host(): #### Skip the test on golden. It should work, need to debug why it doesn't. import os - if "PYBUDA_DEVMODE" in os.environ: + if "FORGE_DEVMODE" in os.environ: pytest.skip() #### - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule("stage0")) - cpu1 = pybuda.CPUDevice("cpu1", module=pybuda.PyTorchModule("stage1", PyTorchTestModuleOneOut())) - cpu1.place_loss_module(pybuda.PyTorchModule("l1loss", torch.nn.L1Loss())) + tt0 = forge.TTDevice("tt0", module=ForgeTestModule("stage0")) + cpu1 = forge.CPUDevice("cpu1", module=forge.PyTorchModule("stage1", PyTorchTestModuleOneOut())) + cpu1.place_loss_module(forge.PyTorchModule("l1loss", torch.nn.L1Loss())) # Compile & initialize the pipeline for training, with given shapes - pybuda.initialize_pipeline(training=True, + forge.initialize_pipeline(training=True, sample_inputs=(torch.rand(4, 32, 32), torch.rand(4, 32, 32)), sample_targets=(torch.rand(4, 32, 32),)) @@ -575,36 +575,36 @@ def test_training_weight_update_on_host(): cpu1.push_to_target_inputs(torch.rand(4, 32, 32)) # Run fwd/bwd to calculate parameter gradients - pybuda.run_forward(input_count = 1) - pybuda.run_backward(input_count = 1, zero_grad = True) + forge.run_forward(input_count = 1) + forge.run_backward(input_count = 1, zero_grad = True) # Retrieve weights and gradients, and use host optimizer to update weights - grads = pybuda.get_parameter_gradients(tt0) - params = pybuda.get_parameter_checkpoint(tt0) + grads = forge.get_parameter_gradients(tt0) + params = forge.get_parameter_checkpoint(tt0) for name in params[0]: params[0][name].value().grad = grads[0][name].value() opt = torch.optim.SGD([p.value() for p in params[0].values()], lr=10.0) opt.step() # Push new weights to the device - pybuda.update_device_parameters(tt0, params) + forge.update_device_parameters(tt0, params) # Run again with new weights - pybuda.run_forward(input_count = 
1) - pybuda.run_backward(input_count = 1, zero_grad = True) + forge.run_forward(input_count = 1) + forge.run_backward(input_count = 1, zero_grad = True) # # Run inference pipeline and provide mp queues for device-to-device data # def test_inference_device_to_device_data(): - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule("stage0")) - cpu1 = pybuda.CPUDevice("cpu1", module=pybuda.PyTorchModule("stage1", PyTorchTestModule())) - cpu2 = pybuda.CPUDevice("cpu2", module=pybuda.PyTorchModule("stage2", PyTorchTestModuleOneOut())) + tt0 = forge.TTDevice("tt0", module=ForgeTestModule("stage0")) + cpu1 = forge.CPUDevice("cpu1", module=forge.PyTorchModule("stage1", PyTorchTestModule())) + cpu2 = forge.CPUDevice("cpu2", module=forge.PyTorchModule("stage2", PyTorchTestModuleOneOut())) # Compile & initialize the pipeline for inference, and provide d2d mp queues to store device-to-device data in for further analysis tt0_output_q = mp_context.Queue() cpu1_output_q = mp_context.Queue() - pybuda.initialize_pipeline(training=False, d2d_fwd_queues=[tt0_output_q, cpu1_output_q], + forge.initialize_pipeline(training=False, d2d_fwd_queues=[tt0_output_q, cpu1_output_q], sample_inputs=(torch.rand(4, 32, 32), torch.rand(4, 32, 32) )) for _ in range(2): @@ -613,7 +613,7 @@ def test_inference_device_to_device_data(): tt0.push_to_inputs((input1, input2)) # Run fwd - pybuda.run_forward(input_count = 1) + forge.run_forward(input_count = 1) # Read d2d queues print(_safe_read(tt0_output_q)) @@ -625,17 +625,17 @@ def test_inference_device_to_device_data(): def test_training_device_to_device_data(): - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule("stage0")) - cpu1 = pybuda.CPUDevice("cpu1", module=pybuda.PyTorchModule("stage1", PyTorchTestModule())) - cpu2 = pybuda.CPUDevice("cpu2", module=pybuda.PyTorchModule("stage2", PyTorchTestModuleOneOut())) - cpu2.place_loss_module(pybuda.PyTorchModule("l1loss", torch.nn.L1Loss())) + tt0 = forge.TTDevice("tt0", module=ForgeTestModule("stage0")) + cpu1 = forge.CPUDevice("cpu1", module=forge.PyTorchModule("stage1", PyTorchTestModule())) + cpu2 = forge.CPUDevice("cpu2", module=forge.PyTorchModule("stage2", PyTorchTestModuleOneOut())) + cpu2.place_loss_module(forge.PyTorchModule("l1loss", torch.nn.L1Loss())) # Compile & initialize the pipeline for inference, and provide d2d mp queues to store device-to-device data in for further analysis tt0_output_q = mp_context.Queue() cpu1_output_q = mp_context.Queue() cpu1_bwd_output_q = mp_context.Queue() cpu2_bwd_output_q = mp_context.Queue() - pybuda.initialize_pipeline( + forge.initialize_pipeline( training=True, d2d_fwd_queues=[tt0_output_q, cpu1_output_q], d2d_bwd_queues=[cpu1_bwd_output_q, cpu2_bwd_output_q], @@ -650,59 +650,59 @@ def test_training_device_to_device_data(): cpu2.push_to_target_inputs(torch.rand(4, 32, 32)) # Run fwd/bwd - pybuda.run_forward() - pybuda.run_backward(zero_grad = True) + forge.run_forward() + forge.run_backward(zero_grad = True) # Read d2d queues print(_safe_read(tt0_output_q)) print(_safe_read(cpu1_output_q)) print(_safe_read(cpu1_bwd_output_q)) print(_safe_read(cpu2_bwd_output_q)) - pybuda.get_parameter_gradients(tt0) + forge.get_parameter_gradients(tt0) # # Override data formats # def test_data_formats_input_override(): - mod = PyBudaTestModule("mod") - tt0 = pybuda.TTDevice("tt0", module=mod) + mod = ForgeTestModule("mod") + tt0 = forge.TTDevice("tt0", module=mod) # Explicitly set data formats for parameters and inputs - mod.weights1.set_data_format(pybuda.DataFormat.Float16) - 
mod.weights2.set_data_format(pybuda.DataFormat.Float16) + mod.weights1.set_data_format(forge.DataFormat.Float16) + mod.weights2.set_data_format(forge.DataFormat.Float16) input1 = torch.rand(4, 32, 32, dtype=torch.float16) input2 = torch.rand(4, 32, 32, dtype=torch.float16) tt0.push_to_inputs((input1, input2)) - pybuda.run_inference() + forge.run_inference() def test_data_formats_fp32_fallback(): # On this device, fall back to Float16 wherever Float32 is used - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule("mod"), fp32_fallback=pybuda.DataFormat.Float16) + tt0 = forge.TTDevice("tt0", module=ForgeTestModule("mod"), fp32_fallback=forge.DataFormat.Float16) # Push Float32, which will be converted to Float16 due to fp32_fallback input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) tt0.push_to_inputs((input1, input2)) - pybuda.run_inference() + forge.run_inference() def test_data_formats_op_override(): - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule("mod")) + tt0 = forge.TTDevice("tt0", module=ForgeTestModule("mod")) # Use API to set manual data format override on an op - pybuda.configure_mixed_precision(name_regex="matmul1", output_df=pybuda.DataFormat.Bfp8_b) + forge.configure_mixed_precision(name_regex="matmul1", output_df=forge.DataFormat.Bfp8_b) input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) tt0.push_to_inputs((input1, input2)) - pybuda.run_inference() + forge.run_inference() -class TorchSchedulerWithWarmupAndDecay(pybuda.torch_schedulers.TorchLearningRateScheduler): +class TorchSchedulerWithWarmupAndDecay(forge.torch_schedulers.TorchLearningRateScheduler): def __init__(self, optimizer): super().__init__(optimizer) @@ -723,7 +723,7 @@ def get_lr(self): def step(self): super().step() - print(f"Pybuda optimizer learning rate updated to {self.optimizer.learning_rate}") + print(f"Forge optimizer learning rate updated to {self.optimizer.learning_rate}") def get_pytorch_scheduler(self, optimizer: torch.optim.Optimizer): if self.torch_scheduler is None: @@ -739,18 +739,18 @@ def get_pytorch_scheduler(self, optimizer: torch.optim.Optimizer): def test_learning_rate_scheduler(): lr = 1 - optimizer = pybuda.optimizers.SGD(learning_rate=lr, device_params=True) + optimizer = forge.optimizers.SGD(learning_rate=lr, device_params=True) scheduler = TestScheduler(optimizer=optimizer) - tt0 = pybuda.TTDevice( + tt0 = forge.TTDevice( "tt0", - module=PyBudaTestModuleOneOut("stage0"), + module=ForgeTestModuleOneOut("stage0"), optimizer=optimizer, scheduler=scheduler ) - cpu1 = pybuda.CPUDevice( + cpu1 = forge.CPUDevice( "cpu1", - module=pybuda.PyTorchModule( + module=forge.PyTorchModule( "stage1", PyTorchTestModuleOneInputAndOneOut() ), @@ -758,19 +758,19 @@ def test_learning_rate_scheduler(): scheduler_f=lambda optimizer: scheduler.get_pytorch_scheduler(optimizer) ) cpu1.place_loss_module( - pybuda.PyTorchModule( + forge.PyTorchModule( "loss", PyTorchLoss() ) ) sequential = True - pybuda.initialize_pipeline(training=True, + forge.initialize_pipeline(training=True, sample_inputs=(torch.rand(4, 32, 32), torch.rand(4, 32, 32)), sample_targets=(torch.rand(4, 32, 32),), _sequential=sequential) for _ in range(100): - pybuda.run_schedulers(sequential) + forge.run_schedulers(sequential) @@ -778,7 +778,7 @@ def test_specific_chip_id(): """ Run inference on a specific chip on a multi-chip system """ - num_devices = len(pybuda.detect_available_devices()) + num_devices = len(forge.detect_available_devices()) if num_devices < 2: pytest.skip("Need at least 2 devices to run chip-id test") 
@@ -787,43 +787,43 @@ def test_specific_chip_id(): input2 = torch.rand(4, 32, 32) # Create a TT device, on last available chip - tt0 = pybuda.TTDevice("tt0", chip_ids=[num_devices-1]) + tt0 = forge.TTDevice("tt0", chip_ids=[num_devices-1]) # Place a module on the device - tt0.place_module(PyBudaTestModule("last_chip")) + tt0.place_module(ForgeTestModule("last_chip")) # Push intputs to the device tt0.push_to_inputs((input1, input2)) # Run pipeline, and read the outputs - output_q = pybuda.run_inference() + output_q = forge.run_inference() output = _safe_read(output_q) print(output) def _run_on_chip(chip_id: int): # Each process needs to have its own temporary dir - pybuda.set_configuration_options(backend_output_dir=f"tt_build/test_out_chip_{chip_id}") + forge.set_configuration_options(backend_output_dir=f"tt_build/test_out_chip_{chip_id}") input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) # Create a TT device, on last available chip - tt0 = pybuda.TTDevice("tt0", chip_ids=[chip_id]) + tt0 = forge.TTDevice("tt0", chip_ids=[chip_id]) # Place a module on the device - tt0.place_module(PyBudaTestModule(f"chip_{chip_id}")) + tt0.place_module(ForgeTestModule(f"chip_{chip_id}")) # Push intputs to the device tt0.push_to_inputs((input1, input2)) # Run pipeline, and read the outputs - output_q = pybuda.run_inference() + output_q = forge.run_inference() output = _safe_read(output_q) print("From chip ", chip_id, ":", output) # Clean up the process so we can end it cleanly - pybuda.shutdown() + forge.shutdown() def test_parallel_chips(): @@ -831,7 +831,7 @@ def test_parallel_chips(): Run different models on multiple chips at the same time """ pytest.skip("Appears to hang now") - num_devices = len(pybuda.detect_available_devices()) + num_devices = len(forge.detect_available_devices()) if num_devices < 2: pytest.skip("Need at least 2 devices to run parallel chip test") @@ -846,22 +846,22 @@ def test_parallel_chips(): p.join() # def test_tti_inference_save_and_load(): -# available_devices = pybuda.detect_available_devices() +# available_devices = forge.detect_available_devices() # if available_devices and available_devices[0] == BackendDevice.Grayskull: -# tt0 = pybuda.TTDevice( +# tt0 = forge.TTDevice( # "tt0", # arch=BackendDevice.Grayskull, # devtype=BackendType.Golden, # ) # else: -# tt0 = pybuda.TTDevice( +# tt0 = forge.TTDevice( # "tt0", # arch=BackendDevice.Wormhole_B0, # devtype=BackendType.Golden, # ) -# module = PyBudaTestModule("test_pybuda_module") +# module = ForgeTestModule("test_forge_module") # tt0.place_module(module) # # Saving to Archive @@ -872,64 +872,64 @@ def test_parallel_chips(): # training=False, # sample_inputs=(input1, input2), # ) -# pybuda_reset() # flush the global state that lingers around for test +# forge_reset() # flush the global state that lingers around for test # # Loading from Archive -# tt1 = pybuda.TTDevice.load_image(img_path="device_images/test_tt0.tti") +# tt1 = forge.TTDevice.load_image(img_path="device_images/test_tt0.tti") # tt1.push_to_inputs((input1, input2)) -# output_q = pybuda.run_inference() +# output_q = forge.run_inference() # output = _safe_read(output_q) @pytest.mark.parametrize("hoist_tms", [True, False]) def test_nop_insertion_api(hoist_tms): - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestQueryKeyModule(f"query_key_module_hoist_tms_{hoist_tms}")) + tt0 = forge.TTDevice("tt0", module=ForgeTestQueryKeyModule(f"query_key_module_hoist_tms_{hoist_tms}")) # Use API to set manual data format override on an op - pybuda.insert_nop("mha_key", 
"mha_as", hoist_tms=hoist_tms) + forge.insert_nop("mha_key", "mha_as", hoist_tms=hoist_tms) microbatch_size, seq_len, hidden_dim = (1, 128, 128) encoder_input = torch.rand(microbatch_size, seq_len, hidden_dim) tt0.push_to_inputs((encoder_input)) - pybuda.run_inference() + forge.run_inference() @pytest.mark.parametrize("hoist_tms", [True, False]) def test_nop_fork_insertion_api(hoist_tms): - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestQueryKeyModule(f"forking_nop_insertion{hoist_tms}")) + tt0 = forge.TTDevice("tt0", module=ForgeTestQueryKeyModule(f"forking_nop_insertion{hoist_tms}")) # Use API to set manual data format override on an op - pybuda.insert_nop("encoder_input", ["mha_key", "mha_query"], hoist_tms=hoist_tms) + forge.insert_nop("encoder_input", ["mha_key", "mha_query"], hoist_tms=hoist_tms) microbatch_size, seq_len, hidden_dim = (1, 128, 128) encoder_input = torch.rand(microbatch_size, seq_len, hidden_dim) tt0.push_to_inputs((encoder_input)) - pybuda.run_inference() + forge.run_inference() @pytest.mark.parametrize("hoist_tms", [True, False]) def test_nop_daily_chain_insertion_api(hoist_tms): - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestForkWithThreeUsers(f"daisy_chain_nop_insertion{hoist_tms}")) + tt0 = forge.TTDevice("tt0", module=ForgeTestForkWithThreeUsers(f"daisy_chain_nop_insertion{hoist_tms}")) # Use API to set manual data format override on an op - pybuda.insert_nop("encoder_input", ["mm_a", "mm_b", "mm_c"], hoist_tms=hoist_tms) - pybuda.insert_nop("buffer_0_encoder_input_mm_a", ["mm_b", "mm_c"], hoist_tms=hoist_tms) - pybuda.insert_nop("buffer_0_buffer_0_encoder_input_mm_a_mm_b", ["mm_c"], hoist_tms=hoist_tms) + forge.insert_nop("encoder_input", ["mm_a", "mm_b", "mm_c"], hoist_tms=hoist_tms) + forge.insert_nop("buffer_0_encoder_input_mm_a", ["mm_b", "mm_c"], hoist_tms=hoist_tms) + forge.insert_nop("buffer_0_buffer_0_encoder_input_mm_a_mm_b", ["mm_c"], hoist_tms=hoist_tms) microbatch_size, seq_len, hidden_dim = (1, 128, 128) encoder_input = torch.rand(microbatch_size, seq_len, hidden_dim) tt0.push_to_inputs((encoder_input)) - pybuda.run_inference() + forge.run_inference() def test_dram_channel_override(): - tt0 = pybuda.TTDevice("tt0", module=PyBudaTestModule(f"dram_channel_override")) + tt0 = forge.TTDevice("tt0", module=ForgeTestModule(f"dram_channel_override")) # Use API to set manual data format override on an op input1 = torch.rand(4, 32, 32) input2 = torch.rand(4, 32, 32) - pybuda.config.override_dram_queue_placement("e2e_matmul1_0", channel=0) - pybuda.config.set_epoch_break("matmul2") + forge.config.override_dram_queue_placement("e2e_matmul1_0", channel=0) + forge.config.set_epoch_break("matmul2") tt0.push_to_inputs((input1, input2)) - pybuda.run_inference() + forge.run_inference() @pytest.mark.parametrize("loss", ["l1", "mse"]) def test_loss_module_on_ttdevice(loss): @@ -944,21 +944,21 @@ def forward(self, src): return output model = Lin(1) - tt0 = pybuda.TTDevice( + tt0 = forge.TTDevice( "tt0", - module=pybuda.PyTorchModule("lin", model), - optimizer=pybuda.optimizers.SGD(learning_rate=0.1, device_params=True) + module=forge.PyTorchModule("lin", model), + optimizer=forge.optimizers.SGD(learning_rate=0.1, device_params=True) ) if loss == "mse": - tt0.place_loss_module(pybuda.PyTorchModule("mse_loss", nn.MSELoss())) + tt0.place_loss_module(forge.PyTorchModule("mse_loss", nn.MSELoss())) else: - tt0.place_loss_module(pybuda.PyTorchModule("l1_loss", nn.L1Loss())) + tt0.place_loss_module(forge.PyTorchModule("l1_loss", nn.L1Loss())) inputs = torch.rand(1, 1) targets 
= torch.rand(1, 1) # Initialize pipeline - checkpoint_q = pybuda.initialize_pipeline( + checkpoint_q = forge.initialize_pipeline( training=True, sample_inputs=(inputs,), sample_targets=(targets,) @@ -966,7 +966,7 @@ def forward(self, src): tt0.push_to_inputs(inputs) tt0.push_to_target_inputs(targets) - pybuda.run_forward(input_count=1) - pybuda.run_backward(input_count=1, zero_grad=True) - pybuda.run_optimizer(checkpoint=True) + forge.run_forward(input_count=1) + forge.run_backward(input_count=1, zero_grad=True) + forge.run_optimizer(checkpoint=True) diff --git a/pybuda/test/tvm/clip_guided_diffusion/CLIP/__init__.py b/forge/test/tvm/clip_guided_diffusion/CLIP/__init__.py similarity index 100% rename from pybuda/test/tvm/clip_guided_diffusion/CLIP/__init__.py rename to forge/test/tvm/clip_guided_diffusion/CLIP/__init__.py diff --git a/pybuda/test/tvm/clip_guided_diffusion/CLIP/test_CLIP.py b/forge/test/tvm/clip_guided_diffusion/CLIP/test_CLIP.py similarity index 87% rename from pybuda/test/tvm/clip_guided_diffusion/CLIP/test_CLIP.py rename to forge/test/tvm/clip_guided_diffusion/CLIP/test_CLIP.py index 7d83b43e3..6ba9ab341 100644 --- a/pybuda/test/tvm/clip_guided_diffusion/CLIP/test_CLIP.py +++ b/forge/test/tvm/clip_guided_diffusion/CLIP/test_CLIP.py @@ -1,34 +1,34 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module import pytest import torch import torch.nn as nn -import pybuda -import pybuda.op +import forge +import forge.op -from pybuda import ( +from forge import ( TTDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, PyTorchModule, CompilerConfig, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth -from pybuda import TTDevice, VerifyConfig, pybuda_compile +from forge import TTDevice, VerifyConfig, forge_compile # from .pytorch_unet import UNetModel from test.legacy_tests.clip_guided_diffusion.clip.clip_torch import CLIP, VisionTransformer, create_CLIP -from pybuda.verify import verify_module_pipeline -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.verify import verify_module_pipeline +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config def test_tvm_CLIP(test_kind, test_device): diff --git a/pybuda/test/tvm/clip_guided_diffusion/CLIP/test_CLIP_units.py b/forge/test/tvm/clip_guided_diffusion/CLIP/test_CLIP_units.py similarity index 88% rename from pybuda/test/tvm/clip_guided_diffusion/CLIP/test_CLIP_units.py rename to forge/test/tvm/clip_guided_diffusion/CLIP/test_CLIP_units.py index 8197364de..90d144c8a 100644 --- a/pybuda/test/tvm/clip_guided_diffusion/CLIP/test_CLIP_units.py +++ b/forge/test/tvm/clip_guided_diffusion/CLIP/test_CLIP_units.py @@ -1,29 +1,29 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module import pytest import torch import torch.nn as nn from loguru import logger -import pybuda +import forge -from pybuda import ( +from forge import ( TTDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, PyTorchModule, CompilerConfig, ) -from pybuda.config import CompileDepth -from pybuda import TTDevice, VerifyConfig, pybuda_compile, Tensor -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config 
import CompileDepth +from forge import TTDevice, VerifyConfig, forge_compile, Tensor +from test.tvm.utils import evaluate_framework_vs_forge from test.legacy_tests.clip_guided_diffusion.clip.clip_torch import torch_mha, CLIP, QuickGELU, ResidualAttentionBlock, CLIP_N_HEAD, Transformer, VisionTransformer, CLIP_D_MODEL, QuickGELU -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config @pytest.mark.parametrize("use_quick_gelu", (True, False), ids=["quick_gelu", "no_quick_gelu"]) @@ -94,11 +94,11 @@ def forward(self, x1, ): model = TextEmbedding() input_text = torch.rand((16, 1)) mod = PyTorchModule("text_embedding", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "text_embedding", input_text, @@ -109,7 +109,7 @@ def forward(self, x1, ): ), verify_cfg=VerifyConfig(intermediates=True), ) - evaluate_framework_vs_pybuda(model, ret, input_text) + evaluate_framework_vs_forge(model, ret, input_text) def test_tvm_clip_quick_gelu(test_kind, test_device): @@ -157,7 +157,7 @@ def forward(self, x1, ): def test_tvm_clip_arange(training=False, recompute=False): - pytest.xfail() # generating tensors is not supported in pybuda/buda + pytest.xfail() # generating tensors is not supported in forge/buda if not training and recompute: pytest.skip() @@ -173,11 +173,11 @@ def forward(self, x1, ): model = Arange() act1 = torch.rand((16, 77, 512)) mod = PyTorchModule("arange", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "arange", act1, @@ -188,7 +188,7 @@ def forward(self, x1, ): ), verify_cfg=VerifyConfig(intermediates=True), ) - evaluate_framework_vs_pybuda(model, ret, act1) + evaluate_framework_vs_forge(model, ret, act1) # @pytest.mark.parametrize("num_blocks", (2, 12), ids=["two", "twelve"]) @@ -212,7 +212,7 @@ def test_tvm_clip_multi_resblocks(test_kind, test_device, num_blocks): def test_tvm_clip_rand(training=False, recompute=False): - pytest.xfail() # we do not support generating tensors in pybuda/buda + pytest.xfail() # we do not support generating tensors in forge/buda if not training and recompute: pytest.skip() class Rand(nn.Module): @@ -225,11 +225,11 @@ def forward(self, ): model = Rand() mod = PyTorchModule("rand", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "rand", compiler_cfg=CompilerConfig( @@ -239,7 +239,7 @@ def forward(self, ): ), verify_cfg=VerifyConfig(intermediates=True), ) - evaluate_framework_vs_pybuda(model, ret, ) + evaluate_framework_vs_forge(model, ret, ) def test_tvm_permute(test_kind, test_device): diff --git a/pybuda/test/tvm/clip_guided_diffusion/UNet/__init__.py b/forge/test/tvm/clip_guided_diffusion/UNet/__init__.py similarity index 100% rename from pybuda/test/tvm/clip_guided_diffusion/UNet/__init__.py rename to 
forge/test/tvm/clip_guided_diffusion/UNet/__init__.py diff --git a/pybuda/test/tvm/clip_guided_diffusion/UNet/test_UNet.py b/forge/test/tvm/clip_guided_diffusion/UNet/test_UNet.py similarity index 88% rename from pybuda/test/tvm/clip_guided_diffusion/UNet/test_UNet.py rename to forge/test/tvm/clip_guided_diffusion/UNet/test_UNet.py index 39b245808..beaa801c1 100644 --- a/pybuda/test/tvm/clip_guided_diffusion/UNet/test_UNet.py +++ b/forge/test/tvm/clip_guided_diffusion/UNet/test_UNet.py @@ -6,20 +6,20 @@ import torch import torch.nn as nn -import pybuda +import forge -from pybuda import ( +from forge import ( TTDevice, CPUDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, PyTorchModule, CompilerConfig, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.verify import verify_module_pipeline +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge +from forge.verify import verify_module_pipeline from test.legacy_tests.clip_guided_diffusion.unet.pytorch_unet import UNetModel, create_UNet, timestep_embedding @@ -56,7 +56,7 @@ def test_tvm_unet(depth, training=False, recompute=False): model = UNetModel(**config) mod = PyTorchModule("UNet", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) @@ -64,7 +64,7 @@ def test_tvm_unet(depth, training=False, recompute=False): act1 = torch.rand(*shape) timesteps = torch.randint(0, 1, size=(1,)).float() - ret = pybuda_compile( + ret = forge_compile( tt0, "UNet", act1, @@ -75,7 +75,7 @@ def test_tvm_unet(depth, training=False, recompute=False): compile_depth=depth), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1, timesteps) + evaluate_framework_vs_forge(model, ret, act1, timesteps) def test_tvm_unet_time_embed(training=False, recompute=False): @@ -108,11 +108,11 @@ def forward(self, time_embedded_steps): model = TimeEmb(UNet_config['model_channels'], UNet_config['model_channels'] * 4) mod = PyTorchModule('time_emb',model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "time_emb", embedded_res, @@ -122,7 +122,7 @@ def forward(self, time_embedded_steps): compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=verify_cfg,) - evaluate_framework_vs_pybuda(model, ret, embedded_res) + evaluate_framework_vs_forge(model, ret, embedded_res) def test_tvm_unet_emb_precomp(training=False, recompute=False): @@ -140,11 +140,11 @@ def test_tvm_unet_emb_precomp(training=False, recompute=False): act1, embedded_res = UNet_embeddings(timesteps, act1) UNet_mod = PyTorchModule('UNet_model',UNet_no_emb) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(UNet_mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "UNet", act1, @@ -155,7 +155,7 @@ def test_tvm_unet_emb_precomp(training=False, recompute=False): compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER, 
compile_tvm_to_python=False), verify_cfg=verify_cfg,) - evaluate_framework_vs_pybuda(UNet_no_emb, ret, act1, embedded_res) + evaluate_framework_vs_forge(UNet_no_emb, ret, act1, embedded_res) def test_tvm_splitted_unet(training=False, recompute=False): diff --git a/pybuda/test/tvm/clip_guided_diffusion/UNet/test_UNet_blocks.py b/forge/test/tvm/clip_guided_diffusion/UNet/test_UNet_blocks.py similarity index 86% rename from pybuda/test/tvm/clip_guided_diffusion/UNet/test_UNet_blocks.py rename to forge/test/tvm/clip_guided_diffusion/UNet/test_UNet_blocks.py index a093a6c6f..fedad66e4 100644 --- a/pybuda/test/tvm/clip_guided_diffusion/UNet/test_UNet_blocks.py +++ b/forge/test/tvm/clip_guided_diffusion/UNet/test_UNet_blocks.py @@ -6,18 +6,18 @@ import torch import torch.nn as nn -import pybuda -from pybuda import ( +import forge +from forge import ( TTDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, PyTorchModule, CompilerConfig, CompileDepth, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge from test.legacy_tests.clip_guided_diffusion.unet.pytorch_unet import TimestepEmbedSequential, AttentionBlock, ResBlock, QKVAttentionLegacy, timestep_embedding from test.legacy_tests.clip_guided_diffusion.unet.test_attention_block import init_attention_block @@ -91,11 +91,11 @@ def test_tvm_unet_resblock_attention_block_upsample_resblock(training=False, rec act1 = torch.randn(1, 1536, 32, 32) torch_emb = torch.randn(1, 1024) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "attetion_block_upsample", act1, @@ -106,7 +106,7 @@ def test_tvm_unet_resblock_attention_block_upsample_resblock(training=False, rec compile_depth=CompileDepth.BALANCER_PASS), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1, torch_emb) + evaluate_framework_vs_forge(model, ret, act1, torch_emb) # TODO: WHAT IS GOING ON? 
@@ -123,21 +123,21 @@ def forward(self, x1, target_output=224): model = AdaptiveAveragePool() mod = PyTorchModule("adaptive_ave_pool", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) # TODO: add another test with a simpler shape shape = (1, 3, 239, 239) act1 = torch.rand(*shape) - ret = pybuda_compile(tt0, + ret = forge_compile(tt0, "adaptive_ave_pool", act1, compiler_cfg=CompilerConfig(enable_training=training, enable_recompute=recompute, compile_depth=CompileDepth.POST_INITIAL_GRAPH_PASS), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1) + evaluate_framework_vs_forge(model, ret, act1) def test_tvm_unet_resblock_downsample_resblock(training=False, recompute=False): @@ -162,11 +162,11 @@ def test_tvm_unet_resblock_downsample_resblock(training=False, recompute=False): act1 = torch.randn(1, 512, 64, 64) torch_emb = torch.randn(1, 1024) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "resblockdownsampleresblock", act1, @@ -177,7 +177,7 @@ def test_tvm_unet_resblock_downsample_resblock(training=False, recompute=False): compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True)) - evaluate_framework_vs_pybuda(model, ret, act1, torch_emb) + evaluate_framework_vs_forge(model, ret, act1, torch_emb) def test_tvm_unet_resblock_upsample_resblock(training=False, recompute=False): @@ -202,11 +202,11 @@ def test_tvm_unet_resblock_upsample_resblock(training=False, recompute=False): act1 = torch.randn(1, 768, 64, 64) torch_emb = torch.randn(1, 1024) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "resblockupsampleresblock", act1, @@ -217,7 +217,7 @@ def test_tvm_unet_resblock_upsample_resblock(training=False, recompute=False): compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True)) - evaluate_framework_vs_pybuda(model, ret, act1, torch_emb) + evaluate_framework_vs_forge(model, ret, act1, torch_emb) def test_tvm_unet_resblock_attention_block(training=False, recompute=False): @@ -243,11 +243,11 @@ def test_tvm_unet_resblock_attention_block(training=False, recompute=False): torch_emb = torch.randn(1, 1024) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "attetion_block", act1, @@ -258,7 +258,7 @@ def test_tvm_unet_resblock_attention_block(training=False, recompute=False): compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1, torch_emb) + evaluate_framework_vs_forge(model, ret, act1, torch_emb) @@ -274,11 +274,11 @@ def 
test_tvm_unet_resblock_attention_block_resblock(training=False, recompute=Fa act1 = torch.randn(1, 1024, 8, 8) torch_emb = torch.randn(1, 1024) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "attetion_block_resblock", act1, @@ -289,7 +289,7 @@ def test_tvm_unet_resblock_attention_block_resblock(training=False, recompute=Fa compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1, torch_emb) + evaluate_framework_vs_forge(model, ret, act1, torch_emb) def test_tvm_unet_resblock_attention_block_downsample_resblock(training=False, recompute=False): @@ -304,11 +304,11 @@ def test_tvm_unet_resblock_attention_block_downsample_resblock(training=False, r act1 = torch.randn(1, 512, 64, 64) torch_emb = torch.randn(1, 1024) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "attetion_block_downsample", act1, @@ -319,7 +319,7 @@ def test_tvm_unet_resblock_attention_block_downsample_resblock(training=False, r compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1, torch_emb) + evaluate_framework_vs_forge(model, ret, act1, torch_emb) @pytest.mark.parametrize("shape", ((1, 256, 256, 256), (1, 256, 128, 128)),) @@ -348,18 +348,18 @@ def forward(self, x, ): model = GroupNorm(num_groups, num_channels) mod = PyTorchModule("group_norm", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile(tt0, "groupnorm", act1, + ret = forge_compile(tt0, "groupnorm", act1, compiler_cfg=CompilerConfig( enable_training=training, enable_recompute=recompute, compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1) + evaluate_framework_vs_forge(model, ret, act1) def test_tvm_unet_upsample(training=False, recompute=False): @@ -376,7 +376,7 @@ def forward(self, x1, ): model = Upsample() mod = PyTorchModule("upsample", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) @@ -387,7 +387,7 @@ def forward(self, x1, ): # [1 x 512 x 64 x 64] # [1 x 256 x 128 x 128] act1 = torch.rand(*shape) - ret = pybuda_compile( + ret = forge_compile( tt0, "upsample", act1, @@ -397,7 +397,7 @@ def forward(self, x1, ): compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1) + evaluate_framework_vs_forge(model, ret, act1) def test_tvm_avg_pool(training=False, recompute=False): @@ -414,7 +414,7 @@ def forward(self, x1): model = AveragePool() mod = PyTorchModule("ave_pool", 
model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) @@ -423,7 +423,7 @@ def forward(self, x1): kernel_size=2 act1 = torch.rand(*shape) - ret = pybuda_compile( + ret = forge_compile( tt0, "ave_pool", act1, @@ -433,7 +433,7 @@ def forward(self, x1): compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1) + evaluate_framework_vs_forge(model, ret, act1) def test_tvm_qkv_attention(training=False, recompute=False): @@ -448,11 +448,11 @@ def test_tvm_qkv_attention(training=False, recompute=False): mod = PyTorchModule("qkv_attn_reshape", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "qkv_attn_reshape", acts, @@ -464,7 +464,7 @@ def test_tvm_qkv_attention(training=False, recompute=False): verify_cfg=VerifyConfig(intermediates=True), ) - evaluate_framework_vs_pybuda(model, ret, acts) + evaluate_framework_vs_forge(model, ret, acts) def test_tvm_attention_block(training=False, recompute=False): @@ -483,11 +483,11 @@ def test_tvm_attention_block(training=False, recompute=False): model = init_attention_block(channels) mod = PyTorchModule("attn_block", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "attn_block", act1, @@ -498,7 +498,7 @@ def test_tvm_attention_block(training=False, recompute=False): ), verify_cfg=VerifyConfig(intermediates=True), ) - evaluate_framework_vs_pybuda(model, ret, act1) + evaluate_framework_vs_forge(model, ret, act1) def test_tvm_timestep_embed_sequential(training=False, recompute=False): @@ -520,11 +520,11 @@ def test_tvm_timestep_embed_sequential(training=False, recompute=False): torch_emb = torch.randn(1, 1024) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "TimestepEmbedSequential", act1, @@ -535,7 +535,7 @@ def test_tvm_timestep_embed_sequential(training=False, recompute=False): compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True)) - evaluate_framework_vs_pybuda(model, ret, act1, torch_emb) + evaluate_framework_vs_forge(model, ret, act1, torch_emb) def test_tvm_timestep_embedding(training=False, recompute=False): @@ -557,13 +557,13 @@ def forward(self, x1): model = TimestepEmbedding() mod = PyTorchModule("TimestepEmbedding", model) - sgd_optimizer = pybuda.optimizers.SGD(learning_rate=0.5, device_params=True) + sgd_optimizer = forge.optimizers.SGD(learning_rate=0.5, device_params=True) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) act1 = torch.randint(0, 1, size=(1,)) - ret = pybuda_compile( + ret = 
forge_compile( tt0, "TimestepEmbedding", act1, @@ -573,4 +573,4 @@ def forward(self, x1): compile_depth=CompileDepth.BUDA_GRAPH_PRE_PLACER), verify_cfg=VerifyConfig(intermediates=True),) - evaluate_framework_vs_pybuda(model, ret, act1) \ No newline at end of file + evaluate_framework_vs_forge(model, ret, act1) \ No newline at end of file diff --git a/pybuda/test/tvm/clip_guided_diffusion/UNet/test_resblock.py b/forge/test/tvm/clip_guided_diffusion/UNet/test_resblock.py similarity index 91% rename from pybuda/test/tvm/clip_guided_diffusion/UNet/test_resblock.py rename to forge/test/tvm/clip_guided_diffusion/UNet/test_resblock.py index 99319c7dc..807a1c3e3 100644 --- a/pybuda/test/tvm/clip_guided_diffusion/UNet/test_resblock.py +++ b/forge/test/tvm/clip_guided_diffusion/UNet/test_resblock.py @@ -2,30 +2,30 @@ # SPDX-License-Identifier: Apache-2.0 from time import time -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module import pytest import torch import torch.nn as nn from loguru import logger -import pybuda -from pybuda.config import CompileDepth +import forge +from forge.config import CompileDepth -from pybuda import ( +from forge import ( TTDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, PyTorchModule, CompilerConfig, - pybuda_compile + forge_compile ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge from test.legacy_tests.clip_guided_diffusion.unet.pytorch_unet import ResBlock -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config def init_resblock( ch, out_channels, upsample=False, downsample=False, use_layer_norm=False, time_embed_dim = 1024 diff --git a/pybuda/test/tvm/cnn/mxnet/test_alexnet.py b/forge/test/tvm/cnn/mxnet/test_alexnet.py similarity index 83% rename from pybuda/test/tvm/cnn/mxnet/test_alexnet.py rename to forge/test/tvm/cnn/mxnet/test_alexnet.py index 6ab216f62..533d17b41 100644 --- a/pybuda/test/tvm/cnn/mxnet/test_alexnet.py +++ b/forge/test/tvm/cnn/mxnet/test_alexnet.py @@ -6,16 +6,16 @@ from mxnet.gluon.model_zoo.vision import get_model import pytest import torch -from pybuda import ( +from forge import ( MXNetModule, BackendType, VerifyConfig, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/mxnet/test_densenet.py b/forge/test/tvm/cnn/mxnet/test_densenet.py similarity index 83% rename from pybuda/test/tvm/cnn/mxnet/test_densenet.py rename to forge/test/tvm/cnn/mxnet/test_densenet.py index 5ce1424eb..1edf9c5fb 100644 --- a/pybuda/test/tvm/cnn/mxnet/test_densenet.py +++ b/forge/test/tvm/cnn/mxnet/test_densenet.py @@ -6,16 +6,16 @@ from mxnet.gluon.model_zoo.vision import get_model import pytest import torch -from pybuda import ( +from forge import ( MXNetModule, BackendType, VerifyConfig, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config 
+from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/mxnet/test_mobilenet.py b/forge/test/tvm/cnn/mxnet/test_mobilenet.py similarity index 89% rename from pybuda/test/tvm/cnn/mxnet/test_mobilenet.py rename to forge/test/tvm/cnn/mxnet/test_mobilenet.py index 422543bb2..fa0df47dc 100644 --- a/pybuda/test/tvm/cnn/mxnet/test_mobilenet.py +++ b/forge/test/tvm/cnn/mxnet/test_mobilenet.py @@ -6,16 +6,16 @@ from mxnet.gluon.model_zoo.vision import get_model import pytest import torch -from pybuda import ( +from forge import ( MXNetModule, BackendType, VerifyConfig, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/mxnet/test_resnet.py b/forge/test/tvm/cnn/mxnet/test_resnet.py similarity index 83% rename from pybuda/test/tvm/cnn/mxnet/test_resnet.py rename to forge/test/tvm/cnn/mxnet/test_resnet.py index 69335f1e0..3fbcb43ee 100644 --- a/pybuda/test/tvm/cnn/mxnet/test_resnet.py +++ b/forge/test/tvm/cnn/mxnet/test_resnet.py @@ -6,16 +6,16 @@ from mxnet.gluon.model_zoo.vision import get_model import pytest import torch -from pybuda import ( +from forge import ( MXNetModule, BackendType, VerifyConfig, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/mxnet/test_squeezenet.py b/forge/test/tvm/cnn/mxnet/test_squeezenet.py similarity index 83% rename from pybuda/test/tvm/cnn/mxnet/test_squeezenet.py rename to forge/test/tvm/cnn/mxnet/test_squeezenet.py index cfbdd3f6f..fad2e697c 100644 --- a/pybuda/test/tvm/cnn/mxnet/test_squeezenet.py +++ b/forge/test/tvm/cnn/mxnet/test_squeezenet.py @@ -6,16 +6,16 @@ from mxnet.gluon.model_zoo.vision import get_model import pytest import torch -from pybuda import ( +from forge import ( MXNetModule, BackendType, VerifyConfig, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/mxnet/test_vgg.py b/forge/test/tvm/cnn/mxnet/test_vgg.py similarity index 83% rename from pybuda/test/tvm/cnn/mxnet/test_vgg.py rename to forge/test/tvm/cnn/mxnet/test_vgg.py index 31aae2105..64cae3685 100644 --- a/pybuda/test/tvm/cnn/mxnet/test_vgg.py +++ b/forge/test/tvm/cnn/mxnet/test_vgg.py @@ -6,16 +6,16 @@ from mxnet.gluon.model_zoo.vision import get_model import pytest import torch -from pybuda import ( +from forge import ( MXNetModule, BackendType, VerifyConfig, ) -from pybuda.config import 
CompileDepth +from forge.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/onnx/test_fcn.py b/forge/test/tvm/cnn/onnx/test_fcn.py similarity index 89% rename from pybuda/test/tvm/cnn/onnx/test_fcn.py rename to forge/test/tvm/cnn/onnx/test_fcn.py index 8afcaf39b..b4816800f 100644 --- a/pybuda/test/tvm/cnn/onnx/test_fcn.py +++ b/forge/test/tvm/cnn/onnx/test_fcn.py @@ -7,14 +7,14 @@ import onnx import pytest -from pybuda import ( +from forge import ( OnnxModule, BackendType, VerifyConfig, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config def test_tvm_fcn_onnx(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/onnx/test_lstm_genom.py b/forge/test/tvm/cnn/onnx/test_lstm_genom.py similarity index 72% rename from pybuda/test/tvm/cnn/onnx/test_lstm_genom.py rename to forge/test/tvm/cnn/onnx/test_lstm_genom.py index 7dded6904..7405d3e47 100644 --- a/pybuda/test/tvm/cnn/onnx/test_lstm_genom.py +++ b/forge/test/tvm/cnn/onnx/test_lstm_genom.py @@ -6,11 +6,11 @@ import os import onnx import tensorflow as tf -import pybuda -from pybuda.verify.backend import verify_module -from pybuda import VerifyConfig -from pybuda._C.backend_api import BackendType, BackendDevice -from pybuda.verify.config import TestKind +import forge +from forge.verify.backend import verify_module +from forge import VerifyConfig +from forge._C.backend_api import BackendType, BackendDevice +from forge.verify.config import TestKind def test_lstm_genom_onnx(test_device): load_path = "third_party/confidential_customer_models/model_2/onnx/saved/lstm_genom/lstm-genom-model.onnx" @@ -19,7 +19,7 @@ def test_lstm_genom_onnx(test_device): # Run inference on Tenstorrent device inputs = tf.random.uniform(shape=[1, 10, 4]) verify_module( - pybuda.OnnxModule("onnx_lstm", model, load_path), + forge.OnnxModule("onnx_lstm", model, load_path), input_shapes=(inputs.shape,), inputs=[(inputs,)], verify_cfg=VerifyConfig( diff --git a/pybuda/test/tvm/cnn/onnx/test_mnist.py b/forge/test/tvm/cnn/onnx/test_mnist.py similarity index 86% rename from pybuda/test/tvm/cnn/onnx/test_mnist.py rename to forge/test/tvm/cnn/onnx/test_mnist.py index b187e6353..9e57f9a59 100644 --- a/pybuda/test/tvm/cnn/onnx/test_mnist.py +++ b/forge/test/tvm/cnn/onnx/test_mnist.py @@ -5,21 +5,21 @@ import onnxruntime as ort import pytest import torch -from pybuda import ( +from forge import ( OnnxModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, TFGraphDefModule, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind import urllib import os diff --git a/pybuda/test/tvm/cnn/onnx/test_resnet.py 
b/forge/test/tvm/cnn/onnx/test_resnet.py similarity index 86% rename from pybuda/test/tvm/cnn/onnx/test_resnet.py rename to forge/test/tvm/cnn/onnx/test_resnet.py index b0d5cad3f..4a7750252 100644 --- a/pybuda/test/tvm/cnn/onnx/test_resnet.py +++ b/forge/test/tvm/cnn/onnx/test_resnet.py @@ -9,13 +9,13 @@ import numpy as np import onnxruntime -from pybuda import ( +from forge import ( OnnxModule, VerifyConfig, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import _get_global_compiler_config def test_resnet_onnx(test_kind, test_device): @@ -38,7 +38,7 @@ def test_resnet_onnx(test_kind, test_device): # LOAD ONNX model onnx_model = onnx.load(save_path) onnx.checker.check_model(onnx_model) - pybuda_onnx_model = OnnxModule( + forge_onnx_model = OnnxModule( "resnet50_v1_7_onnx", onnx_model, save_path, @@ -53,7 +53,7 @@ def test_resnet_onnx(test_kind, test_device): # Compile and verify verify_module( - pybuda_onnx_model, + forge_onnx_model, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/256x192_res50_lr1e-3_1x.yaml b/forge/test/tvm/cnn/pytorch/alphapose/256x192_res50_lr1e-3_1x.yaml similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/256x192_res50_lr1e-3_1x.yaml rename to forge/test/tvm/cnn/pytorch/alphapose/256x192_res50_lr1e-3_1x.yaml diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/__init__.py b/forge/test/tvm/cnn/pytorch/alphapose/models/__init__.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/__init__.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/__init__.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/builder.py b/forge/test/tvm/cnn/pytorch/alphapose/models/builder.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/builder.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/builder.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/fastpose.py b/forge/test/tvm/cnn/pytorch/alphapose/models/fastpose.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/fastpose.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/fastpose.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc.py b/forge/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc_dense.py b/forge/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc_dense.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc_dense.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/fastpose_duc_dense.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/DUC.py b/forge/test/tvm/cnn/pytorch/alphapose/models/layers/DUC.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/DUC.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/layers/DUC.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/PixelUnshuffle.py b/forge/test/tvm/cnn/pytorch/alphapose/models/layers/PixelUnshuffle.py similarity index 100% rename from 
pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/PixelUnshuffle.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/layers/PixelUnshuffle.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/Resnet.py b/forge/test/tvm/cnn/pytorch/alphapose/models/layers/Resnet.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/Resnet.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/layers/Resnet.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/SE_Resnet.py b/forge/test/tvm/cnn/pytorch/alphapose/models/layers/SE_Resnet.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/SE_Resnet.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/layers/SE_Resnet.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/SE_module.py b/forge/test/tvm/cnn/pytorch/alphapose/models/layers/SE_module.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/SE_module.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/layers/SE_module.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/ShuffleResnet.py b/forge/test/tvm/cnn/pytorch/alphapose/models/layers/ShuffleResnet.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/layers/ShuffleResnet.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/layers/ShuffleResnet.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/models/simplepose.py b/forge/test/tvm/cnn/pytorch/alphapose/models/simplepose.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/models/simplepose.py rename to forge/test/tvm/cnn/pytorch/alphapose/models/simplepose.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/utils/__init__.py b/forge/test/tvm/cnn/pytorch/alphapose/utils/__init__.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/utils/__init__.py rename to forge/test/tvm/cnn/pytorch/alphapose/utils/__init__.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/utils/config.py b/forge/test/tvm/cnn/pytorch/alphapose/utils/config.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/utils/config.py rename to forge/test/tvm/cnn/pytorch/alphapose/utils/config.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/utils/registry.py b/forge/test/tvm/cnn/pytorch/alphapose/utils/registry.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/utils/registry.py rename to forge/test/tvm/cnn/pytorch/alphapose/utils/registry.py diff --git a/pybuda/test/tvm/cnn/pytorch/alphapose/utils/transforms.py b/forge/test/tvm/cnn/pytorch/alphapose/utils/transforms.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/alphapose/utils/transforms.py rename to forge/test/tvm/cnn/pytorch/alphapose/utils/transforms.py diff --git a/pybuda/test/tvm/cnn/pytorch/dall_e_vae/__init__.py b/forge/test/tvm/cnn/pytorch/dall_e_vae/__init__.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/dall_e_vae/__init__.py rename to forge/test/tvm/cnn/pytorch/dall_e_vae/__init__.py diff --git a/pybuda/test/tvm/cnn/pytorch/dall_e_vae/decoder.py b/forge/test/tvm/cnn/pytorch/dall_e_vae/decoder.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/dall_e_vae/decoder.py rename to forge/test/tvm/cnn/pytorch/dall_e_vae/decoder.py diff --git a/pybuda/test/tvm/cnn/pytorch/dall_e_vae/encoder.py b/forge/test/tvm/cnn/pytorch/dall_e_vae/encoder.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/dall_e_vae/encoder.py 
rename to forge/test/tvm/cnn/pytorch/dall_e_vae/encoder.py diff --git a/pybuda/test/tvm/cnn/pytorch/dall_e_vae/license b/forge/test/tvm/cnn/pytorch/dall_e_vae/license similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/dall_e_vae/license rename to forge/test/tvm/cnn/pytorch/dall_e_vae/license diff --git a/pybuda/test/tvm/cnn/pytorch/dall_e_vae/utils.py b/forge/test/tvm/cnn/pytorch/dall_e_vae/utils.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/dall_e_vae/utils.py rename to forge/test/tvm/cnn/pytorch/dall_e_vae/utils.py diff --git a/pybuda/test/tvm/cnn/pytorch/fastdepth/imagenet/__init__.py b/forge/test/tvm/cnn/pytorch/fastdepth/imagenet/__init__.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/fastdepth/imagenet/__init__.py rename to forge/test/tvm/cnn/pytorch/fastdepth/imagenet/__init__.py diff --git a/pybuda/test/tvm/cnn/pytorch/fastdepth/imagenet/mobilenet.py b/forge/test/tvm/cnn/pytorch/fastdepth/imagenet/mobilenet.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/fastdepth/imagenet/mobilenet.py rename to forge/test/tvm/cnn/pytorch/fastdepth/imagenet/mobilenet.py diff --git a/pybuda/test/tvm/cnn/pytorch/fastdepth/license b/forge/test/tvm/cnn/pytorch/fastdepth/license similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/fastdepth/license rename to forge/test/tvm/cnn/pytorch/fastdepth/license diff --git a/pybuda/test/tvm/cnn/pytorch/fastdepth/metrics.py b/forge/test/tvm/cnn/pytorch/fastdepth/metrics.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/fastdepth/metrics.py rename to forge/test/tvm/cnn/pytorch/fastdepth/metrics.py diff --git a/pybuda/test/tvm/cnn/pytorch/fastdepth/models.py b/forge/test/tvm/cnn/pytorch/fastdepth/models.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/fastdepth/models.py rename to forge/test/tvm/cnn/pytorch/fastdepth/models.py diff --git a/pybuda/test/tvm/cnn/pytorch/fastdepth/utils.py b/forge/test/tvm/cnn/pytorch/fastdepth/utils.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/fastdepth/utils.py rename to forge/test/tvm/cnn/pytorch/fastdepth/utils.py diff --git a/pybuda/test/tvm/cnn/pytorch/gscnn/Resnet.py b/forge/test/tvm/cnn/pytorch/gscnn/Resnet.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/gscnn/Resnet.py rename to forge/test/tvm/cnn/pytorch/gscnn/Resnet.py diff --git a/pybuda/test/tvm/cnn/pytorch/gscnn/SEresnext.py b/forge/test/tvm/cnn/pytorch/gscnn/SEresnext.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/gscnn/SEresnext.py rename to forge/test/tvm/cnn/pytorch/gscnn/SEresnext.py diff --git a/pybuda/test/tvm/cnn/pytorch/gscnn/__init__.py b/forge/test/tvm/cnn/pytorch/gscnn/__init__.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/gscnn/__init__.py rename to forge/test/tvm/cnn/pytorch/gscnn/__init__.py diff --git a/pybuda/test/tvm/cnn/pytorch/gscnn/config.py b/forge/test/tvm/cnn/pytorch/gscnn/config.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/gscnn/config.py rename to forge/test/tvm/cnn/pytorch/gscnn/config.py diff --git a/pybuda/test/tvm/cnn/pytorch/gscnn/gated_spatial_conv.py b/forge/test/tvm/cnn/pytorch/gscnn/gated_spatial_conv.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/gscnn/gated_spatial_conv.py rename to forge/test/tvm/cnn/pytorch/gscnn/gated_spatial_conv.py diff --git a/pybuda/test/tvm/cnn/pytorch/gscnn/gscnn.py b/forge/test/tvm/cnn/pytorch/gscnn/gscnn.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/gscnn/gscnn.py rename to 
forge/test/tvm/cnn/pytorch/gscnn/gscnn.py diff --git a/pybuda/test/tvm/cnn/pytorch/gscnn/mynn.py b/forge/test/tvm/cnn/pytorch/gscnn/mynn.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/gscnn/mynn.py rename to forge/test/tvm/cnn/pytorch/gscnn/mynn.py diff --git a/pybuda/test/tvm/cnn/pytorch/gscnn/wider_resnet.py b/forge/test/tvm/cnn/pytorch/gscnn/wider_resnet.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/gscnn/wider_resnet.py rename to forge/test/tvm/cnn/pytorch/gscnn/wider_resnet.py diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/__init__.py b/forge/test/tvm/cnn/pytorch/tests_A/__init__.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/tests_A/__init__.py rename to forge/test/tvm/cnn/pytorch/tests_A/__init__.py diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_alexnet.py b/forge/test/tvm/cnn/pytorch/tests_A/test_alexnet.py similarity index 83% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_alexnet.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_alexnet.py index 66d93b9bd..011b4749b 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_alexnet.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_alexnet.py @@ -11,15 +11,15 @@ import math import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model -import pybuda +import forge def test_tvm_alexnet(test_kind, test_device): if ( @@ -28,7 +28,7 @@ def test_tvm_alexnet(test_kind, test_device): pytest.skip() if (test_kind == TestKind.TRAINING_RECOMPUTE): - pytest.skip() # tenstorrent/pybuda#215 + pytest.skip() # tenstorrent/forge#215 compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_autoencoder.py b/forge/test/tvm/cnn/pytorch/tests_A/test_autoencoder.py similarity index 95% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_autoencoder.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_autoencoder.py index 8ad70db9d..f3abf6eeb 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_autoencoder.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_autoencoder.py @@ -10,13 +10,13 @@ from torch import nn -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind # SPDX-FileCopyrightText: Copyright (c) 2018 Udacity # diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_convnext.py b/forge/test/tvm/cnn/pytorch/tests_A/test_convnext.py similarity index 87% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_convnext.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_convnext.py index 3b8dd25a2..a8d04c360 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_convnext.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_convnext.py @@ -8,15 +8,15 @@ from transformers import ConvNextModel -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, 
_get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model -import pybuda +import forge def test_convnext_tiny(test_kind, test_device): @@ -26,15 +26,15 @@ def test_convnext_tiny(test_kind, test_device): pytest.skip() # import os - # os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_AT"] = "PRE_PLACER" - # os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "forward_only" - # os.environ["PYBUDA_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "backward_only" + # os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_AT"] = "PRE_PLACER" + # os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "forward_only" + # os.environ["FORGE_PRINT_GRAPH_VIZ_FORMAT_DIR"] = "backward_only" compiler_cfg = _get_global_compiler_config() if not test_kind.is_training(): compiler_cfg.compile_depth = CompileDepth.FULL else: - # tenstorrent/pybuda#365 + # tenstorrent/forge#365 compiler_cfg.compile_depth = CompileDepth.BUDA_GRAPH_PRE_PLACER compiler_cfg.balancer_policy = "CNN" compiler_cfg.retain_tvm_python_files = True diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_dalle_vae.py b/forge/test/tvm/cnn/pytorch/tests_A/test_dalle_vae.py similarity index 82% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_dalle_vae.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_dalle_vae.py index cea0ed347..38da7237f 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_dalle_vae.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_dalle_vae.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest import os @@ -10,15 +10,15 @@ from test.tvm.cnn.pytorch.dall_e_vae import Encoder, Decoder -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -import pybuda +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +import forge def test_tvm_dalle_Encoder(test_kind, test_device): @@ -45,14 +45,14 @@ def test_tvm_dalle_Encoder(test_kind, test_device): def test_tvm_dalle_Decoder(test_kind, test_device): - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip() if test_kind.is_training(): pytest.skip() compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_RIBBON2"] = "1" if test_kind.is_training(): compiler_cfg.compile_depth = CompileDepth.BUDA_GRAPH_PRE_PLACER diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_deeplab.py b/forge/test/tvm/cnn/pytorch/tests_A/test_deeplab.py similarity index 85% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_deeplab.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_deeplab.py index dc6a31d1d..c473ced6e 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_deeplab.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_deeplab.py @@ -5,13 +5,13 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend 
import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_efficientnet.py b/forge/test/tvm/cnn/pytorch/tests_A/test_efficientnet.py similarity index 94% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_efficientnet.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_efficientnet.py index 254163b78..75373fa29 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_efficientnet.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_efficientnet.py @@ -8,14 +8,14 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, BackendType, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model def test_efficientnet_layer(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_fcn.py b/forge/test/tvm/cnn/pytorch/tests_A/test_fcn.py similarity index 76% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_fcn.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_fcn.py index f272af846..14c2a2d77 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_fcn.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_fcn.py @@ -5,13 +5,13 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model @@ -33,16 +33,16 @@ def test_fcn_pytorch(test_kind, test_device): compiler_cfg.balancer_policy = "CNN" # Issue below is still valid, though it doesn't trigger when fracturing is turned on - # tenstorrent/pybuda#310 - import pybuda - pybuda.config.override_t_stream_shape( + # tenstorrent/forge#310 + import forge + forge.config.override_t_stream_shape( "conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (28, 1) ) - # tenstorrent/pybuda#392 + # tenstorrent/forge#392 import os - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" - os.environ["PYBUDA_FORCE_RESIZE_DENSE_MM"] = "1" + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_FORCE_RESIZE_DENSE_MM"] = "1" model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "fcn_resnet50", pretrained=True, force_reload=True ) diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_googlenet.py b/forge/test/tvm/cnn/pytorch/tests_A/test_googlenet.py similarity index 84% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_googlenet.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_googlenet.py index 57a333319..b9d0c472d 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_googlenet.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_googlenet.py @@ -7,26 +7,26 @@ import torch from torchvision import transforms, models -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module 
-from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model -import pybuda +import forge def test_tvm_googlenet(test_kind, test_device): - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: pytest.skip("Skip for Wormhole_B0") if ( test_kind == TestKind.TRAINING ): # Always run with recompute in post-commit CI. Nightly tests both pytest.skip() - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "30000" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "30000" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" @@ -62,7 +62,7 @@ def test_googlenet_torchvision(test_kind, test_device): import os # This will allow the test to pass but we should use conv padding to fix the issue instead - # os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "30000" + # os.environ["FORGE_EXTRA_L1_MARGIN"] = "30000" # unknown padding to add compiler_cfg = _get_global_compiler_config() diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_gscnn.py b/forge/test/tvm/cnn/pytorch/tests_A/test_gscnn.py similarity index 93% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_gscnn.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_gscnn.py index ba1c77cb8..577551897 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_gscnn.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_gscnn.py @@ -6,13 +6,13 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model @@ -61,7 +61,7 @@ def test_wider_resnet_torch(test_kind, test_device): compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" - compiler_cfg.compile_depth = CompileDepth.GENERATE_INITIAL_GRAPH # Needs neg maxpool support tenstorrent/pybuda#188 + compiler_cfg.compile_depth = CompileDepth.GENERATE_INITIAL_GRAPH # Needs neg maxpool support tenstorrent/forge#188 module = PyTorchModule("wider_resnet_torch", submodel) @@ -93,7 +93,7 @@ def test_gated_spatial_conv_torch(test_kind, test_device): compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" - compiler_cfg.compile_depth = CompileDepth.PRE_LOWERING_PASS # tenstorrent/pybuda#185 + compiler_cfg.compile_depth = CompileDepth.PRE_LOWERING_PASS # tenstorrent/forge#185 module = PyTorchModule("gated_spatial_conv_torch", model) diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_hrnet.py b/forge/test/tvm/cnn/pytorch/tests_A/test_hrnet.py similarity index 94% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_hrnet.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_hrnet.py index 754826d15..7a2c5b66c 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_hrnet.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_hrnet.py @@ -8,14 +8,14 @@ import timm -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, BackendType, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, 
_get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind def test_hrnet_full_model(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_inception.py b/forge/test/tvm/cnn/pytorch/tests_A/test_inception.py similarity index 95% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_inception.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_inception.py index 91a7b830c..e6825bc05 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_inception.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_inception.py @@ -5,13 +5,13 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model @@ -53,7 +53,7 @@ def test_inceptionv3_b_pytorch(test_kind, test_device): compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" - compiler_cfg.enable_conv_prestride = False # tenstorrent/pybuda#925 + compiler_cfg.enable_conv_prestride = False # tenstorrent/forge#925 if test_kind.is_training(): compiler_cfg.compile_depth = CompileDepth.GENERATE_INITIAL_GRAPH diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_midas.py b/forge/test/tvm/cnn/pytorch/tests_A/test_midas.py similarity index 84% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_midas.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_midas.py index c6ea1468c..e3940e038 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_midas.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_midas.py @@ -5,13 +5,13 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/pytorch/tests_A/test_mnasnet.py b/forge/test/tvm/cnn/pytorch/tests_A/test_mnasnet.py similarity index 83% rename from pybuda/test/tvm/cnn/pytorch/tests_A/test_mnasnet.py rename to forge/test/tvm/cnn/pytorch/tests_A/test_mnasnet.py index b508b301b..a9702045e 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_A/test_mnasnet.py +++ b/forge/test/tvm/cnn/pytorch/tests_A/test_mnasnet.py @@ -6,13 +6,13 @@ import torch from torchvision.models.mnasnet import MNASNet -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind def test_mnasnet(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/SSD/__init__.py b/forge/test/tvm/cnn/pytorch/tests_B/SSD/__init__.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/tests_B/SSD/__init__.py rename to forge/test/tvm/cnn/pytorch/tests_B/SSD/__init__.py diff --git 
a/pybuda/test/tvm/cnn/pytorch/tests_B/SSD/ssd.py b/forge/test/tvm/cnn/pytorch/tests_B/SSD/ssd.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/tests_B/SSD/ssd.py rename to forge/test/tvm/cnn/pytorch/tests_B/SSD/ssd.py diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/__init__.py b/forge/test/tvm/cnn/pytorch/tests_B/__init__.py similarity index 100% rename from pybuda/test/tvm/cnn/pytorch/tests_B/__init__.py rename to forge/test/tvm/cnn/pytorch/tests_B/__init__.py diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_alphapose.py b/forge/test/tvm/cnn/pytorch/tests_B/test_alphapose.py similarity index 87% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_alphapose.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_alphapose.py index dacc2a9f6..dde219e27 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_alphapose.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_alphapose.py @@ -9,13 +9,13 @@ import torch import os -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.tvm.cnn.pytorch.alphapose.utils.config import update_config from test.tvm.cnn.pytorch.alphapose.models import builder diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_fastdepth.py b/forge/test/tvm/cnn/pytorch/tests_B/test_fastdepth.py similarity index 85% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_fastdepth.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_fastdepth.py index a10097900..63f153ecb 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_fastdepth.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_fastdepth.py @@ -5,15 +5,15 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.tvm.cnn.pytorch.fastdepth.models import MobileNetSkipAdd -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config def test_fastdepth_pytorch(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_ghostnet.py b/forge/test/tvm/cnn/pytorch/tests_B/test_ghostnet.py similarity index 87% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_ghostnet.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_ghostnet.py index 5eab8f1d9..df1e51df0 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_ghostnet.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_ghostnet.py @@ -10,13 +10,13 @@ import importlib import urllib -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind def test_ghostnet(test_kind, test_device): if test_kind.is_training(): @@ -25,12 +25,12 @@ def test_ghostnet(test_kind, test_device): compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" - #Fusing disabled due to 
tenstorrent/pybuda#800 + #Fusing disabled due to tenstorrent/forge#800 compiler_cfg.enable_auto_fusing=False - # tenstorrent/pybuda#392 + # tenstorrent/forge#392 import os - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" # model = torch.hub.load('huawei-noah/ghostnet', 'ghostnet_1x', pretrained=True) # Top file from torch hub depends on cuda import, so just get the model directly. @@ -58,7 +58,7 @@ def test_ghostnet_v2(test_kind, test_device): pytest.skip("Needs padding") - # STEP 1: Set PyBuda configuration parameters + # STEP 1: Set Forge configuration parameters compiler_cfg = _get_global_compiler_config() # load global compiler config object compiler_cfg.balancer_policy = "CNN" diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_graph_cnn.py b/forge/test/tvm/cnn/pytorch/tests_B/test_graph_cnn.py similarity index 89% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_graph_cnn.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_graph_cnn.py index e22ebf1a2..4e5d3de23 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_graph_cnn.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_graph_cnn.py @@ -8,19 +8,19 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) # from torch_geometric.nn import GCNConv -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind def test_tvm_graph_cnn(test_kind, test_device): - # Scatter Addition op is not supported in PyBuda. Can be revised + # Scatter Addition op is not supported in Forge. 
Can be revised # once embeddings (over take op) are supported on HW side pytest.skip() diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_hf_clip.py b/forge/test/tvm/cnn/pytorch/tests_B/test_hf_clip.py similarity index 90% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_hf_clip.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_hf_clip.py index 280d0e0be..65e97fdc7 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_hf_clip.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_hf_clip.py @@ -14,14 +14,14 @@ from transformers import CLIPProcessor, CLIPModel, CLIPConfig -import pybuda -from pybuda import ( +import forge +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind class ClipWrapper(torch.nn.Module): def __init__(self, model): @@ -69,10 +69,10 @@ def test_hf_clip(test_kind, test_device): ), inputs=[(input_ids, pixel_values, attention_mask)], ) - # tt0 = pybuda.TTDevice("tt0", + # tt0 = forge.TTDevice("tt0", # devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("clip", ClipWrapper(model))) # tt0.push_to_inputs((input_ids, pixel_values, attention_mask)) - # output_q = pybuda.run_inference() + # output_q = forge.run_inference() # outputs = output_q.get() # outputs = model(input_ids, pixel_values, attention_mask) diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_mnist.py b/forge/test/tvm/cnn/pytorch/tests_B/test_mnist.py similarity index 91% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_mnist.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_mnist.py index 5f12509d2..84844acd8 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_mnist.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_mnist.py @@ -10,13 +10,13 @@ import torch.nn as nn import torch.nn.functional as F -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda.compile import _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge.compile import _get_global_compiler_config def test_mnist_pytorch(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v2.py b/forge/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v2.py similarity index 87% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v2.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v2.py index 646f80faa..20c1c34f6 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v2.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v2.py @@ -5,18 +5,18 @@ import torch -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config from transformers import MobileNetV2FeatureExtractor, MobileNetV2ForSemanticSegmentation from transformers import AutoImageProcessor -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda import DataFormat +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge import DataFormat from test.utils import 
download_model @@ -29,9 +29,9 @@ def test_mobilenetv2_pytorch(test_kind, test_device): if test_kind.is_training(): pytest.skip() # Backward is currently unsupported - # tenstorrent/pybuda#392 + # tenstorrent/forge#392 import os - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" @@ -74,7 +74,7 @@ def test_mobilenetv2_deeplab(test_kind, test_device): pytest.skip() # Backward is currently unsupported import os - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{25:26}" + os.environ["FORGE_PAD_SPARSE_MM"] = "{25:26}" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v3.py b/forge/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v3.py similarity index 88% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v3.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v3.py index 7a88b92ca..89e0222f0 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v3.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_mobilenet_v3.py @@ -8,14 +8,14 @@ import torch from torch import nn -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -from pybuda import DataFormat +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +from forge import DataFormat from test.utils import download_model @@ -31,9 +31,9 @@ def test_mobilenet_v3_small(test_kind, test_device): compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" - # tenstorrent/pybuda#392 + # tenstorrent/forge#392 import os - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "mobilenet_v3_small", pretrained=True diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_regnety.py b/forge/test/tvm/cnn/pytorch/tests_B/test_regnety.py similarity index 85% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_regnety.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_regnety.py index 34d8e7cbb..fc2616440 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_regnety.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_regnety.py @@ -8,14 +8,14 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, CompileDepth, ) -from pybuda.verify.backend import verify_module -from pybuda.config import _get_global_compiler_config -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge.config import _get_global_compiler_config +from forge.verify.config import TestKind import timm diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_resnet.py b/forge/test/tvm/cnn/pytorch/tests_B/test_resnet.py similarity index 92% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_resnet.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_resnet.py index ce8c3a25e..5a4a5b51a 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_resnet.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_resnet.py @@ -8,14 +8,14 @@ import torch -import pybuda -from pybuda import ( +import forge +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.verify.config import 
TestKind -from pybuda.verify.backend import verify_module -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.verify.config import TestKind +from forge.verify.backend import verify_module +from forge.config import CompileDepth, _get_global_compiler_config from test.utils import download_model @@ -53,8 +53,8 @@ def test_resnet_pytorch(test_kind, test_device): # compiler_cfg.place_on_new_epoch("max_pool2d_14.dc.reshape.0_operand_commute_clone411.dc.sparse_matmul.4.lc2") # Issue below is still valid, though it doesn't trigger when fracturing is turned on - # tenstorrent/pybuda#310 - #pybuda.config.override_t_stream_shape( + # tenstorrent/forge#310 + #forge.config.override_t_stream_shape( # "conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (28, 1) #) diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_resnext.py b/forge/test/tvm/cnn/pytorch/tests_B/test_resnext.py similarity index 80% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_resnext.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_resnext.py index 4a7f2656d..f360c1db4 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_resnext.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_resnext.py @@ -8,14 +8,14 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, BackendType, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model @@ -36,9 +36,9 @@ def test_resnext(test_kind, test_device): if test_kind.is_training(): compiler_cfg.compile_depth = CompileDepth.BUDA_GRAPH_PRE_PLACER - #import pybuda - # tenstorrent/pybuda#310 - #pybuda.config.override_t_stream_shape("conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (28, 1)) + #import forge + # tenstorrent/forge#310 + #forge.config.override_t_stream_shape("conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (28, 1)) model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "resnext50_32x4d", pretrained=True) module = PyTorchModule("resnext50_32x4d", model) diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_shufflenet.py b/forge/test/tvm/cnn/pytorch/tests_B/test_shufflenet.py similarity index 85% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_shufflenet.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_shufflenet.py index 28e0850e1..cefa10807 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_shufflenet.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_shufflenet.py @@ -5,13 +5,13 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_ssd.py b/forge/test/tvm/cnn/pytorch/tests_B/test_ssd.py similarity index 85% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_ssd.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_ssd.py index c4707c394..a1b57a1f5 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_ssd.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_ssd.py 
@@ -8,13 +8,13 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from .SSD.ssd import SSD diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_vgg.py b/forge/test/tvm/cnn/pytorch/tests_B/test_vgg.py similarity index 85% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_vgg.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_vgg.py index 7e1b2add2..d1f3d1a98 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_vgg.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_vgg.py @@ -8,13 +8,13 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_videopose.py b/forge/test/tvm/cnn/pytorch/tests_B/test_videopose.py similarity index 87% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_videopose.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_videopose.py index 287a73e57..6ee4231e4 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_videopose.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_videopose.py @@ -5,17 +5,17 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, BackendType, ) -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge.verify.config import TestKind import sys import os -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config from test.tvm.cnn.pytorch.videopose.model import TemporalModel def test_videopose_pytorch(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_vilt.py b/forge/test/tvm/cnn/pytorch/tests_B/test_vilt.py similarity index 91% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_vilt.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_vilt.py index ec2a015bf..21d135466 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_B/test_vilt.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_vilt.py @@ -5,13 +5,13 @@ from transformers import ViltModel, ViltConfig -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind def test_tvm_vision_language_transformer_encoder(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/pytorch/tests_B/test_vit.py b/forge/test/tvm/cnn/pytorch/tests_B/test_vit.py similarity index 88% rename from pybuda/test/tvm/cnn/pytorch/tests_B/test_vit.py rename to forge/test/tvm/cnn/pytorch/tests_B/test_vit.py index 86540857d..b5ddb5199 100644 --- 
a/pybuda/test/tvm/cnn/pytorch/tests_B/test_vit.py +++ b/forge/test/tvm/cnn/pytorch/tests_B/test_vit.py @@ -7,18 +7,18 @@ import torch from transformers import ViTModel, ViTConfig -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind -import pybuda +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind +import forge def test_tvm_visual_transformer(test_kind, test_device): - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip() if test_kind.is_training(): @@ -33,14 +33,14 @@ def test_tvm_visual_transformer(test_kind, test_device): config.num_attention_heads = 1 config.num_hidden_layers = 1 framework_model = ViTModel(config) - pybuda_model = PyTorchModule("pt_visual_transformer", framework_model) + forge_model = PyTorchModule("pt_visual_transformer", framework_model) # Sanity run input_shape = (1, 3, 224, 224) out = framework_model(torch.rand(input_shape)) verify_module( - pybuda_model, + forge_model, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/cnn/pytorch/tests_C/test_densenet.py b/forge/test/tvm/cnn/pytorch/tests_C/test_densenet.py similarity index 76% rename from pybuda/test/tvm/cnn/pytorch/tests_C/test_densenet.py rename to forge/test/tvm/cnn/pytorch/tests_C/test_densenet.py index fb899b220..cf265bb1b 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_C/test_densenet.py +++ b/forge/test/tvm/cnn/pytorch/tests_C/test_densenet.py @@ -9,13 +9,13 @@ import torch from torchvision import models -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model @@ -27,7 +27,7 @@ def test_densenet_121(test_kind, test_device): compiler_cfg.balancer_policy = "CNN" import os - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet121", pretrained=True) module = PyTorchModule("densenet121_pt", model) @@ -43,7 +43,7 @@ def test_densenet_121(test_kind, test_device): ), ) - os.environ.pop('PYBUDA_DISABLE_CONSTANT_FOLDING', None) + os.environ.pop('FORGE_DISABLE_CONSTANT_FOLDING', None) def test_densenet_169(test_kind, test_device): @@ -56,8 +56,8 @@ def test_densenet_169(test_kind, test_device): compiler_cfg.balancer_policy = "CNN" import os - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" model = download_model(models.densenet169, pretrained=True) @@ -74,7 +74,7 @@ def test_densenet_169(test_kind, test_device): ), ) - os.environ.pop('PYBUDA_DISABLE_CONSTANT_FOLDING', None) + os.environ.pop('FORGE_DISABLE_CONSTANT_FOLDING', None) def test_densenet_201(test_kind, test_device): @@ -88,8 +88,8 @@ def 
test_densenet_201(test_kind, test_device): compiler_cfg.balancer_policy = "CNN" import os - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" model = download_model(models.densenet201, pretrained=True) @@ -106,7 +106,7 @@ def test_densenet_201(test_kind, test_device): ), ) - os.environ.pop('PYBUDA_DISABLE_CONSTANT_FOLDING', None) + os.environ.pop('FORGE_DISABLE_CONSTANT_FOLDING', None) def test_densenet_161(test_kind, test_device): @@ -120,8 +120,8 @@ def test_densenet_161(test_kind, test_device): compiler_cfg.balancer_policy = "CNN" import os - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" - os.environ["PYBUDA_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" + os.environ["FORGE_GRAPHSOLVER_SELF_CUT_TYPE"] = "ConsumerOperandDataEdgesFirst" model = download_model(models.densenet161, pretrained=True) @@ -138,4 +138,4 @@ def test_densenet_161(test_kind, test_device): ), ) - os.environ.pop('PYBUDA_DISABLE_CONSTANT_FOLDING', None) + os.environ.pop('FORGE_DISABLE_CONSTANT_FOLDING', None) diff --git a/pybuda/test/tvm/cnn/pytorch/tests_C/test_yolov5.py b/forge/test/tvm/cnn/pytorch/tests_C/test_yolov5.py similarity index 85% rename from pybuda/test/tvm/cnn/pytorch/tests_C/test_yolov5.py rename to forge/test/tvm/cnn/pytorch/tests_C/test_yolov5.py index 5bb09107c..17132ceac 100644 --- a/pybuda/test/tvm/cnn/pytorch/tests_C/test_yolov5.py +++ b/forge/test/tvm/cnn/pytorch/tests_C/test_yolov5.py @@ -10,15 +10,15 @@ import torch.nn as nn from PIL import Image -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from test.utils import download_model -import pybuda +import forge class Identity(nn.Module): def __init__(self): @@ -31,7 +31,7 @@ def forward(self, x): def test_yolov5_320x320(test_kind, test_device): - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip() # This one works @@ -61,7 +61,7 @@ def test_yolov5_320x320(test_kind, test_device): arch=test_device.arch, devtype=test_device.devtype, test_kind=test_kind, - verify_pybuda_codegen_vs_framework = True, + verify_forge_codegen_vs_framework = True, ), ) @@ -81,9 +81,9 @@ def test_yolov5_480x480(test_kind, test_device): pytest.skip() # Backward is currently unsupported import os - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16}" - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16}" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" @@ -123,9 +123,9 @@ def test_yolov5m_640x640(test_kind, test_device): pytest.skip() # Backward is currently unsupported import os - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16, 3:4}" - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" - os.environ["PYBUDA_CONCAT_SLICE_Y"] = "8" + os.environ["FORGE_PAD_SPARSE_MM"] 
= "{13:16, 3:4}" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_CONCAT_SLICE_Y"] = "8" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" @@ -161,8 +161,8 @@ def test_yolov5_1280x1280(test_kind, test_device): pytest.skip() # Backward is currently unsupported import os - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16}" - os.environ["PYBUDA_INSERT_SLICE_FOR_CONCAT"] = "1" + os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16}" + os.environ["FORGE_INSERT_SLICE_FOR_CONCAT"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_A/test_convnext.py b/forge/test/tvm/cnn/tensorflow/tests_A/test_convnext.py similarity index 82% rename from pybuda/test/tvm/cnn/tensorflow/tests_A/test_convnext.py rename to forge/test/tvm/cnn/tensorflow/tests_A/test_convnext.py index d0e3cc513..3cf2c835e 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_A/test_convnext.py +++ b/forge/test/tvm/cnn/tensorflow/tests_A/test_convnext.py @@ -8,7 +8,7 @@ from transformers import TFConvNextModel, ConvNextConfig -from pybuda import ( +from forge import ( PyTorchModule, TFModule, TTDevice, @@ -16,14 +16,14 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth, _get_global_compiler_config +from test.tvm.utils import evaluate_framework_vs_forge import tensorflow as tf -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module def test_tvm_convnext(test_kind, test_device): if test_kind.is_training(): @@ -31,7 +31,7 @@ def test_tvm_convnext(test_kind, test_device): compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" - # tenstorrent/pybuda#842 + # tenstorrent/forge#842 compiler_cfg.compile_depth = ( CompileDepth.BUDA_GRAPH_PRE_PLACER ) diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_A/test_nasnet.py b/forge/test/tvm/cnn/tensorflow/tests_A/test_nasnet.py similarity index 84% rename from pybuda/test/tvm/cnn/tensorflow/tests_A/test_nasnet.py rename to forge/test/tvm/cnn/tensorflow/tests_A/test_nasnet.py index 6820608ee..22dacca76 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_A/test_nasnet.py +++ b/forge/test/tvm/cnn/tensorflow/tests_A/test_nasnet.py @@ -6,7 +6,7 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, TFModule, TTDevice, @@ -14,10 +14,10 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth, _get_global_compiler_config +from test.tvm.utils import evaluate_framework_vs_forge import tensorflow as tf @@ -42,7 +42,7 @@ def test_tvm_nasnet_mobile_tf(training=False): act1 = tf.random.uniform((1, 224, 224, 3)) - ret = pybuda_compile( + ret = forge_compile( tt0, "nasnet_mobile_tf", act1, @@ -56,4 +56,4 @@ def test_tvm_nasnet_mobile_tf(training=False): intermediates=True, ), ) - evaluate_framework_vs_pybuda(model, ret, act1) \ No newline at end of file + evaluate_framework_vs_forge(model, ret, act1) \ No newline at end of file diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_A/test_resnet.py b/forge/test/tvm/cnn/tensorflow/tests_A/test_resnet.py similarity index 90% rename from pybuda/test/tvm/cnn/tensorflow/tests_A/test_resnet.py 
rename to forge/test/tvm/cnn/tensorflow/tests_A/test_resnet.py index 9f89e5104..6a8b4f6e2 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_A/test_resnet.py +++ b/forge/test/tvm/cnn/tensorflow/tests_A/test_resnet.py @@ -6,7 +6,7 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, TFModule, TTDevice, @@ -14,12 +14,12 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge.verify.config import TestKind import tensorflow as tf -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config def test_tvm_resnet_tf(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_A/test_xception.py b/forge/test/tvm/cnn/tensorflow/tests_A/test_xception.py similarity index 83% rename from pybuda/test/tvm/cnn/tensorflow/tests_A/test_xception.py rename to forge/test/tvm/cnn/tensorflow/tests_A/test_xception.py index 8c9647c0e..7b2aa48dd 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_A/test_xception.py +++ b/forge/test/tvm/cnn/tensorflow/tests_A/test_xception.py @@ -6,7 +6,7 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, TFModule, TTDevice, @@ -14,12 +14,12 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.verify.backend import verify_module +from forge.verify.config import TestKind import tensorflow as tf -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config def test_xception(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_alexnet.py b/forge/test/tvm/cnn/tensorflow/tests_B/test_alexnet.py similarity index 95% rename from pybuda/test/tvm/cnn/tensorflow/tests_B/test_alexnet.py rename to forge/test/tvm/cnn/tensorflow/tests_B/test_alexnet.py index 7e6532458..4124ceffb 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_alexnet.py +++ b/forge/test/tvm/cnn/tensorflow/tests_B/test_alexnet.py @@ -10,13 +10,13 @@ from keras import Model import tensorflow as tf -from pybuda import ( +from forge import ( TFModule, VerifyConfig, ) -from pybuda.verify.config import TestKind -from pybuda.verify.backend import verify_module -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.verify.config import TestKind +from forge.verify.backend import verify_module +from forge.config import CompileDepth, _get_global_compiler_config def test_alexnet(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_autoencoder.py b/forge/test/tvm/cnn/tensorflow/tests_B/test_autoencoder.py similarity index 93% rename from pybuda/test/tvm/cnn/tensorflow/tests_B/test_autoencoder.py rename to forge/test/tvm/cnn/tensorflow/tests_B/test_autoencoder.py index 489bf7dbd..237fee150 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_autoencoder.py +++ b/forge/test/tvm/cnn/tensorflow/tests_B/test_autoencoder.py @@ -10,13 +10,13 @@ from keras import layers import tensorflow as tf -from pybuda import ( +from forge import ( TFModule, VerifyConfig, ) -from pybuda.verify.config import TestKind -from pybuda.verify.backend import verify_module -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.verify.config 
import TestKind +from forge.verify.backend import verify_module +from forge.config import CompileDepth, _get_global_compiler_config def test_conv_autoencoder(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_densenet.py b/forge/test/tvm/cnn/tensorflow/tests_B/test_densenet.py similarity index 86% rename from pybuda/test/tvm/cnn/tensorflow/tests_B/test_densenet.py rename to forge/test/tvm/cnn/tensorflow/tests_B/test_densenet.py index 4abbaf1b4..7621047a3 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_densenet.py +++ b/forge/test/tvm/cnn/tensorflow/tests_B/test_densenet.py @@ -5,17 +5,17 @@ import tensorflow as tf -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge input_shapes = [(1, 224, 224, 3)] @@ -46,7 +46,7 @@ def test_densenet_tf(training, input_shape): act1 = tf.random.uniform(input_shape) - ret = pybuda_compile( + ret = forge_compile( tt0, "densenet121_tf", act1, @@ -61,4 +61,4 @@ def test_densenet_tf(training, input_shape): ), ) - evaluate_framework_vs_pybuda(model, ret, act1) \ No newline at end of file + evaluate_framework_vs_forge(model, ret, act1) \ No newline at end of file diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_efficientnet.py b/forge/test/tvm/cnn/tensorflow/tests_B/test_efficientnet.py similarity index 88% rename from pybuda/test/tvm/cnn/tensorflow/tests_B/test_efficientnet.py rename to forge/test/tvm/cnn/tensorflow/tests_B/test_efficientnet.py index d66b54f8d..a0b07b2e2 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_efficientnet.py +++ b/forge/test/tvm/cnn/tensorflow/tests_B/test_efficientnet.py @@ -8,7 +8,7 @@ import tensorflow as tf -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, @@ -16,13 +16,13 @@ VerifyConfig, CompileDepth, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import _get_global_compiler_config -from pybuda.verify.config import TestKind -from pybuda.verify.backend import verify_module -import pybuda +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import _get_global_compiler_config +from forge.verify.config import TestKind +from forge.verify.backend import verify_module +import forge input_shapes = [(1, 1, 32, 16)] @@ -53,7 +53,7 @@ def call(self, x): act1 = tf.random.uniform(input_shape) x = model(act1) - ret = pybuda_compile( + ret = forge_compile( tt0, "transpose_batch_dim_tf", act1, @@ -69,7 +69,7 @@ def call(self, x): ), ) - evaluate_framework_vs_pybuda(model, ret, act1) + evaluate_framework_vs_forge(model, ret, act1) def test_efficientnet_layer(test_kind, test_device): diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_inception.py b/forge/test/tvm/cnn/tensorflow/tests_B/test_inception.py similarity index 84% rename from pybuda/test/tvm/cnn/tensorflow/tests_B/test_inception.py rename to forge/test/tvm/cnn/tensorflow/tests_B/test_inception.py index f01b2790b..f4b4c6cf5 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_inception.py +++ b/forge/test/tvm/cnn/tensorflow/tests_B/test_inception.py @@ -5,15 +5,15 @@ import tensorflow as tf -import pybuda -from pybuda import ( +import forge +from forge import ( TFModule, VerifyConfig, CompileDepth, ) -from 
pybuda.config import _get_global_compiler_config -from pybuda.verify.config import TestKind -from pybuda.verify.backend import verify_module +from forge.config import _get_global_compiler_config +from forge.verify.config import TestKind +from forge.verify.backend import verify_module def test_inceptionv3_tf(test_kind, test_device): if test_kind == TestKind.TRAINING: diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_mnist.py b/forge/test/tvm/cnn/tensorflow/tests_B/test_mnist.py similarity index 90% rename from pybuda/test/tvm/cnn/tensorflow/tests_B/test_mnist.py rename to forge/test/tvm/cnn/tensorflow/tests_B/test_mnist.py index 721e5e94a..615139363 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_mnist.py +++ b/forge/test/tvm/cnn/tensorflow/tests_B/test_mnist.py @@ -5,15 +5,15 @@ import tensorflow as tf -from pybuda import ( +from forge import ( TFModule, VerifyConfig, CompileDepth, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind input_shapes = [(1, 32, 32, 1)] diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_mobilenet.py b/forge/test/tvm/cnn/tensorflow/tests_B/test_mobilenet.py similarity index 92% rename from pybuda/test/tvm/cnn/tensorflow/tests_B/test_mobilenet.py rename to forge/test/tvm/cnn/tensorflow/tests_B/test_mobilenet.py index 750670c12..b871257ad 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_mobilenet.py +++ b/forge/test/tvm/cnn/tensorflow/tests_B/test_mobilenet.py @@ -5,20 +5,20 @@ import tensorflow as tf -from pybuda import ( +from forge import ( TTDevice, - pybuda_compile, + forge_compile, VerifyConfig, TFModule, CompilerConfig, optimizers, CompileDepth, BackendType, - pybuda_reset, + forge_reset, ) -from pybuda.config import _get_global_compiler_config -from pybuda.verify.config import TestKind -from pybuda.verify.backend import verify_module +from forge.config import _get_global_compiler_config +from forge.verify.config import TestKind +from forge.verify.backend import verify_module def test_mobilenetv1_tf(test_kind, test_device): if test_kind == TestKind.TRAINING: diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_regnety.py b/forge/test/tvm/cnn/tensorflow/tests_B/test_regnety.py similarity index 86% rename from pybuda/test/tvm/cnn/tensorflow/tests_B/test_regnety.py rename to forge/test/tvm/cnn/tensorflow/tests_B/test_regnety.py index b33c4dba0..e79d3a617 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_regnety.py +++ b/forge/test/tvm/cnn/tensorflow/tests_B/test_regnety.py @@ -6,7 +6,7 @@ import torch -from pybuda import ( +from forge import ( PyTorchModule, TFModule, TTDevice, @@ -14,11 +14,11 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from pybuda.config import _get_global_compiler_config -from pybuda.verify.backend import verify_module +from forge.config import CompileDepth +from forge.config import _get_global_compiler_config +from forge.verify.backend import verify_module import tensorflow as tf import tensorflow_hub as hub diff --git a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_vgg.py b/forge/test/tvm/cnn/tensorflow/tests_B/test_vgg.py similarity index 86% 
rename from pybuda/test/tvm/cnn/tensorflow/tests_B/test_vgg.py rename to forge/test/tvm/cnn/tensorflow/tests_B/test_vgg.py index e5b4a31aa..a676e2c17 100644 --- a/pybuda/test/tvm/cnn/tensorflow/tests_B/test_vgg.py +++ b/forge/test/tvm/cnn/tensorflow/tests_B/test_vgg.py @@ -2,21 +2,21 @@ # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest import tensorflow as tf -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge def test_tvm_vgg16_tf(training): recompute = True # Always run with recompute in post-commit CI. Nightly tests both @@ -43,7 +43,7 @@ def test_tvm_vgg16_tf(training): act1 = tf.random.uniform((1, 224, 224, 3)) - ret = pybuda_compile( + ret = forge_compile( tt0, "vgg16_tf", act1, @@ -58,7 +58,7 @@ def test_tvm_vgg16_tf(training): pcc=0.97, ), ) - evaluate_framework_vs_pybuda(model, ret, act1) + evaluate_framework_vs_forge(model, ret, act1) if __name__ == "__main__": test_tvm_vgg16_tf(False, False) diff --git a/pybuda/test/tvm/cnn/tflite/test_efficientnet_lite.py b/forge/test/tvm/cnn/tflite/test_efficientnet_lite.py similarity index 83% rename from pybuda/test/tvm/cnn/tflite/test_efficientnet_lite.py rename to forge/test/tvm/cnn/tflite/test_efficientnet_lite.py index 968e286b6..76a72e026 100644 --- a/pybuda/test/tvm/cnn/tflite/test_efficientnet_lite.py +++ b/forge/test/tvm/cnn/tflite/test_efficientnet_lite.py @@ -7,27 +7,27 @@ import torch.nn as nn from PIL import Image -from pybuda import ( +from forge import ( TFLiteModule, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from tvm import relay import tflite import tensorflow as tf -import pybuda +import forge def test_efficientnet_lite0(test_device): - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip() compiler_cfg = _get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.enable_tvm_constant_prop = True - pybuda.config.override_op_size("conv2d_29.dc.sparse_matmul.7.dc.sparse_matmul.1.lc2", (7, 1)) + forge.config.override_op_size("conv2d_29.dc.sparse_matmul.7.dc.sparse_matmul.1.lc2", (7, 1)) tflite_path = "third_party/confidential_customer_models/model_2/tflite/efficientnet-lite0-fp32.tflite" compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_18"] = 5 @@ -48,7 +48,7 @@ def test_efficientnet_lite0(test_device): ) def test_efficientnet_lite4(test_device): - if test_device.arch == pybuda.BackendDevice.Wormhole_B0 or test_device.arch == pybuda.BackendDevice.Blackhole: + if test_device.arch == forge.BackendDevice.Wormhole_B0 or test_device.arch == forge.BackendDevice.Blackhole: pytest.skip() compiler_cfg = _get_global_compiler_config() @@ -57,11 +57,11 @@ def test_efficientnet_lite4(test_device): compiler_cfg.enable_conv_prestride = True compiler_cfg.graph_solver_self_cut_type = "ConsumerOperandDataEdgesFirst" - #Fusing disabled due to tenstorrent/pybuda#789 + #Fusing disabled due to tenstorrent/forge#789 compiler_cfg.enable_auto_fusing=False import os - 
os.environ["PYBUDA_PAD_SPARSE_MM"] = "{13:16}" + os.environ["FORGE_PAD_SPARSE_MM"] = "{13:16}" tflite_path = "third_party/confidential_customer_models/model_2/tflite/efficientnet-lite4-fp32.tflite" compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_93"] = 5 diff --git a/pybuda/test/tvm/cnn/tflite/test_pose_landmark.py b/forge/test/tvm/cnn/tflite/test_pose_landmark.py similarity index 90% rename from pybuda/test/tvm/cnn/tflite/test_pose_landmark.py rename to forge/test/tvm/cnn/tflite/test_pose_landmark.py index bea2d8308..7a671f826 100644 --- a/pybuda/test/tvm/cnn/tflite/test_pose_landmark.py +++ b/forge/test/tvm/cnn/tflite/test_pose_landmark.py @@ -7,14 +7,14 @@ import torch.nn as nn from PIL import Image -from pybuda import ( +from forge import ( TFLiteModule, VerifyConfig, BackendType, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.verify.config import TestKind from tvm import relay import tflite import tensorflow as tf diff --git a/pybuda/test/tvm/nightly/get_pytorch_model_with_activations.py b/forge/test/tvm/nightly/get_pytorch_model_with_activations.py similarity index 98% rename from pybuda/test/tvm/nightly/get_pytorch_model_with_activations.py rename to forge/test/tvm/nightly/get_pytorch_model_with_activations.py index c274df70d..6264488af 100644 --- a/pybuda/test/tvm/nightly/get_pytorch_model_with_activations.py +++ b/forge/test/tvm/nightly/get_pytorch_model_with_activations.py @@ -28,7 +28,7 @@ ConvNextModel, ) from transformers.models.gptj.modeling_gptj import GPTJBlock -from pybuda import ( +from forge import ( CompileDepth, CompilerConfig, ) @@ -796,7 +796,7 @@ def get_open_pose_hand_torch(training, recompute): ], compile_cfg -pytorch_model_name_to_pybuda_model = { +pytorch_model_name_to_forge_model = { "albert_attention_torch": get_albert_attention, "alexnet_torch": get_alexnet_model, "bert_encoder_torch": get_bert_encoder, @@ -840,7 +840,7 @@ def get_open_pose_hand_torch(training, recompute): } -passing_pytorch_model_name_to_pybuda_model_inference = [ +passing_pytorch_model_name_to_forge_model_inference = [ "albert_attention_torch", "alexnet_torch", "bert_encoder_torch", @@ -856,7 +856,7 @@ def get_open_pose_hand_torch(training, recompute): "efficientnet_layer_torch", "fcn_torch", "gpt2_block_torch", - "gptj_block_torch", # tenstorrent/pybuda#63 + "gptj_block_torch", # tenstorrent/forge#63 "gptneo_125M_block_torch", "gptneo_13B_block_torch", "gptneo_27B_block_torch", @@ -879,7 +879,7 @@ def get_open_pose_hand_torch(training, recompute): ] -passing_pytorch_model_name_to_pybuda_model_training = [ +passing_pytorch_model_name_to_forge_model_training = [ "albert_attention_torch", "bert_encoder_torch", "CLIP_guided_diffusion_qkv_attention", @@ -889,6 +889,6 @@ def get_open_pose_hand_torch(training, recompute): "gptneo_125M_block_torch", "mnasnet_torch", "mobilenet_v1_torch", - # "gptneo_13B_block_torch", # Error: TT_ASSERT @ pybuda/csrc/passes/balancer_error_handlers.cpp:46: op_node->is_matmul() - # "gptneo_27B_block_torch", # Error: TT_ASSERT @ pybuda/csrc/passes/balancer_error_handlers.cpp:46: op_node->is_matmul() + # "gptneo_13B_block_torch", # Error: TT_ASSERT @ forge/csrc/passes/balancer_error_handlers.cpp:46: op_node->is_matmul() + # "gptneo_27B_block_torch", # Error: TT_ASSERT @ 
forge/csrc/passes/balancer_error_handlers.cpp:46: op_node->is_matmul() ] diff --git a/pybuda/test/tvm/nightly/get_tensorflow_model_with_activations.py b/forge/test/tvm/nightly/get_tensorflow_model_with_activations.py similarity index 98% rename from pybuda/test/tvm/nightly/get_tensorflow_model_with_activations.py rename to forge/test/tvm/nightly/get_tensorflow_model_with_activations.py index 952906a1a..3b34daa3d 100644 --- a/pybuda/test/tvm/nightly/get_tensorflow_model_with_activations.py +++ b/forge/test/tvm/nightly/get_tensorflow_model_with_activations.py @@ -23,7 +23,7 @@ from test.tvm.nlp.tensorflow.detr.config import TrainingConfig from test.tvm.nlp.tensorflow.detr.detr import get_detr_model -from pybuda.config import CompileDepth, CompilerConfig +from forge.config import CompileDepth, CompilerConfig class Identity(tf.keras.Model): def __init__(self): @@ -431,7 +431,7 @@ def get_detr(training, recompute): return model, [act], compile_cfg -tensorflow_model_name_to_pybuda_model = { +tensorflow_model_name_to_forge_model = { "albert_attention_tf": get_albert_attention, "bert_encoder_tf": get_bert_layer, "detr": get_detr, @@ -449,7 +449,7 @@ def get_detr(training, recompute): "wav2vec2_tf": get_wav2vec2_tf, } -passing_tensorflow_model_name_to_pybuda_model_inference = [ +passing_tensorflow_model_name_to_forge_model_inference = [ "detr", "efficientnet_layer_tf", "gptj_full_tf", @@ -460,7 +460,7 @@ def get_detr(training, recompute): "vgg16_tf", ] -passing_tensorflow_model_name_to_pybuda_model_training = [ +passing_tensorflow_model_name_to_forge_model_training = [ "efficientnet_layer_tf", "roberta_encoder_tf", ] diff --git a/pybuda/test/tvm/nightly/test_pytorch_models.py b/forge/test/tvm/nightly/test_pytorch_models.py similarity index 85% rename from pybuda/test/tvm/nightly/test_pytorch_models.py rename to forge/test/tvm/nightly/test_pytorch_models.py index 3a2e3096e..7ab01a8b8 100644 --- a/pybuda/test/tvm/nightly/test_pytorch_models.py +++ b/forge/test/tvm/nightly/test_pytorch_models.py @@ -9,19 +9,19 @@ from urllib.error import HTTPError -from pybuda import ( +from forge import ( TTDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, PyTorchModule, CompilerConfig, CompileDepth, optimizers, ) -import pybuda.compile as COMPILE_INFO +import forge.compile as COMPILE_INFO -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge from test.tvm.nightly.get_pytorch_model_with_activations import * @@ -36,12 +36,12 @@ def initialize_device(model_name, model): @pytest.mark.parametrize("mode", ["inference", "training", "recompute"]) -@pytest.mark.parametrize("model_name", [x for x in pytorch_model_name_to_pybuda_model.keys()]) +@pytest.mark.parametrize("model_name", [x for x in pytorch_model_name_to_forge_model.keys()]) def test_real_networks(mode, model_name): - if mode == "inference" and model_name in passing_pytorch_model_name_to_pybuda_model_inference: + if mode == "inference" and model_name in passing_pytorch_model_name_to_forge_model_inference: pytest.skip() - if (mode == "training" or mode == "recompute") and model_name in passing_pytorch_model_name_to_pybuda_model_training: + if (mode == "training" or mode == "recompute") and model_name in passing_pytorch_model_name_to_forge_model_training: pytest.skip() if mode == "inference": @@ -59,7 +59,7 @@ def test_real_networks(mode, model_name): http_tries = 0 while (http_tries < max_http_tries): try: - model_config = pytorch_model_name_to_pybuda_model[model_name](training, 
recompute) + model_config = pytorch_model_name_to_forge_model[model_name](training, recompute) break except HTTPError as e: http_tries += 1 @@ -79,7 +79,7 @@ def test_real_networks(mode, model_name): tt_device = initialize_device(model_name, model) try: - ret = pybuda_compile( + ret = forge_compile( tt_device, model_name, inputs[0], @@ -87,7 +87,7 @@ def test_real_networks(mode, model_name): verify_cfg=VerifyConfig(intermediates=True, verify_last=False, waive_gradient_errors=waive_gradients), ) - evaluate_framework_vs_pybuda(model, ret, *inputs) + evaluate_framework_vs_forge(model, ret, *inputs) except Exception as e: pytest.fail( msg=f"Last completed compile stage: {COMPILE_INFO.LAST_SUCCESSFUL_STAGE}. Error: {e}", diff --git a/pybuda/test/tvm/nightly/test_supported_pytorch_models.py b/forge/test/tvm/nightly/test_supported_pytorch_models.py similarity index 86% rename from pybuda/test/tvm/nightly/test_supported_pytorch_models.py rename to forge/test/tvm/nightly/test_supported_pytorch_models.py index 64bcc4911..a76362e94 100644 --- a/pybuda/test/tvm/nightly/test_supported_pytorch_models.py +++ b/forge/test/tvm/nightly/test_supported_pytorch_models.py @@ -8,21 +8,21 @@ import pytest from urllib.error import HTTPError -from pybuda import ( +from forge import ( TTDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, PyTorchModule, CompilerConfig, CompileDepth, optimizers, ) -import pybuda.compile as COMPILE_INFO +import forge.compile as COMPILE_INFO -from pybuda.verify import verify_module +from forge.verify import verify_module from test.tvm.nightly.get_pytorch_model_with_activations import * -from pybuda.config import CompileDepth, _set_global_compiler_config +from forge.config import CompileDepth, _set_global_compiler_config def initialize_device(model_name, model): mod = PyTorchModule(model_name, model) @@ -35,9 +35,9 @@ def initialize_device(model_name, model): @pytest.mark.parametrize("mode", ["inference", "training", "recompute"]) -@pytest.mark.parametrize("model_name", passing_pytorch_model_name_to_pybuda_model_inference) +@pytest.mark.parametrize("model_name", passing_pytorch_model_name_to_forge_model_inference) def test_supported_real_networks(model_name, mode): - if (mode == "training" or mode == "recompute") and model_name not in passing_pytorch_model_name_to_pybuda_model_training: + if (mode == "training" or mode == "recompute") and model_name not in passing_pytorch_model_name_to_forge_model_training: pytest.skip() if mode == "inference": @@ -55,7 +55,7 @@ def test_supported_real_networks(model_name, mode): http_tries = 0 while (http_tries < max_http_tries): try: - model_config = pytorch_model_name_to_pybuda_model[model_name](training, recompute) + model_config = pytorch_model_name_to_forge_model[model_name](training, recompute) break except HTTPError as e: http_tries += 1 diff --git a/pybuda/test/tvm/nightly/test_supported_tensorflow_models.py b/forge/test/tvm/nightly/test_supported_tensorflow_models.py similarity index 85% rename from pybuda/test/tvm/nightly/test_supported_tensorflow_models.py rename to forge/test/tvm/nightly/test_supported_tensorflow_models.py index c286f893d..f735a6fe0 100644 --- a/pybuda/test/tvm/nightly/test_supported_tensorflow_models.py +++ b/forge/test/tvm/nightly/test_supported_tensorflow_models.py @@ -8,28 +8,28 @@ from urllib.error import HTTPError -from pybuda import ( +from forge import ( TTDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, TFModule, CompilerConfig, CompileDepth, optimizers, ) -import 
pybuda.compile as COMPILE_INFO +import forge.compile as COMPILE_INFO from test.tvm.nightly.get_tensorflow_model_with_activations import * -from pybuda.verify import verify_module -from pybuda.config import CompileDepth, _set_global_compiler_config +from forge.verify import verify_module +from forge.config import CompileDepth, _set_global_compiler_config @pytest.mark.parametrize("mode", ["inference", "training", "recompute"]) @pytest.mark.parametrize("enable_tvm_constant_prop", [True, False]) -@pytest.mark.parametrize("model_name", passing_tensorflow_model_name_to_pybuda_model_inference) +@pytest.mark.parametrize("model_name", passing_tensorflow_model_name_to_forge_model_inference) def test_real_networks(mode, enable_tvm_constant_prop, model_name): - if (mode == "training" or mode == "recompute") and model_name not in passing_tensorflow_model_name_to_pybuda_model_training: + if (mode == "training" or mode == "recompute") and model_name not in passing_tensorflow_model_name_to_forge_model_training: pytest.skip() if mode == "inference": @@ -47,7 +47,7 @@ def test_real_networks(mode, enable_tvm_constant_prop, model_name): http_tries = 0 while (http_tries < max_http_tries): try: - model_config = tensorflow_model_name_to_pybuda_model[model_name](training, recompute) + model_config = tensorflow_model_name_to_forge_model[model_name](training, recompute) break except HTTPError as e: http_tries += 1 diff --git a/pybuda/test/tvm/nightly/test_tensorflow_models.py b/forge/test/tvm/nightly/test_tensorflow_models.py similarity index 85% rename from pybuda/test/tvm/nightly/test_tensorflow_models.py rename to forge/test/tvm/nightly/test_tensorflow_models.py index 00c40d0fd..8fc00c796 100644 --- a/pybuda/test/tvm/nightly/test_tensorflow_models.py +++ b/forge/test/tvm/nightly/test_tensorflow_models.py @@ -10,19 +10,19 @@ from urllib.error import HTTPError -from pybuda import ( +from forge import ( TTDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, TFModule, CompilerConfig, CompileDepth, optimizers, ) -import pybuda.compile as COMPILE_INFO +import forge.compile as COMPILE_INFO -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge from test.tvm.nightly.get_tensorflow_model_with_activations import * @@ -38,12 +38,12 @@ def initialize_device(model_name, model): @pytest.mark.parametrize("mode", ["inference", "training", "recompute"]) @pytest.mark.parametrize("enable_tvm_constant_prop", [True, False]) -@pytest.mark.parametrize("model_name", [x for x in tensorflow_model_name_to_pybuda_model.keys()]) +@pytest.mark.parametrize("model_name", [x for x in tensorflow_model_name_to_forge_model.keys()]) def test_real_networks(mode, enable_tvm_constant_prop, model_name): - if mode == "inference" and model_name in passing_tensorflow_model_name_to_pybuda_model_inference: + if mode == "inference" and model_name in passing_tensorflow_model_name_to_forge_model_inference: pytest.skip() - if (mode == "training" or mode == "recompute") and model_name in passing_tensorflow_model_name_to_pybuda_model_training: + if (mode == "training" or mode == "recompute") and model_name in passing_tensorflow_model_name_to_forge_model_training: pytest.skip() if mode == "inference": @@ -61,7 +61,7 @@ def test_real_networks(mode, enable_tvm_constant_prop, model_name): http_tries = 0 while (http_tries < max_http_tries): try: - model_config = tensorflow_model_name_to_pybuda_model[model_name](training, recompute) + model_config = 
tensorflow_model_name_to_forge_model[model_name](training, recompute) break except HTTPError as e: http_tries += 1 @@ -83,7 +83,7 @@ def test_real_networks(mode, enable_tvm_constant_prop, model_name): tt_device = initialize_device(model_name, model) try: - ret = pybuda_compile( + ret = forge_compile( tt_device, model_name, *inputs, @@ -91,7 +91,7 @@ def test_real_networks(mode, enable_tvm_constant_prop, model_name): verify_cfg=VerifyConfig(intermediates=True, verify_last=False, waive_gradient_errors=waive_gradients), ) - evaluate_framework_vs_pybuda(model, ret, *inputs) + evaluate_framework_vs_forge(model, ret, *inputs) except Exception as e: pytest.fail( msg=f"Last completed compile stage: {COMPILE_INFO.LAST_SUCCESSFUL_STAGE}. Error: {e}", diff --git a/pybuda/test/tvm/nlp/__init__.py b/forge/test/tvm/nlp/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/__init__.py rename to forge/test/tvm/nlp/__init__.py diff --git a/pybuda/test/tvm/nlp/jax/test_bert.py b/forge/test/tvm/nlp/jax/test_bert.py similarity index 94% rename from pybuda/test/tvm/nlp/jax/test_bert.py rename to forge/test/tvm/nlp/jax/test_bert.py index 229fcb834..2fb036bcd 100644 --- a/pybuda/test/tvm/nlp/jax/test_bert.py +++ b/forge/test/tvm/nlp/jax/test_bert.py @@ -14,13 +14,13 @@ from transformers.models.bert.modeling_flax_bert import FlaxBertEmbeddings, FlaxBertAttention, FlaxBertSelfAttention, FlaxBertIntermediate, FlaxBertEncoder, FlaxBertPooler from transformers.models.bert.configuration_bert import BertConfig -from pybuda import ( +from forge import ( JaxModule, VerifyConfig, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config def test_bert(test_kind, test_device): @@ -74,9 +74,9 @@ def __call__(self, input_ids): # input_ids=input_ids, # ) - pybuda_module = JaxModule("bert_jax", framework_module) + forge_module = JaxModule("bert_jax", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -147,9 +147,9 @@ def __call__(self, input_ids): input_ids=input_ids, ) - pybuda_module = JaxModule("bert_embeddings_jax", framework_module) + forge_module = JaxModule("bert_embeddings_jax", framework_module) verify_module( - pybuda_module, + forge_module, (input_ids_shape,), inputs=[(input_ids,),], verify_cfg=VerifyConfig( @@ -214,9 +214,9 @@ def __call__(self, hidden_state): # hidden_state=hidden_state, # ) - pybuda_module = JaxModule("bert_attention_jax", framework_module) + forge_module = JaxModule("bert_attention_jax", framework_module) verify_module( - pybuda_module, + forge_module, (hidden_state_shape,), inputs=[(hidden_state,),], verify_cfg=VerifyConfig( @@ -277,9 +277,9 @@ def __call__(self, hidden_state): # hidden_state=hidden_state, # ) - pybuda_module = JaxModule("bert_intermediates_jax", framework_module) + forge_module = JaxModule("bert_intermediates_jax", framework_module) verify_module( - pybuda_module, + forge_module, (hidden_state_shape,), inputs=[(hidden_state,),], verify_cfg=VerifyConfig( @@ -347,9 +347,9 @@ def __call__(self, hidden_state): # hidden_state=hidden_state, # ) - pybuda_module = JaxModule("bert_self_attention_jax", framework_module) + forge_module = JaxModule("bert_self_attention_jax", framework_module) verify_module( - pybuda_module, + forge_module, 
(hidden_state_shape,), inputs=[(hidden_state,),], verify_cfg=VerifyConfig( @@ -429,9 +429,9 @@ def __call__(self, hidden_states): # hidden_states=hidden_states, # ) - pybuda_module = JaxModule("bert_encoder_jax", framework_module) + forge_module = JaxModule("bert_encoder_jax", framework_module) verify_module( - pybuda_module, + forge_module, (hidden_states_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -474,9 +474,9 @@ def test_bert_pooler(test_kind, test_device): # hidden_states=hidden_states, # ) - pybuda_module = JaxModule("bert_pooler_jax", framework_module) + forge_module = JaxModule("bert_pooler_jax", framework_module) verify_module( - pybuda_module, + forge_module, (hidden_states_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/nlp/onnx/__init__.py b/forge/test/tvm/nlp/onnx/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/onnx/__init__.py rename to forge/test/tvm/nlp/onnx/__init__.py diff --git a/pybuda/test/tvm/nlp/onnx/tests_A/__init__.py b/forge/test/tvm/nlp/onnx/tests_A/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/onnx/tests_A/__init__.py rename to forge/test/tvm/nlp/onnx/tests_A/__init__.py diff --git a/pybuda/test/tvm/nlp/onnx/tests_A/test_roberta.py b/forge/test/tvm/nlp/onnx/tests_A/test_roberta.py similarity index 95% rename from pybuda/test/tvm/nlp/onnx/tests_A/test_roberta.py rename to forge/test/tvm/nlp/onnx/tests_A/test_roberta.py index fab372aea..1019c85be 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_A/test_roberta.py +++ b/forge/test/tvm/nlp/onnx/tests_A/test_roberta.py @@ -3,24 +3,24 @@ # SPDX-License-Identifier: Apache-2.0 import onnx import onnxruntime as ort -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice import pytest import torch -from pybuda import ( +from forge import ( OnnxModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, TFGraphDefModule, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth from test.backend.models.test_bert import get_relaxed_atol_pcc -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind import urllib from loguru import logger diff --git a/pybuda/test/tvm/nlp/onnx/tests_A/test_unispeech.py b/forge/test/tvm/nlp/onnx/tests_A/test_unispeech.py similarity index 88% rename from pybuda/test/tvm/nlp/onnx/tests_A/test_unispeech.py rename to forge/test/tvm/nlp/onnx/tests_A/test_unispeech.py index 3761f2062..45270ee2a 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_A/test_unispeech.py +++ b/forge/test/tvm/nlp/onnx/tests_A/test_unispeech.py @@ -9,18 +9,18 @@ import torch from transformers import UniSpeechModel -from pybuda import ( +from forge import ( OnnxModule, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from 
forge.verify.config import TestKind import os import onnx diff --git a/pybuda/test/tvm/nlp/onnx/tests_A/test_wav2vec.py b/forge/test/tvm/nlp/onnx/tests_A/test_wav2vec.py similarity index 86% rename from pybuda/test/tvm/nlp/onnx/tests_A/test_wav2vec.py rename to forge/test/tvm/nlp/onnx/tests_A/test_wav2vec.py index b2c9bac6f..eb433d272 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_A/test_wav2vec.py +++ b/forge/test/tvm/nlp/onnx/tests_A/test_wav2vec.py @@ -6,16 +6,16 @@ import torch from transformers import Wav2Vec2Model -from pybuda import ( +from forge import ( OnnxModule, VerifyConfig, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind import os import onnx diff --git a/pybuda/test/tvm/nlp/onnx/tests_B/__init__.py b/forge/test/tvm/nlp/onnx/tests_B/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/onnx/tests_B/__init__.py rename to forge/test/tvm/nlp/onnx/tests_B/__init__.py diff --git a/pybuda/test/tvm/nlp/onnx/tests_B/test_albert.py b/forge/test/tvm/nlp/onnx/tests_B/test_albert.py similarity index 88% rename from pybuda/test/tvm/nlp/onnx/tests_B/test_albert.py rename to forge/test/tvm/nlp/onnx/tests_B/test_albert.py index 7d97bd64b..4b93cb678 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_B/test_albert.py +++ b/forge/test/tvm/nlp/onnx/tests_B/test_albert.py @@ -2,13 +2,13 @@ # SPDX-License-Identifier: Apache-2.0 -import pybuda +import forge import pytest import torch from transformers import AlbertConfig, AlbertModel -from pybuda import ( +from forge import ( OnnxModule, TTDevice, CPUDevice, @@ -16,12 +16,12 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind import onnx import os diff --git a/pybuda/test/tvm/nlp/onnx/tests_B/test_bart.py b/forge/test/tvm/nlp/onnx/tests_B/test_bart.py similarity index 97% rename from pybuda/test/tvm/nlp/onnx/tests_B/test_bart.py rename to forge/test/tvm/nlp/onnx/tests_B/test_bart.py index 2fc929e7f..d3bd7993b 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_B/test_bart.py +++ b/forge/test/tvm/nlp/onnx/tests_B/test_bart.py @@ -11,12 +11,12 @@ from transformers import BartConfig, BartModel import onnx -from pybuda import ( +from forge import ( OnnxModule, VerifyConfig, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.verify import verify_module +from forge.verify.config import TestKind def test_bart_encoder(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/onnx/tests_B/test_bert.py b/forge/test/tvm/nlp/onnx/tests_B/test_bert.py similarity index 90% rename from pybuda/test/tvm/nlp/onnx/tests_B/test_bert.py 
rename to forge/test/tvm/nlp/onnx/tests_B/test_bert.py index 68b75c20c..cca5e9321 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_B/test_bert.py +++ b/forge/test/tvm/nlp/onnx/tests_B/test_bert.py @@ -5,21 +5,21 @@ import onnxruntime as ort import pytest import torch -from pybuda import ( +from forge import ( OnnxModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, TFGraphDefModule, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.backend.models.test_bert import get_relaxed_atol_pcc import urllib @@ -69,9 +69,9 @@ def test_tvm_bert_squad_onnx(test_kind, test_device): from transformers import BertModel, BertConfig, BertForPreTraining -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind def test_bert_encoder(test_kind, test_device): if test_kind == TestKind.TRAINING: # only run recompute test in post-commit diff --git a/pybuda/test/tvm/nlp/onnx/tests_B/test_detr.py b/forge/test/tvm/nlp/onnx/tests_B/test_detr.py similarity index 93% rename from pybuda/test/tvm/nlp/onnx/tests_B/test_detr.py rename to forge/test/tvm/nlp/onnx/tests_B/test_detr.py index dfb9ecb13..4fdeba866 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_B/test_detr.py +++ b/forge/test/tvm/nlp/onnx/tests_B/test_detr.py @@ -11,20 +11,20 @@ import torch from transformers import DetrModel -from pybuda import ( +from forge import ( OnnxModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind def test_detr_encoder_layer(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/onnx/tests_B/test_distilbert.py b/forge/test/tvm/nlp/onnx/tests_B/test_distilbert.py similarity index 91% rename from pybuda/test/tvm/nlp/onnx/tests_B/test_distilbert.py rename to forge/test/tvm/nlp/onnx/tests_B/test_distilbert.py index 419f769e4..1d73b5a26 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_B/test_distilbert.py +++ b/forge/test/tvm/nlp/onnx/tests_B/test_distilbert.py @@ -12,10 +12,10 @@ from transformers import DistilBertModel -from pybuda import OnnxModule, VerifyConfig -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge import OnnxModule, VerifyConfig +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config def test_distilbert_onnx(test_kind, test_device): diff --git 
a/pybuda/test/tvm/nlp/onnx/tests_B/test_gpt2.py b/forge/test/tvm/nlp/onnx/tests_B/test_gpt2.py similarity index 96% rename from pybuda/test/tvm/nlp/onnx/tests_B/test_gpt2.py rename to forge/test/tvm/nlp/onnx/tests_B/test_gpt2.py index f6281efd6..32400f82d 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_B/test_gpt2.py +++ b/forge/test/tvm/nlp/onnx/tests_B/test_gpt2.py @@ -5,21 +5,21 @@ import onnxruntime as ort import pytest import torch -from pybuda import ( +from forge import ( OnnxModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, TFGraphDefModule, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind import urllib import os diff --git a/pybuda/test/tvm/nlp/onnx/tests_B/test_gptj.py b/forge/test/tvm/nlp/onnx/tests_B/test_gptj.py similarity index 95% rename from pybuda/test/tvm/nlp/onnx/tests_B/test_gptj.py rename to forge/test/tvm/nlp/onnx/tests_B/test_gptj.py index 224a71e74..fc8d7664f 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_B/test_gptj.py +++ b/forge/test/tvm/nlp/onnx/tests_B/test_gptj.py @@ -14,20 +14,20 @@ from transformers import GPTJConfig from transformers.models.gptj.modeling_gptj import GPTJBlock -from pybuda import ( +from forge import ( OnnxModule, CompileDepth, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind def test_gptj_block(test_kind, test_device): # unspported op Gather with the new environment - # tenstorrent/pybuda#1610 + # tenstorrent/forge#1610 pytest.skip() # Only run recompute test in post-commit @@ -69,7 +69,7 @@ def test_gptj_block(test_kind, test_device): # Load ONNX module onnx_module = onnx.load(save_path) onnx.checker.check_model(onnx_module) - pybuda_onnx_module = OnnxModule( + forge_onnx_module = OnnxModule( "gptj_block_onnx", onnx_module, save_path, @@ -82,7 +82,7 @@ def test_gptj_block(test_kind, test_device): input_shape.append(i_shape) verify_module( - pybuda_onnx_module, + forge_onnx_module, input_shape, verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/nlp/onnx/tests_C/__init__.py b/forge/test/tvm/nlp/onnx/tests_C/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/onnx/tests_C/__init__.py rename to forge/test/tvm/nlp/onnx/tests_C/__init__.py diff --git a/pybuda/test/tvm/nlp/onnx/tests_C/test_gptneo.py b/forge/test/tvm/nlp/onnx/tests_C/test_gptneo.py similarity index 90% rename from pybuda/test/tvm/nlp/onnx/tests_C/test_gptneo.py rename to forge/test/tvm/nlp/onnx/tests_C/test_gptneo.py index 3baae7003..2b0090c96 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_C/test_gptneo.py +++ b/forge/test/tvm/nlp/onnx/tests_C/test_gptneo.py @@ -6,16 +6,16 @@ import torch from transformers import GPTNeoModel, GPTNeoConfig -from pybuda import ( +from forge import ( PyTorchModule, CompileDepth, VerifyConfig, OnnxModule, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module 
-from pybuda.verify.config import TestKind +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind import onnx import os diff --git a/pybuda/test/tvm/nlp/onnx/tests_C/test_nbeats.py b/forge/test/tvm/nlp/onnx/tests_C/test_nbeats.py similarity index 97% rename from pybuda/test/tvm/nlp/onnx/tests_C/test_nbeats.py rename to forge/test/tvm/nlp/onnx/tests_C/test_nbeats.py index d706a721c..30217b0f9 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_C/test_nbeats.py +++ b/forge/test/tvm/nlp/onnx/tests_C/test_nbeats.py @@ -13,15 +13,15 @@ NBEATSSeasonalBlock, ) -from pybuda import ( +from forge import ( OnnxModule, BackendType, VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import CompileDepth +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import CompileDepth def test_tvm_nbeats_block(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/onnx/tests_C/test_opt.py b/forge/test/tvm/nlp/onnx/tests_C/test_opt.py similarity index 94% rename from pybuda/test/tvm/nlp/onnx/tests_C/test_opt.py rename to forge/test/tvm/nlp/onnx/tests_C/test_opt.py index b697524e4..5a2c190be 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_C/test_opt.py +++ b/forge/test/tvm/nlp/onnx/tests_C/test_opt.py @@ -9,12 +9,12 @@ from loguru import logger -from pybuda import ( +from forge import ( OnnxModule, VerifyConfig, ) -from pybuda.config import _get_global_compiler_config -from pybuda.verify import verify_module +from forge.config import _get_global_compiler_config +from forge.verify import verify_module from transformers import OPTModel, OPTConfig def test_tvm_opt_fallback(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/onnx/tests_C/test_squeeze_bert.py b/forge/test/tvm/nlp/onnx/tests_C/test_squeeze_bert.py similarity index 93% rename from pybuda/test/tvm/nlp/onnx/tests_C/test_squeeze_bert.py rename to forge/test/tvm/nlp/onnx/tests_C/test_squeeze_bert.py index 3eba76f44..62ed8e170 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_C/test_squeeze_bert.py +++ b/forge/test/tvm/nlp/onnx/tests_C/test_squeeze_bert.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest import torch @@ -11,7 +11,7 @@ import math import itertools -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, @@ -19,14 +19,14 @@ VerifyConfig, OnnxModule, optimizers, - pybuda_compile, + forge_compile, tvm_to_python, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind import os import onnx import onnxruntime as ort diff --git a/pybuda/test/tvm/nlp/onnx/tests_C/test_t5.py b/forge/test/tvm/nlp/onnx/tests_C/test_t5.py similarity index 96% rename from pybuda/test/tvm/nlp/onnx/tests_C/test_t5.py rename to 
forge/test/tvm/nlp/onnx/tests_C/test_t5.py index add2723f2..b41554294 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_C/test_t5.py +++ b/forge/test/tvm/nlp/onnx/tests_C/test_t5.py @@ -5,7 +5,7 @@ import onnxruntime as ort import pytest import torch -from pybuda import ( +from forge import ( OnnxModule, TTDevice, BackendType, @@ -13,14 +13,14 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, TFGraphDefModule, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth from loguru import logger -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from transformers import T5Config, T5Model, T5ForConditionalGeneration, T5Tokenizer, T5EncoderModel import urllib diff --git a/pybuda/test/tvm/nlp/onnx/tests_C/test_xglm.py b/forge/test/tvm/nlp/onnx/tests_C/test_xglm.py similarity index 92% rename from pybuda/test/tvm/nlp/onnx/tests_C/test_xglm.py rename to forge/test/tvm/nlp/onnx/tests_C/test_xglm.py index f7f1978ac..f9c9d1edc 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_C/test_xglm.py +++ b/forge/test/tvm/nlp/onnx/tests_C/test_xglm.py @@ -1,14 +1,14 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda._C.backend_api import BackendDevice -from pybuda.config import CompileDepth -from pybuda.verify.config import TestKind +from forge._C.backend_api import BackendDevice +from forge.config import CompileDepth +from forge.verify.config import TestKind import pytest import torch from transformers import XGLMModel, XGLMConfig -from pybuda import ( +from forge import ( PyTorchModule, OnnxModule, TTDevice, @@ -16,14 +16,14 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, tvm_to_python, ) -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config import os import onnx -from pybuda.verify import verify_module +from forge.verify import verify_module def test_tvm_xglm(test_kind, test_device): if test_kind == TestKind.TRAINING: diff --git a/pybuda/test/tvm/nlp/onnx/tests_C/test_xlm.py b/forge/test/tvm/nlp/onnx/tests_C/test_xlm.py similarity index 87% rename from pybuda/test/tvm/nlp/onnx/tests_C/test_xlm.py rename to forge/test/tvm/nlp/onnx/tests_C/test_xlm.py index 18e93bf2c..f319c0149 100644 --- a/pybuda/test/tvm/nlp/onnx/tests_C/test_xlm.py +++ b/forge/test/tvm/nlp/onnx/tests_C/test_xlm.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest import torch @@ -10,22 +10,22 @@ import math import itertools -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, tvm_to_python, OnnxModule, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind import os import onnx 
diff --git a/pybuda/test/tvm/nlp/pytorch/__init__.py b/forge/test/tvm/nlp/pytorch/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/__init__.py rename to forge/test/tvm/nlp/pytorch/__init__.py diff --git a/pybuda/test/tvm/nlp/pytorch/bloom/__init__.py b/forge/test/tvm/nlp/pytorch/bloom/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/bloom/__init__.py rename to forge/test/tvm/nlp/pytorch/bloom/__init__.py diff --git a/pybuda/test/tvm/nlp/pytorch/bloom/model.py b/forge/test/tvm/nlp/pytorch/bloom/model.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/bloom/model.py rename to forge/test/tvm/nlp/pytorch/bloom/model.py diff --git a/pybuda/test/tvm/nlp/pytorch/bloom/ttmodel.py b/forge/test/tvm/nlp/pytorch/bloom/ttmodel.py similarity index 99% rename from pybuda/test/tvm/nlp/pytorch/bloom/ttmodel.py rename to forge/test/tvm/nlp/pytorch/bloom/ttmodel.py index 4b5a64560..eb392d647 100644 --- a/pybuda/test/tvm/nlp/pytorch/bloom/ttmodel.py +++ b/forge/test/tvm/nlp/pytorch/bloom/ttmodel.py @@ -212,7 +212,7 @@ def forward(self, hidden_states, alibi, transpose_hidden_states): # preallocting result tensor: [b * np, sq, sk] matmul_result = alibi[:output_size[0]*output_size[1], :, :output_size[3]] - # manual baddbmm as this isn't supported by pybuda yet + # manual baddbmm as this isn't supported by forge yet beta = 1.0 / self.layer_number alpha = 1.0 / self.norm_factor matmul_result = beta * matmul_result + alpha * torch.bmm( diff --git a/pybuda/test/tvm/nlp/pytorch/gnmt/__init__.py b/forge/test/tvm/nlp/pytorch/gnmt/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/gnmt/__init__.py rename to forge/test/tvm/nlp/pytorch/gnmt/__init__.py diff --git a/pybuda/test/tvm/nlp/pytorch/gnmt/attention.py b/forge/test/tvm/nlp/pytorch/gnmt/attention.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/gnmt/attention.py rename to forge/test/tvm/nlp/pytorch/gnmt/attention.py diff --git a/pybuda/test/tvm/nlp/pytorch/gnmt/config.py b/forge/test/tvm/nlp/pytorch/gnmt/config.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/gnmt/config.py rename to forge/test/tvm/nlp/pytorch/gnmt/config.py diff --git a/pybuda/test/tvm/nlp/pytorch/gnmt/decoder.py b/forge/test/tvm/nlp/pytorch/gnmt/decoder.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/gnmt/decoder.py rename to forge/test/tvm/nlp/pytorch/gnmt/decoder.py diff --git a/pybuda/test/tvm/nlp/pytorch/gnmt/encoder.py b/forge/test/tvm/nlp/pytorch/gnmt/encoder.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/gnmt/encoder.py rename to forge/test/tvm/nlp/pytorch/gnmt/encoder.py diff --git a/pybuda/test/tvm/nlp/pytorch/gnmt/gnmt.py b/forge/test/tvm/nlp/pytorch/gnmt/gnmt.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/gnmt/gnmt.py rename to forge/test/tvm/nlp/pytorch/gnmt/gnmt.py diff --git a/pybuda/test/tvm/nlp/pytorch/gnmt/seq2seq_base.py b/forge/test/tvm/nlp/pytorch/gnmt/seq2seq_base.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/gnmt/seq2seq_base.py rename to forge/test/tvm/nlp/pytorch/gnmt/seq2seq_base.py diff --git a/pybuda/test/tvm/nlp/pytorch/gnmt/utils.py b/forge/test/tvm/nlp/pytorch/gnmt/utils.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/gnmt/utils.py rename to forge/test/tvm/nlp/pytorch/gnmt/utils.py diff --git a/pybuda/test/tvm/nlp/pytorch/tests_A/__init__.py b/forge/test/tvm/nlp/pytorch/tests_A/__init__.py similarity index 100% rename from 
pybuda/test/tvm/nlp/pytorch/tests_A/__init__.py rename to forge/test/tvm/nlp/pytorch/tests_A/__init__.py diff --git a/pybuda/test/tvm/nlp/pytorch/tests_A/test_albert.py b/forge/test/tvm/nlp/pytorch/tests_A/test_albert.py similarity index 88% rename from pybuda/test/tvm/nlp/pytorch/tests_A/test_albert.py rename to forge/test/tvm/nlp/pytorch/tests_A/test_albert.py index c239bd5c4..1e07f8b41 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_A/test_albert.py +++ b/forge/test/tvm/nlp/pytorch/tests_A/test_albert.py @@ -5,13 +5,13 @@ # Some basic bring-up tests of tracing functionality # import configparser -import pybuda +import forge import pytest import torch from transformers import AlbertConfig, AlbertModel -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, CPUDevice, @@ -19,14 +19,14 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind -from pybuda.op.eval.common import compare_tensor_to_golden +from forge.op.eval.common import compare_tensor_to_golden from test.utils import download_model @@ -78,7 +78,7 @@ def test_albert_pipeline(test_device, version, add_pooling_layer): extended_attention_mask = extended_attention_mask.to(dtype=model.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 cpu0.push_to_inputs(input_ids, extended_attention_mask) - output_q = pybuda.run_inference(_verify_cfg=VerifyConfig(relative_atol=relative_atol)) + output_q = forge.run_inference(_verify_cfg=VerifyConfig(relative_atol=relative_atol)) outputs = output_q.get() torch_outputs = model(input_ids, attention_mask=attention_mask) @@ -111,7 +111,7 @@ def test_albert_v1(test_kind, test_device): waive_gradient_errors={"key.bias"}, ) ) - # evaluate_framework_vs_pybuda(submodel, res, hidden_states) + # evaluate_framework_vs_forge(submodel, res, hidden_states) def test_albert_v2(test_kind, test_device): if test_kind == TestKind.TRAINING: # only run recompute test in post-commit @@ -136,4 +136,4 @@ def test_albert_v2(test_kind, test_device): waive_gradient_errors={"key.bias"}, ) ) - # evaluate_framework_vs_pybuda(submodel, res, hidden_states) + # evaluate_framework_vs_forge(submodel, res, hidden_states) diff --git a/pybuda/test/tvm/nlp/pytorch/tests_A/test_bert.py b/forge/test/tvm/nlp/pytorch/tests_A/test_bert.py similarity index 90% rename from pybuda/test/tvm/nlp/pytorch/tests_A/test_bert.py rename to forge/test/tvm/nlp/pytorch/tests_A/test_bert.py index 4fd1a2ddd..d62b3166a 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_A/test_bert.py +++ b/forge/test/tvm/nlp/pytorch/tests_A/test_bert.py @@ -11,24 +11,24 @@ import torch from torch import nn from test.backend.models.test_bert import get_relaxed_atol_pcc -from pybuda.tensor import to_pt_tensors +from forge.tensor import to_pt_tensors from transformers import BertModel, BertConfig, BertForPreTraining -import pybuda -from pybuda import ( +import forge +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.op.eval import compare_tensor_to_golden -from test.tvm.utils import 
evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.op.eval import compare_tensor_to_golden +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model @@ -66,7 +66,7 @@ def test_bert_encoder(test_kind, test_device, size): ), input_params=[{"requires_grad": False}], ) - # evaluate_framework_vs_pybuda(submodel, ret, hidden_states) + # evaluate_framework_vs_forge(submodel, ret, hidden_states) def test_pt_pretrain_heads(test_device): @@ -155,14 +155,14 @@ def test_bert_direct_fallback(test_kind, test_device): model = BertModel(config, add_pooling_layer=False) mod = PyTorchModule("bert", model) - tt1 = pybuda.TTDevice("tt1", + tt1 = forge.TTDevice("tt1", devtype=test_device.devtype, arch=test_device.arch, module=mod) input_shape = (1, 128) input_ids = torch.randint(high=25000, size=input_shape) attention_mask = torch.ones(input_shape) tt1.push_to_inputs(input_ids, attention_mask) - output_q = pybuda.run_inference(_verify_cfg=VerifyConfig(relative_atol=0.3), _sequential=True) + output_q = forge.run_inference(_verify_cfg=VerifyConfig(relative_atol=0.3), _sequential=True) output = to_pt_tensors(output_q.get())[0] pt_output = model(input_ids, attention_mask)[0] diff --git a/pybuda/test/tvm/nlp/pytorch/tests_A/test_detr.py b/forge/test/tvm/nlp/pytorch/tests_A/test_detr.py similarity index 94% rename from pybuda/test/tvm/nlp/pytorch/tests_A/test_detr.py rename to forge/test/tvm/nlp/pytorch/tests_A/test_detr.py index 10ec33df1..099af87bd 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_A/test_detr.py +++ b/forge/test/tvm/nlp/pytorch/tests_A/test_detr.py @@ -10,19 +10,19 @@ import torch from transformers import DetrConfig, DetrModel -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config from test.utils import download_model def test_detr_50_full(test_kind, test_device): - # tenstorrent/pybuda#392 - os.environ["PYBUDA_DISABLE_CONSTANT_FOLDING"] = "1" + # tenstorrent/forge#392 + os.environ["FORGE_DISABLE_CONSTANT_FOLDING"] = "1" if test_kind.is_training(): # Training is currently unsupported @@ -60,7 +60,7 @@ def forward(self, hidden_states): "facebook/detr-resnet-50", torchscript=True ) framework_module = Wrapper(framework_module, input_shape) - pybuda_module = PyTorchModule( + forge_module = PyTorchModule( "pt_detr50", framework_module, ) @@ -70,7 +70,7 @@ def forward(self, hidden_states): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -119,7 +119,7 @@ def forward(self, hidden_states): # compiler_cfg.enable_tvm_unsupported_ops = True compiler_cfg.enable_tvm_constant_prop = True # compiler_cfg.cpu_fallback_ops.add("zeros") - # verify_cfg.verify_pybuda_codegen_vs_framework = False # PCC is over 0.992 + # verify_cfg.verify_forge_codegen_vs_framework = False # PCC is over 0.992 # Inputs input_shape = (1, 3, 256, 256) @@ -130,7 +130,7 @@ def forward(self, hidden_states): 
framework_module = DetrModel(framework_config) framework_module = Wrapper(framework_module, input_shape) - pybuda_module = PyTorchModule( + forge_module = PyTorchModule( "pt_detr18", framework_module, ) @@ -140,7 +140,7 @@ def forward(self, hidden_states): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/nlp/pytorch/tests_A/test_t5_small.py b/forge/test/tvm/nlp/pytorch/tests_A/test_t5_small.py similarity index 92% rename from pybuda/test/tvm/nlp/pytorch/tests_A/test_t5_small.py rename to forge/test/tvm/nlp/pytorch/tests_A/test_t5_small.py index b98ce44bd..9a151914c 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_A/test_t5_small.py +++ b/forge/test/tvm/nlp/pytorch/tests_A/test_t5_small.py @@ -5,30 +5,30 @@ import torch from transformers import T5Config, T5Model, T5ForConditionalGeneration, T5Tokenizer, T5EncoderModel -from pybuda.transformers.pipeline import pipeline as pybuda_pipeline -import pybuda -from pybuda import ( +from forge.transformers.pipeline import pipeline as forge_pipeline +import forge +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, CPUDevice, TTDevice, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda._C.backend_api import BackendType, BackendDevice +from test.tvm.utils import evaluate_framework_vs_forge +from forge._C.backend_api import BackendType, BackendDevice -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from loguru import logger -from pybuda.pybudaglobal import TILE_DIM +from forge.forgeglobal import TILE_DIM -from pybuda.op.eval.common import compare_tensor_to_golden +from forge.op.eval.common import compare_tensor_to_golden from test.utils import download_model @pytest.mark.skip(reason="Tested with fallback") @@ -196,7 +196,7 @@ def test_t5_encoder_pipeline(test_device): attention_mask = torch.ones((1, seq_len)) extended_attention_mask = t5_model.get_extended_attention_mask(attention_mask, input_ids.size()) cpu0.push_to_inputs(input_ids, extended_attention_mask) - output_q = pybuda.run_inference() + output_q = forge.run_inference() outputs = output_q.get() torch_outputs = t5_model(input_ids, attention_mask=attention_mask) @@ -277,7 +277,7 @@ def forward(self, hidden_states): input_ids = inputs["input_ids"] attention_mask = inputs["attention_mask"] cpu0.push_to_inputs(input_ids, attention_mask) - output_q = pybuda.run_inference() + output_q = forge.run_inference() outputs = output_q.get() @@ -384,15 +384,15 @@ def test_t5_past_cache(variant, test_device): pytest.skip() import os - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "169536" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "30000" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "169536" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "30000" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() 
compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b compiler_cfg.default_dram_parameters = False compiler_cfg.input_queues_on_host = True compiler_cfg.enable_auto_fusing = False @@ -483,8 +483,8 @@ def test_t5_past_cache(variant, test_device): inputs += (torch.zeros(enc_past_cache_shape), torch.zeros(enc_past_cache_shape), torch.unsqueeze(blocks.unshape(torch.nn.functional.pad(model_out[1][i][2], pad_shape)), 0), torch.unsqueeze(blocks.unshape(torch.nn.functional.pad(model_out[1][i][3], pad_shape)), 0)) - tt0 = pybuda.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("t5", blocks)) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=inputs) + tt0 = forge.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("t5", blocks)) + output_q = forge.initialize_pipeline(training=False, sample_inputs=inputs) import time abs_index = 480 @@ -498,7 +498,7 @@ def test_t5_past_cache(variant, test_device): generate_inputs = (encoder_last_hidden_state, decoder_input_ids, decoder_attention_mask, encoder_attention_mask ) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=current_token_index // TILE_DIM) + forge.run_generate(input_count=1, write_index=current_token_index // TILE_DIM) ans = output_q.get() lm_head_out = ans[0].value().detach() next_token = torch.argmax(lm_head_out[0, current_token_index % TILE_DIM]) @@ -517,21 +517,21 @@ def test_t5_past_cache(variant, test_device): variants = ["t5-small", "t5-base", "t5-large", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large"] @pytest.mark.parametrize("variant", variants, ids=variants) -def test_t5_past_cache_pybuda_pipeline(variant, test_device): +def test_t5_past_cache_forge_pipeline(variant, test_device): # Too slow for post-commit ci if test_device.devtype != BackendType.Silicon and variant != "t5-small": pytest.skip() import os - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "169536" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "30000" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "169536" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "30000" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b compiler_cfg.default_dram_parameters = False compiler_cfg.input_queues_on_host = True compiler_cfg.enable_auto_fusing = False @@ -621,8 +621,8 @@ def test_t5_past_cache_pybuda_pipeline(variant, test_device): inputs += (torch.zeros(enc_past_cache_shape), torch.zeros(enc_past_cache_shape), torch.unsqueeze(blocks.unshape(torch.nn.functional.pad(model_out[1][i][2], pad_shape)), 0), torch.unsqueeze(blocks.unshape(torch.nn.functional.pad(model_out[1][i][3], pad_shape)), 0)) - tt0 = pybuda.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("t5", blocks)) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=inputs) + tt0 = forge.TTDevice("tt0", devtype=test_device.devtype, 
arch=test_device.arch, module=PyTorchModule("t5", blocks)) + output_q = forge.initialize_pipeline(training=False, sample_inputs=inputs) abs_index = 480 current_token_index = 0 @@ -633,7 +633,7 @@ def wrap_generate(inputs): decoder_attention_mask[0, abs_index + (current_token_index % TILE_DIM)] = 1 generate_inputs = (encoder_last_hidden_state, decoder_input_ids, decoder_attention_mask, encoder_attention_mask) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=current_token_index // TILE_DIM) + forge.run_generate(input_count=1, write_index=current_token_index // TILE_DIM) ans = output_q.get() lm_head_out = ans[0].value().detach() lm_head_out = lm_head_out[:, :(current_token_index % TILE_DIM) + 1, :] @@ -645,7 +645,7 @@ def wrap_generate(inputs): decoder_input_ids[0, :] = tokenizer.pad_token_id return lm_head_out - text_generator = pybuda_pipeline("text2text-generation", model=model, tokenizer=tokenizer, forward_fn=wrap_generate) + text_generator = forge_pipeline("text2text-generation", model=model, tokenizer=tokenizer, forward_fn=wrap_generate) import time start = time.time() @@ -704,19 +704,19 @@ def test_t5_past_cache_enc_dec(variant, test_device): pytest.skip() import os - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "120000" - # os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "169536" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" - os.environ["PYBUDA_NLP_MANUAL_TARGET"] = "30000" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "120000" + # os.environ["FORGE_EXTRA_L1_MARGIN"] = "169536" + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_NLP_MANUAL_TARGET"] = "30000" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" os.environ["TT_BACKEND_PROFILER"] = "1" os.environ["TT_BACKEND_EPOCH_BIN_NUM_SLOTS"] = "64" - os.environ["PYBUDA_ROTATE_PAST_CACHE_PARAMS"] = "1" - compiler_cfg = pybuda.config._get_global_compiler_config() + os.environ["FORGE_ROTATE_PAST_CACHE_PARAMS"] = "1" + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.default_df_override = pybuda._C.Float16_b + compiler_cfg.default_df_override = forge._C.Float16_b compiler_cfg.default_dram_parameters = False compiler_cfg.input_queues_on_host = True compiler_cfg.enable_auto_fusing = False @@ -725,7 +725,7 @@ def test_t5_past_cache_enc_dec(variant, test_device): compiler_cfg.enable_enumerate_u_kt = False compiler_cfg.enable_link_past_cache_ios = True - # pybuda.set_configuration_options(performance_trace=pybuda.PerfTraceLevel.VERBOSE) + # forge.set_configuration_options(performance_trace=forge.PerfTraceLevel.VERBOSE) model_name = variant config = T5Config.from_pretrained(model_name) config_dict = config.to_dict() @@ -742,10 +742,10 @@ def test_t5_past_cache_enc_dec(variant, test_device): num_blocks = len(model.decoder.block) # num_blocks = n_layers for i in range(num_blocks): - pybuda.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [1, 1]) - pybuda.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [1, 1]) - pybuda.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [15, 1]) - pybuda.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [15, 1]) + 
forge.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [1, 1]) + forge.config.override_op_size(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [1, 1]) + forge.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.k.weight_cache_nop", [15, 1]) + forge.config.override_t_stream_shape(f"t5.decoder.block.{i}.layer.0.SelfAttention.v.weight_cache_nop", [15, 1]) input_length = 64 input_text = "translate English to German: The house is wonderful. We have really enjoyed living here for the past eight years. The only problem that I have with it is that it is too small and the parks are not very close." @@ -774,13 +774,13 @@ def test_t5_past_cache_enc_dec(variant, test_device): encoder_module = PyTorchModule("T5_encoder", T5_encoder(model)) decoder_module_cross_attention = PyTorchModule("T5_decoder_with_ca", T5_decoder(model)) decoder_module_no_cross_attention = PyTorchModule("T5_decoder_no_ca", T5_decoder(model)) - tt0 = pybuda.TTDevice( + tt0 = forge.TTDevice( "tt0", devtype=test_device.devtype, arch=test_device.arch, module=[encoder_module, decoder_module_cross_attention, decoder_module_no_cross_attention]) - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=( (input_ids, encoder_attention_mask), @@ -793,7 +793,7 @@ def test_t5_past_cache_enc_dec(variant, test_device): start = time.time() tt0.set_active_subgraph(0) tt0.push_to_inputs((input_ids, encoder_attention_mask)) - pybuda.run_forward() + forge.run_forward() ans = output_q.get() encoder_last_hidden_state = ans[0].value().detach() first_current_index = max_length - TILE_DIM @@ -809,13 +809,13 @@ def test_t5_past_cache_enc_dec(variant, test_device): tt0.set_active_subgraph(1) generate_inputs = (decoder_input_ids, decoder_attention_mask, encoder_last_hidden_state, encoder_attention_mask) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) ans = output_q.get() else: tt0.set_active_subgraph(2) generate_inputs = (decoder_input_ids, decoder_attention_mask, encoder_attention_mask) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) ans = output_q.get() lm_head_out = ans[0].value().detach() @@ -827,7 +827,7 @@ def test_t5_past_cache_enc_dec(variant, test_device): past_cache_pages = current_token_index // TILE_DIM # after one page of past cache, we have to rotate. 
tt0.set_active_subgraph(3) - pybuda.run_generate(input_count=0, write_index=0) + forge.run_generate(input_count=0, write_index=0) pages_current = 1 decoder_attention_mask[0, -(past_cache_pages + pages_current) * TILE_DIM:] = 1 @@ -920,14 +920,14 @@ def test_t5_small_tiny_tile(test_device): pytest.skip("Grayskull test failing with TM ERROR (producer = matmul_49, consumer = matmul_53): input using kernel_broadcast but post-TM input canonical form is not periodic") import os - os.environ["PYBUDA_ENABLE_TINY_TILE"] = "1" - # Add PyBUDA configurations - compiler_cfg = pybuda.config._get_global_compiler_config() + os.environ["FORGE_ENABLE_TINY_TILE"] = "1" + # Add Forge configurations + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.enable_tvm_cpu_fallback = False - compiler_cfg.enable_auto_fusing = False # tenstorrent/pybuda#844 + compiler_cfg.enable_auto_fusing = False # tenstorrent/forge#844 compiler_cfg.amp_level = 1 compiler_cfg.enable_enumerate_u_kt = False - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.compile_depth = CompileDepth.POST_PATTERN_MATCHER # Load tokenizer and model from HuggingFace @@ -949,7 +949,7 @@ def __init__(self, model): def forward(self, decoder_input_ids, encoder_outputs): return self.model(None, None, decoder_input_ids, None, None, None, None, (encoder_outputs,)) - tt_model = pybuda.PyTorchModule("t5_small_tiny_tile", Wrapper(model)) + tt_model = forge.PyTorchModule("t5_small_tiny_tile", Wrapper(model)) decoder_input_ids = torch.randint(0, model.config.vocab_size, (1, 1), dtype=torch.int32) encoder_outputs = torch.randn(1, 1, 512) diff --git a/pybuda/test/tvm/nlp/pytorch/tests_A/test_xlm.py b/forge/test/tvm/nlp/pytorch/tests_A/test_xlm.py similarity index 96% rename from pybuda/test/tvm/nlp/pytorch/tests_A/test_xlm.py rename to forge/test/tvm/nlp/pytorch/tests_A/test_xlm.py index a5863b79f..34bff3502 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_A/test_xlm.py +++ b/forge/test/tvm/nlp/pytorch/tests_A/test_xlm.py @@ -4,7 +4,7 @@ # # Some basic bring-up tests of tracing functionality # -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest import torch @@ -13,21 +13,21 @@ import math import itertools -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, tvm_to_python, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind diff --git a/pybuda/test/tvm/nlp/pytorch/tests_A/test_xlnet.py b/forge/test/tvm/nlp/pytorch/tests_A/test_xlnet.py similarity index 90% rename from pybuda/test/tvm/nlp/pytorch/tests_A/test_xlnet.py rename to forge/test/tvm/nlp/pytorch/tests_A/test_xlnet.py index 9b4ee9a72..37bbc5f9d 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_A/test_xlnet.py +++ b/forge/test/tvm/nlp/pytorch/tests_A/test_xlnet.py @@ -4,23 +4,23 @@ # # Some basic bring-up tests of tracing functionality # -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest import torch from transformers import XLNetConfig, XLNetModel -from 
pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, tvm_to_python, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge input_shapes = [(1, 16, 1024)] @@ -64,7 +64,7 @@ def test_tvm_xlnet(training, recompute, input_shape): hidden_states = [torch.rand(*input_shape), output_g, non_tgt_mask, attn_mask, pos_emb, seg_mat] - ret = pybuda_compile( + ret = forge_compile( tt0, "XLNet", *hidden_states, @@ -78,4 +78,4 @@ def test_tvm_xlnet(training, recompute, input_shape): ), ) - evaluate_framework_vs_pybuda(model.layer[0], ret, *hidden_states) + evaluate_framework_vs_forge(model.layer[0], ret, *hidden_states) diff --git a/pybuda/test/tvm/nlp/pytorch/tests_B/__init__.py b/forge/test/tvm/nlp/pytorch/tests_B/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/tests_B/__init__.py rename to forge/test/tvm/nlp/pytorch/tests_B/__init__.py diff --git a/pybuda/test/tvm/nlp/pytorch/tests_B/test_distilbert.py b/forge/test/tvm/nlp/pytorch/tests_B/test_distilbert.py similarity index 91% rename from pybuda/test/tvm/nlp/pytorch/tests_B/test_distilbert.py rename to forge/test/tvm/nlp/pytorch/tests_B/test_distilbert.py index 387836e57..ebbc95b03 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_B/test_distilbert.py +++ b/forge/test/tvm/nlp/pytorch/tests_B/test_distilbert.py @@ -9,12 +9,12 @@ import pytest from transformers import DistilBertModel -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model @@ -37,7 +37,7 @@ def forward(self, input_act): "distilbert-base-cased-distilled-squad" ) framework_module = Transformer(framework_module) - pybuda_module = PyTorchModule("distilbert_pt", framework_module) + forge_module = PyTorchModule("distilbert_pt", framework_module) # Input shapes input_act_shape = (1, 128) @@ -47,7 +47,7 @@ def forward(self, input_act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -80,7 +80,7 @@ def forward(self, input_act): "distilbert-base-cased-distilled-squad" ) framework_module = Transformer(framework_module) - pybuda_module = PyTorchModule("distilbert_layer_pt", framework_module) + forge_module = PyTorchModule("distilbert_layer_pt", framework_module) # Input shapes input_act_shape = (1, 128, 768) @@ -90,7 +90,7 @@ def forward(self, input_act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -124,7 +124,7 @@ def forward(self, q_act, k_act, v_act): "distilbert-base-cased-distilled-squad" ) framework_module = Transformer(framework_module) - pybuda_module = PyTorchModule("distilbert_layer_mha_pt", framework_module) + forge_module = PyTorchModule("distilbert_layer_mha_pt", framework_module) # Input shapes inp_shape = (1, 128, 768) @@ -136,7 +136,7 @@ def forward(self, q_act, k_act, v_act): out = framework_module(q_act, k_act, v_act) verify_module( - pybuda_module, + forge_module, (inp_shape, inp_shape, inp_shape), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -159,7 +159,7 @@ def test_distilbert_layer_with_embeddings_pt(test_kind, test_device): if test_kind.is_training(): pytest.skip() - 
os.environ["PYBUDA_RELOAD_GENERATED_MODULES"] = "1" + os.environ["FORGE_RELOAD_GENERATED_MODULES"] = "1" class Transformer(torch.nn.Module): def __init__(self, module): @@ -177,7 +177,7 @@ def forward(self, input_act): "distilbert-base-cased-distilled-squad" ) framework_module = Transformer(framework_module) - pybuda_module = PyTorchModule( + forge_module = PyTorchModule( "distilbert_layer_with_embeddings_pt", framework_module ) @@ -189,7 +189,7 @@ def forward(self, input_act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -227,7 +227,7 @@ def forward(self, inputs_embeds): "distilbert-base-cased-distilled-squad", torchscript=True ) framework_module = Transformer(framework_module) - pybuda_module = PyTorchModule("distilbert_without_embeddings_pt", framework_module) + forge_module = PyTorchModule("distilbert_without_embeddings_pt", framework_module) # Input shapes input_emb_shape = (1, 32, 768) @@ -237,7 +237,7 @@ def forward(self, inputs_embeds): # out = framework_module(inputs_embeds) verify_module( - pybuda_module, + forge_module, (input_emb_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/nlp/pytorch/tests_B/test_wmt.py b/forge/test/tvm/nlp/pytorch/tests_B/test_wmt.py similarity index 94% rename from pybuda/test/tvm/nlp/pytorch/tests_B/test_wmt.py rename to forge/test/tvm/nlp/pytorch/tests_B/test_wmt.py index cf2546e36..0680eff70 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_B/test_wmt.py +++ b/forge/test/tvm/nlp/pytorch/tests_B/test_wmt.py @@ -7,13 +7,13 @@ import torch.nn as nn from transformers import FSMTModel -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config class WMT_Encoder_Wrapper(nn.Module): diff --git a/pybuda/test/tvm/nlp/pytorch/tests_C/__init__.py b/forge/test/tvm/nlp/pytorch/tests_C/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/tests_C/__init__.py rename to forge/test/tvm/nlp/pytorch/tests_C/__init__.py diff --git a/pybuda/test/tvm/nlp/pytorch/tests_C/test_opt.py b/forge/test/tvm/nlp/pytorch/tests_C/test_opt.py similarity index 95% rename from pybuda/test/tvm/nlp/pytorch/tests_C/test_opt.py rename to forge/test/tvm/nlp/pytorch/tests_C/test_opt.py index 3a5172f89..acbeec901 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_C/test_opt.py +++ b/forge/test/tvm/nlp/pytorch/tests_C/test_opt.py @@ -6,15 +6,15 @@ import torch from transformers import OPTModel, OPTConfig # from transformers.models.opt.modeling_opt import XGLMAttention, ACT2FN -from pybuda import ( +from forge import ( PyTorchModule, BackendType, VerifyConfig, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.verify import verify_module +from forge.verify.config import TestKind def test_opt_decoder(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/pytorch/tests_C/test_roberta.py b/forge/test/tvm/nlp/pytorch/tests_C/test_roberta.py similarity index 90% rename from pybuda/test/tvm/nlp/pytorch/tests_C/test_roberta.py rename to forge/test/tvm/nlp/pytorch/tests_C/test_roberta.py index 10d3faca7..15aee703a 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_C/test_roberta.py +++ 
b/forge/test/tvm/nlp/pytorch/tests_C/test_roberta.py @@ -1,10 +1,10 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -import pybuda -from pybuda.config import CompileDepth -from pybuda.cpudevice import CPUDevice -from pybuda.verify.cpueval import TrainingEvalData +import forge +from forge.config import CompileDepth +from forge.cpudevice import CPUDevice +from forge.verify.cpueval import TrainingEvalData import pytest from loguru import logger @@ -12,21 +12,21 @@ import torch from transformers import RobertaModel, RobertaConfig -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.op.eval import compare_tensor_to_golden -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.op.eval import compare_tensor_to_golden +from test.tvm.utils import evaluate_framework_vs_forge from test.backend.models.test_bert import get_relaxed_atol_pcc -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model class EmbWrapper(torch.nn.Module): @@ -82,7 +82,7 @@ def test_roberta_pipeline(test_kind, test_device): extended_attention_mask = model.get_extended_attention_mask(attention_mask, input_ids.size()) cpu0.push_to_inputs(input_ids, extended_attention_mask) # tt1.push_to_inputs(input_ids) - output_q = pybuda.run_inference() + output_q = forge.run_inference() outputs = output_q.get() torch_outputs = model(input_ids) diff --git a/pybuda/test/tvm/nlp/pytorch/tests_C/test_trocr.py b/forge/test/tvm/nlp/pytorch/tests_C/test_trocr.py similarity index 90% rename from pybuda/test/tvm/nlp/pytorch/tests_C/test_trocr.py rename to forge/test/tvm/nlp/pytorch/tests_C/test_trocr.py index e39471b54..ca160c624 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_C/test_trocr.py +++ b/forge/test/tvm/nlp/pytorch/tests_C/test_trocr.py @@ -14,15 +14,15 @@ VisionEncoderDecoderModel, ) -from pybuda import PyTorchModule, VerifyConfig -from pybuda.verify import verify_module -from pybuda.config import _get_global_compiler_config +from forge import PyTorchModule, VerifyConfig +from forge.verify import verify_module +from forge.config import _get_global_compiler_config def test_trocr_reduced_size(test_kind, test_device): # import os - # os.environ["PYBUDA_LEGALIZER_DETAILED_DEBUGGING"] = "1" - # os.environ["PYBUDA_RELOAD_GENERATED_MODULES"] = "1" + # os.environ["FORGE_LEGALIZER_DETAILED_DEBUGGING"] = "1" + # os.environ["FORGE_RELOAD_GENERATED_MODULES"] = "1" if test_kind.is_training(): pytest.skip() diff --git a/pybuda/test/tvm/nlp/pytorch/tests_C/test_unispeech.py b/forge/test/tvm/nlp/pytorch/tests_C/test_unispeech.py similarity index 90% rename from pybuda/test/tvm/nlp/pytorch/tests_C/test_unispeech.py rename to forge/test/tvm/nlp/pytorch/tests_C/test_unispeech.py index 4f639fd5d..79334c055 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_C/test_unispeech.py +++ b/forge/test/tvm/nlp/pytorch/tests_C/test_unispeech.py @@ -9,28 +9,28 @@ import torch from transformers import UniSpeechModel -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from 
test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model def test_unispeech(test_kind, test_device): if test_kind == TestKind.TRAINING: # only run recompute test in post-commit pytest.skip() - pytest.skip() # See tenstorrent/pybuda#1935 + pytest.skip() # See tenstorrent/forge#1935 compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.tvm_constnat_prop_mask={"encoder.pos_conv_embed.conv.weight_v"} @@ -150,7 +150,7 @@ def test_unispeech_transformer_encoder(training): torch.rand(input_shape), ] - pybuda_model_results = pybuda_compile( + forge_model_results = forge_compile( tt0, "unispeech_transformer_encoder", *inputs, @@ -171,4 +171,4 @@ def test_unispeech_transformer_encoder(training): ), ) - evaluate_framework_vs_pybuda(framework_submodel, pybuda_model_results, *inputs) + evaluate_framework_vs_forge(framework_submodel, forge_model_results, *inputs) diff --git a/pybuda/test/tvm/nlp/pytorch/tests_C/test_wav2vec2.py b/forge/test/tvm/nlp/pytorch/tests_C/test_wav2vec2.py similarity index 88% rename from pybuda/test/tvm/nlp/pytorch/tests_C/test_wav2vec2.py rename to forge/test/tvm/nlp/pytorch/tests_C/test_wav2vec2.py index eb5614ed3..f3634f51a 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_C/test_wav2vec2.py +++ b/forge/test/tvm/nlp/pytorch/tests_C/test_wav2vec2.py @@ -9,21 +9,21 @@ import torch from transformers import Wav2Vec2Model -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model @@ -31,7 +31,7 @@ def test_wav2vec2(test_kind, test_device): if test_kind == TestKind.TRAINING: # only run recompute test in post-commit pytest.skip() - pytest.skip() # See tenstorrent/pybuda#1935 + pytest.skip() # See tenstorrent/forge#1935 compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.tvm_constnat_prop_mask={"encoder.pos_conv_embed.conv.weight_v"} diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/__init__.py b/forge/test/tvm/nlp/pytorch/tests_D/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/tests_D/__init__.py rename to forge/test/tvm/nlp/pytorch/tests_D/__init__.py diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_bart.py b/forge/test/tvm/nlp/pytorch/tests_D/test_bart.py similarity index 94% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_bart.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_bart.py index 
908aa0f5f..7f8768ef3 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_bart.py +++ b/forge/test/tvm/nlp/pytorch/tests_D/test_bart.py @@ -14,28 +14,28 @@ from transformers.modeling_attn_mask_utils import _create_4d_causal_attention_mask from test.backend.models.test_bert import get_relaxed_atol_pcc -import pybuda -from pybuda import ( +import forge +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, CPUDevice, TTDevice, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from loguru import logger -from pybuda.op.eval.common import compare_tensor_to_golden +from forge.op.eval.common import compare_tensor_to_golden def test_bart_decoder(test_kind, test_device): if test_kind == TestKind.TRAINING: # only run recompute test in post-commit @@ -159,7 +159,7 @@ def test_bart_encoder_pipeline(test_device): input_ids = torch.randint(config.vocab_size, (1, seq_len)) cpu0.push_to_inputs(input_ids) - output_q = pybuda.run_inference(_verify_cfg=VerifyConfig(verify_last=False)) + output_q = forge.run_inference(_verify_cfg=VerifyConfig(verify_last=False)) outputs = output_q.get() torch_outputs = model.encoder(input_ids) @@ -191,7 +191,7 @@ def test_bart_decoder_pipeline(test_device): tt1 = TTDevice("tt1", devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("decoder_blocks", decoder_blocks)) cpu0.push_to_inputs(input_ids) - output_q = pybuda.run_inference(_verify_cfg=VerifyConfig()) + output_q = forge.run_inference(_verify_cfg=VerifyConfig()) outputs = output_q.get() torch_outputs = model.decoder(input_ids) diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_bloom.py b/forge/test/tvm/nlp/pytorch/tests_D/test_bloom.py similarity index 91% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_bloom.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_bloom.py index 3a4566fb7..f05839894 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_bloom.py +++ b/forge/test/tvm/nlp/pytorch/tests_D/test_bloom.py @@ -1,28 +1,28 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest from typing import Optional, Tuple import torch import torch.nn as nn -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendDevice, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) import random from test.tvm.nlp.pytorch.bloom import GPTModel, Embedding, tinybloom_args, Transformer, init_method -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from transformers import BloomModel from test.utils import download_model @@ -94,11 +94,11 @@ def test_bloom_hf(test_kind, test_device): pytest.skip() if test_device.arch == BackendDevice.Wormhole_B0 or test_device.arch == BackendDevice.Blackhole: - pytest.skip() # see tenstorrent/pybuda#969 + pytest.skip() # 
see tenstorrent/forge#969 compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_tvm_constant_prop = True - #Fusing disabled due to tenstorrent/pybuda#789 + #Fusing disabled due to tenstorrent/forge#789 compiler_cfg.enable_auto_fusing=False class BloomWrapper(torch.nn.Module): diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_gnmt.py b/forge/test/tvm/nlp/pytorch/tests_D/test_gnmt.py similarity index 93% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_gnmt.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_gnmt.py index fea31edfd..29d5ff71c 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_gnmt.py +++ b/forge/test/tvm/nlp/pytorch/tests_D/test_gnmt.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest from typing import Optional, Tuple @@ -9,25 +9,25 @@ import torch.nn as nn import numpy as np -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, Tensor, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -import pybuda +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +import forge from test.tvm.nlp.pytorch.gnmt.gnmt import GNMT from test.tvm.nlp.pytorch.gnmt.encoder import ResidualRecurrentEncoder from test.tvm.nlp.pytorch.gnmt.decoder import ResidualRecurrentDecoder, RecurrentAttention -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge def test_gnmt_torch(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_gpt2.py b/forge/test/tvm/nlp/pytorch/tests_D/test_gpt2.py similarity index 92% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_gpt2.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_gpt2.py index b3298fc7f..91da2ecf4 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_gpt2.py +++ b/forge/test/tvm/nlp/pytorch/tests_D/test_gpt2.py @@ -11,17 +11,17 @@ from transformers.pytorch_utils import Conv1D from test.backend.models.test_bert import get_relaxed_atol_pcc -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, VerifyConfig, run_generate, ) -import pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +import forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from typing import Optional, Tuple, Union import math @@ -136,23 +136,23 @@ def forward(self, hidden_states, attention_mask, key_past, value_past): tt0 = TTDevice("tt0", devtype=test_device.devtype) tt0.place_module(mod) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=(torch.rand(hidden_states_shape), torch.rand(attention_mask_shape), torch.zeros(layer_past_shape), torch.zeros(layer_past_shape)), _verify_cfg=VerifyConfig( + output_q = forge.initialize_pipeline(training=False, sample_inputs=(torch.rand(hidden_states_shape), torch.rand(attention_mask_shape), torch.zeros(layer_past_shape), torch.zeros(layer_past_shape)), _verify_cfg=VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, verify_all=True, 
)) - print(pybuda.get_parameter_checkpoint()[0]['tensor_1']) + print(forge.get_parameter_checkpoint()[0]['tensor_1']) tt0.push_to_inputs((torch.rand(hidden_states_shape), torch.rand(attention_mask_shape), )) - pybuda.run_generate(input_count=1, write_index=0) - pk = pybuda.get_parameter_checkpoint()[0]['tensor_1'].value() + forge.run_generate(input_count=1, write_index=0) + pk = forge.get_parameter_checkpoint()[0]['tensor_1'].value() ans = output_q.get(timeout = 0.5) - print(pybuda.get_parameter_checkpoint()[0]['tensor_1']) + print(forge.get_parameter_checkpoint()[0]['tensor_1']) tt0.push_to_inputs((torch.rand(hidden_states_shape), torch.rand(attention_mask_shape), )) - pybuda.run_generate(input_count=1, write_index=1) - print(pybuda.get_parameter_checkpoint()[0]['tensor_1']) + forge.run_generate(input_count=1, write_index=1) + print(forge.get_parameter_checkpoint()[0]['tensor_1']) tt0.push_to_inputs((torch.rand(hidden_states_shape), torch.rand(attention_mask_shape), )) - pybuda.run_generate(input_count=1, write_index=2) - print(pybuda.get_parameter_checkpoint()[0]['tensor_1']) + forge.run_generate(input_count=1, write_index=2) + print(forge.get_parameter_checkpoint()[0]['tensor_1']) class EmbWrapper(torch.nn.Module): def __init__(self, model): @@ -250,23 +250,23 @@ def test_gpt2_past_cache(test_device): last_prefix_token = inputs["attention_mask"].index(0) - 1 tokens_to_generate = 480 - cpu0 = pybuda.CPUDevice("cpu0", module=PyTorchModule("gpt2_embeddings", embeddings)) - tt1 = pybuda.TTDevice("tt1", + cpu0 = forge.CPUDevice("cpu0", module=PyTorchModule("gpt2_embeddings", embeddings)) + tt1 = forge.TTDevice("tt1", devtype=test_device.devtype, arch=test_device.arch, module=PyTorchModule("gpt2_blocks", blocks)) - cpu1 = pybuda.CPUDevice("cpu1", module=PyTorchModule("gpt2_lm_head", lm_head)) + cpu1 = forge.CPUDevice("cpu1", module=PyTorchModule("gpt2_lm_head", lm_head)) layer_past_shape = (1, 480, 768) inputs = (input_ids_tt, attention_mask, position_ids) for _ in range(num_blocks): inputs += (torch.zeros(layer_past_shape), torch.zeros(layer_past_shape)) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=inputs,) + output_q = forge.initialize_pipeline(training=False, sample_inputs=inputs,) write_index = 0 current_token_index = last_prefix_token for i in range(tokens_to_generate): position_ids = torch.arange(past_length, past_length + run_length) cpu0.push_to_inputs((input_ids_tt, attention_mask, position_ids)) - pybuda.run_generate(input_count=1, write_index=write_index) + forge.run_generate(input_count=1, write_index=write_index) outputs = output_q.get() lm_head_out = outputs[0].value().detach() k = 10 @@ -325,25 +325,25 @@ def forward(self, y, key_past): compiler_cfg = _get_global_compiler_config() compiler_cfg.loopback_outputs = {"key_past_1": 0} - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=(torch.rand(1), torch.zeros(layer_past_shape)), _verify_cfg=VerifyConfig( + output_q = forge.initialize_pipeline(training=False, sample_inputs=(torch.rand(1), torch.zeros(layer_past_shape)), _verify_cfg=VerifyConfig( arch=test_device.arch, devtype=test_device.devtype, )) - print(pybuda.get_parameter_checkpoint()) + print(forge.get_parameter_checkpoint()) tt0.push_to_inputs((torch.rand(1), )) tt0.push_to_inputs((torch.rand(1), )) - pybuda.run_generate(input_count=2, tokens_per_iter=32, token_id=0) - print(pybuda.get_parameter_checkpoint()) + forge.run_generate(input_count=2, tokens_per_iter=32, token_id=0) + print(forge.get_parameter_checkpoint()) ans = 
output_q.get() tt0.push_to_inputs((torch.rand(1), )) - pybuda.run_generate(input_count=1, tokens_per_iter=31, token_id=64) + forge.run_generate(input_count=1, tokens_per_iter=31, token_id=64) ans = output_q.get() - print(pybuda.get_parameter_checkpoint()) + print(forge.get_parameter_checkpoint()) tt0.push_to_inputs((torch.rand(1), )) tt0.push_to_inputs((torch.rand(1), )) - pybuda.run_generate(input_count=2, tokens_per_iter=1, token_id=95) + forge.run_generate(input_count=2, tokens_per_iter=1, token_id=95) ans = output_q.get() - print(pybuda.get_parameter_checkpoint()) + print(forge.get_parameter_checkpoint()) def test_past_cache_prefill_generate(test_device): @@ -386,7 +386,7 @@ def forward(self, input_gen, prefill_output): compiler_cfg.balancer_op_override("matmul_3_output_nop_0", "t_stream_shape", (15,1)) # compiler_cfg.loopback_outputs = {"prefill_output": (0, 1)} - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=((torch.rand(input_prefil_shape),), (torch.rand(input_generate_shape), torch.rand(past_shape),),), _verify_cfg=VerifyConfig( @@ -396,11 +396,11 @@ def forward(self, input_gen, prefill_output): ) tt0.set_active_subgraph(0) tt0.push_to_inputs((torch.rand(input_prefil_shape), )) - pybuda.run_forward() + forge.run_forward() tt0.set_active_subgraph(1) tt0.push_to_inputs((torch.rand(input_generate_shape), torch.rand(past_shape),)) - pybuda.run_forward() + forge.run_forward() @pytest.mark.skip(reason="Tested with fallback") @@ -618,9 +618,9 @@ def forward(self, cache, line): return out def test_splice(test_device): - import pybuda + import forge mod = SpliceUnit() - pb_mod = pybuda.PyTorchModule('splice', mod) + pb_mod = forge.PyTorchModule('splice', mod) verify_module(pb_mod, [(1, 1, 32, 32), (1, 1, 32, 1)], VerifyConfig(test_kind=TestKind.INFERENCE, diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_gptj.py b/forge/test/tvm/nlp/pytorch/tests_D/test_gptj.py similarity index 87% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_gptj.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_gptj.py index 15e0b24f8..471143e8f 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_gptj.py +++ b/forge/test/tvm/nlp/pytorch/tests_D/test_gptj.py @@ -12,7 +12,7 @@ from transformers import GPTJConfig from transformers.models.gptj.modeling_gptj import GPTJBlock -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendDevice, @@ -20,18 +20,18 @@ CompileDepth, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.op.eval import compare_tensor_to_golden -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.op.eval import compare_tensor_to_golden +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -import pybuda +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +import forge def test_gptj_block(test_kind, test_device): - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip() input_shape = (1, 128, 4096) compiler_cfg = _get_global_compiler_config() @@ -39,9 +39,9 @@ def test_gptj_block(test_kind, test_device): compiler_cfg.compile_depth = CompileDepth.BUDA_GRAPH_PRE_PLACER if test_device.arch == BackendDevice.Wormhole_B0 or test_device.arch == 
BackendDevice.Blackhole: - pytest.skip() # see tenstorrent/pybuda#969 + pytest.skip() # see tenstorrent/forge#969 - #Fusing disabled due to tenstorrent/pybuda#789 + #Fusing disabled due to tenstorrent/forge#789 if (test_kind == TestKind.INFERENCE): compiler_cfg.enable_auto_fusing=False diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_gptneo.py b/forge/test/tvm/nlp/pytorch/tests_D/test_gptneo.py similarity index 82% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_gptneo.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_gptneo.py index 9acb9b85d..956115248 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_gptneo.py +++ b/forge/test/tvm/nlp/pytorch/tests_D/test_gptneo.py @@ -4,23 +4,23 @@ # # GPT Neo basic bring-up tests of tracing functionality # -from pybuda._C.backend_api import BackendDevice +from forge._C.backend_api import BackendDevice import pytest import torch from transformers import GPTNeoModel, GPTNeoConfig import os -from pybuda import ( +from forge import ( PyTorchModule, CompileDepth, VerifyConfig, BackendType, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind def test_gptneo_block(test_kind, test_device): @@ -52,8 +52,8 @@ def test_gptneo_block(test_kind, test_device): def test_gptneo_full(test_kind, test_device): # Pipegen error on silicon if enabled - os.environ["PYBUDA_DISABLE_STABLE_SOFTMAX"] = "1" - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "100000" + os.environ["FORGE_DISABLE_STABLE_SOFTMAX"] = "1" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "100000" if test_kind == TestKind.TRAINING: pytest.skip() @@ -63,7 +63,7 @@ def test_gptneo_full(test_kind, test_device): if test_kind.is_training(): compiler_cfg.compile_depth = CompileDepth.BUDA_GRAPH_PRE_PLACER - #Fusing disabled due to tenstorrent/pybuda#789 + #Fusing disabled due to tenstorrent/forge#789 if test_kind == TestKind.INFERENCE and test_device.arch == BackendDevice.Wormhole_B0: compiler_cfg.enable_auto_fusing=False @@ -89,4 +89,4 @@ def test_gptneo_full(test_kind, test_device): uniform_inputs=True, ) - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "0" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "0" diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_longformer.py b/forge/test/tvm/nlp/pytorch/tests_D/test_longformer.py similarity index 94% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_longformer.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_longformer.py index 975905af1..7f8d48e84 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_longformer.py +++ b/forge/test/tvm/nlp/pytorch/tests_D/test_longformer.py @@ -10,17 +10,17 @@ from transformers import LongformerModel -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge def test_longformer(training): @@ -65,7 +65,7 @@ def test_longformer(training): :, [1, 4, 21] ] = 1 # Randomly set global attentions for testing purposes - res = pybuda_compile( + res = forge_compile( tt0, "longformer", input_ids, @@ 
-81,7 +81,7 @@ def test_longformer(training): ), ) - evaluate_framework_vs_pybuda( + evaluate_framework_vs_forge( submodel, res, input_ids, attention_mask, global_attention_mask ) @@ -141,7 +141,7 @@ def test_longformer_layer(training): is_global_attn ] - res = pybuda_compile( + res = forge_compile( tt0, "longformer", *inputs, @@ -155,4 +155,4 @@ def test_longformer_layer(training): ), ) - evaluate_framework_vs_pybuda(submodel, res, inputs) + evaluate_framework_vs_forge(submodel, res, inputs) diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_nbeats.py b/forge/test/tvm/nlp/pytorch/tests_D/test_nbeats.py similarity index 93% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_nbeats.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_nbeats.py index d85e871ca..34085ffcb 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_nbeats.py +++ b/forge/test/tvm/nlp/pytorch/tests_D/test_nbeats.py @@ -2,26 +2,26 @@ # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth from pytorch_forecasting.models.nbeats.sub_modules import NBEATSBlock, NBEATSGenericBlock, NBEATSTrendBlock, NBEATSSeasonalBlock import torch import pytest -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind def test_tvm_nbeats_block(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_squeeze_bert.py b/forge/test/tvm/nlp/pytorch/tests_D/test_squeeze_bert.py similarity index 88% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_squeeze_bert.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_squeeze_bert.py index 4cc7f35de..88da612dd 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_squeeze_bert.py +++ b/forge/test/tvm/nlp/pytorch/tests_D/test_squeeze_bert.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest import torch @@ -11,21 +11,21 @@ import math import itertools -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, tvm_to_python, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind def test_tvm_SqueezeBertEncoder(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/pytorch/tests_D/test_xglm.py b/forge/test/tvm/nlp/pytorch/tests_D/test_xglm.py similarity index 96% rename from pybuda/test/tvm/nlp/pytorch/tests_D/test_xglm.py rename to forge/test/tvm/nlp/pytorch/tests_D/test_xglm.py index ba8429c91..7df3c948f 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_D/test_xglm.py +++ 
b/forge/test/tvm/nlp/pytorch/tests_D/test_xglm.py @@ -6,7 +6,7 @@ # from codeop import Compile from colorama import Back -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest from typing import Optional, Tuple @@ -14,21 +14,21 @@ import torch.nn as nn from transformers import XGLMModel, XGLMConfig from transformers.models.xglm.modeling_xglm import XGLMAttention, ACT2FN -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.op.eval import compare_tensor_to_golden -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.op.eval import compare_tensor_to_golden +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind # Dont use cache for now diff --git a/pybuda/test/tvm/nlp/pytorch/tests_E/1272-128104-0000.pt b/forge/test/tvm/nlp/pytorch/tests_E/1272-128104-0000.pt similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/tests_E/1272-128104-0000.pt rename to forge/test/tvm/nlp/pytorch/tests_E/1272-128104-0000.pt diff --git a/pybuda/test/tvm/nlp/pytorch/tests_E/__init__.py b/forge/test/tvm/nlp/pytorch/tests_E/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/pytorch/tests_E/__init__.py rename to forge/test/tvm/nlp/pytorch/tests_E/__init__.py diff --git a/pybuda/test/tvm/nlp/pytorch/tests_E/test_codegen.py b/forge/test/tvm/nlp/pytorch/tests_E/test_codegen.py similarity index 89% rename from pybuda/test/tvm/nlp/pytorch/tests_E/test_codegen.py rename to forge/test/tvm/nlp/pytorch/tests_E/test_codegen.py index d32ec8ed8..1a703d2e8 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_E/test_codegen.py +++ b/forge/test/tvm/nlp/pytorch/tests_E/test_codegen.py @@ -6,7 +6,7 @@ import pytest import torch -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, @@ -14,9 +14,9 @@ VerifyConfig, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model from loguru import logger @@ -57,7 +57,7 @@ def forward(self, input_ids, attention_mask, ): arch=test_device.arch, devtype=test_device.devtype, test_kind=test_kind, - verify_pybuda_codegen_vs_framework=True, + verify_forge_codegen_vs_framework=True, run_golden=True, pcc=0.98 ), diff --git a/pybuda/test/tvm/nlp/pytorch/tests_E/test_whisper.py b/forge/test/tvm/nlp/pytorch/tests_E/test_whisper.py similarity index 86% rename from pybuda/test/tvm/nlp/pytorch/tests_E/test_whisper.py rename to forge/test/tvm/nlp/pytorch/tests_E/test_whisper.py index c2ce0a268..7aa3eb95e 100644 --- a/pybuda/test/tvm/nlp/pytorch/tests_E/test_whisper.py +++ b/forge/test/tvm/nlp/pytorch/tests_E/test_whisper.py @@ -15,15 +15,15 @@ ) from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask -import pybuda -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind -from pybuda._C.backend_api import BackendType -from pybuda import PyTorchModule, VerifyConfig 
-from pybuda.config import _get_global_compiler_config +import forge +from forge.verify import verify_module +from forge.verify.config import TestKind +from forge._C.backend_api import BackendType +from forge import PyTorchModule, VerifyConfig +from forge.config import _get_global_compiler_config from test.utils import download_model -from pybuda.pybudaglobal import TILE_DIM +from forge.forgeglobal import TILE_DIM variants = [ "openai/whisper-tiny", @@ -40,11 +40,11 @@ def test_whisper_encoder(test_device, variant): compiler_cfg = _get_global_compiler_config() compiler_cfg.amp_level = 1 compiler_cfg.enable_tvm_cpu_fallback = False # Run full model on silicon - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b pcc = 0.93 if test_device.devtype == BackendType.Silicon else 0.99 if variant == "openai/whisper-small" or variant == "openai/whisper-medium" or variant == "openai/whisper-large": - os.environ["PYBUDA_PAD_MM"] = "{47:48}" + os.environ["FORGE_PAD_MM"] = "{47:48}" class Wrapper(torch.nn.Module): def __init__(self, model): @@ -68,10 +68,10 @@ def forward(self, input_features): ) framework_model = Wrapper(framework_model) - pybuda_model = PyTorchModule("pt_whisper", framework_model) + forge_model = PyTorchModule("pt_whisper", framework_model) # Load and preprocess sample audio - sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") sample_audio = sample["audio"]["array"] inputs = processor(sample_audio, return_tensors="pt") @@ -81,7 +81,7 @@ def forward(self, input_features): out = framework_model(input_features) verify_module( - pybuda_model, + forge_model, [ (input_features.shape), ], @@ -99,14 +99,14 @@ def forward(self, input_features): @pytest.mark.parametrize("variant", variants, ids=variants) def test_whisper_decoder(test_device, variant): - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip() # Configurations compiler_cfg = _get_global_compiler_config() compiler_cfg.amp_level = 1 compiler_cfg.enable_tvm_cpu_fallback = False # Run full model on silicon - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" class Wrapper(torch.nn.Module): def __init__(self, model): @@ -152,10 +152,10 @@ def forward(self, decoder_input_ids, encoder_hidden_states): ) framework_model = Wrapper(framework_model) - pybuda_model = PyTorchModule("pt_whisper", framework_model) + forge_model = PyTorchModule("pt_whisper", framework_model) # Load and preprocess sample audio - sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") sample_audio = sample["audio"]["array"] inputs = processor(sample_audio, return_tensors="pt") @@ -172,7 +172,7 @@ def forward(self, decoder_input_ids, encoder_hidden_states): pcc = 0.96 if test_device.devtype == BackendType.Silicon else 0.99 verify_module( - pybuda_model, + forge_model, [ (decoder_input_ids.shape, encoder_outputs.shape), ], @@ -241,14 +241,14 @@ def test_whisper_enc_dec(test_device, variant): compiler_cfg.input_queues_on_host = True compiler_cfg.compile_subgraphs = True compiler_cfg.enable_link_past_cache_ios = True - 
compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" - os.environ["PYBUDA_PAD_OUTPUT_BUFFER"] = "1" + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b + os.environ["FORGE_FORCE_SEQUENTIAL"] = "1" + os.environ["FORGE_PAD_OUTPUT_BUFFER"] = "1" os.environ["TT_BACKEND_MULTI_THREADED_PUSH"] = "1" os.environ["TT_BACKEND_DRAM_POLLING_FREQUENCY"] = "64" os.environ["TT_BACKEND_PROFILER"] = "1" - # pybuda.set_configuration_options(performance_trace=pybuda.PerfTraceLevel.VERBOSE) + # forge.set_configuration_options(performance_trace=forge.PerfTraceLevel.VERBOSE) processor = download_model(AutoProcessor.from_pretrained, variant) config = WhisperConfig.from_pretrained(variant) max_length = config.max_length @@ -259,15 +259,15 @@ def test_whisper_enc_dec(test_device, variant): ) feature_extractor = download_model(WhisperFeatureExtractor.from_pretrained, variant) tokenizer = WhisperTokenizer.from_pretrained(variant) - encoder_module = pybuda.PyTorchModule("Whisper_encoder", Whisper_encoder(model)) - decoder_module_cross_attention = pybuda.PyTorchModule("Whisper_decoder_with_ca", Whisper_decoder(model)) - decoder_module_no_cross_attention = pybuda.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) + encoder_module = forge.PyTorchModule("Whisper_encoder", Whisper_encoder(model)) + decoder_module_cross_attention = forge.PyTorchModule("Whisper_decoder_with_ca", Whisper_decoder(model)) + decoder_module_no_cross_attention = forge.PyTorchModule("Whisper_decoder_no_ca", Whisper_decoder(model)) for i in range(config.decoder_layers): - pybuda.config.override_t_stream_shape(f"model.model.decoder.layers.{i}.self_attn.k_proj.weight_cache_nop", [13, 1]) - pybuda.config.override_t_stream_shape(f"model.model.decoder.layers.{i}.self_attn.v_proj.weight_cache_nop", [13, 1]) + forge.config.override_t_stream_shape(f"model.model.decoder.layers.{i}.self_attn.k_proj.weight_cache_nop", [13, 1]) + forge.config.override_t_stream_shape(f"model.model.decoder.layers.{i}.self_attn.v_proj.weight_cache_nop", [13, 1]) - sample = torch.load("pybuda/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") + sample = torch.load("forge/test/model_demos/utils/nlp/pytorch/1272-128104-0000.pt") sample_audio = sample["audio"]["array"] inputs = processor(sample_audio, return_tensors="pt") @@ -299,14 +299,14 @@ def test_whisper_enc_dec(test_device, variant): decoder_no_ca_inputs += [torch.zeros(enc_past_cache_self_shape), torch.zeros(enc_past_cache_self_shape), torch.zeros(enc_past_cache_cross_shape), torch.zeros(enc_past_cache_cross_shape)] - tt0 = pybuda.TTDevice( + tt0 = forge.TTDevice( "tt0", devtype=test_device.devtype, arch=test_device.arch, module=[decoder_module_cross_attention, decoder_module_no_cross_attention]) # module=[encoder_module, decoder_module_cross_attention, decoder_module_no_cross_attention]) - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=( # (input_features,), @@ -331,7 +331,7 @@ def test_whisper_enc_dec(test_device, variant): start = time.time() # tt0.set_active_subgraph(0) # tt0.push_to_inputs((input_features, )) - # pybuda.run_forward() + # forge.run_forward() # ans = output_q.get() # encoder_last_hidden_state = ans[0].value().detach() generated_tokens = [] @@ -345,13 +345,13 @@ def test_whisper_enc_dec(test_device, variant): tt0.set_active_subgraph(0) generate_inputs = (decoder_input_ids, decoder_attention_mask, encoder_last_hidden_state, position_embeds) 
tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=current_token_index//TILE_DIM) + forge.run_generate(input_count=1, write_index=current_token_index//TILE_DIM) ans = output_q.get() else: tt0.set_active_subgraph(1) generate_inputs = (decoder_input_ids, decoder_attention_mask, position_embeds) tt0.push_to_inputs(generate_inputs) - pybuda.run_generate(input_count=1, write_index=current_token_index//TILE_DIM) + forge.run_generate(input_count=1, write_index=current_token_index//TILE_DIM) ans = output_q.get() lm_head_out = ans[0].value().detach() diff --git a/pybuda/test/tvm/nlp/tensorflow/__init__.py b/forge/test/tvm/nlp/tensorflow/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/tensorflow/__init__.py rename to forge/test/tvm/nlp/tensorflow/__init__.py diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_A/test_albert.py b/forge/test/tvm/nlp/tensorflow/tests_A/test_albert.py similarity index 94% rename from pybuda/test/tvm/nlp/tensorflow/tests_A/test_albert.py rename to forge/test/tvm/nlp/tensorflow/tests_A/test_albert.py index fba954608..c1d383796 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_A/test_albert.py +++ b/forge/test/tvm/nlp/tensorflow/tests_A/test_albert.py @@ -11,20 +11,20 @@ from transformers import AlbertConfig, TFAlbertModel from transformers.models.albert.modeling_tf_albert import TFAlbertAttention, TFAlbertLayer -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind model_config_v1 = { diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_A/test_t5_small_tf.py b/forge/test/tvm/nlp/tensorflow/tests_A/test_t5_small_tf.py similarity index 89% rename from pybuda/test/tvm/nlp/tensorflow/tests_A/test_t5_small_tf.py rename to forge/test/tvm/nlp/tensorflow/tests_A/test_t5_small_tf.py index d69ecd241..edcb1bca7 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_A/test_t5_small_tf.py +++ b/forge/test/tvm/nlp/tensorflow/tests_A/test_t5_small_tf.py @@ -4,7 +4,7 @@ # # Some basic bring-up tests of tracing functionality # -from pybuda.module import TFModule +from forge.module import TFModule import pytest import tensorflow as tf @@ -14,18 +14,18 @@ -from pybuda import ( +from forge import ( TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model from loguru import logger diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_B/__init__.py b/forge/test/tvm/nlp/tensorflow/tests_B/__init__.py similarity index 100% rename from pybuda/test/tvm/nlp/tensorflow/tests_B/__init__.py 
rename to forge/test/tvm/nlp/tensorflow/tests_B/__init__.py diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_bart.py b/forge/test/tvm/nlp/tensorflow/tests_B/test_bart.py similarity index 94% rename from pybuda/test/tvm/nlp/tensorflow/tests_B/test_bart.py rename to forge/test/tvm/nlp/tensorflow/tests_B/test_bart.py index 1b7ac4262..542251ab0 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_bart.py +++ b/forge/test/tvm/nlp/tensorflow/tests_B/test_bart.py @@ -14,29 +14,29 @@ from transformers.models.bart.modeling_tf_bart import _make_causal_mask, shift_tokens_right from test.backend.models.test_bert import get_relaxed_atol_pcc -import pybuda -from pybuda import ( +import forge +from forge import ( TFModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, CPUDevice, TTDevice, Tensor, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model from loguru import logger -from pybuda.op.eval.common import compare_tensor_to_golden +from forge.op.eval.common import compare_tensor_to_golden def test_bart_decoder(test_kind, test_device): if test_kind == TestKind.TRAINING: # only run recompute test in post-commit @@ -148,7 +148,7 @@ def test_bart_encoder_pipeline(test_device): input_ids = tf.random.uniform((1, seq_len), maxval=config.vocab_size, dtype=tf.int32) cpu0.push_to_inputs(input_ids) - output_q = pybuda.run_inference(_verify_cfg=VerifyConfig(verify_last=False)) + output_q = forge.run_inference(_verify_cfg=VerifyConfig(verify_last=False)) outputs = output_q.get() tf_outputs = model.get_encoder()(input_ids) @@ -179,7 +179,7 @@ def test_bart_decoder_pipeline(test_device): tf_outputs = model.get_decoder()(input_ids) cpu0.push_to_inputs(input_ids) - output_q = pybuda.run_inference(_verify_cfg=VerifyConfig(verify_last=True)) + output_q = forge.run_inference(_verify_cfg=VerifyConfig(verify_last=True)) outputs = output_q.get() assert compare_tensor_to_golden("bart_decoder", tf_outputs[0], outputs[0].value(), is_buda=True) diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_bert.py b/forge/test/tvm/nlp/tensorflow/tests_B/test_bert.py similarity index 95% rename from pybuda/test/tvm/nlp/tensorflow/tests_B/test_bert.py rename to forge/test/tvm/nlp/tensorflow/tests_B/test_bert.py index 1992b34c3..430861b45 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_bert.py +++ b/forge/test/tvm/nlp/tensorflow/tests_B/test_bert.py @@ -11,9 +11,9 @@ import torch from transformers import BertConfig from transformers.models.bert.modeling_tf_bert import TFBertLayer, TFBertModel, TFBertMainLayer, TFBertForQuestionAnswering, TFBertAttention -from pybuda import BackendDevice +from forge import BackendDevice -from pybuda import ( +from forge import ( CPUDevice, TFModule, TTDevice, @@ -21,13 +21,13 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge from test.backend.models.test_bert import get_relaxed_atol_pcc -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import 
verify_module, verify_module_pipeline -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module, verify_module_pipeline +from forge.verify.config import TestKind from test.utils import download_model from test.backend.models.test_bert import get_relaxed_atol_pcc @@ -219,7 +219,7 @@ def call(self, hidden_states): # Initialize module config = BertConfig(**model_config) framework_module = SelfAttention(config=config) - pybuda_module = TFModule("bert_self_attention_tf", framework_module) + forge_module = TFModule("bert_self_attention_tf", framework_module) input_shape = (1, 128, 128) # Run module @@ -227,7 +227,7 @@ def call(self, hidden_states): # res = framework_module(hidden_states, None, None, None, None, None, None) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_gpt2.py b/forge/test/tvm/nlp/tensorflow/tests_B/test_gpt2.py similarity index 89% rename from pybuda/test/tvm/nlp/tensorflow/tests_B/test_gpt2.py rename to forge/test/tvm/nlp/tensorflow/tests_B/test_gpt2.py index 11fc4c87a..163d44404 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_gpt2.py +++ b/forge/test/tvm/nlp/tensorflow/tests_B/test_gpt2.py @@ -11,19 +11,19 @@ from transformers import GPT2Config from transformers.models.gpt2.modeling_tf_gpt2 import TFBlock, TFGPT2Model -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.backend.models.test_bert import get_relaxed_atol_pcc @pytest.mark.skip(reason="Tested with fallback") @@ -67,7 +67,7 @@ def call(self, hidden_states): def test_tvm_gpt2_fallback(test_kind, test_device): if test_kind.is_training(): pytest.skip() - #TODO: Fix tvm .14 regressions: tenstorrent/pybuda#2099 + #TODO: Fix tvm .14 regressions: tenstorrent/forge#2099 compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_tvm_constant_prop = True diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_gptj_tf.py b/forge/test/tvm/nlp/tensorflow/tests_B/test_gptj_tf.py similarity index 90% rename from pybuda/test/tvm/nlp/tensorflow/tests_B/test_gptj_tf.py rename to forge/test/tvm/nlp/tensorflow/tests_B/test_gptj_tf.py index 680f89dff..f9cf03145 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_gptj_tf.py +++ b/forge/test/tvm/nlp/tensorflow/tests_B/test_gptj_tf.py @@ -17,7 +17,7 @@ from transformers.models.gptj.modeling_tf_gptj import TFGPTJModel, TFGPTJBlock, TFGPTJAttention, TFGPTJMLP from transformers.modeling_tf_utils import get_initializer -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendDevice, @@ -25,14 +25,14 @@ CompileDepth, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.op.eval import compare_tensor_to_golden -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import 
TestKind -import pybuda +from forge.op.eval import compare_tensor_to_golden +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind +import forge def fixed_pos_embedding(x: tf.Tensor, seq_dim: int = 1, seq_len: Optional[int] = None) -> Tuple[tf.Tensor, tf.Tensor]: dim = shape_list(x)[-1] @@ -120,18 +120,18 @@ def test_gptj_block(test_kind, test_device): ) def test_gptj_fallback(test_kind, test_device): - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip() if test_kind.is_training(): # only run recompute test in post-commit pytest.skip() if test_device.arch == BackendDevice.Wormhole_B0: - pytest.skip() # see tenstorrent/pybuda#969 + pytest.skip() # see tenstorrent/forge#969 compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_tvm_constant_prop = True - #Fusing disabled due to tenstorrent/pybuda#789 + #Fusing disabled due to tenstorrent/forge#789 compiler_cfg.enable_auto_fusing=False configuration = GPTJConfig(n_layer=1) diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_wav2vec2.py b/forge/test/tvm/nlp/tensorflow/tests_B/test_wav2vec2.py similarity index 96% rename from pybuda/test/tvm/nlp/tensorflow/tests_B/test_wav2vec2.py rename to forge/test/tvm/nlp/tensorflow/tests_B/test_wav2vec2.py index 0cf015476..bb513d03d 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_B/test_wav2vec2.py +++ b/forge/test/tvm/nlp/tensorflow/tests_B/test_wav2vec2.py @@ -16,14 +16,14 @@ TFWav2Vec2Encoder, ) -from pybuda import ( +from forge import ( TFModule, VerifyConfig, ) -from pybuda.config import CompileDepth -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind def test_wav2vec2_full_model(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_distillbert.py b/forge/test/tvm/nlp/tensorflow/tests_C/test_distillbert.py similarity index 92% rename from pybuda/test/tvm/nlp/tensorflow/tests_C/test_distillbert.py rename to forge/test/tvm/nlp/tensorflow/tests_C/test_distillbert.py index 13045537a..87453ae3e 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_distillbert.py +++ b/forge/test/tvm/nlp/tensorflow/tests_C/test_distillbert.py @@ -10,14 +10,14 @@ TFTransformer, ) -from pybuda import ( +from forge import ( TFModule, VerifyConfig, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.verify import verify_module +from forge.verify.config import TestKind from test.backend.models.test_bert import get_relaxed_atol_pcc -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config from test.utils import download_model @@ -41,7 +41,7 @@ def call(self, input_act): "distilbert-base-cased-distilled-squad" ) framework_module = Transformer(framework_module) - pybuda_module = TFModule("distilbert_tf", framework_module) + forge_module = TFModule("distilbert_tf", framework_module) # Input shapes input_act_shape = (1, 128) @@ -51,7 +51,7 @@ def call(self, input_act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, 
(input_act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_opt.py b/forge/test/tvm/nlp/tensorflow/tests_C/test_opt.py similarity index 93% rename from pybuda/test/tvm/nlp/tensorflow/tests_C/test_opt.py rename to forge/test/tvm/nlp/tensorflow/tests_C/test_opt.py index 7482bb30b..66beb4d79 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_opt.py +++ b/forge/test/tvm/nlp/tensorflow/tests_C/test_opt.py @@ -1,14 +1,14 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest from typing import Optional, Tuple import torch import tensorflow as tf from transformers import OPTConfig from transformers.models.opt.modeling_tf_opt import TFOPTDecoderLayer, TFOPTModel -from pybuda import ( +from forge import ( TFModule, TTDevice, CPUDevice, @@ -16,17 +16,17 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module, verify_module_pipeline -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module, verify_module_pipeline +from forge.verify.config import TestKind from loguru import logger -import pybuda -from pybuda.op.eval.common import compare_tensor_to_golden +import forge +from forge.op.eval.common import compare_tensor_to_golden from test.utils import download_model diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_roberta.py b/forge/test/tvm/nlp/tensorflow/tests_C/test_roberta.py similarity index 94% rename from pybuda/test/tvm/nlp/tensorflow/tests_C/test_roberta.py rename to forge/test/tvm/nlp/tensorflow/tests_C/test_roberta.py index bae51aef8..6e123665e 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_roberta.py +++ b/forge/test/tvm/nlp/tensorflow/tests_C/test_roberta.py @@ -6,19 +6,19 @@ import tensorflow as tf from transformers import TFRobertaModel -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, VerifyConfig, CPUDevice, ) -import pybuda +import forge -from pybuda.verify import verify_module, verify_module_pipeline -from pybuda.verify.config import TestKind +from forge.verify import verify_module, verify_module_pipeline +from forge.verify.config import TestKind -from pybuda.op.eval.common import compare_tensor_to_golden +from forge.op.eval.common import compare_tensor_to_golden from test.backend.models.test_bert import get_relaxed_atol_pcc from test.utils import download_model @@ -135,11 +135,11 @@ def call(self, input_ids): input_ids = tf.random.uniform((1, seq_len), maxval=vocab_size, dtype=tf.dtypes.int32) cpu0.push_to_inputs(tf.Variable(input_ids, trainable=False)) - output_q = pybuda.run_inference() + output_q = forge.run_inference() outputs = output_q.get() tf_outputs = roberta_model(input_ids) - torch_outputs = pybuda.tensor.to_pt_tensors(tf_outputs[0]) + torch_outputs = forge.tensor.to_pt_tensors(tf_outputs[0]) assert compare_tensor_to_golden("roberta", torch_outputs[0], outputs[0].value(), is_buda=True) diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_xlm.py b/forge/test/tvm/nlp/tensorflow/tests_C/test_xlm.py similarity index 89% rename from pybuda/test/tvm/nlp/tensorflow/tests_C/test_xlm.py rename to forge/test/tvm/nlp/tensorflow/tests_C/test_xlm.py index 55e9b816b..d88451f24 100644 --- 
a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_xlm.py +++ b/forge/test/tvm/nlp/tensorflow/tests_C/test_xlm.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest import tensorflow as tf @@ -10,21 +10,21 @@ import math import itertools -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, tvm_to_python, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind def test_tvm_xlm_attention_tf(test_kind, test_device): diff --git a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_xlnet.py b/forge/test/tvm/nlp/tensorflow/tests_C/test_xlnet.py similarity index 84% rename from pybuda/test/tvm/nlp/tensorflow/tests_C/test_xlnet.py rename to forge/test/tvm/nlp/tensorflow/tests_C/test_xlnet.py index c7170bbc1..063d938fa 100644 --- a/pybuda/test/tvm/nlp/tensorflow/tests_C/test_xlnet.py +++ b/forge/test/tvm/nlp/tensorflow/tests_C/test_xlnet.py @@ -1,25 +1,25 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth +from forge.config import CompileDepth import pytest import tensorflow as tf from transformers import XLNetConfig from transformers.models.xlnet.modeling_tf_xlnet import TFXLNetLayer,TFXLNetMainLayer -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, tvm_to_python, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind def test_tvm_xlm_attention_tf(test_kind, test_device): @@ -35,7 +35,7 @@ def __init__(self, config): self.layer = TFXLNetLayer(config) def call(self, hidden_states,pos_emb): - # Pybuda -> TVM compile removes batch dim. + # Forge -> TVM compile removes batch dim. 
hidden_states = tf.transpose(hidden_states, perm=[1, 0, 2]) pos_emb = tf.transpose(pos_emb, perm=[1, 0, 2]) diff --git a/pybuda/test/tvm/python/test_fracturing.py b/forge/test/tvm/python/test_fracturing.py similarity index 78% rename from pybuda/test/tvm/python/test_fracturing.py rename to forge/test/tvm/python/test_fracturing.py index 76b47bc80..6634e93c0 100644 --- a/pybuda/test/tvm/python/test_fracturing.py +++ b/forge/test/tvm/python/test_fracturing.py @@ -5,15 +5,15 @@ import torch -import pybuda -from pybuda import ( +import forge +from forge import ( Tensor, Parameter, CompilerConfig, CompileDepth, VerifyConfig, ) -from pybuda.verify import verify_module +from forge.verify import verify_module class AttentionMatmul(torch.nn.Module): @@ -46,17 +46,17 @@ def test_attn_fracture_matmul_heads(test_kind, test_device): nh = 16 dh = d // nh mod = AttentionMatmul(nh) - module = pybuda.PyTorchModule('attn_matmul', mod) + module = forge.PyTorchModule('attn_matmul', mod) - pybuda.set_configuration_options(default_df_override=pybuda.DataFormat.Float32, - accumulate_df=pybuda.DataFormat.Float32) + forge.set_configuration_options(default_df_override=forge.DataFormat.Float32, + accumulate_df=forge.DataFormat.Float32) ''' Fracturing ''' factor = 2 - pybuda.config.insert_fracture_group([("matmul_7", -3, factor),]) + forge.config.insert_fracture_group([("matmul_7", -3, factor),]) verify_module(module, [(1, 1, d), (1, s, d)], VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, chip_ids=[0]),) @@ -95,13 +95,13 @@ def test_attn_cache_loopback(test_kind, test_device): nh = 16 dh = d // nh mod = AttentionMatmulLoopback(nh) - module = pybuda.PyTorchModule('attn_loopback_fractured', mod) + module = forge.PyTorchModule('attn_loopback_fractured', mod) - df = pybuda.DataFormat.Float16 - pybuda.set_configuration_options(default_df_override=df, + df = forge.DataFormat.Float16 + forge.set_configuration_options(default_df_override=df, accumulate_df=df) - from pybuda.config import _get_global_compiler_config + from forge.config import _get_global_compiler_config compiler_cfg = _get_global_compiler_config() compiler_cfg.loopback_outputs = {"k_cache_param": 1} @@ -110,16 +110,16 @@ def test_attn_cache_loopback(test_kind, test_device): # Fracturing # ''' factor = 2 - pybuda.config.insert_fracture_group([("concatenate_5", -3, factor)]) + forge.config.insert_fracture_group([("concatenate_5", -3, factor)]) - tt0 = pybuda.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=module) + tt0 = forge.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=module) compile_inputs = (torch.randn(1, 1, d), torch.randn(1, s, d), torch.randn(1, 32, d)) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=compile_inputs,) + output_q = forge.initialize_pipeline(training=False, sample_inputs=compile_inputs,) inputs = (torch.randn(1, 1, d), torch.randn(1, 32, d)) tt0.push_to_inputs(inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) class AttentionModuleLoopback(torch.nn.Module): @@ -175,13 +175,13 @@ def test_attn_module_cache_loopback(test_kind, test_device): nh = 12 dh = d // nh mod = AttentionModuleLoopback(nh) - module = pybuda.PyTorchModule('attn_module_loopback_fractured', mod) + module = forge.PyTorchModule('attn_module_loopback_fractured', mod) - df = pybuda.DataFormat.Float16 - pybuda.set_configuration_options(default_df_override=df, + df = forge.DataFormat.Float16 + 
forge.set_configuration_options(default_df_override=df, accumulate_df=df) - from pybuda.config import _get_global_compiler_config + from forge.config import _get_global_compiler_config compiler_cfg = _get_global_compiler_config() compiler_cfg.loopback_outputs = {"k_cache_param": 1, "v_cache_param": 2} @@ -190,7 +190,7 @@ def test_attn_module_cache_loopback(test_kind, test_device): # Fracturing # ''' factor = 2 - pybuda.config.insert_fracture_group([ + forge.config.insert_fracture_group([ ("k_cache_param", -1, factor), ("concatenate_4", -3, factor), ("matmul_7", -3, factor), @@ -198,14 +198,14 @@ def test_attn_module_cache_loopback(test_kind, test_device): ("matmul_17", -3, factor), ]) - tt0 = pybuda.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=module) + tt0 = forge.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=module) qkv_act_size = (1, 32, d) cache_size = (1, s, d) compile_inputs = (torch.randn(qkv_act_size), torch.randn(cache_size), torch.randn(qkv_act_size), torch.randn(cache_size), torch.randn(qkv_act_size)) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=compile_inputs,) + output_q = forge.initialize_pipeline(training=False, sample_inputs=compile_inputs,) inputs = (torch.randn(qkv_act_size), torch.randn(qkv_act_size), torch.randn(qkv_act_size)) tt0.push_to_inputs(inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) diff --git a/pybuda/test/tvm/python/test_sanity.py b/forge/test/tvm/python/test_sanity.py similarity index 84% rename from pybuda/test/tvm/python/test_sanity.py rename to forge/test/tvm/python/test_sanity.py index 986d324ce..ca2af3010 100644 --- a/pybuda/test/tvm/python/test_sanity.py +++ b/forge/test/tvm/python/test_sanity.py @@ -10,13 +10,13 @@ import torch import tensorflow as tf import torch.nn as nn -import pybuda -from pybuda.tvm_to_python import compile_tvm_to_python -from pybuda import ( +import forge +from forge.tvm_to_python import compile_tvm_to_python +from forge import ( TTDevice, BackendDevice, BackendType, - pybuda_compile, + forge_compile, VerifyConfig, PyTorchModule, CompilerConfig, @@ -27,10 +27,10 @@ from transformers import BertModel, BertConfig from transformers import GPT2Model, GPT2Config from collections import OrderedDict -from pybuda.op.eval import compare_tensor_to_golden +from forge.op.eval import compare_tensor_to_golden import importlib -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge from test.utils import download_model @@ -74,7 +74,7 @@ def forward(self, x1): buda_mod.process_framework_parameters(model) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=buda_mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -84,9 +84,9 @@ def forward(self, x1): sgd_optimizer.set_optimizer_parameters() - res = pybuda_compile(tt0, test_name, act1_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) + res = forge_compile(tt0, test_name, act1_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) - evaluate_framework_vs_pybuda(model, res, act1) + evaluate_framework_vs_forge(model, res, act1) def test_linear_tf(): @@ -126,7 +126,7 @@ def call(self, x1): buda_mod.process_framework_parameters(model) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=buda_mod.get_parameters() ) tt0 = 
TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -136,9 +136,9 @@ def call(self, x1): sgd_optimizer.set_optimizer_parameters() - res = pybuda_compile(tt0, test_name, act1_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) + res = forge_compile(tt0, test_name, act1_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) - evaluate_framework_vs_pybuda(model, res, act1) + evaluate_framework_vs_forge(model, res, act1) def test_bert(): @@ -174,7 +174,7 @@ def test_bert(): buda_mod.process_framework_parameters(submodel) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=buda_mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -184,9 +184,9 @@ def test_bert(): sgd_optimizer.set_optimizer_parameters() - res = pybuda_compile(tt0, test_name, hidden_states_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) + res = forge_compile(tt0, test_name, hidden_states_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) - evaluate_framework_vs_pybuda(submodel, res, hidden_states) + evaluate_framework_vs_forge(submodel, res, hidden_states) def test_gpt2(): @@ -220,7 +220,7 @@ def test_gpt2(): buda_mod.process_framework_parameters(submodel) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=buda_mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -230,9 +230,9 @@ def test_gpt2(): sgd_optimizer.set_optimizer_parameters() - res = pybuda_compile(tt0, test_name, hidden_states_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) + res = forge_compile(tt0, test_name, hidden_states_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) - evaluate_framework_vs_pybuda(submodel, res, hidden_states) + evaluate_framework_vs_forge(submodel, res, hidden_states) def test_gpt2_multiple_layers(): @@ -277,7 +277,7 @@ def forward(self, hidden_states): buda_mod.process_framework_parameters(model) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=buda_mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -287,9 +287,9 @@ def forward(self, hidden_states): sgd_optimizer.set_optimizer_parameters() - res = pybuda_compile(tt0, test_name, hidden_states_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) + res = forge_compile(tt0, test_name, hidden_states_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) - evaluate_framework_vs_pybuda(model, res, hidden_states) + evaluate_framework_vs_forge(model, res, hidden_states) def test_resnet(): model = torch.hub.load("pytorch/vision:v0.10.0", "resnet18", pretrained=True) @@ -311,7 +311,7 @@ def test_resnet(): buda_mod.process_framework_parameters(model) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=buda_mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -321,9 +321,9 @@ def test_resnet(): sgd_optimizer.set_optimizer_parameters() - res = pybuda_compile(tt0, test_name, act1_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) + res = forge_compile(tt0, test_name, act1_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) - evaluate_framework_vs_pybuda(model, res, act1) + evaluate_framework_vs_forge(model, res, act1) def test_unsupported(): @@ -387,7 +387,7 @@ def 
test_bert_base(): buda_mod.process_framework_parameters(submodel) - sgd_optimizer = pybuda.optimizers.SGD( + sgd_optimizer = forge.optimizers.SGD( learning_rate=0.5, parameters=buda_mod.get_parameters() ) tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) @@ -398,9 +398,9 @@ def test_bert_base(): sgd_optimizer.set_optimizer_parameters() - res = pybuda_compile(tt0, test_name, hidden_states_buda, attention_mask_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) + res = forge_compile(tt0, test_name, hidden_states_buda, attention_mask_buda, compiler_cfg=compiler_cfg, verify_cfg=VerifyConfig()) - evaluate_framework_vs_pybuda(submodel, res, hidden_states) + evaluate_framework_vs_forge(submodel, res, hidden_states) class AttentionModuleLoopback(torch.nn.Module): @@ -451,7 +451,7 @@ def test_attn_module_cache_loopback(test_kind, test_device): if test_kind.is_training(): pytest.skip() - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip("Wait until #1005 is resolved") s = 480 @@ -459,13 +459,13 @@ def test_attn_module_cache_loopback(test_kind, test_device): nh = 12 dh = d // nh mod = AttentionModuleLoopback(nh) - module = pybuda.PyTorchModule('attn_module_loopback_fractured', mod) + module = forge.PyTorchModule('attn_module_loopback_fractured', mod) - df = pybuda.DataFormat.Float16 - pybuda.set_configuration_options(default_df_override=df, + df = forge.DataFormat.Float16 + forge.set_configuration_options(default_df_override=df, accumulate_df=df) - from pybuda.config import _get_global_compiler_config + from forge.config import _get_global_compiler_config compiler_cfg = _get_global_compiler_config() compiler_cfg.loopback_outputs = {"k_cache_param": 1, "v_cache_param": 2} @@ -474,7 +474,7 @@ def test_attn_module_cache_loopback(test_kind, test_device): # Fracturing # ''' factor = 2 - pybuda.config.insert_fracture_group([ + forge.config.insert_fracture_group([ ("k_cache_param", -1, factor), ("v_cache_param", -1, factor), ("concatenate_13", -3, factor), @@ -484,24 +484,24 @@ def test_attn_module_cache_loopback(test_kind, test_device): ("matmul_17", -3, factor), ]) - # pybuda.config.insert_fracture_group([ + # forge.config.insert_fracture_group([ # # ("matmul_7", -3, factor), # # ("softmax_9", -3, factor), # # ("matmul_17", -3, factor), # ]) - tt0 = pybuda.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=module) + tt0 = forge.TTDevice("tt0", devtype=test_device.devtype, arch=test_device.arch, module=module) qkv_act_size = (1, 32, d) cache_size = (1, s, d) compile_inputs = (torch.randn(qkv_act_size), torch.randn(cache_size), torch.randn(qkv_act_size), torch.randn(cache_size), torch.randn(qkv_act_size)) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=compile_inputs,) + output_q = forge.initialize_pipeline(training=False, sample_inputs=compile_inputs,) inputs = (torch.randn(qkv_act_size), torch.randn(qkv_act_size), torch.randn(qkv_act_size)) tt0.push_to_inputs(inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) def test_scheduler_write_back_parameters(test_kind, test_device): @@ -533,14 +533,14 @@ def forward(self, x, y_new, y_cache): compile_inputs=(x, y_new, y_cache) inputs=(x, y_cache) - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.loopback_outputs = {'y_cache_1': 1} - tt = pybuda.TTDevice('tt0', 
devtype=test_device.devtype, arch=test_device.arch) - tt.place_module(pybuda.PyTorchModule('min_repro', model)) + tt = forge.TTDevice('tt0', devtype=test_device.devtype, arch=test_device.arch) + tt.place_module(forge.PyTorchModule('min_repro', model)) - output_q = pybuda.initialize_pipeline(training=False, sample_inputs=compile_inputs) + output_q = forge.initialize_pipeline(training=False, sample_inputs=compile_inputs) tt.push_to_inputs(inputs) - pybuda.run_generate(input_count=1, write_index=0) + forge.run_generate(input_count=1, write_index=0) out = output_q.get() \ No newline at end of file diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/__init__.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/__init__.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/__init__.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/__init__.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/callbacks.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/callbacks.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/callbacks.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/callbacks.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/inputs.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/inputs.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/inputs.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/inputs.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/__init__.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/__init__.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/__init__.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/__init__.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/activation.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/activation.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/activation.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/activation.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/core_modules.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/core_modules.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/core_modules.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/core_modules.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/interaction.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/interaction.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/interaction.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/interaction.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/sequence.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/sequence.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/sequence.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/sequence.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/utils.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/utils.py similarity index 100% rename from 
pybuda/test/tvm/recommendation/pytorch/deepctr_torch/layers/utils.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/layers/utils.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/license b/forge/test/tvm/recommendation/pytorch/deepctr_torch/license similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/license rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/license diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/__init__.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/__init__.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/__init__.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/__init__.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/afm.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/afm.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/afm.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/afm.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/afn.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/afn.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/afn.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/afn.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/autoint.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/autoint.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/autoint.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/autoint.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/basemodel.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/basemodel.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/basemodel.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/basemodel.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/ccpm.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/ccpm.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/ccpm.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/ccpm.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/dcn.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/dcn.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/dcn.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/dcn.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/dcnmix.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/dcnmix.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/dcnmix.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/dcnmix.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/deepfm.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/deepfm.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/deepfm.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/deepfm.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/dien.py 
b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/dien.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/dien.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/dien.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/difm.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/difm.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/difm.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/difm.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/din.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/din.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/din.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/din.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/fibinet.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/fibinet.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/fibinet.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/fibinet.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/ifm.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/ifm.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/ifm.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/ifm.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/mlr.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/mlr.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/mlr.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/mlr.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/nfm.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/nfm.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/nfm.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/nfm.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/onn.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/onn.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/onn.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/onn.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/pnn.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/pnn.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/pnn.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/pnn.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/wdl.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/wdl.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/wdl.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/wdl.py diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/xdeepfm.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/models/xdeepfm.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/models/xdeepfm.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/models/xdeepfm.py diff --git 
a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/movielens_sample.txt b/forge/test/tvm/recommendation/pytorch/deepctr_torch/movielens_sample.txt similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/movielens_sample.txt rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/movielens_sample.txt diff --git a/pybuda/test/tvm/recommendation/pytorch/deepctr_torch/utils.py b/forge/test/tvm/recommendation/pytorch/deepctr_torch/utils.py similarity index 100% rename from pybuda/test/tvm/recommendation/pytorch/deepctr_torch/utils.py rename to forge/test/tvm/recommendation/pytorch/deepctr_torch/utils.py diff --git a/pybuda/test/tvm/recommendation/pytorch/test_afn.py b/forge/test/tvm/recommendation/pytorch/test_afn.py similarity index 93% rename from pybuda/test/tvm/recommendation/pytorch/test_afn.py rename to forge/test/tvm/recommendation/pytorch/test_afn.py index 4f9bd4807..0488afe46 100644 --- a/pybuda/test/tvm/recommendation/pytorch/test_afn.py +++ b/forge/test/tvm/recommendation/pytorch/test_afn.py @@ -3,19 +3,19 @@ # SPDX-License-Identifier: Apache-2.0 from audioop import bias -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module import torch -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth, _get_global_compiler_config +from test.tvm.utils import evaluate_framework_vs_forge import pytest import torch diff --git a/pybuda/test/tvm/recommendation/pytorch/test_core_modules.py b/forge/test/tvm/recommendation/pytorch/test_core_modules.py similarity index 93% rename from pybuda/test/tvm/recommendation/pytorch/test_core_modules.py rename to forge/test/tvm/recommendation/pytorch/test_core_modules.py index 7de7b766a..edaf8374a 100644 --- a/pybuda/test/tvm/recommendation/pytorch/test_core_modules.py +++ b/forge/test/tvm/recommendation/pytorch/test_core_modules.py @@ -2,28 +2,28 @@ # SPDX-License-Identifier: Apache-2.0 from matplotlib import use -from pybuda.op.eval.common import compare_tensor_to_golden -from pybuda.verify.backend import verify_module +from forge.op.eval.common import compare_tensor_to_golden +from forge.verify.backend import verify_module from test.tvm.recommendation.pytorch.deepctr_torch.layers.core_modules import LocalActivationUnit from deepctr_torch.layers.core_modules import DNN, Conv2dSame, PredictionLayer import torch import numpy as np -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge import pytest import torch -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config inputs_dims = [64, 128] @@ -70,7 +70,7 @@ # act1 = torch.rand(*shape1) # act2 = torch.rand(*shape2) -# ret = pybuda_compile( +# ret = forge_compile( # tt0, # "deepctr_local_activation_unit", # act1, @@ -178,7 +178,7 @@ def test_prediction_layer(test_kind, test_device, task, use_bias): # inp = torch.randn((64, in_channels, 64, 64)) -# ret = pybuda_compile( +# ret = forge_compile( # 
tt0, # "deepctr_conv2d_same", # inp, @@ -191,4 +191,4 @@ def test_prediction_layer(test_kind, test_device, task, use_bias): # intermediates=True, # ), # ) -# evaluate_framework_vs_pybuda(model, ret, inp) +# evaluate_framework_vs_forge(model, ret, inp) diff --git a/pybuda/test/tvm/recommendation/pytorch/test_deepfm.py b/forge/test/tvm/recommendation/pytorch/test_deepfm.py similarity index 91% rename from pybuda/test/tvm/recommendation/pytorch/test_deepfm.py rename to forge/test/tvm/recommendation/pytorch/test_deepfm.py index b9a284c4f..559c45cd2 100644 --- a/pybuda/test/tvm/recommendation/pytorch/test_deepfm.py +++ b/forge/test/tvm/recommendation/pytorch/test_deepfm.py @@ -2,25 +2,25 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np -import pybuda -from pybuda.cpudevice import CPUDevice -from pybuda.op.eval.common import compare_tensor_to_golden -from pybuda.verify.backend import verify_module +import forge +from forge.cpudevice import CPUDevice +from forge.op.eval.common import compare_tensor_to_golden +from forge.verify.backend import verify_module from test.tvm.recommendation.pytorch.deepctr_torch.inputs import SparseFeat, combined_dnn_input from test.tvm.recommendation.pytorch.deepctr_torch.models import DeepFM from test.tvm.recommendation.pytorch.deepctr_torch.utils import SAMPLE_SIZE, check_model, get_device, get_test_data import torch -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge import pytest import pandas as pd from sklearn.preprocessing import LabelEncoder @@ -112,7 +112,7 @@ def test_DeepFM(test_device, use_fm, hidden_size, sparse_feature_num, dense_feat # x = torch.randn(input_shape) torch_outputs = model(x) cpu0.push_to_inputs(x) - output_q = pybuda.run_inference(_verify_cfg=VerifyConfig(relative_atol=relative_atol)) + output_q = forge.run_inference(_verify_cfg=VerifyConfig(relative_atol=relative_atol)) outputs = output_q.get() @@ -121,8 +121,8 @@ def test_DeepFM(test_device, use_fm, hidden_size, sparse_feature_num, dense_feat def test_deepfm_fm(test_kind, test_device): - #Fusing disabled due to tenstorrent/pybuda#789 - pybuda.set_configuration_options(enable_auto_fusing=False) + #Fusing disabled due to tenstorrent/forge#789 + forge.set_configuration_options(enable_auto_fusing=False) data = pd.read_csv(os.path.join(os.path.dirname(__file__), "deepctr_torch/movielens_sample.txt")) sparse_features = ["movie_id", "user_id", diff --git a/pybuda/test/tvm/recommendation/pytorch/test_dlrm.py b/forge/test/tvm/recommendation/pytorch/test_dlrm.py similarity index 98% rename from pybuda/test/tvm/recommendation/pytorch/test_dlrm.py rename to forge/test/tvm/recommendation/pytorch/test_dlrm.py index d5e6b001c..673df0d8c 100644 --- a/pybuda/test/tvm/recommendation/pytorch/test_dlrm.py +++ b/forge/test/tvm/recommendation/pytorch/test_dlrm.py @@ -31,10 +31,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import numpy -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module import pytest -import pybuda +import forge m_spa = (None,) ln_emb = (None,) @@ -108,7 +108,7 @@ import sys from os.path import abspath, join, dirname -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config ### define dlrm in PyTorch ### class DLRM_Net(nn.Module): @@ -432,17 +432,17 @@ def forward(self, x, ly_0, ly_1, ly_2): -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge def test_dlrm_mlp_bot(test_kind, test_device): @@ -493,7 +493,7 @@ def test_dlrm_interact(test_kind, test_device): @pytest.mark.parametrize("size", ["toy", "small", "bench"]) def test_dlrm(test_kind, test_device, size): - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip() if test_kind.is_training(): pytest.skip() diff --git a/pybuda/test/tvm/recommendation/pytorch/test_fibinet.py b/forge/test/tvm/recommendation/pytorch/test_fibinet.py similarity index 84% rename from pybuda/test/tvm/recommendation/pytorch/test_fibinet.py rename to forge/test/tvm/recommendation/pytorch/test_fibinet.py index fb15565d6..7e0278fc0 100644 --- a/pybuda/test/tvm/recommendation/pytorch/test_fibinet.py +++ b/forge/test/tvm/recommendation/pytorch/test_fibinet.py @@ -2,25 +2,25 @@ # SPDX-License-Identifier: Apache-2.0 -from pybuda.config import CompileDepth -from pybuda.verify.backend import verify_module +from forge.config import CompileDepth +from forge.verify.backend import verify_module from test.tvm.recommendation.pytorch.deepctr_torch.layers.interaction import SENETLayer -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda +from test.tvm.utils import evaluate_framework_vs_forge import torch import numpy as np -from pybuda.config import _get_global_compiler_config +from forge.config import _get_global_compiler_config def test_fibinet_se(test_kind, test_device): # Unsupported HW ops @@ -60,7 +60,7 @@ def forward(self, x): # inps = [torch.randn(input_shape)] - # ret = pybuda_compile( + # ret = forge_compile( # tt0, # "senet_layer", # *inps, @@ -73,4 +73,4 @@ def forward(self, x): # intermediates=True, # ), # ) - # evaluate_framework_vs_pybuda(model, ret, *inps) + # evaluate_framework_vs_forge(model, ret, *inps) diff --git a/pybuda/test/tvm/recommendation/pytorch/test_interaction.py b/forge/test/tvm/recommendation/pytorch/test_interaction.py similarity index 93% rename from pybuda/test/tvm/recommendation/pytorch/test_interaction.py rename to forge/test/tvm/recommendation/pytorch/test_interaction.py index 69e12fd90..765eb45cf 100644 --- a/pybuda/test/tvm/recommendation/pytorch/test_interaction.py +++ b/forge/test/tvm/recommendation/pytorch/test_interaction.py @@ -1,24 +1,24 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module from test.tvm.recommendation.pytorch.deepctr_torch.layers.interaction 
import CIN, FM, AFMLayer, BiInteractionPooling, BilinearInteraction, ConvLayer, CrossNet, CrossNetMix, InnerProductLayer, InteractingLayer, LogTransformLayer, OutterProductLayer -from tvm.contrib.pybuda_compile import compile_tf_graphdef_for_buda +from tvm.contrib.forge_compile import compile_tf_graphdef_for_buda from deepctr_torch.layers.interaction import * import torch from torch import nn import numpy as np -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth, _get_global_compiler_config -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth, _get_global_compiler_config +from test.tvm.utils import evaluate_framework_vs_forge import pytest @@ -49,7 +49,7 @@ def test_FM(test_kind, test_device): ) -# TODO: This test remains as a pybuda_compile test rather than verify_module test because batch_size > 1 fails for verify_module +# TODO: This test remains as a forge_compile test rather than verify_module test because batch_size > 1 fails for verify_module def test_bilinear_interaction(training): pytest.skip("Test hangs on GENERATE_INITIAL_GRAPH") @@ -80,7 +80,7 @@ def forward(self, x): tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "deepctr_bilinear_interaction", inp, @@ -93,7 +93,7 @@ def forward(self, x): intermediates=True, ), ) - evaluate_framework_vs_pybuda(model, ret, inp) + evaluate_framework_vs_forge(model, ret, inp) @@ -170,7 +170,7 @@ def test_interacting_layer(training): tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "deepctr_interacting_layer", inp, @@ -183,7 +183,7 @@ def test_interacting_layer(training): intermediates=True, ), ) - evaluate_framework_vs_pybuda(model, ret, inp) + evaluate_framework_vs_forge(model, ret, inp) # TODO: Figure out why this test can't have batch_size > 1 but others can @@ -233,7 +233,7 @@ def test_crossnet_mix(training): tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "deepctr_crossnet_mix", inp, @@ -246,13 +246,13 @@ def test_crossnet_mix(training): intermediates=True, ), ) - evaluate_framework_vs_pybuda(model, ret, inp) + evaluate_framework_vs_forge(model, ret, inp) def test_inner_product_layer(test_kind, test_device): if test_kind.is_training(): - pytest.skip("concatenate backward not implemented in op/eval/pybuda/eltwise_nary.py") + pytest.skip("concatenate backward not implemented in op/eval/forge/eltwise_nary.py") _get_global_compiler_config().compile_depth = CompileDepth.BUDA_GRAPH_PRE_PLACER @@ -345,7 +345,7 @@ def forward(self, x): tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "deepctr_conv_layer", inp, @@ -358,7 +358,7 @@ def forward(self, x): intermediates=True, ), ) - evaluate_framework_vs_pybuda(model, ret, inp) + evaluate_framework_vs_forge(model, ret, inp) def test_bi_interaction_pooling(test_kind, test_device): diff --git a/pybuda/test/tvm/recommendation/pytorch/test_xdeepfm.py b/forge/test/tvm/recommendation/pytorch/test_xdeepfm.py similarity index 95% rename from pybuda/test/tvm/recommendation/pytorch/test_xdeepfm.py rename to 
forge/test/tvm/recommendation/pytorch/test_xdeepfm.py index 6545ecf1b..86aff64f7 100644 --- a/pybuda/test/tvm/recommendation/pytorch/test_xdeepfm.py +++ b/forge/test/tvm/recommendation/pytorch/test_xdeepfm.py @@ -1,19 +1,19 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda.verify.backend import verify_module +from forge.verify.backend import verify_module import torch -from pybuda import ( +from forge import ( PyTorchModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.config import CompileDepth -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import CompileDepth +from test.tvm.utils import evaluate_framework_vs_forge import pytest from deepctr_torch.models.xdeepfm import xDeepFM @@ -28,7 +28,7 @@ import os -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config key2index = {} diff --git a/pybuda/test/tvm/sanity/__init__.py b/forge/test/tvm/sanity/__init__.py similarity index 100% rename from pybuda/test/tvm/sanity/__init__.py rename to forge/test/tvm/sanity/__init__.py diff --git a/pybuda/test/tvm/sanity/tests_A/__init__.py b/forge/test/tvm/sanity/tests_A/__init__.py similarity index 100% rename from pybuda/test/tvm/sanity/tests_A/__init__.py rename to forge/test/tvm/sanity/tests_A/__init__.py diff --git a/pybuda/test/tvm/sanity/tests_A/test_sanity_passthrough.py b/forge/test/tvm/sanity/tests_A/test_sanity_passthrough.py similarity index 83% rename from pybuda/test/tvm/sanity/tests_A/test_sanity_passthrough.py rename to forge/test/tvm/sanity/tests_A/test_sanity_passthrough.py index 9244461e9..4416333ff 100644 --- a/pybuda/test/tvm/sanity/tests_A/test_sanity_passthrough.py +++ b/forge/test/tvm/sanity/tests_A/test_sanity_passthrough.py @@ -13,8 +13,8 @@ from transformers import BertModel, BertConfig, BertForPreTraining, TFBertMainLayer, TFBertForQuestionAnswering -import pybuda -from pybuda import ( +import forge +from forge import ( PyTorchModule, TFModule, TTDevice, @@ -22,14 +22,14 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.tensor import to_pt_tensors -from pybuda.op.eval import compare_tensor_to_golden -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.tensor import to_pt_tensors +from forge.op.eval import compare_tensor_to_golden +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.backend.models.test_bert import get_relaxed_atol_pcc diff --git a/pybuda/test/tvm/sanity/tests_A/test_sanity_pytorch.py b/forge/test/tvm/sanity/tests_A/test_sanity_pytorch.py similarity index 94% rename from pybuda/test/tvm/sanity/tests_A/test_sanity_pytorch.py rename to forge/test/tvm/sanity/tests_A/test_sanity_pytorch.py index b1cae4108..cfec37be9 100644 --- a/pybuda/test/tvm/sanity/tests_A/test_sanity_pytorch.py +++ b/forge/test/tvm/sanity/tests_A/test_sanity_pytorch.py @@ -6,21 +6,21 @@ # import os from typing import OrderedDict -from pybuda.tvm_to_python import compile_tvm_to_python -from pybuda.verify.config import TestKind +from forge.tvm_to_python import compile_tvm_to_python +from 
forge.verify.config import TestKind import pytest from sqlalchemy import true from test.tvm.python.test_sanity import test_linear -import pybuda +import forge import torch import torch.nn as nn import torch.nn.functional as F import numpy as np -import pybuda -from pybuda import ( - PyBudaModule, +import forge +from forge import ( + ForgeModule, Tensor, PyTorchModule, TFModule, @@ -29,13 +29,13 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, CompileDepth, ) -from pybuda.config import _get_global_compiler_config -from pybuda.verify.backend import verify_module -from pybuda.op.eval import compare_tensor_to_golden -from test.tvm.utils import evaluate_framework_vs_pybuda +from forge.config import _get_global_compiler_config +from forge.verify.backend import verify_module +from forge.op.eval import compare_tensor_to_golden +from test.tvm.utils import evaluate_framework_vs_forge input_shapes = [(1, 1, 8, 64)] linear_features_in = [64] @@ -57,7 +57,7 @@ def test_tvm_linear(test_kind, test_device, input_shape, lin_in, lin_out): if test_kind.is_training(): pytest.skip() import os - os.environ["PYBUDA_ENABLE_TINY_TILE"] = "1" + os.environ["FORGE_ENABLE_TINY_TILE"] = "1" _get_global_compiler_config().compile_depth = CompileDepth.GENERATE_NETLIST class DoubleLinear(nn.Module): def __init__(self): @@ -290,9 +290,9 @@ def forward(self, x1, x2): tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(mod) - #Fusing disabled due to tenstorrent/pybuda#789 + #Fusing disabled due to tenstorrent/forge#789 acts = [torch.rand(*input_shape_first), torch.rand(*input_shape_second)] - ret = pybuda_compile( + ret = forge_compile( tt0, "Einsum", *acts, @@ -396,7 +396,7 @@ def forward(self, a): act1 = torch.rand(*input_shape) - ret = pybuda_compile( + ret = forge_compile( tt0, "conv", act1, @@ -558,15 +558,15 @@ def test_tvm_upsample2d_channel_last(test_kind, test_device, input_shapes, scale if align_corners and upsample_mode != "bilinear": pytest.skip() - class Upsample2d(PyBudaModule): + class Upsample2d(ForgeModule): def __init__(self, name): super().__init__(name) def forward(self, input): if upsample_mode == "nearest_neighbor": - return pybuda.op.Resize2d("", input, sizes=scale_factors, method=upsample_mode, channel_last=1) + return forge.op.Resize2d("", input, sizes=scale_factors, method=upsample_mode, channel_last=1) else: - return pybuda.op.Resize2d("", input, sizes=scale_factors, method=upsample_mode, align_corners=align_corners, channel_last=1) + return forge.op.Resize2d("", input, sizes=scale_factors, method=upsample_mode, align_corners=align_corners, channel_last=1) model = Upsample2d("channel_last") @@ -750,7 +750,7 @@ def forward(self, x): act1 = torch.rand(*input_shape) - ret = pybuda_compile( + ret = forge_compile( tt0, "conv2d_transpose", act1, @@ -813,7 +813,7 @@ def forward(self, a): hidden_states = torch.rand((1, input_channel, input_shape, input_shape)) - res = pybuda_compile( + res = forge_compile( tt0, "adaptive_average_pool", hidden_states, @@ -872,7 +872,7 @@ def forward(self, a): hidden_states = torch.rand((1, input_channel, input_shape, input_shape)) - res = pybuda_compile( + res = forge_compile( tt0, "adaptive_max_pool", hidden_states, @@ -926,7 +926,7 @@ def forward(self, x): torch.rand(*input_shape) ] - pybuda_model_results = pybuda_compile( + forge_model_results = forge_compile( tt0, "Conv1D", *inputs, @@ -939,7 +939,7 @@ def forward(self, x): ), ) - evaluate_framework_vs_pybuda(framework_model, pybuda_model_results, 
*inputs) + evaluate_framework_vs_forge(framework_model, forge_model_results, *inputs) def test_tvm_clip(test_kind, test_device): @@ -995,7 +995,7 @@ def forward(self, a): hidden_states = torch.rand((1, 32, 12, 12)) - res = pybuda_compile( + res = forge_compile( tt0, "avgpool2d", hidden_states, @@ -1038,7 +1038,7 @@ def forward(self, a): hidden_states = torch.rand((1, 32, 12, 12)) - res = pybuda_compile( + res = forge_compile( tt0, "MaxPool2d", hidden_states, @@ -1458,7 +1458,7 @@ def forward(self, reference_tensor): reference_input = (1, 1, 3, 3) reference_input = torch.rand(reference_input) - pybuda_model_results = pybuda_compile( + forge_model_results = forge_compile( tt0, "NewEmpty", reference_input, @@ -1605,7 +1605,7 @@ def forward(self, reference_tensor): reference_input_shape = (1, 1, 3, 3) reference_input = torch.rand(reference_input_shape) - pybuda_model_results = pybuda_compile( + forge_model_results = forge_compile( tt0, "Nonzero", reference_input, @@ -1618,7 +1618,7 @@ def forward(self, reference_tensor): ), ) - evaluate_framework_vs_pybuda(framework_model, pybuda_model_results, reference_input) + evaluate_framework_vs_forge(framework_model, forge_model_results, reference_input) @pytest.mark.parametrize("dim", [0, 1]) @@ -1686,7 +1686,7 @@ def forward(self, x): input ] - pybuda_model_results = pybuda_compile( + forge_model_results = forge_compile( tt0, "ModelDtype", *inputs, @@ -1699,7 +1699,7 @@ def forward(self, x): ), ) - evaluate_framework_vs_pybuda(framework_model, pybuda_model_results, *inputs) + evaluate_framework_vs_forge(framework_model, forge_model_results, *inputs) @pytest.mark.parametrize("axis", [0, 1, 2]) @@ -1897,21 +1897,21 @@ def __init__(self): def forward(self, x1): return self.l2(x1) -class TTLinear1(PyBudaModule): +class TTLinear1(ForgeModule): def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(64, 128, requires_grad=True) + self.weights1 = forge.Parameter(64, 128, requires_grad=True) def forward(self, act1): - return pybuda.op.Matmul("", act1, self.weights1) + return forge.op.Matmul("", act1, self.weights1) -class TTLinear2(PyBudaModule): +class TTLinear2(ForgeModule): def __init__(self, name): super().__init__(name) - self.weights2 = pybuda.Parameter(128, 64, requires_grad=True) + self.weights2 = forge.Parameter(128, 64, requires_grad=True) def forward(self, act1): - return pybuda.op.Matmul("", act1, self.weights2) + return forge.op.Matmul("", act1, self.weights2) @pytest.mark.parametrize("first_module_pt", (True, False), ) @pytest.mark.parametrize("second_module_pt", (True, False), ) @@ -1939,7 +1939,7 @@ def test_multiple_modules_on_device(test_kind, test_device, first_module_pt, sec tt0.place_module(mod_2) tt0.push_to_inputs(inputs) - output_q = pybuda.run_inference() + output_q = forge.run_inference() data = output_q.get(timeout = 0.5) assert data[0].shape.get_pytorch_shape() == (1, 64, 64) @@ -2020,7 +2020,7 @@ def forward(self, x, y): x, y = torch.randn(shape_x), torch.randn(shape_y) try: - pybuda_compile( + forge_compile( tt0, "tensordot", x,y, @@ -2032,7 +2032,7 @@ def forward(self, x, y): intermediates=True, ), ) - except pybuda._C.UnsupportedHWOpsError: + except forge._C.UnsupportedHWOpsError: pass @@ -2361,9 +2361,9 @@ def forward(self, x, mask): def test_tvm_torch_flip(test_kind, test_device, input_shape, dim): if dim >= len(input_shape[1:]) or (dim < 0 and abs(dim) > len(input_shape[1:])): pytest.skip() - # Set PyBuda configurations - compiler_cfg = pybuda.config._get_global_compiler_config() - 
compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + # Set Forge configurations + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge.DataFormat.Float16_b compiler_cfg.compile_depth = CompileDepth.CONSTEVAL_GRAPH class Flip(torch.nn.Module): def __init__(self,dim,feature_size): @@ -2377,7 +2377,7 @@ def forward(self,input): return output model = Flip(dim=dim,feature_size=input_shape[-1]) model.eval() - tt_model = pybuda.PyTorchModule("flip_tvm_decompose_adv_index", model) + tt_model = forge.PyTorchModule("flip_tvm_decompose_adv_index", model) verify_module( tt_model, (input_shape,), @@ -2571,12 +2571,12 @@ def forward(self, x): framework_module = Module() framework_module.eval() - pybuda_module = PyTorchModule("pt_layermorm_cpu", framework_module) + forge_module = PyTorchModule("pt_layermorm_cpu", framework_module) input_shape = (1, 9, 9) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -2604,12 +2604,12 @@ def forward(self, x): framework_module = Module() framework_module.eval() - pybuda_module = PyTorchModule("pt_dropout_cpu", framework_module) + forge_module = PyTorchModule("pt_dropout_cpu", framework_module) input_shape = (1, 9, 9) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -2646,7 +2646,7 @@ def forward(self, x): return x framework_module = Module() - pybuda_module = PyTorchModule("pt_adv_index_bool_cpu_0", framework_module) + forge_module = PyTorchModule("pt_adv_index_bool_cpu_0", framework_module) input_shape = (1, 9, 9) @@ -2655,7 +2655,7 @@ def forward(self, x): # out = framework_module(input_x) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -2692,7 +2692,7 @@ def forward(self, x): return x framework_module = Module() - pybuda_module = PyTorchModule("pt_adv_index_bool_cpu_1", framework_module) + forge_module = PyTorchModule("pt_adv_index_bool_cpu_1", framework_module) input_shape = (1, 9, 9) @@ -2701,7 +2701,7 @@ def forward(self, x): # out = framework_module(input_x) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -2742,7 +2742,7 @@ def forward(self, x): return x framework_module = Module() - pybuda_module = PyTorchModule("pt_adv_index_bool_cpu_2", framework_module) + forge_module = PyTorchModule("pt_adv_index_bool_cpu_2", framework_module) input_shape = (1, 9, 9) @@ -2751,7 +2751,7 @@ def forward(self, x): # out = framework_module(input_x) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -2765,9 +2765,9 @@ def forward(self, x): @pytest.mark.parametrize("input_shape", ((1, 1, 256, 256), (1, 256, 256))) def test_tvm_simplifyreshape(test_device, input_shape): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b class Model(nn.Module): def __init__(self, new_shape_1, new_shape_2): @@ -2837,7 +2837,7 @@ def forward(self, hidden_states, past_key): framework_module = Module() - pybuda_module = PyTorchModule("pt_hslice_a", framework_module) + forge_module = PyTorchModule("pt_hslice_a", framework_module) 
hidden_states_shape = (1, 1, 2048) past_key_shape = (1, 128, 2047) @@ -2848,7 +2848,7 @@ def forward(self, hidden_states, past_key): # out = framework_module(hidden_states, past_key) verify_module( - pybuda_module, + forge_module, (hidden_states_shape, past_key_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -2904,7 +2904,7 @@ def forward(self, hidden_states, past_key): return attn_weights.unsqueeze(0) framework_module = Module() - pybuda_module = PyTorchModule("pt_hslice_c", framework_module) + forge_module = PyTorchModule("pt_hslice_c", framework_module) hidden_states_shape = (1, 1, 2048) past_key_shape = (1, 128, 2047) @@ -2915,7 +2915,7 @@ def forward(self, hidden_states, past_key): # out = framework_module(hidden_states, past_key) verify_module( - pybuda_module, + forge_module, (hidden_states_shape, past_key_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -2961,7 +2961,7 @@ def forward(self, hidden_states, past_key): return attn_weights framework_module = Module() - pybuda_module = PyTorchModule("pt_hslice_d", framework_module) + forge_module = PyTorchModule("pt_hslice_d", framework_module) hidden_states_shape = (1, 1, 2048) past_key_shape = (1, 128, 2047) @@ -2972,7 +2972,7 @@ def forward(self, hidden_states, past_key): # out = framework_module(hidden_states, past_key) verify_module( - pybuda_module, + forge_module, (hidden_states_shape, past_key_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3005,7 +3005,7 @@ def forward(self, act): return out framework_module = Module() - pybuda_module = PyTorchModule("pt_splice_with_scalar_concat", framework_module) + forge_module = PyTorchModule("pt_splice_with_scalar_concat", framework_module) act_shape = (1, 3, 9) @@ -3014,7 +3014,7 @@ def forward(self, act): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3048,7 +3048,7 @@ def forward(self, act): return out framework_module = Module() - pybuda_module = PyTorchModule("pt_concat_decomp_smm", framework_module) + forge_module = PyTorchModule("pt_concat_decomp_smm", framework_module) act_shape = (1, 3, 9, 9) @@ -3057,7 +3057,7 @@ def forward(self, act): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3092,13 +3092,13 @@ def forward(self, act): act_shape = (1, 3, 32, 32) framework_module = Module() - pybuda_module = PyTorchModule("pt_yz_transpose", framework_module) + forge_module = PyTorchModule("pt_yz_transpose", framework_module) act = torch.randn(act_shape) out = framework_module(act) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3132,7 +3132,7 @@ def forward(self, input_ids): return out framework_module = Module() - pybuda_module = PyTorchModule("pt_embedding", framework_module) + forge_module = PyTorchModule("pt_embedding", framework_module) input_ids_shape = (1, 1, 1, 3) @@ -3141,7 +3141,7 @@ def forward(self, input_ids): out = framework_module(input_ids) verify_module( - pybuda_module, + forge_module, (input_ids_shape,), inputs=[(input_ids,)], verify_cfg=VerifyConfig( @@ -3167,7 +3167,7 @@ def forward(self, act): return act framework_module = Module() - pybuda_module = PyTorchModule("pt_splice_with_tm_vslice_0", framework_module) + forge_module = PyTorchModule("pt_splice_with_tm_vslice_0", framework_module) act_shape = (1, 1, 4096, 1) @@ -3176,7 +3176,7 @@ def forward(self, act): # out = 
framework_module(act) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3204,7 +3204,7 @@ def forward(self, act): return q, k, v framework_module = Module() - pybuda_module = PyTorchModule("pt_splice_with_tm_vslice_1", framework_module) + forge_module = PyTorchModule("pt_splice_with_tm_vslice_1", framework_module) act_shape = (1, 1536, 1024) @@ -3213,7 +3213,7 @@ def forward(self, act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3239,7 +3239,7 @@ def forward(self, act): return act framework_module = Module() - pybuda_module = PyTorchModule("pt_splice_with_tm_hslice", framework_module) + forge_module = PyTorchModule("pt_splice_with_tm_hslice", framework_module) act_shape = (1, 1, 1, 4096) @@ -3248,7 +3248,7 @@ def forward(self, act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3273,7 +3273,7 @@ def forward(self, act): return act framework_module = Module() - pybuda_module = PyTorchModule("pt_splice_with_tm_vstack", framework_module) + forge_module = PyTorchModule("pt_splice_with_tm_vstack", framework_module) act_shape = (1, 1, 64, 64) @@ -3282,7 +3282,7 @@ def forward(self, act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3308,7 +3308,7 @@ def forward(self, act): return act framework_module = Module() - pybuda_module = PyTorchModule("pt_splice_with_tm_hstack", framework_module) + forge_module = PyTorchModule("pt_splice_with_tm_hstack", framework_module) act_shape = (1, 64, 1, 64) @@ -3317,7 +3317,7 @@ def forward(self, act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3328,16 +3328,16 @@ def forward(self, act): def test_kernel_fracturing_with_grouped_conv(test_kind, test_device): - pytest.skip("tenstorrent/pybuda#455") + pytest.skip("tenstorrent/forge#455") if test_kind.is_training(): pytest.skip() import os if test_device.is_wormhole_b0(): - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "60000" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "60000" - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" compiler_cfg.conv_multi_op_fracture_factor_override["conv2d_1"] = 2 @@ -3358,7 +3358,7 @@ def forward(self, act): return act framework_module = Module() - pybuda_module = PyTorchModule("pt_kernel_fracturing_with_grouped_conv", framework_module) + forge_module = PyTorchModule("pt_kernel_fracturing_with_grouped_conv", framework_module) act_shape = (1, 3, 9, 9) @@ -3367,7 +3367,7 @@ def forward(self, act): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3379,18 +3379,18 @@ def forward(self, act): def test_BN_no_stats(test_kind, test_device): - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: pytest.skip("Skip this test for golden Wormhole B0") - if test_device.arch == pybuda.BackendDevice.Grayskull: + if test_device.arch == forge.BackendDevice.Grayskull: pytest.skip("Wait until #1006 is resolved") if test_kind.is_training(): pytest.skip() - compiler_cfg = 
pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "CNN" - #Fusing disabled due to tenstorrent/pybuda#789 + #Fusing disabled due to tenstorrent/forge#789 compiler_cfg.enable_auto_fusing=False class ModelBN(nn.Module): def __init__(self): @@ -3421,7 +3421,7 @@ def forward(self, x): return x framework_module = ModelBN() - pybuda_module = PyTorchModule("pt_BN_no_stats", framework_module) + forge_module = PyTorchModule("pt_BN_no_stats", framework_module) act_shape = (1, 3, 32, 32) @@ -3431,7 +3431,7 @@ def forward(self, x): try: verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3442,7 +3442,7 @@ def forward(self, x): ) finally: if test_device.is_wormhole_b0(): - del os.environ["PYBUDA_EXTRA_L1_MARGIN"] + del os.environ["FORGE_EXTRA_L1_MARGIN"] def test_prelu_pytorch(test_kind, test_device): @@ -3463,7 +3463,7 @@ def forward(self, act): return out framework_module = Module() - pybuda_module = PyTorchModule("pt_prelu", framework_module) + forge_module = PyTorchModule("pt_prelu", framework_module) act_shape = (1, 1, 32, 32) @@ -3471,7 +3471,7 @@ def forward(self, act): act = torch.randn(act_shape) verify_module( - pybuda_module, + forge_module, (act_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3572,25 +3572,25 @@ def forward(self, x): act_shape_A = (1, 1, 32, 32) act_shape_B = (1, 3, 9, 9) - output_q = pybuda.initialize_pipeline( + output_q = forge.initialize_pipeline( training=False, sample_inputs=((torch.rand(act_shape_A),), (torch.rand(act_shape_B),))) tt0.set_active_subgraph(0) tt0.push_to_inputs((torch.rand(act_shape_A), )) - pybuda.run_forward() + forge.run_forward() tt0.set_active_subgraph(1) tt0.push_to_inputs((torch.rand(act_shape_B), )) - pybuda.run_forward() + forge.run_forward() def test_override_removal_flag(test_kind, test_device): - from pybuda.config import _set_pybuda_override_veto + from forge.config import _set_forge_override_veto # Setup override veto - os.environ["PYBUDA_OVERRIDES_VETO"] = "1" - _set_pybuda_override_veto({ + os.environ["FORGE_OVERRIDES_VETO"] = "1" + _set_forge_override_veto({ # Level 0 overrides "backend_device_descriptor_path": "", @@ -3600,9 +3600,9 @@ def test_override_removal_flag(test_kind, test_device): }, { # Level 2 overrides - "PYBUDA_RIBBON2": "", - "PYBUDA_DISABLE_STREAM_OUTPUT": "", - "PYBUDA_PAD_OUTPUT_BUFFER": "", + "FORGE_RIBBON2": "", + "FORGE_DISABLE_STREAM_OUTPUT": "", + "FORGE_PAD_OUTPUT_BUFFER": "", }) # Only a single run is needed @@ -3625,15 +3625,15 @@ def forward(self, act): start_compiler_cfg.amp_level = 1 # Environment variable compiler configuration overrides - os.environ["PYBUDA_EXTRA_L1_MARGIN"] = "60000" - os.environ["PYBUDA_RIBBON2"] = "1" + os.environ["FORGE_EXTRA_L1_MARGIN"] = "60000" + os.environ["FORGE_RIBBON2"] = "1" - # Load PyBuda module - pybuda_module = PyTorchModule("pt_override_removal", Module()) + # Load Forge module + forge_module = PyTorchModule("pt_override_removal", Module()) env_vars_before_compile = os.environ verify_module( - pybuda_module, + forge_module, ((1, 1, 32, 32),), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3644,7 +3644,7 @@ def forward(self, act): env_vars_after_compile = os.environ # Difference between general compiler configurations - del os.environ["PYBUDA_OVERRIDES_VETO"] + del os.environ["FORGE_OVERRIDES_VETO"] end_compiler_cfg = _get_global_compiler_config() compiler_cfg_attrs = [a for a in dir(end_compiler_cfg) if not
a.startswith('__') and not callable(getattr(end_compiler_cfg, a))] diff = {} @@ -3666,7 +3666,7 @@ def forward(self, act): def test_torch_conv3d(test_device): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" inC, inD, inH, inW = (2, 5, 5, 5) @@ -3681,12 +3681,12 @@ def forward(self, act): act = self.conv(act) return act - pybuda_module = PyTorchModule("pt_conv3d", Module()) + forge_module = PyTorchModule("pt_conv3d", Module()) input_shape = (1, inC, inD, inH, inW) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3699,7 +3699,7 @@ def forward(self, act): def test_torch_maxpool3d(test_device): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" inC, inD, inH, inW = (3, 8, 8, 8) @@ -3715,12 +3715,12 @@ def forward(self, act): act = self.maxpool(act) return act - pybuda_module = PyTorchModule("pt_maxpool3d", Module()) + forge_module = PyTorchModule("pt_maxpool3d", Module()) input_shape = (1, inC, inD, inH, inW) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3732,7 +3732,7 @@ def forward(self, act): ) def test_reflection_pad(test_device): - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.balancer_policy = "Ribbon" class Module(nn.Module): @@ -3744,11 +3744,11 @@ def forward(self, act): act = self.reflection_pad(act) return act - pybuda_module = PyTorchModule("reflection_pad", Module()) + forge_module = PyTorchModule("reflection_pad", Module()) verify_module( - pybuda_module, + forge_module, ((1, 1, 3, 3),), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -3774,7 +3774,7 @@ def test_tvm_scaled_dot_product_attention(test_device, mha, prefill): attn_mask_shape = (b, 1, sq, s) - pybuda.set_configuration_options(enable_auto_fusing=False) + forge.set_configuration_options(enable_auto_fusing=False) class SDPA(torch.nn.Module): def __init__(self): diff --git a/pybuda/test/tvm/sanity/tests_A/test_tvm.py b/forge/test/tvm/sanity/tests_A/test_tvm.py similarity index 92% rename from pybuda/test/tvm/sanity/tests_A/test_tvm.py rename to forge/test/tvm/sanity/tests_A/test_tvm.py index 090e215cc..874fd35f9 100644 --- a/pybuda/test/tvm/sanity/tests_A/test_tvm.py +++ b/forge/test/tvm/sanity/tests_A/test_tvm.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import os import pytest -import pybuda +import forge import onnx import torch @@ -11,15 +11,15 @@ from tvm.relay.op.contrib import match_einsum_pattern -from pybuda import ( +from forge import ( PyTorchModule, VerifyConfig, OnnxModule, ) -from pybuda.verify.config import TestKind -from pybuda.verify.backend import verify_module -from pybuda.config import CompileDepth, _get_global_compiler_config -import pybuda +from forge.verify.config import TestKind +from forge.verify.backend import verify_module +from forge.config import CompileDepth, _get_global_compiler_config +import forge tensor_dims = [2, 3, 4, 5] @@ -169,7 +169,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("cast") framework_module = Module() - pybuda_module = PyTorchModule("pt_all_and_cast_fallback", framework_module) + forge_module = PyTorchModule("pt_all_and_cast_fallback", framework_module) input_shape = (1, 3, 5) @@ -178,7 +178,7 @@ def 
forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -211,7 +211,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("broadcast_to") framework_module = Module() - pybuda_module = PyTorchModule("pt_broadcast_fallback", framework_module) + forge_module = PyTorchModule("pt_broadcast_fallback", framework_module) input_shape = (1, 1, 5) @@ -220,7 +220,7 @@ def forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -250,7 +250,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("reshape") framework_module = Module() - pybuda_module = PyTorchModule("pt_reshape_fallback", framework_module) + forge_module = PyTorchModule("pt_reshape_fallback", framework_module) input_shape = (1, 1, 3, 6) @@ -259,7 +259,7 @@ def forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -288,7 +288,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("cumsum") framework_module = Module() - pybuda_module = PyTorchModule("pt_cumsum_fallback", framework_module) + forge_module = PyTorchModule("pt_cumsum_fallback", framework_module) input_shape = (1, 1, 3, 6) @@ -297,7 +297,7 @@ def forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -326,7 +326,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("nn.log_softmax") framework_module = Module() - pybuda_module = PyTorchModule("pt_log_softmax_fallback", framework_module) + forge_module = PyTorchModule("pt_log_softmax_fallback", framework_module) input_shape = (1, 1, 3, 6) @@ -335,7 +335,7 @@ def forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -364,7 +364,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("nn.softmax") framework_module = Module() - pybuda_module = PyTorchModule("pt_softmax_fallback", framework_module) + forge_module = PyTorchModule("pt_softmax_fallback", framework_module) input_shape = (1, 1, 3, 6) @@ -373,7 +373,7 @@ def forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -407,7 +407,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("transpose") framework_module = Module() - pybuda_module = PyTorchModule("pt_transpose_fallback", framework_module) + forge_module = PyTorchModule("pt_transpose_fallback", framework_module) input_shape = (1, 3, 6) @@ -416,7 +416,7 @@ def forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -451,7 +451,7 @@ def forward(self, x): return x framework_module = Module() - pybuda_module = PyTorchModule("pt_scatter_add_fallback", framework_module) + forge_module = PyTorchModule("pt_scatter_add_fallback", framework_module) input_shape = (1, 3, 5) @@ -460,7 +460,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -493,7 +493,7 @@ def forward(self, x): compiler_cfg = _get_global_compiler_config() framework_module = 
Module() - pybuda_module = PyTorchModule("pt_scatter_add_fallback_inplace", framework_module) + forge_module = PyTorchModule("pt_scatter_add_fallback_inplace", framework_module) input_shape = (1, 3, 5) @@ -502,7 +502,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -535,7 +535,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("max") framework_module = Module() - pybuda_module = PyTorchModule("pt_max_fallback", framework_module) + forge_module = PyTorchModule("pt_max_fallback", framework_module) input_shape = (1, 3, 5) @@ -544,7 +544,7 @@ def forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -556,7 +556,7 @@ def forward(self, x): def test_tvm_cat_fallback(test_kind, test_device): pytest.skip() - #TODO: Fix tvm.14 regressions: tenstorrent/pybuda#2099 + #TODO: Fix tvm.14 regressions: tenstorrent/forge#2099 # Only run recompute test in post-commit if test_kind == TestKind.TRAINING: pytest.skip() @@ -575,7 +575,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("concatenate") framework_module = Module() - pybuda_module = PyTorchModule("pt_cat_fallback", framework_module) + forge_module = PyTorchModule("pt_cat_fallback", framework_module) input_shape = (1, 3, 6) @@ -584,7 +584,7 @@ def forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -616,7 +616,7 @@ def forward(self, x): compiler_cfg.cpu_fallback_ops.add("argmax") framework_module = Module() - pybuda_module = PyTorchModule("pt_argmax_fallback", framework_module) + forge_module = PyTorchModule("pt_argmax_fallback", framework_module) input_shape = (1, 3, 6) @@ -625,7 +625,7 @@ def forward(self, x): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -678,7 +678,7 @@ def forward(self, x): compiler_cfg.retain_tvm_python_files = True framework_module = Module() - pybuda_module = PyTorchModule("pt_reshape_transpose_into_hslice", framework_module) + forge_module = PyTorchModule("pt_reshape_transpose_into_hslice", framework_module) input_shape = (1, 2048, 512) @@ -687,7 +687,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -739,7 +739,7 @@ def forward(self, x): compiler_cfg.retain_tvm_python_files = True framework_module = Module() - pybuda_module = PyTorchModule("pt_transpose_reshape_into_hstack", framework_module) + forge_module = PyTorchModule("pt_transpose_reshape_into_hstack", framework_module) input_shape = (1, 4, 2048, 64) @@ -748,7 +748,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -763,8 +763,8 @@ def test_reshape_into_vslice(test_kind, test_device): if test_kind.is_training(): pytest.skip() - #Fusing disabled due to tenstorrent/pybuda#784 - pybuda.set_configuration_options(enable_auto_fusing=False) + #Fusing disabled due to tenstorrent/forge#784 + forge.set_configuration_options(enable_auto_fusing=False) class Module(torch.nn.Module): def __init__(self): @@ -804,7 +804,7 @@ def forward(self, x): compiler_cfg.retain_tvm_python_files = True 
framework_module = Module() - pybuda_module = PyTorchModule("pt_reshape_into_vslice", framework_module) + forge_module = PyTorchModule("pt_reshape_into_vslice", framework_module) input_shape = (1, 4, 1024, 64) @@ -813,7 +813,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -864,7 +864,7 @@ def forward(self, x): compiler_cfg.retain_tvm_python_files = True framework_module = Module() - pybuda_module = PyTorchModule("pt_reshape_into_vslice", framework_module) + forge_module = PyTorchModule("pt_reshape_into_vslice", framework_module) input_shape = (1, 1024, 4, 64) @@ -873,7 +873,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -913,7 +913,7 @@ def forward(self, x): compiler_cfg.enable_tm_cpu_fallback = True framework_module = Module() - pybuda_module = PyTorchModule("pt_cpu_fallback_when_more_performant", framework_module) + forge_module = PyTorchModule("pt_cpu_fallback_when_more_performant", framework_module) input_shape = (1, 3, 9, 9) @@ -922,7 +922,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -963,7 +963,7 @@ def forward(self, x): compiler_cfg.enable_tm_cpu_fallback = True framework_module = Module() - pybuda_module = PyTorchModule("pt_test_extended_tm_cpu_fallback_concat_variation", framework_module) + forge_module = PyTorchModule("pt_test_extended_tm_cpu_fallback_concat_variation", framework_module) input_shape = (1, 3, 9, 9) @@ -972,7 +972,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1016,7 +1016,7 @@ def forward(self, x): compiler_cfg.enable_tm_cpu_fallback = True framework_module = Module() - pybuda_module = PyTorchModule("pt_test_extended_tm_cpu_fallback_hslice_variation", framework_module) + forge_module = PyTorchModule("pt_test_extended_tm_cpu_fallback_hslice_variation", framework_module) input_shape = (1, 3, 14, 14) @@ -1025,7 +1025,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1062,7 +1062,7 @@ def forward(self, x): compiler_cfg.enable_tm_cpu_fallback = True framework_module = Module() - pybuda_module = PyTorchModule("pt_test_extended_tm_cpu_fallback_matmul_variation", framework_module) + forge_module = PyTorchModule("pt_test_extended_tm_cpu_fallback_matmul_variation", framework_module) input_shape = (1, 3, 14, 14) @@ -1071,7 +1071,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1098,12 +1098,12 @@ def forward(self, x): compiler_cfg = _get_global_compiler_config() framework_module = Module() - pybuda_module = PyTorchModule("pt_batch_matmul_with_1d_op1", framework_module) + forge_module = PyTorchModule("pt_batch_matmul_with_1d_op1", framework_module) input_shape = (1, 64, 32, 1024) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1129,11 +1129,11 @@ def forward(self, x): compiler_cfg = _get_global_compiler_config() framework_module = Module() - pybuda_module = 
PyTorchModule("pt_batch_matmul_with_1d_op0", framework_module) + forge_module = PyTorchModule("pt_batch_matmul_with_1d_op0", framework_module) input_shape = (1, 32, 1024, 64) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1143,7 +1143,7 @@ def forward(self, x): ) # Replicating following issue -# tenstorrent/pybuda#365 +# tenstorrent/forge#365 def test_group_conv2d(test_kind, test_device): pytest.skip() # Skipping for now until fix is in place if test_kind == TestKind.TRAINING: @@ -1213,7 +1213,7 @@ def forward(self, x): compiler_cfg.compile_depth = CompileDepth.BUDA_GRAPH_PRE_PLACER framework_module = Module() - pybuda_module = PyTorchModule("pt_invalid_reshape_transpose_into_hslice", framework_module) + forge_module = PyTorchModule("pt_invalid_reshape_transpose_into_hslice", framework_module) input_shape = (1, 5, 4, 256) @@ -1222,7 +1222,7 @@ def forward(self, x): out = framework_module(act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1245,7 +1245,7 @@ def forward(self, act): return b framework_module = Transformer() - pybuda_module = PyTorchModule("pt_neg_inf_const_onnx", framework_module) + forge_module = PyTorchModule("pt_neg_inf_const_onnx", framework_module) # Input shapes inp_shape = (1, 128, 768) @@ -1255,7 +1255,7 @@ def forward(self, act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (inp_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1342,7 +1342,7 @@ def forward(self, act): return torch.max(act, torch.tensor(torch.finfo(act.dtype).min)) framework_module = Model() - pybuda_module = PyTorchModule("pt_maximum_bwd", framework_module) + forge_module = PyTorchModule("pt_maximum_bwd", framework_module) # Input shapes inp_shape = (1, 8, 128, 256) @@ -1352,7 +1352,7 @@ def forward(self, act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (inp_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1375,7 +1375,7 @@ def forward(self, act): return torch.min(act, torch.tensor(torch.finfo(act.dtype).max)) framework_module = Model() - pybuda_module = PyTorchModule("pt_minimum_bwd", framework_module) + forge_module = PyTorchModule("pt_minimum_bwd", framework_module) # Input shapes inp_shape = (1, 8, 128, 256) @@ -1385,7 +1385,7 @@ def forward(self, act): # out = framework_module(act) verify_module( - pybuda_module, + forge_module, (inp_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1433,7 +1433,7 @@ def forward(self, input_act, threshold): return input_act_new framework_module = Module() - pybuda_module = PyTorchModule("pt_tvm_scatter_nd", framework_module) + forge_module = PyTorchModule("pt_tvm_scatter_nd", framework_module) input_shape = (1, 18) @@ -1444,7 +1444,7 @@ def forward(self, input_act, threshold): # out = framework_module(input_act, threshold) verify_module( - pybuda_module, + forge_module, (input_shape,input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1458,7 +1458,7 @@ def forward(self, input_act, threshold): def test_tvm_invalid_dtype(test_kind, test_device): pytest.skip() - #TODO: Fix tvm.14 regressions: tenstorrent/pybuda#2099 + #TODO: Fix tvm.14 regressions: tenstorrent/forge#2099 if test_kind.is_training(): pytest.skip() @@ -1490,7 +1490,7 @@ def forward(self, input_act): return input_act framework_module = Module() - pybuda_module = PyTorchModule("pt_tvm_invalid_dtype", framework_module) + forge_module = 
PyTorchModule("pt_tvm_invalid_dtype", framework_module) input_shape = (1, 18) @@ -1499,7 +1499,7 @@ def forward(self, input_act): out = framework_module(input_act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -1525,7 +1525,7 @@ def forward(self, input_act): return input_act framework_module = Module() - pybuda_module = PyTorchModule("pt_tvm_where", framework_module) + forge_module = PyTorchModule("pt_tvm_where", framework_module) input_shape = (1, 18) @@ -1534,7 +1534,7 @@ def forward(self, input_act): # out = framework_module(input_act) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/sanity/tests_B/__init__.py b/forge/test/tvm/sanity/tests_B/__init__.py similarity index 100% rename from pybuda/test/tvm/sanity/tests_B/__init__.py rename to forge/test/tvm/sanity/tests_B/__init__.py diff --git a/pybuda/test/tvm/sanity/tests_B/test_df.py b/forge/test/tvm/sanity/tests_B/test_df.py similarity index 95% rename from pybuda/test/tvm/sanity/tests_B/test_df.py rename to forge/test/tvm/sanity/tests_B/test_df.py index 01367acbb..eb4c94d91 100644 --- a/pybuda/test/tvm/sanity/tests_B/test_df.py +++ b/forge/test/tvm/sanity/tests_B/test_df.py @@ -9,21 +9,21 @@ import torch import tensorflow as tf -import pybuda -import pybuda.op -from pybuda import ( +import forge +import forge.op +from forge import ( PyTorchModule, VerifyConfig, BackendDevice ) -from pybuda.verify import verify_module +from forge.verify import verify_module verify_cfg = VerifyConfig( run_golden=True ) # Run backend golden check on all tests in here -# PT 2.0 does not support float16 on cpu, so for now only run bfloat16, see tenstorrent/pybuda#1935 +# PT 2.0 does not support float16 on cpu, so for now only run bfloat16, see tenstorrent/forge#1935 input_formats = [torch.bfloat16] input_format_ids = ["bfloat16"] weight_formats = [torch.bfloat16] @@ -64,7 +64,7 @@ def forward(self, act1, act2): module = PyTorchModule("pytorch_data_format", framework_module) if force_matmul_spill: - pybuda.config.override_u_kt("matmul_1", 1) + forge.config.override_u_kt("matmul_1", 1) original_model_param_dtype = {} for key, val in framework_module.state_dict().items(): @@ -133,7 +133,7 @@ def call(self, act1, act2): return m1 + act2 framework_module = TFTest(weight_df) - module = pybuda.TFModule("tensorflow_test", framework_module) + module = forge.TFModule("tensorflow_test", framework_module) original_model_param_dtype = [] for val in framework_module.trainable_variables: diff --git a/pybuda/test/tvm/sanity/tests_B/test_fallback_only.py b/forge/test/tvm/sanity/tests_B/test_fallback_only.py similarity index 90% rename from pybuda/test/tvm/sanity/tests_B/test_fallback_only.py rename to forge/test/tvm/sanity/tests_B/test_fallback_only.py index 381422494..c06e7d325 100644 --- a/pybuda/test/tvm/sanity/tests_B/test_fallback_only.py +++ b/forge/test/tvm/sanity/tests_B/test_fallback_only.py @@ -14,8 +14,8 @@ from transformers import BertModel, BertConfig, BertForPreTraining, TFBertMainLayer, TFBertForQuestionAnswering -import pybuda -from pybuda import ( +import forge +from forge import ( PyTorchModule, TFModule, TTDevice, @@ -23,14 +23,14 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from pybuda.tensor import to_pt_tensors -from pybuda.op.eval import compare_tensor_to_golden -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import 
CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.tensor import to_pt_tensors +from forge.op.eval import compare_tensor_to_golden +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.backend.models.test_bert import get_relaxed_atol_pcc diff --git a/pybuda/test/tvm/sanity/tests_B/test_pattern_matcher.py b/forge/test/tvm/sanity/tests_B/test_pattern_matcher.py similarity index 82% rename from pybuda/test/tvm/sanity/tests_B/test_pattern_matcher.py rename to forge/test/tvm/sanity/tests_B/test_pattern_matcher.py index 0a987176a..66154f14d 100644 --- a/pybuda/test/tvm/sanity/tests_B/test_pattern_matcher.py +++ b/forge/test/tvm/sanity/tests_B/test_pattern_matcher.py @@ -12,20 +12,20 @@ from transformers.models.bert.configuration_bert import BertConfig from transformers.models.bert.modeling_bert import BertEncoder -import pybuda -from pybuda import ( +import forge +from forge import ( PyTorchModule, TTDevice, - PyBudaModule, + ForgeModule, BackendType, CompilerConfig, CompileDepth, VerifyConfig, Tensor, - pybuda_compile, + forge_compile, ) -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.verify import verify_module +from forge.verify.config import TestKind import torch from torch import nn @@ -55,7 +55,7 @@ def test_tvm_bert(training, recompute): tt0 = TTDevice("tt0", devtype=BackendType.Golden) tt0.place_module(mod) - ret = pybuda_compile( + ret = forge_compile( tt0, "bert_layer", hidden_states, @@ -84,20 +84,20 @@ def test_linear_looped(training, recompute): if not training and recompute: pytest.skip() # inference + recompute is the same as just inference - class BudaTest(PyBudaModule): + class BudaTest(ForgeModule): shape = (1, 1, 64, 64) def __init__(self, name): super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) + self.weights1 = forge.Parameter(*self.shape, requires_grad=True) + self.weights2 = forge.Parameter(*self.shape, requires_grad=True) def forward(self, act1): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m1g = pybuda.op.Gelu("gelu1", m1) + m1 = forge.op.Matmul("matmul1", act1, self.weights1) + m1g = forge.op.Gelu("gelu1", m1) - m2 = pybuda.op.Matmul("matmul2", m1g, self.weights2) - m2g = pybuda.op.Gelu("gelu2", m2) + m2 = forge.op.Matmul("matmul2", m1g, self.weights2) + m2g = forge.op.Gelu("gelu2", m2) return m2g @@ -110,7 +110,7 @@ def forward(self, act1): mod.set_parameter("weights1", torch.rand(*BudaTest.shape, requires_grad=True)) mod.set_parameter("weights2", torch.rand(*BudaTest.shape, requires_grad=True)) - pybuda_compile( + forge_compile( tt0, "sanity", act1, @@ -126,9 +126,9 @@ def forward(self, act1): ) def test_swin_roll(): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.balancer_policy = "Ribbon" class swin_roll(nn.Module): @@ -147,8 +147,8 @@ def forward(self,hidden_state): tt_model, (input_shape,), verify_cfg=VerifyConfig( - arch=pybuda.BackendDevice.Wormhole_B0, - 
devtype=pybuda.BackendType.Golden, + arch=forge.BackendDevice.Wormhole_B0, + devtype=forge.BackendType.Golden, test_kind=TestKind.INFERENCE, ) ) @@ -156,9 +156,9 @@ def forward(self,hidden_state): @pytest.mark.parametrize("tranpose_dims", ((2, 0), (0, 1), (1, 2), (3, 1), (4, 1),(-1, -6))) def test_reshape_transpose_reshape_tvm(test_device, tranpose_dims): - # Set PyBuda configuration parameters - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.default_df_override = pybuda._C.DataFormat.Float16_b + # Set Forge configuration parameters + compiler_cfg = forge.config._get_global_compiler_config() + compiler_cfg.default_df_override = forge._C.DataFormat.Float16_b compiler_cfg.compile_depth=CompileDepth.GENERATE_INITIAL_GRAPH class Model(nn.Module): def __init__(self, new_shape_1, dim0, dim1, new_shape_2): diff --git a/pybuda/test/tvm/sanity/tests_B/test_propped_params_tensorflow.py b/forge/test/tvm/sanity/tests_B/test_propped_params_tensorflow.py similarity index 90% rename from pybuda/test/tvm/sanity/tests_B/test_propped_params_tensorflow.py rename to forge/test/tvm/sanity/tests_B/test_propped_params_tensorflow.py index aeab7e7c6..9cb3c963a 100644 --- a/pybuda/test/tvm/sanity/tests_B/test_propped_params_tensorflow.py +++ b/forge/test/tvm/sanity/tests_B/test_propped_params_tensorflow.py @@ -5,8 +5,8 @@ # Some basic bring-up tests of tracing functionality # from operator import is_ -from pybuda.tensor import consteval_input -from pybuda.tvm_to_python import generate_pybuda_module +from forge.tensor import consteval_input +from forge.tvm_to_python import generate_forge_module from collections import OrderedDict import tensorflow as tf @@ -15,17 +15,17 @@ from transformers import BertConfig, TFBertMainLayer, TFBertForQuestionAnswering, OPTConfig, TFOPTModel, GPT2Config, TFGPT2Model from transformers.models.bert.modeling_tf_bert import TFBertEncoder -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import backend, verify_module +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import backend, verify_module from test.utils import download_model -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) def tensor_equals(a, b): @@ -112,7 +112,7 @@ def call(self, x): const_propped = model.model.layer[0].intermediate.dense.get_weights() mod = TFModule("tf_bert", model) - buda_mods, _, buda_inputs = generate_pybuda_module(mod, inputs) + buda_mods, _, buda_inputs = generate_forge_module(mod, inputs) assert_params(model, buda_mods, const_propped, [model.am, model.hm]) @@ -136,7 +136,7 @@ def test_gpt2(): const_propped = [model.transformer.h[0].attn.c_attn.weight.numpy(), model.transformer.h[0].attn.c_attn.bias.numpy()] - buda_mods, _, buda_inputs = generate_pybuda_module(mod, inputs, verify_cfg=VerifyConfig(pcc=0.99)) + buda_mods, _, buda_inputs = generate_forge_module(mod, inputs, verify_cfg=VerifyConfig(pcc=0.99)) new_const_propped = [] for tensor in const_propped: @@ -160,6 +160,6 @@ def test_opt(): input_shape = (1, 768) inputs = [tf.random.uniform(input_shape, maxval=input_shape[-1], dtype=tf.int32)] - buda_mods, _, buda_inputs = generate_pybuda_module(mod, inputs) + buda_mods, _, buda_inputs = generate_forge_module(mod, inputs) assert_params(model, buda_mods, const_propped) diff --git a/pybuda/test/tvm/sanity/tests_B/test_sanity_onnx.py b/forge/test/tvm/sanity/tests_B/test_sanity_onnx.py similarity index 
97% rename from pybuda/test/tvm/sanity/tests_B/test_sanity_onnx.py rename to forge/test/tvm/sanity/tests_B/test_sanity_onnx.py index 63b98c600..689e21200 100644 --- a/pybuda/test/tvm/sanity/tests_B/test_sanity_onnx.py +++ b/forge/test/tvm/sanity/tests_B/test_sanity_onnx.py @@ -5,7 +5,7 @@ import onnxruntime as ort import pytest import torch -from pybuda import ( +from forge import ( OnnxModule, TTDevice, BackendType, @@ -13,14 +13,14 @@ CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, TFGraphDefModule, ) -from pybuda.config import CompileDepth +from forge.config import CompileDepth from loguru import logger -from pybuda.config import CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from transformers import T5Config, T5Model, T5ForConditionalGeneration, T5Tokenizer, T5EncoderModel import urllib diff --git a/pybuda/test/tvm/sanity/tests_C/__init__.py b/forge/test/tvm/sanity/tests_C/__init__.py similarity index 100% rename from pybuda/test/tvm/sanity/tests_C/__init__.py rename to forge/test/tvm/sanity/tests_C/__init__.py diff --git a/pybuda/test/tvm/sanity/tests_C/test_decomps.py b/forge/test/tvm/sanity/tests_C/test_decomps.py similarity index 68% rename from pybuda/test/tvm/sanity/tests_C/test_decomps.py rename to forge/test/tvm/sanity/tests_C/test_decomps.py index 8ff9cbb37..5dd8c09eb 100644 --- a/pybuda/test/tvm/sanity/tests_C/test_decomps.py +++ b/forge/test/tvm/sanity/tests_C/test_decomps.py @@ -5,9 +5,9 @@ # Some basic bring-up tests of tracing functionality # from typing import OrderedDict -from pybuda.pybudaglobal import TILE_DIM -from pybuda.tvm_to_python import compile_tvm_to_python -from pybuda.verify.config import TestKind +from forge.forgeglobal import TILE_DIM +from forge.tvm_to_python import compile_tvm_to_python +from forge.verify.config import TestKind import pytest from sqlalchemy import true from test.tvm.python.test_sanity import test_linear @@ -18,15 +18,15 @@ import numpy as np import math from loguru import logger -import pybuda -from pybuda import ( +import forge +from forge import ( Tensor, PyTorchModule, - PyBudaModule, + ForgeModule, VerifyConfig, ) -from pybuda.config import CompilerConfig, _get_global_compiler_config, CompileDepth -from pybuda.verify.backend import verify_module +from forge.config import CompilerConfig, _get_global_compiler_config, CompileDepth +from forge.verify.backend import verify_module class AlexnetReshape(torch.nn.Module): def __init__(self, newshape, oldshape): @@ -147,40 +147,40 @@ def test_reshape(input_shape): goal = torch.reshape(x, newshape) goalt = torch.transpose(goal, -2, -1) - in1 = pybuda.Tensor.create_from_torch(x) - padx = pybuda.op.PadTile("", in1, -1, input_shape[-2]) - pady = pybuda.op.PadTile("", padx, -2, input_shape[-1]) + in1 = forge.Tensor.create_from_torch(x) + padx = forge.op.PadTile("", in1, -1, input_shape[-2]) + pady = forge.op.PadTile("", padx, -2, input_shape[-1]) # padded = [0, 0] + [(pady.shape[2]-1) * TILE_DIM] + [0] - # pady = pybuda.op.Pad("", pady, padded) + # pady = forge.op.Pad("", pady, padded) spm = create_reshape_flatten_sparse_picker_matrix(pady.shape[-2], TILE_DIM*pady.shape[-2]) - spm = pybuda.Tensor.create_from_torch(spm) - mm = pybuda.op.SparseMatmul("", spm, pady) - result = pybuda.op.VSlice("", mm, pady.shape[-2]) + spm = 
forge.Tensor.create_from_torch(spm) + mm = forge.op.SparseMatmul("", spm, pady) + result = forge.op.VSlice("", mm, pady.shape[-2]) if input_shape[-1] % TILE_DIM == 0: - hstk = pybuda.op.HSlice("", result, slc) - result = pybuda.op.VStack("", hstk, hstk.shape[-3] // input_shape[-3]) + hstk = forge.op.HSlice("", result, slc) + result = forge.op.VStack("", hstk, hstk.shape[-3] // input_shape[-3]) else: spm = create_padding_insert_sparse_picker(input_shape[-1], pady.shape[-1], slc) - spm = pybuda.Tensor.create_from_torch(spm) - t = pybuda.op.Transpose("", result, -2, -1) - mm = pybuda.op.SparseMatmul("", spm, t) - vslc = pybuda.op.VSlice("", mm, slc) - t = pybuda.op.Transpose("", vslc, -2, -1) - result = pybuda.op.VStack("", t, t.shape[-3] // input_shape[-3]) + spm = forge.Tensor.create_from_torch(spm) + t = forge.op.Transpose("", result, -2, -1) + mm = forge.op.SparseMatmul("", spm, t) + vslc = forge.op.VSlice("", mm, slc) + t = forge.op.Transpose("", vslc, -2, -1) + result = forge.op.VStack("", t, t.shape[-3] // input_shape[-3]) spm = create_flattened_padding_removal_sparse_picker_matrix(result.shape[-2], 0, 1, TILE_DIM) spm = torch.nn.functional.pad(spm.to_dense(), (0, 0, 0, align_up_tile(spm.shape[-2], TILE_DIM) - spm.shape[-2]), mode='constant', value=0).to_sparse() - spm = pybuda.Tensor.create_from_torch(spm) - result = pybuda.op.SparseMatmul("", spm, result) + spm = forge.Tensor.create_from_torch(spm) + result = forge.op.SparseMatmul("", spm, result) if not align_up_tile(newshape[-2], TILE_DIM) == align_up_tile(result.shape[-2], TILE_DIM): spm = create_flattened_padding_removal_sparse_picker_matrix(result.shape[-2], 0, newshape[-2], newshape[-2]) - spm = pybuda.Tensor.create_from_torch(spm) - result = pybuda.op.SparseMatmul("", spm, result) + spm = forge.Tensor.create_from_torch(spm) + result = forge.op.SparseMatmul("", spm, result) - n1 = pybuda.op.Narrow("", result, -1, 0, newshape[-1], result.shape[-1]) - res = pybuda.op.Narrow("", n1, -2, 0, newshape[-2], n1.shape[-2]) + n1 = forge.op.Narrow("", result, -1, 0, newshape[-1], result.shape[-1]) + res = forge.op.Narrow("", n1, -2, 0, newshape[-2], n1.shape[-2]) assert torch.all(goal == res.value()) @@ -203,47 +203,47 @@ def test_xy_flatten(): input_shape = (1, 32, 112, 112) newshape = (1, 1, input_shape[-3], input_shape[-2]*input_shape[-1]) - in1 = pybuda.Tensor.create_from_torch(torch.rand(input_shape)) - padx = pybuda.op.PadTile("", in1, -1, input_shape[-1]) - pady = pybuda.op.PadTile("", padx, -2, input_shape[-2]) - # pady = pybuda.op.Pad("", in1, (0, TILE_DIM-input_shape[-2], 0, TILE_DIM-input_shape[-1])) + in1 = forge.Tensor.create_from_torch(torch.rand(input_shape)) + padx = forge.op.PadTile("", in1, -1, input_shape[-1]) + pady = forge.op.PadTile("", padx, -2, input_shape[-2]) + # pady = forge.op.Pad("", in1, (0, TILE_DIM-input_shape[-2], 0, TILE_DIM-input_shape[-1])) torch.set_printoptions(threshold=100000, linewidth=10000) if input_shape[-3] > 1: - pady = pybuda.op.HStack("", pady, input_shape[-3]) + pady = forge.op.HStack("", pady, input_shape[-3]) padded_shape = pady.shape r_new = padded_shape[-1] * input_shape[-2] // (padded_shape[-1] // TILE_DIM) - fl_spm = pybuda.op.eval.create_reshape_flatten_sparse_picker_matrix(pady.shape[-2], r_new, TILE_DIM) - fl_spm = pybuda.Tensor.create_from_torch(fl_spm) - mm = pybuda.op.SparseMatmul("", fl_spm, pady) + fl_spm = forge.op.eval.create_reshape_flatten_sparse_picker_matrix(pady.shape[-2], r_new, TILE_DIM) + fl_spm = forge.Tensor.create_from_torch(fl_spm) + mm = 
forge.op.SparseMatmul("", fl_spm, pady) if input_shape[-3] > 1: - mm = pybuda.op.HSlice("", mm, input_shape[-3]) + mm = forge.op.HSlice("", mm, input_shape[-3]) rt = align_up_tile(r_new, TILE_DIM) // TILE_DIM - vs = pybuda.op.VSlice("", mm, rt) - hs = pybuda.op.HStack("", vs, rt) + vs = forge.op.VSlice("", mm, rt) + hs = forge.op.HStack("", vs, rt) if input_shape[-3] > 1: - hs = pybuda.op.VStack("", hs, input_shape[-3]) + hs = forge.op.VStack("", hs, input_shape[-3]) if input_shape[-1] % TILE_DIM: - tx = pybuda.op.Transpose("", hs, -2, -1) - pr_spm = pybuda.op.eval.create_flattened_padding_removal_sparse_picker_matrix(hs.shape[-1], 0, input_shape[-1], pad_to_tile_dim(input_shape[-1], TILE_DIM)) + tx = forge.op.Transpose("", hs, -2, -1) + pr_spm = forge.op.eval.create_flattened_padding_removal_sparse_picker_matrix(hs.shape[-1], 0, input_shape[-1], pad_to_tile_dim(input_shape[-1], TILE_DIM)) - pr_spm = pybuda.Tensor.create_from_torch(pr_spm) - pr_mm = pybuda.op.SparseMatmul("", pr_spm, tx) - utx = pybuda.op.Transpose("", pr_mm, -2, -1) + pr_spm = forge.Tensor.create_from_torch(pr_spm) + pr_mm = forge.op.SparseMatmul("", pr_spm, tx) + utx = forge.op.Transpose("", pr_mm, -2, -1) else: utx = hs if input_shape[-3] > 1: - pr_spm = pybuda.op.eval.create_flattened_padding_removal_sparse_picker_matrix(utx.shape[-2], 0, 1, TILE_DIM) - pr_spm = pybuda.Tensor.create_from_torch(pr_spm) - res = pybuda.op.SparseMatmul("", pr_spm, utx) + pr_spm = forge.op.eval.create_flattened_padding_removal_sparse_picker_matrix(utx.shape[-2], 0, 1, TILE_DIM) + pr_spm = forge.Tensor.create_from_torch(pr_spm) + res = forge.op.SparseMatmul("", pr_spm, utx) else: - res = pybuda.op.Narrow("", utx, -2, 0, 1, utx.shape[-2]) + res = forge.op.Narrow("", utx, -2, 0, 1, utx.shape[-2]) assert torch.all(in1.value().reshape(newshape) == res.value()) @@ -281,17 +281,17 @@ def test_xy_flatten(): # # newshape = list(input_shape[:-3]) + [input_shape[-2]] + [y_slice] + [input_shape[-1] // y_slice] # print(f"{input_shape} --> {newshape}") -# in1 = pybuda.Tensor.create_from_torch(torch.rand(input_shape)) -# padx = pybuda.op.PadTile("", in1, -1, input_shape[-1]) -# pady = pybuda.op.PadTile("", padx, -2, input_shape[-2]) +# in1 = forge.Tensor.create_from_torch(torch.rand(input_shape)) +# padx = forge.op.PadTile("", in1, -1, input_shape[-1]) +# pady = forge.op.PadTile("", padx, -2, input_shape[-2]) # torch.set_printoptions(threshold=100000, linewidth=10000) # num_rows_per_slice = input_shape[-1] // y_slice # padded_y_slice = pady.shape[-1] // num_rows_per_slice -# vslc = pybuda.op.VSlice("", pady, pady.shape[-2]) +# vslc = forge.op.VSlice("", pady, pady.shape[-2]) -# fl_spm = pybuda.op.eval.create_reshape_flatten_sparse_picker_matrix(1, vslc.shape[-1]) -# fl_spm = pybuda.Tensor.create_from_torch(fl_spm) -# mm = pybuda.op.SparseMatmul("", fl_spm, mm) +# fl_spm = forge.op.eval.create_reshape_flatten_sparse_picker_matrix(1, vslc.shape[-1]) +# fl_spm = forge.Tensor.create_from_torch(fl_spm) +# mm = forge.op.SparseMatmul("", fl_spm, mm) def test_zx_transpose(): @@ -299,17 +299,17 @@ def test_zx_transpose(): input_shape = (1, 1280, 1, 1) newshape = (1, 1, 1, 1280) - in1 = pybuda.Tensor.create_from_torch(torch.rand(input_shape)) - padx = pybuda.op.PadTile("", in1, -1, input_shape[-1]) - pady = pybuda.op.PadTile("", padx, -2, input_shape[-2]) + in1 = forge.Tensor.create_from_torch(torch.rand(input_shape)) + padx = forge.op.PadTile("", in1, -1, input_shape[-1]) + pady = forge.op.PadTile("", padx, -2, input_shape[-2]) - vs = pybuda.op.VStack("", pady, 
1280) - fspm = pybuda.op.eval.create_reshape_flatten_sparse_picker_matrix(1280, 40960).transpose(-1, -2) - fspm = pybuda.Tensor.create_from_torch(fspm) - mm = pybuda.op.SparseMatmul("", fspm, vs) - tx = pybuda.op.Transpose("", mm, -2, -1) + vs = forge.op.VStack("", pady, 1280) + fspm = forge.op.eval.create_reshape_flatten_sparse_picker_matrix(1280, 40960).transpose(-1, -2) + fspm = forge.Tensor.create_from_torch(fspm) + mm = forge.op.SparseMatmul("", fspm, vs) + tx = forge.op.Transpose("", mm, -2, -1) - res = pybuda.op.Narrow("", tx, -2, 0, 1, 32) + res = forge.op.Narrow("", tx, -2, 0, 1, 32) assert torch.all(in1.value().reshape(newshape) == res.value()) mod = PyTorchModule("axelnet_reshape", AlexnetReshape(newshape, input_shape)) @@ -344,35 +344,35 @@ def test_concat(): input_shape_2 = (1, 128, 16, 32) axis = -1 - in1 = pybuda.Tensor.create_from_torch(torch.rand(input_shape_1)) - in2 = pybuda.Tensor.create_from_torch(torch.rand(input_shape_2)) - concat = pybuda.op.Concatenate("", in1, in2, axis=axis) + in1 = forge.Tensor.create_from_torch(torch.rand(input_shape_1)) + in2 = forge.Tensor.create_from_torch(torch.rand(input_shape_2)) + concat = forge.op.Concatenate("", in1, in2, axis=axis) if TILE_DIM == 32: - padx1 = pybuda.op.PadTile("", in1, -1, input_shape_1[-1]) - pady1 = pybuda.op.PadTile("", padx1, -2, input_shape_1[-2]) - padx2 = pybuda.op.PadTile("", in2, -1, input_shape_2[-1]) - pady2 = pybuda.op.PadTile("", padx2, -2, input_shape_2[-2]) + padx1 = forge.op.PadTile("", in1, -1, input_shape_1[-1]) + pady1 = forge.op.PadTile("", padx1, -2, input_shape_1[-2]) + padx2 = forge.op.PadTile("", in2, -1, input_shape_2[-1]) + pady2 = forge.op.PadTile("", padx2, -2, input_shape_2[-2]) else: pad1 = [0, 0, 0, 0] if input_shape_1[-1] % TILE_DIM: pad1[-3] = TILE_DIM - (input_shape_1[-1] % TILE_DIM) if input_shape_1[-2] % TILE_DIM: pad1[-1] = TILE_DIM - (input_shape_1[-2] % TILE_DIM) - pady1 = pybuda.op.Pad("", in1, pad1) + pady1 = forge.op.Pad("", in1, pad1) pad2 = [0, 0, 0, 0] if input_shape_2[-1] % TILE_DIM: pad2[-3] = TILE_DIM - (input_shape_2[-1] % TILE_DIM) if input_shape_2[-2] % TILE_DIM: pad2[-1] = TILE_DIM - (input_shape_2[-2] % TILE_DIM) - pady2 = pybuda.op.Pad("", in2, pad2) + pady2 = forge.op.Pad("", in2, pad2) if axis == -1: - pady1 = pybuda.op.Transpose("", pady1, -2, -1) + pady1 = forge.op.Transpose("", pady1, -2, -1) if axis == -1: - pady2 = pybuda.op.Transpose("", pady2, -2, -1) + pady2 = forge.op.Transpose("", pady2, -2, -1) vs1 = pady1 vs2 = pady2 @@ -404,8 +404,8 @@ def test_concat(): (r_new, vs1.shape[-2]), dtype=torch.float32, ) - spm1 = pybuda.Tensor.create_from_torch(spm1) - mm1 = pybuda.op.SparseMatmul("", spm1, vs1) + spm1 = forge.Tensor.create_from_torch(spm1) + mm1 = forge.op.SparseMatmul("", spm1, vs1) if axis >= -2: # mm2_len = input_shape_2[axis] * input_shape_1[-3] @@ -426,23 +426,23 @@ def test_concat(): (r_new, vs2.shape[-2]), dtype=torch.float32, ) - spm2 = pybuda.Tensor.create_from_torch(spm2) - mm2 = pybuda.op.SparseMatmul("", spm2, vs2) + spm2 = forge.Tensor.create_from_torch(spm2) + mm2 = forge.op.SparseMatmul("", spm2, vs2) - add = pybuda.op.Add("", mm1, mm2) + add = forge.op.Add("", mm1, mm2) vsl = add if axis == -3: rt = r_new//pady1.shape[-2] - vsl = pybuda.op.HSlice("", add, rt) + vsl = forge.op.HSlice("", add, rt) if axis == -1: - vsl = pybuda.op.Transpose("", vsl, -2, -1) + vsl = forge.op.Transpose("", vsl, -2, -1) narrow_shape = input_shape_1[-1] if axis != -1 else input_shape_1[-1] + input_shape_2[-1] if narrow_shape != vsl.shape[-1]: - vsl = 
pybuda.op.Narrow("", vsl, -1, 0, narrow_shape, vsl.shape[-1]) + vsl = forge.op.Narrow("", vsl, -1, 0, narrow_shape, vsl.shape[-1]) narrow_shape = input_shape_1[-2] if axis != -2 else input_shape_1[-2] + input_shape_2[-2] if narrow_shape != vsl.shape[-2]: - vsl = pybuda.op.Narrow("", vsl, -2, 0, narrow_shape, vsl.shape[-2]) + vsl = forge.op.Narrow("", vsl, -2, 0, narrow_shape, vsl.shape[-2]) assert torch.all(vsl.value() == concat.value()) mod = PyTorchModule("concat_decomp", TestConcat(axis)) @@ -472,9 +472,9 @@ def test_vslice(input_shape): x = torch.randn(input_shape) goal = x.reshape(newshape) - result = pybuda.Tensor.create_from_torch(x) - result = pybuda.op.PadTile("", result, -1, result.shape[-1]) - result = pybuda.op.PadTile("", result, -2, result.shape[-2]) + result = forge.Tensor.create_from_torch(x) + result = forge.op.PadTile("", result, -1, result.shape[-1]) + result = forge.op.PadTile("", result, -2, result.shape[-2]) padded_dim = (math.ceil(attr[-2] / TILE_DIM) * TILE_DIM) num_tiles = attr[-3] if attr[-2] < TILE_DIM else (math.ceil(attr[-3] / TILE_DIM) * TILE_DIM) @@ -496,13 +496,13 @@ def test_vslice(input_shape): spm1 = create_flattened_padding_removal_sparse_picker_matrix(spm.shape[-2], 0, slice_factor*padded_dim, spm.shape[-2]) spm = torch.sparse.mm(spm1, spm) - spm = pybuda.Tensor.create_from_torch(spm) - result = pybuda.op.SparseMatmul("", spm, result) - result = pybuda.op.VSlice("", result, slice_factor) + spm = forge.Tensor.create_from_torch(spm) + result = forge.op.SparseMatmul("", spm, result) + result = forge.op.VSlice("", result, slice_factor) assert align_up_tile(result.shape[-2]) == align_up_tile(attr[-2]) - result = pybuda.op.Narrow("", result, -1, 0, attr[-1], result.shape[-1]) - result = pybuda.op.Narrow("", result, -2, 0, attr[-2], result.shape[-2]) + result = forge.op.Narrow("", result, -1, 0, attr[-1], result.shape[-1]) + result = forge.op.Narrow("", result, -2, 0, attr[-2], result.shape[-2]) assert torch.all(goal == result.value()) @@ -528,12 +528,12 @@ def test_vstack(input_shape): x = torch.randn(input_shape) goal = x.reshape(newshape) - result = pybuda.Tensor.create_from_torch(x) - result = pybuda.op.PadTile("", result, -1, result.shape[-1]) - result = pybuda.op.PadTile("", result, -2, result.shape[-2]) + result = forge.Tensor.create_from_torch(x) + result = forge.op.PadTile("", result, -1, result.shape[-1]) + result = forge.op.PadTile("", result, -2, result.shape[-2]) padded_shape = result.shape slice_factor = orig_shape[-3] - result = pybuda.op.VStack("", result, slice_factor) + result = forge.op.VStack("", result, slice_factor) if orig_shape[-2] % TILE_DIM != 0: # Pick out multiple rows in a tile @@ -551,12 +551,12 @@ def test_vstack(input_shape): (num_rows, result.shape[-2]), dtype=torch.float32, ) - s_pick_multi_row = pybuda.Tensor.create_from_torch(s_pick_multi_row) - result = pybuda.op.SparseMatmul("", s_pick_multi_row, result) + s_pick_multi_row = forge.Tensor.create_from_torch(s_pick_multi_row) + result = forge.op.SparseMatmul("", s_pick_multi_row, result) assert align_up_tile(result.shape[-2]) == align_up_tile(attr[-2]) - result = pybuda.op.Narrow("", result, -1, 0, attr[-1], result.shape[-1]) - result = pybuda.op.Narrow("", result, -2, 0, attr[-2], result.shape[-2]) + result = forge.op.Narrow("", result, -1, 0, attr[-1], result.shape[-1]) + result = forge.op.Narrow("", result, -2, 0, attr[-2], result.shape[-2]) assert torch.all(goal == result.value()) @@ -583,45 +583,45 @@ def test_full_flatten(input_shape): x = torch.randn(input_shape) goal 
= torch.reshape(x, newshape) - in1 = pybuda.Tensor.create_from_torch(x) - padx = pybuda.op.PadTile("", in1, -1, input_shape[-1]) - pady = pybuda.op.PadTile("", padx, -2, input_shape[-2]) + in1 = forge.Tensor.create_from_torch(x) + padx = forge.op.PadTile("", in1, -1, input_shape[-1]) + pady = forge.op.PadTile("", padx, -2, input_shape[-2]) result = pady if len(input_shape) > 2 and input_shape[-3] != 1: - result = pybuda.op.VStack("", pady, input_shape[-3]) + result = forge.op.VStack("", pady, input_shape[-3]) if input_shape[-2] % TILE_DIM: spm = create_padding_shift_sparse_picker_matrix(input_shape[-3]*input_shape[-2], input_shape[-3], result.shape[-2]) - spm = pybuda.Tensor.create_from_torch(spm) - mm = pybuda.op.SparseMatmul("", spm, result) + spm = forge.Tensor.create_from_torch(spm) + mm = forge.op.SparseMatmul("", spm, result) result = mm padded_shape = result.shape r_new = TILE_DIM * padded_shape[-2] spm = create_reshape_flatten_sparse_picker_matrix(padded_shape[-2], r_new) - spm = pybuda.Tensor.create_from_torch(spm) - result = pybuda.op.SparseMatmul("", spm, result) + spm = forge.Tensor.create_from_torch(spm) + result = forge.op.SparseMatmul("", spm, result) rt = align_up_tile(r_new) // TILE_DIM - vslc = pybuda.op.VSlice("", result, rt) - hstk = pybuda.op.HStack("", vslc, rt) + vslc = forge.op.VSlice("", result, rt) + hstk = forge.op.HStack("", vslc, rt) result = hstk - t = pybuda.op.Transpose("", hstk, -2, -1) + t = forge.op.Transpose("", hstk, -2, -1) spm = create_flattened_padding_removal_sparse_picker_matrix(t.shape[-2], 0, input_shape[-1], padded_shape[-1]) rows = torch.arange(0, align_up_tile(newshape[-1])).tolist() cols = rows spm2 = torch.sparse_coo_tensor((rows, cols), torch.ones(len(rows)), (len(rows), spm.shape[-2]), dtype=torch.float32) spm = torch.sparse.mm(spm2, spm) - spm = pybuda.Tensor.create_from_torch(spm) - mm = pybuda.op.SparseMatmul("", spm, t) - result = pybuda.op.Transpose("", mm, -2, -1) + spm = forge.Tensor.create_from_torch(spm) + mm = forge.op.SparseMatmul("", spm, t) + result = forge.op.Transpose("", mm, -2, -1) assert align_up_tile(newshape[-1]) == align_up_tile(result.shape[-1]) assert align_up_tile(newshape[-2]) == align_up_tile(result.shape[-2]) - result = pybuda.op.Narrow("", result, -2, 0, newshape[-2], result.shape[-2]) - result = pybuda.op.Narrow("", result, -1, 0, newshape[-1], result.shape[-1]) + result = forge.op.Narrow("", result, -2, 0, newshape[-2], result.shape[-2]) + result = forge.op.Narrow("", result, -1, 0, newshape[-1], result.shape[-1]) assert torch.all(result.value() == goal) @@ -643,18 +643,18 @@ def test_single_select(test_device, index, length): test_kind = TestKind.INFERENCE input_shape = (1, 1, 128, 64) stride = input_shape[-2] - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self, inp_shape): super().__init__("single_elect_test") - x = pybuda.Tensor.create_from_torch(torch.randn((1, 128, 8 ,8))) - self.y = pybuda.op.tm.Select("", x, -3, (index, length), stride).shape[-3] + x = forge.Tensor.create_from_torch(torch.randn((1, 128, 8 ,8))) + self.y = forge.op.tm.Select("", x, -3, (index, length), stride).shape[-3] self.add_constant("one") self.add_constant("two") - self.add_parameter("three", pybuda.Parameter(torch.randn(128, 128))) - self.add_parameter("four", pybuda.Parameter(torch.randn(self.y, self.y))) + self.add_parameter("three", forge.Parameter(torch.randn(128, 128))) + self.add_parameter("four", forge.Parameter(torch.randn(self.y, self.y))) self.set_constant("one", torch.randn(1, 128, 1, 1)) 
self.set_constant("two", torch.randn(1, 128, 1, 1)) @@ -662,18 +662,18 @@ def __init__(self, inp_shape): def forward(self, x): # x = (1, 1, 32, 64) - x = pybuda.op.Matmul("", self.get_parameter("three"), x) - x = pybuda.op.Reshape("", x, (1, 128, 8, 8)) - x = pybuda.op.Multiply("", x, self.get_constant("one")) - x = pybuda.op.Add("", x, self.get_constant("two")) - x = pybuda.op.Select("", x, -3, (index, length), stride) - x = pybuda.op.Reshape("", x, (1, 1, self.y, 64)) - x = pybuda.op.Matmul("", self.get_parameter("four"), x) + x = forge.op.Matmul("", self.get_parameter("three"), x) + x = forge.op.Reshape("", x, (1, 128, 8, 8)) + x = forge.op.Multiply("", x, self.get_constant("one")) + x = forge.op.Add("", x, self.get_constant("two")) + x = forge.op.Select("", x, -3, (index, length), stride) + x = forge.op.Reshape("", x, (1, 1, self.y, 64)) + x = forge.op.Matmul("", self.get_parameter("four"), x) return x model = Model(input_shape) - model(pybuda.Tensor.create_from_torch(torch.randn(input_shape))) + model(forge.Tensor.create_from_torch(torch.randn(input_shape))) module = model @@ -689,11 +689,11 @@ def forward(self, x): ) def test_multi_select(test_device): - if test_device.arch == pybuda.BackendDevice.Wormhole_B0: + if test_device.arch == forge.BackendDevice.Wormhole_B0: pytest.skip("Skip this test for golden Wormhole B0") test_kind = TestKind.INFERENCE input_shape = (1, 1, 30, 3072) - class Model(PyBudaModule): + class Model(ForgeModule): def __init__(self, inp_shape): super().__init__("multi_select_test") @@ -707,66 +707,66 @@ def __init__(self, inp_shape): def forward(self, x): # x = (1, 1, 32, 64) # import pdb; pdb.set_trace() - x = pybuda.op.Softmax("", x, dim=-1) - r0 = pybuda.op.Reshape("", x, (1, 30, 4, 768)) - mult0 = pybuda.op.Multiply("", r0, self.get_constant("one")) - add0 = pybuda.op.Add("", mult0, self.get_constant("two")) + x = forge.op.Softmax("", x, dim=-1) + r0 = forge.op.Reshape("", x, (1, 30, 4, 768)) + mult0 = forge.op.Multiply("", r0, self.get_constant("one")) + add0 = forge.op.Add("", mult0, self.get_constant("two")) # Multiple selects where the dim we are selecting from is unaffected by commute # The reshapes are all inverses to r0, should be erased - s1 = pybuda.op.Select("", add0, -3, (0, 10), 30) - s2 = pybuda.op.Select("", add0, -3, (10, 10), 30) - s3 = pybuda.op.Select("", add0, -3, (20, 10), 30) - r1 = pybuda.op.Reshape("", s1, (1, 1, 10, 3072)) - r2 = pybuda.op.Reshape("", s2, (1, 1, 10, 3072)) - r3 = pybuda.op.Reshape("", s3, (1, 1, 10, 3072)) - m1 = pybuda.op.Softmax("", r1, dim=-1) - m2 = pybuda.op.Softmax("", r2, dim=-1) - m3 = pybuda.op.Softmax("", r3, dim=-1) + s1 = forge.op.Select("", add0, -3, (0, 10), 30) + s2 = forge.op.Select("", add0, -3, (10, 10), 30) + s3 = forge.op.Select("", add0, -3, (20, 10), 30) + r1 = forge.op.Reshape("", s1, (1, 1, 10, 3072)) + r2 = forge.op.Reshape("", s2, (1, 1, 10, 3072)) + r3 = forge.op.Reshape("", s3, (1, 1, 10, 3072)) + m1 = forge.op.Softmax("", r1, dim=-1) + m2 = forge.op.Softmax("", r2, dim=-1) + m3 = forge.op.Softmax("", r3, dim=-1) # Multiple selects where the dim we are selecting from is unaffected by commute # The reshapes are NOT inverses to r0. 
An inverse reshape shall be placed on top of each one # and subsequently erased during the optimize pass - s4 = pybuda.op.Select("", add0, -3, (0, 10), 30) - s5 = pybuda.op.Select("", add0, -3, (10, 10), 30) - s6 = pybuda.op.Select("", add0, -3, (20, 10), 30) - r4 = pybuda.op.Reshape("", s4, (1, 10, 16, 192)) - r5 = pybuda.op.Reshape("", s5, (1, 10, 16, 192)) - r6 = pybuda.op.Reshape("", s6, (1, 10, 16, 192)) - m4 = pybuda.op.Softmax("", r4, dim=-1) - m5 = pybuda.op.Softmax("", r5, dim=-1) - m6 = pybuda.op.Softmax("", r6, dim=-1) + s4 = forge.op.Select("", add0, -3, (0, 10), 30) + s5 = forge.op.Select("", add0, -3, (10, 10), 30) + s6 = forge.op.Select("", add0, -3, (20, 10), 30) + r4 = forge.op.Reshape("", s4, (1, 10, 16, 192)) + r5 = forge.op.Reshape("", s5, (1, 10, 16, 192)) + r6 = forge.op.Reshape("", s6, (1, 10, 16, 192)) + m4 = forge.op.Softmax("", r4, dim=-1) + m5 = forge.op.Softmax("", r5, dim=-1) + m6 = forge.op.Softmax("", r6, dim=-1) # Multiple selects where the dim we are selecting from IS affected by commute # The reshapes are NOT inverses to r0. An inverse reshape shall be placed on top of each one # and subsequently erased during the optimize pass - s7 = pybuda.op.Select("", add0, -1, (0, 256), 768) - s8 = pybuda.op.Select("", add0, -1, (256, 256), 768) - s9 = pybuda.op.Select("", add0, -1, (512, 256), 768) - r7 = pybuda.op.Reshape("", s7, (1, 30, 16, 64)) - r8 = pybuda.op.Reshape("", s8, (1, 30, 16, 64)) - r9 = pybuda.op.Reshape("", s9, (1, 30, 16, 64)) - m7 = pybuda.op.Softmax("", r7, dim=-1) - m8 = pybuda.op.Softmax("", r8, dim=-1) - m9 = pybuda.op.Softmax("", r9, dim=-1) + s7 = forge.op.Select("", add0, -1, (0, 256), 768) + s8 = forge.op.Select("", add0, -1, (256, 256), 768) + s9 = forge.op.Select("", add0, -1, (512, 256), 768) + r7 = forge.op.Reshape("", s7, (1, 30, 16, 64)) + r8 = forge.op.Reshape("", s8, (1, 30, 16, 64)) + r9 = forge.op.Reshape("", s9, (1, 30, 16, 64)) + m7 = forge.op.Softmax("", r7, dim=-1) + m8 = forge.op.Softmax("", r8, dim=-1) + m9 = forge.op.Softmax("", r9, dim=-1) # Multiple selects where the dim we are selecting from IS affected by commute # The reshapes are all inverses to r0, should be erased - s10 = pybuda.op.Select("", add0, -1, (0, 256), 768) - s11 = pybuda.op.Select("", add0, -1, (256, 256), 768) - s12 = pybuda.op.Select("", add0, -1, (512, 256), 768) - r10 = pybuda.op.Reshape("", s10, (1, 1, 30, 1024)) - r11 = pybuda.op.Reshape("", s11, (1, 1, 30, 1024)) - r12 = pybuda.op.Reshape("", s12, (1, 1, 30, 1024)) - m10 = pybuda.op.Softmax("", r10, dim=-1) - m11 = pybuda.op.Softmax("", r11, dim=-1) - m12 = pybuda.op.Softmax("", r12, dim=-1) + s10 = forge.op.Select("", add0, -1, (0, 256), 768) + s11 = forge.op.Select("", add0, -1, (256, 256), 768) + s12 = forge.op.Select("", add0, -1, (512, 256), 768) + r10 = forge.op.Reshape("", s10, (1, 1, 30, 1024)) + r11 = forge.op.Reshape("", s11, (1, 1, 30, 1024)) + r12 = forge.op.Reshape("", s12, (1, 1, 30, 1024)) + m10 = forge.op.Softmax("", r10, dim=-1) + m11 = forge.op.Softmax("", r11, dim=-1) + m12 = forge.op.Softmax("", r12, dim=-1) return m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12 model = Model(input_shape) - model(pybuda.Tensor.create_from_torch(torch.randn(input_shape))) + model(forge.Tensor.create_from_torch(torch.randn(input_shape))) module = model @@ -783,10 +783,10 @@ def forward(self, x): def test_binary_stack_on_x(): - class Model(PyBudaModule): + class Model(ForgeModule): def forward(self, x, y): # import pdb; pdb.set_trace() - out1 = pybuda.op.BinaryStack("", x, y, -1) + out1 = 
forge.op.BinaryStack("", x, y, -1) return out1 @@ -836,7 +836,7 @@ def get_factorization(n): begin_end_shapes = [([1, 47, 36, 1], [1, 3, 2, 282]), ([1, 38, 27, 15], [1, 19, 405, 2]), ([1, 14, 60, 24], [1, 72, 140, 2]), ([1, 54, 1, 22], [1, 18, 33, 2]), ([1, 8, 62, 56], [1, 8, 1736, 2])] @pytest.mark.parametrize("begin_end_shape", begin_end_shapes) def test_general_reshape(begin_end_shape): - class ReshapeModel(PyBudaModule): + class ReshapeModel(ForgeModule): def __init__(self, newshape): super().__init__("general_reshape") self.add_constant("const") @@ -844,9 +844,9 @@ def __init__(self, newshape): self.newshape = newshape def forward(self, a): - a = pybuda.op.Softmax("", a, dim=-1) - a = pybuda.op.Reshape("", a, self.newshape) - a = pybuda.op.Matmul("", a, self.get_constant("const")) + a = forge.op.Softmax("", a, dim=-1) + a = forge.op.Reshape("", a, self.newshape) + a = forge.op.Matmul("", a, self.get_constant("const")) return a orig_shape, newshape = begin_end_shape @@ -863,7 +863,7 @@ def forward(self, a): @pytest.mark.parametrize("begin_end_shape", begin_end_shapes) def test_xy_unflatten_reshape(begin_end_shape): # Tests these kind of reshapes: [1, 1, 32, 1024] -> [1, 32, 32, 32], [1, 1, 1280, 4] -> [1, 1280, 2, 2] - class ReshapeModel(PyBudaModule): + class ReshapeModel(ForgeModule): def __init__(self, newshape): super().__init__("xy_unflatten_reshape") self.add_constant("const") @@ -871,9 +871,9 @@ def __init__(self, newshape): self.newshape = newshape def forward(self, a): - a = pybuda.op.Softmax("", a, dim=-1) - a = pybuda.op.Reshape("", a, self.newshape) - a = pybuda.op.Matmul("", a, self.get_constant("const")) + a = forge.op.Softmax("", a, dim=-1) + a = forge.op.Reshape("", a, self.newshape) + a = forge.op.Matmul("", a, self.get_constant("const")) return a orig_shape, newshape = begin_end_shape @@ -892,12 +892,12 @@ def test_pixel_shuffle(shape): r = 2 shape = shape[:-3] + [r*r*shape[-3]] + shape[-2:] - class PixelShuffleModel(PyBudaModule): + class PixelShuffleModel(ForgeModule): def __init__(self): super().__init__("pixel_shuffle") def forward(self, a): - return pybuda.op.PixelShuffle("", a, r) + return forge.op.PixelShuffle("", a, r) _get_global_compiler_config().compile_depth = CompileDepth.POST_INITIAL_GRAPH_PASS verify_module( @@ -907,7 +907,7 @@ def forward(self, a): ) def test_reshape_with_smm_padding(): - class ReshapeModelWithPadding(PyBudaModule): + class ReshapeModelWithPadding(ForgeModule): def __init__(self, name, orig_shape, newshape): super().__init__(name) dim_width = orig_shape[-2] @@ -915,21 +915,21 @@ def __init__(self, name, orig_shape, newshape): spm = torch.sparse_coo_tensor([list(range(dim_width)), list(range(dim_width))], torch.ones(dim_width),(dim_width, dim_width),dtype=torch.float32) spm = torch.stack([spm] * orig_shape[-3], -3).unsqueeze(0) self.add_constant("const") - self.set_constant("const", pybuda.Tensor.create_from_torch(spm, constant=True)) + self.set_constant("const", forge.Tensor.create_from_torch(spm, constant=True)) spm = torch.sparse_coo_tensor([list(range(new_dim_width)), list(range(new_dim_width))], torch.ones(new_dim_width),(new_dim_width, new_dim_width),dtype=torch.float32) spm = torch.stack([spm] * newshape[-3], -3).unsqueeze(0) self.add_constant("const2") - self.set_constant("const2", pybuda.Tensor.create_from_torch(spm, constant=True)) + self.set_constant("const2", forge.Tensor.create_from_torch(spm, constant=True)) self.newshape = newshape def forward(self, x): - x = pybuda.op.SparseMatmul("smm0", self.get_constant("const") ,x) - 
x = pybuda.op.Reshape("r0", x, self.newshape) - x = pybuda.op.SparseMatmul("smm1", self.get_constant("const2"), x) + x = forge.op.SparseMatmul("smm0", self.get_constant("const") ,x) + x = forge.op.Reshape("r0", x, self.newshape) + x = forge.op.SparseMatmul("smm1", self.get_constant("const2"), x) return x import os - os.environ["PYBUDA_PAD_SPARSE_MM"] = "{14:16}" + os.environ["FORGE_PAD_SPARSE_MM"] = "{14:16}" orig_shape = (1, 256, 14, 14) new_shape = (1, 1, 256, 196) @@ -940,5 +940,5 @@ def forward(self, x): verify_cfg=VerifyConfig(), ) - del os.environ["PYBUDA_PAD_SPARSE_MM"] + del os.environ["FORGE_PAD_SPARSE_MM"] diff --git a/pybuda/test/tvm/sanity/tests_C/test_sanity_jax.py b/forge/test/tvm/sanity/tests_C/test_sanity_jax.py similarity index 92% rename from pybuda/test/tvm/sanity/tests_C/test_sanity_jax.py rename to forge/test/tvm/sanity/tests_C/test_sanity_jax.py index 71f28ab7e..27e274203 100644 --- a/pybuda/test/tvm/sanity/tests_C/test_sanity_jax.py +++ b/forge/test/tvm/sanity/tests_C/test_sanity_jax.py @@ -8,13 +8,13 @@ from jax import numpy as jnp from flax import linen as nn -from pybuda import ( +from forge import ( JaxModule, VerifyConfig, ) -from pybuda.verify.config import TestKind -from pybuda.verify.backend import verify_module -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.verify.config import TestKind +from forge.verify.backend import verify_module +from forge.config import CompileDepth, _get_global_compiler_config def test_tvm_linear(test_kind, test_device): class Linear(nn.Module): @@ -44,9 +44,9 @@ def __call__(self, x): vars = framework_module.init(key, act) framework_module = framework_module.bind(vars) - pybuda_module = JaxModule("linear", framework_module) + forge_module = JaxModule("linear", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -88,9 +88,9 @@ def __call__(self, x): vars = framework_module.init(key, act) framework_module = framework_module.bind(vars) - pybuda_module = JaxModule("linear", framework_module) + forge_module = JaxModule("linear", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -136,9 +136,9 @@ def __call__(self, x): vars = framework_module.init(key, act) framework_module = framework_module.bind(vars) - pybuda_module = JaxModule("jax_multiple_output", framework_module) + forge_module = JaxModule("jax_multiple_output", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -187,9 +187,9 @@ def __call__(self, q, k, v): # Run module # res = framework_module(q, k, v) - pybuda_module = JaxModule("jax_scaled_dot_product_attention", framework_module) + forge_module = JaxModule("jax_scaled_dot_product_attention", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape, input_shape, input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -235,9 +235,9 @@ def __call__(self, x): # Run module # res = framework_module(act) - pybuda_module = JaxModule("jax_layer_norm", framework_module) + forge_module = JaxModule("jax_layer_norm", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -285,9 +285,9 @@ def __call__(self, x): # Run module # res = framework_module(act) - pybuda_module = JaxModule("jax_conv2d_test", framework_module) + forge_module = 
JaxModule("jax_conv2d_test", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -326,7 +326,7 @@ def test_tvm_xla_gather(test_kind, test_device, slice_module): pytest.skip() if slice_module in [XLAGatherModule3, XLAGatherModule4, XLAGatherModule5, XLAGatherModule6]: - # tenstorrent/pybuda#1608 + # tenstorrent/forge#1608 pytest.skip() compiler_config = _get_global_compiler_config() @@ -350,9 +350,9 @@ def test_tvm_xla_gather(test_kind, test_device, slice_module): # Run module # res = framework_module(act) - pybuda_module = JaxModule("jax_xla_gather", framework_module) + forge_module = JaxModule("jax_xla_gather", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -387,9 +387,9 @@ def __call__(self, x): vars = framework_module.init(key, act) framework_module = framework_module.bind(vars) - pybuda_module = JaxModule("JAX_dense", framework_module) + forge_module = JaxModule("JAX_dense", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -433,9 +433,9 @@ def __call__(self, img): # Run module # res = framework_module(act) - pybuda_module = JaxModule("jax_conv2d_transpose_test", framework_module) + forge_module = JaxModule("jax_conv2d_transpose_test", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape,), verify_cfg=VerifyConfig( arch=test_device.arch, @@ -481,9 +481,9 @@ def __call__(self, img): # Run module # res = framework_module(act) - pybuda_module = JaxModule("jax_conv2d_dilated_test", framework_module) + forge_module = JaxModule("jax_conv2d_dilated_test", framework_module) verify_module( - pybuda_module, + forge_module, (input_shape1,), verify_cfg=VerifyConfig( arch=test_device.arch, diff --git a/pybuda/test/tvm/sanity/tests_C/test_sanity_tf.py b/forge/test/tvm/sanity/tests_C/test_sanity_tf.py similarity index 97% rename from pybuda/test/tvm/sanity/tests_C/test_sanity_tf.py rename to forge/test/tvm/sanity/tests_C/test_sanity_tf.py index 30472738d..49163417b 100644 --- a/pybuda/test/tvm/sanity/tests_C/test_sanity_tf.py +++ b/forge/test/tvm/sanity/tests_C/test_sanity_tf.py @@ -4,25 +4,25 @@ # # Some basic bring-up tests of tracing functionality # -from pybuda.config import CompileDepth -from pybuda.op.eval.common import compare_tensor_to_golden -from pybuda.verify.backend import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth +from forge.op.eval.common import compare_tensor_to_golden +from forge.verify.backend import verify_module +from forge.verify.config import TestKind import pytest import tensorflow as tf -from pybuda import ( +from forge import ( TFModule, TTDevice, BackendType, CompilerConfig, VerifyConfig, optimizers, - pybuda_compile, + forge_compile, ) -from test.tvm.utils import evaluate_framework_vs_pybuda -from pybuda.config import _get_global_compiler_config +from test.tvm.utils import evaluate_framework_vs_forge +from forge.config import _get_global_compiler_config input_shapes = [(1, 128, 64)] dense_units = [64] @@ -331,7 +331,7 @@ def call(self, x): act = tf.random.uniform(input_shape) - res = pybuda_compile( + res = forge_compile( tt0, "global_avg_pool2d_tf", act, @@ -344,7 +344,7 @@ def call(self, x): intermediates=True, ), ) - evaluate_framework_vs_pybuda(model, res, act) + evaluate_framework_vs_forge(model, res, act) @pytest.mark.parametrize( "input_shape", 
input_shapes, ids=[f"input{str(s)}" for s in input_shapes] @@ -372,7 +372,7 @@ def call(self, x): act = tf.random.uniform(input_shape) - res = pybuda_compile( + res = forge_compile( tt0, "global_max_pool2d_tf", act, @@ -385,7 +385,7 @@ def call(self, x): intermediates=True, ), ) - evaluate_framework_vs_pybuda(model, res, act) + evaluate_framework_vs_forge(model, res, act) input_shapes = [(1, 224, 224, 3)] @@ -431,7 +431,7 @@ def call(self, x1): act = tf.random.uniform(input_shape) - res = pybuda_compile( + res = forge_compile( tt0, "depthwise_conv2d_tf", act, @@ -444,7 +444,7 @@ def call(self, x1): intermediates=True, ), ) - evaluate_framework_vs_pybuda(model, res, act) + evaluate_framework_vs_forge(model, res, act) @pytest.mark.parametrize( "channel_format", channel_format, ids=[f"channel_format({k})" for k in channel_format] @@ -484,7 +484,7 @@ def call(self, x1): act = tf.random.uniform((1, 224, 224, 3)) - res = pybuda_compile( + res = forge_compile( tt0, "conv2d_uneven_pad", act, @@ -498,7 +498,7 @@ def call(self, x1): verify_all=True, ), ) - evaluate_framework_vs_pybuda(model, res, act) + evaluate_framework_vs_forge(model, res, act) def test_multiout(test_kind, test_device): class MultiOut(tf.keras.Model): @@ -702,14 +702,14 @@ def call(self, inputs): tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(module) - ret = pybuda_compile( + ret = forge_compile( tt0, "list_input", *inputs, compiler_cfg=_get_global_compiler_config(), ) - evaluate_framework_vs_pybuda(model, ret, *inputs) + evaluate_framework_vs_forge(model, ret, *inputs) def test_list_input_mixture(training): @@ -758,14 +758,14 @@ def call(self, inputs1, z, inputs2, w): tt0 = TTDevice("tt0", devtype=BackendType.Golden, optimizer=sgd_optimizer) tt0.place_module(module) - ret = pybuda_compile( + ret = forge_compile( tt0, "list_input", *inputs, compiler_cfg=_get_global_compiler_config(), ) - evaluate_framework_vs_pybuda(model, ret, *inputs) + evaluate_framework_vs_forge(model, ret, *inputs) def test_list_input(test_kind, test_device): diff --git a/pybuda/test/tvm/stable_diffusion/run_stable_diffusion.py b/forge/test/tvm/stable_diffusion/run_stable_diffusion.py similarity index 91% rename from pybuda/test/tvm/stable_diffusion/run_stable_diffusion.py rename to forge/test/tvm/stable_diffusion/run_stable_diffusion.py index 9aa129017..10c0b03d0 100644 --- a/pybuda/test/tvm/stable_diffusion/run_stable_diffusion.py +++ b/forge/test/tvm/stable_diffusion/run_stable_diffusion.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda._C import MathFidelity +from forge._C import MathFidelity import pytest from typing import List, Optional, Union @@ -15,13 +15,13 @@ from diffusers import StableDiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -import pybuda -from pybuda import ( +import forge +from forge import ( TTDeviceImage, BackendType, BackendDevice, ) -from pybuda.config import CompileDepth, _get_global_compiler_config +from forge.config import CompileDepth, _get_global_compiler_config from PIL import Image @@ -115,19 +115,19 @@ def forward(self, latent_model_input, timestep, text_embeddings): return noise_pred def initialize_compiler_overrides(): - # os.environ["PYBUDA_RELOAD_GENERATED_MODULES"] = "1" - os.environ["PYBUDA_MAX_GRAPH_CUT_RETRY"] = "2000" - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 30:32, 40:48, 60:64}" - 
os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" + # os.environ["FORGE_RELOAD_GENERATED_MODULES"] = "1" + os.environ["FORGE_MAX_GRAPH_CUT_RETRY"] = "2000" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 30:32, 40:48, 60:64}" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_RIBBON2"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.enable_enumerate_u_kt = False compiler_cfg.graph_solver_self_cut_type = "FastCut" compiler_cfg.retain_tvm_python_files = True - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b compiler_cfg.enable_auto_fusing = False compiler_cfg.balancer_policy = "Ribbon" @@ -164,11 +164,11 @@ def denoising_loop( # predict the noise residual ttdevice.push_to_inputs(*[latent_model_input.detach()[0:1], timestep_.detach()[0:1], prompt_embeds.detach()[0:1]]) - output_q = pybuda.run_inference(_sequential=True) + output_q = forge.run_inference(_sequential=True) noise_pred_0 = output_q.get()[0].value().detach() ttdevice.push_to_inputs(*[latent_model_input.detach()[1:2], timestep_.detach()[1:2], prompt_embeds.detach()[1:2]]) - output_q = pybuda.run_inference(_sequential=True) + output_q = forge.run_inference(_sequential=True) noise_pred_1 = output_q.get()[0].value().detach() noise_pred = torch.cat([noise_pred_0, noise_pred_1], dim=0) @@ -257,8 +257,8 @@ def run_stable_diffusion_pipeline(): logger.info(f"Compiling stable diffusion unet into TTI") initialize_compiler_overrides() input_shape = ((1, 4, 64, 64), (1,), (1, 77, 768)) - tt_module = pybuda.PyTorchModule("sd_unet_demo", UnetWrapper(pipe.unet),) - device = pybuda.TTDevice("tt0", module=tt_module,arch=BackendDevice.Wormhole_B0, devtype=BackendType.Silicon) + tt_module = forge.PyTorchModule("sd_unet_demo", UnetWrapper(pipe.unet),) + device = forge.TTDevice("tt0", module=tt_module,arch=BackendDevice.Wormhole_B0, devtype=BackendType.Silicon) tti_img = device.compile_to_image( img_path="sd_unet_demo.tti", training=False, @@ -271,7 +271,7 @@ def run_stable_diffusion_pipeline(): if args.run: save_path = "sd_unet_demo.tti" device_img: TTDeviceImage = TTDeviceImage.load_from_disk(save_path) - ttdevice = pybuda.TTDevice.load_image(img=device_img) + ttdevice = forge.TTDevice.load_image(img=device_img) while True: diff --git a/pybuda/test/tvm/stable_diffusion/test_stable_diffusion.py b/forge/test/tvm/stable_diffusion/test_stable_diffusion.py similarity index 83% rename from pybuda/test/tvm/stable_diffusion/test_stable_diffusion.py rename to forge/test/tvm/stable_diffusion/test_stable_diffusion.py index cdb7e962a..5552e58d3 100644 --- a/pybuda/test/tvm/stable_diffusion/test_stable_diffusion.py +++ b/forge/test/tvm/stable_diffusion/test_stable_diffusion.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC # SPDX-License-Identifier: Apache-2.0 -from pybuda._C import MathFidelity +from forge._C import MathFidelity import pytest from typing import List, Optional, Union @@ -14,19 +14,19 @@ from diffusers import StableDiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -import pybuda -from pybuda import ( +import forge +from forge import ( PyTorchModule, TTDeviceImage, BackendType, VerifyConfig, BackendDevice, - PyBudaModule, + ForgeModule, ) -from pybuda.config import 
CompileDepth, _get_global_compiler_config -from pybuda.verify import verify_module -from pybuda.verify.config import TestKind +from forge.config import CompileDepth, _get_global_compiler_config +from forge.verify import verify_module +from forge.verify.config import TestKind from test.utils import download_model from PIL import Image @@ -43,13 +43,13 @@ def forward(self, latent_model_input, timestep, text_embeddings): def test_unet(test_device): import os - # os.environ["PYBUDA_RELOAD_GENERATED_MODULES"] = "1" - os.environ["PYBUDA_MAX_GRAPH_CUT_RETRY"] = "2000" - os.environ["PYBUDA_DECOMPOSE_SIGMOID"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 30:32, 40:48, 60:64}" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_LEGACY_UBLOCK_SHAPE"] = "1" + # os.environ["FORGE_RELOAD_GENERATED_MODULES"] = "1" + os.environ["FORGE_MAX_GRAPH_CUT_RETRY"] = "2000" + os.environ["FORGE_DECOMPOSE_SIGMOID"] = "1" + os.environ["FORGE_RIBBON2"] = "1" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 30:32, 40:48, 60:64}" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_LEGACY_UBLOCK_SHAPE"] = "1" pipe = download_model(StableDiffusionPipeline.from_pretrained, "CompVis/stable-diffusion-v1-4") mod = PyTorchModule("sd_unet", UnetWrapper(pipe.unet)) @@ -58,7 +58,7 @@ def test_unet(test_device): compiler_cfg.enable_enumerate_u_kt = False compiler_cfg.graph_solver_self_cut_type = "FastCut" compiler_cfg.retain_tvm_python_files = True - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b compiler_cfg.enable_auto_fusing = False compiler_cfg.default_dram_parameters = True @@ -67,7 +67,7 @@ def test_unet(test_device): input_shape = ((1, 4, 64, 64), (1,), (1, 77, 768)) - # ttdevice = pybuda.TTDevice("tt0", module=mod,arch=BackendDevice.Wormhole_B0, devtype=BackendType.Silicon) + # ttdevice = forge.TTDevice("tt0", module=mod,arch=BackendDevice.Wormhole_B0, devtype=BackendType.Silicon) # TTDeviceImage = ttdevice.compile_to_image( # img_path="device_images/sd_unet_final.tti", # training=False, @@ -111,7 +111,7 @@ def test_unet_CrossAttention(test_device): compiler_cfg.retain_tvm_python_files = True compiler_cfg.enable_auto_fusing = False - compiler_cfg.default_df_override = pybuda.DataFormat.Bfp8_b + compiler_cfg.default_df_override = forge.DataFormat.Bfp8_b compiler_cfg.default_math_fidelity = MathFidelity.LoFi @@ -239,9 +239,9 @@ def forward(self, sample, emb, res1, res2, res3, res4, res5, res6, encoder_hidde def test_unet_up_block(test_device): import os - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 40:48}" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_MAX_GRAPH_CUT_RETRY"] = "100" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 40:48}" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_MAX_GRAPH_CUT_RETRY"] = "100" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{64*1024}" pipe = download_model(StableDiffusionPipeline.from_pretrained, "CompVis/stable-diffusion-v1-4") @@ -251,7 +251,7 @@ def test_unet_up_block(test_device): compiler_cfg.enable_enumerate_u_kt = False compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.graph_solver_self_cut_type = "ConsumerOperandDataEdgesFirst" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b input_shape = 
((1, 1, 1280, 64), (1, 1280), (1, 1, 1280, 64), (1, 1, 1280, 64), (1, 1, 1280, 64), (1, 1, 640, 256),(1, 1, 1280, 256),(1, 1, 1280, 256),(1, 77, 768)) @@ -270,7 +270,7 @@ def test_unet_up_block(test_device): def test_unet_section(test_device): pytest.skip() - class Test(PyBudaModule): + class Test(ForgeModule): def __init__(self): super().__init__("unet_section") self.add_constant("const_760") @@ -283,25 +283,25 @@ def __init__(self): self.set_constant("const_762", torch.ones(1, 32, 10, 4096)) def forward(self, x): - softmax_210 = pybuda.op.Softmax("", x, dim=-1) # (1, 1, 320, 4096) - reshape_211 = pybuda.op.Reshape("", softmax_210, shape=(1, 32, 10, 4096)) - reduce_avg_212 = pybuda.op.ReduceAvg("", reshape_211, dim=-2) # (1, 32, 1, 4096) - reduce_avg_213 = pybuda.op.ReduceAvg("", reduce_avg_212, dim=-1) # (1, 32, 1, 1) - subtract_214 = pybuda.op.Subtract("", reshape_211, reduce_avg_213) # (1, 32, 10, 4096) - subtract_215 = pybuda.op.Subtract("", reshape_211, reduce_avg_213) # (1, 32, 10, 4096) - multiply_216 = pybuda.op.Multiply("", subtract_215, subtract_215) # (1, 32, 10, 4096) - reduce_avg_217 = pybuda.op.ReduceAvg("", multiply_216, dim=-2) # (1, 32, 1, 4096) - reduce_avg_218 = pybuda.op.ReduceAvg("", reduce_avg_217, dim=-1) # (1, 32, 1, 1) - add_220 = pybuda.op.Add("", reduce_avg_218, self.get_constant("const_760")) # (1, 32, 1, 1) - sqrt_221 = pybuda.op.Sqrt("", add_220) # (1, 32, 1, 1) - reciprocal_222 = pybuda.op.Reciprocal("", sqrt_221) # (1, 32, 1, 1) - multiply_223 = pybuda.op.Multiply("", subtract_214, reciprocal_222) # (1, 32, 10, 4096) - multiply_226 = pybuda.op.Multiply("", multiply_223, self.get_constant("const_761")) # (1, 32, 10, 4096) - add_228 = pybuda.op.Add("", multiply_226, self.get_constant("const_762")) # (1, 32, 10, 4096) - sigmoid_229 = pybuda.op.Sigmoid("", add_228) # (1, 32, 10, 4096) - multiply_230 = pybuda.op.Multiply("", add_228, sigmoid_229) # (1, 32, 10, 4096) - reshape_231 = pybuda.op.Reshape("", multiply_230, shape=(1, 1, 320, 4096)) - softmax_232 = pybuda.op.Softmax("", reshape_231, dim=-1) # (1, 1, 320, 4096) + softmax_210 = forge.op.Softmax("", x, dim=-1) # (1, 1, 320, 4096) + reshape_211 = forge.op.Reshape("", softmax_210, shape=(1, 32, 10, 4096)) + reduce_avg_212 = forge.op.ReduceAvg("", reshape_211, dim=-2) # (1, 32, 1, 4096) + reduce_avg_213 = forge.op.ReduceAvg("", reduce_avg_212, dim=-1) # (1, 32, 1, 1) + subtract_214 = forge.op.Subtract("", reshape_211, reduce_avg_213) # (1, 32, 10, 4096) + subtract_215 = forge.op.Subtract("", reshape_211, reduce_avg_213) # (1, 32, 10, 4096) + multiply_216 = forge.op.Multiply("", subtract_215, subtract_215) # (1, 32, 10, 4096) + reduce_avg_217 = forge.op.ReduceAvg("", multiply_216, dim=-2) # (1, 32, 1, 4096) + reduce_avg_218 = forge.op.ReduceAvg("", reduce_avg_217, dim=-1) # (1, 32, 1, 1) + add_220 = forge.op.Add("", reduce_avg_218, self.get_constant("const_760")) # (1, 32, 1, 1) + sqrt_221 = forge.op.Sqrt("", add_220) # (1, 32, 1, 1) + reciprocal_222 = forge.op.Reciprocal("", sqrt_221) # (1, 32, 1, 1) + multiply_223 = forge.op.Multiply("", subtract_214, reciprocal_222) # (1, 32, 10, 4096) + multiply_226 = forge.op.Multiply("", multiply_223, self.get_constant("const_761")) # (1, 32, 10, 4096) + add_228 = forge.op.Add("", multiply_226, self.get_constant("const_762")) # (1, 32, 10, 4096) + sigmoid_229 = forge.op.Sigmoid("", add_228) # (1, 32, 10, 4096) + multiply_230 = forge.op.Multiply("", add_228, sigmoid_229) # (1, 32, 10, 4096) + reshape_231 = forge.op.Reshape("", multiply_230, shape=(1, 1, 320, 4096)) + 
softmax_232 = forge.op.Softmax("", reshape_231, dim=-1) # (1, 1, 320, 4096) return softmax_232 @@ -338,9 +338,9 @@ def forward(self, sample, emb, encoder_hidden_states): def test_unet_down_block(test_device): import os - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 40:48}" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" - os.environ["PYBUDA_MAX_GRAPH_CUT_RETRY"] = "100" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 40:48}" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_MAX_GRAPH_CUT_RETRY"] = "100" os.environ["TT_BACKEND_OVERLAY_MAX_EXTRA_BLOB_SIZE"] = f"{80*1024}" pipe = download_model(StableDiffusionPipeline.from_pretrained, "CompVis/stable-diffusion-v1-4") @@ -352,7 +352,7 @@ def test_unet_down_block(test_device): compiler_cfg.enable_auto_fusing = False compiler_cfg.balancer_policy = "Ribbon" compiler_cfg.graph_solver_self_cut_type = "ConsumerOperandDataEdgesFirst" - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b verify_module( mod, @@ -452,18 +452,18 @@ def forward(self, latent_model_input, timestep, text_embeddings): return noise_pred def initialize_compiler_overrides(): - # os.environ["PYBUDA_RELOAD_GENERATED_MODULES"] = "1" - os.environ["PYBUDA_MAX_GRAPH_CUT_RETRY"] = "2000" + # os.environ["FORGE_RELOAD_GENERATED_MODULES"] = "1" + os.environ["FORGE_MAX_GRAPH_CUT_RETRY"] = "2000" - os.environ["PYBUDA_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 30:32, 40:48, 60:64}" - os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" + os.environ["FORGE_PAD_SPARSE_MM_WEIGHT_MM"] = "{10:12, 20:24, 30:32, 40:48, 60:64}" + os.environ["FORGE_FORCE_CONV_MULTI_OP_FRACTURE"] = "1" compiler_cfg = _get_global_compiler_config() compiler_cfg.enable_tvm_constant_prop = True compiler_cfg.enable_enumerate_u_kt = False compiler_cfg.graph_solver_self_cut_type = "FastCut" compiler_cfg.retain_tvm_python_files = True - compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b + compiler_cfg.default_df_override = forge.DataFormat.Float16_b compiler_cfg.enable_auto_fusing = False compiler_cfg.balancer_policy = "Ribbon" @@ -505,11 +505,11 @@ def denoising_loop( # predict the noise residual ttdevice.push_to_inputs(*[latent_model_input.detach()[0:1], timestep_.detach()[0:1], prompt_embeds.detach()[0:1]]) - output_q = pybuda.run_inference(_sequential=True) + output_q = forge.run_inference(_sequential=True) noise_pred_0 = output_q.get()[0].value().detach() ttdevice.push_to_inputs(*[latent_model_input.detach()[1:2], timestep_.detach()[1:2], prompt_embeds.detach()[1:2]]) - output_q = pybuda.run_inference(_sequential=True) + output_q = forge.run_inference(_sequential=True) noise_pred_1 = output_q.get()[0].value().detach() noise_pred = torch.cat([noise_pred_0, noise_pred_1], dim=0) @@ -584,15 +584,15 @@ def test_stable_diffusion_pipeline(): pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") - # Step 1: Compile/Load PyBuda module + # Step 1: Compile/Load Forge module save_path = "sd_unet_final.tti" if not os.path.exists(save_path): # Need to compile to tti initialize_compiler_overrides() input_shape = ((1, 4, 64, 64), (1,), (1, 77, 768)) - tt_module = pybuda.PyTorchModule("sd_unet_script", UnetWrapper(pipe.unet),) - device = pybuda.TTDevice("tt0", module=tt_module,arch=BackendDevice.Wormhole_B0, devtype=BackendType.Silicon) + tt_module = forge.PyTorchModule("sd_unet_script", UnetWrapper(pipe.unet),) + device = forge.TTDevice("tt0", 
module=tt_module,arch=BackendDevice.Wormhole_B0, devtype=BackendType.Silicon) tti_img = device.compile_to_image( img_path="sd_unet_final.tti", training=False, @@ -601,7 +601,7 @@ def test_stable_diffusion_pipeline(): if os.path.exists(save_path): device_img: TTDeviceImage = TTDeviceImage.load_from_disk(save_path) - ttdevice = pybuda.TTDevice.load_image(img=device_img) + ttdevice = forge.TTDevice.load_image(img=device_img) else: raise RuntimeError("Could not load compiled TTI file") diff --git a/pybuda/test/tvm/utils.py b/forge/test/tvm/utils.py similarity index 70% rename from pybuda/test/tvm/utils.py rename to forge/test/tvm/utils.py index 12779cb76..fbec37f5b 100644 --- a/pybuda/test/tvm/utils.py +++ b/forge/test/tvm/utils.py @@ -7,18 +7,18 @@ import tensorflow as tf from loguru import logger -from pybuda.op.eval.common import compare_tensor_to_golden +from forge.op.eval.common import compare_tensor_to_golden -def evaluate_framework_vs_pybuda( - framework_model, pybuda_model_results, *inputs, rtol=None, atol=None +def evaluate_framework_vs_forge( + framework_model, forge_model_results, *inputs, rtol=None, atol=None ): """ - Evaluates PyTorch model results agains compiled PyBuda model results. + Evaluates PyTorch model results against compiled Forge model results. Args: framework_model: Framework model. - pybuda_model_results (CompileResults): PyBuda model results. + forge_model_results (CompileResults): Forge model results. rtol (float, optional): Relative tolerance. Defaults to 1e-02. atol (float, optional): Absolute tolerance. Defaults to 1e-04. """ @@ -40,6 +40,6 @@ def evaluate_framework_vs_pybuda( pytorch_res = pytorch_res.numpy() pytorch_res = torch.from_numpy(pytorch_res) - assert compare_tensor_to_golden("tvm", golden=pytorch_res, calculated=pybuda_model_results.outputs[0].value(), rtol=rtol, atol=atol) + assert compare_tensor_to_golden("tvm", golden=pytorch_res, calculated=forge_model_results.outputs[0].value(), rtol=rtol, atol=atol) - logger.debug("Tensors match on output of framework and pybuda") + logger.debug("Tensors match on output of framework and forge") diff --git a/pybuda/test/utils.py b/forge/test/utils.py similarity index 100% rename from pybuda/test/utils.py rename to forge/test/utils.py diff --git a/pybuda/test/versim/test_versim_basic_ops.py b/forge/test/versim/test_versim_basic_ops.py similarity index 75% rename from pybuda/test/versim/test_versim_basic_ops.py rename to forge/test/versim/test_versim_basic_ops.py index 30ce4d1a2..f9a2d38f5 100644 --- a/pybuda/test/versim/test_versim_basic_ops.py +++ b/forge/test/versim/test_versim_basic_ops.py @@ -4,10 +4,10 @@ # # Versim-related tests for end-to-end simulation # -from pybuda import pybuda -from pybuda._C.backend_api import BackendDevice, BackendType -from pybuda.tensor import Tensor -from pybuda.verify.config import TestKind, VerifyConfig +from forge import forge +from forge._C.backend_api import BackendDevice, BackendType +from forge.tensor import Tensor +from forge.verify.config import TestKind, VerifyConfig import pytest import torch from test.common import run @@ -24,10 +24,10 @@ def test_versim_simple_add(test_device): arch=test_device.arch), ) def simple_add(a, b): - c = pybuda.op.Add("add0", a, b) + c = forge.op.Add("add0", a, b) return c - compiler_cfg = pybuda.config._get_global_compiler_config() + compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.input_queues_on_host = False compiler_cfg.output_queues_on_host = False compiler_cfg.balancer_op_override("add0", "grid_shape", (1,1)) diff
--git a/pybuda/CMakeLists.txt b/pybuda/CMakeLists.txt deleted file mode 100644 index 8b0df77fd..000000000 --- a/pybuda/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_subdirectory(csrc) -add_subdirectory(pybuda) diff --git a/pybuda/pybuda/_C/autograd.pyi b/pybuda/pybuda/_C/autograd.pyi deleted file mode 100644 index f53083955..000000000 --- a/pybuda/pybuda/_C/autograd.pyi +++ /dev/null @@ -1,24 +0,0 @@ -import pybuda._C.graph -from typing import overload - -class AutogradConfig: - def __init__(self, recompute: bool = ..., optimizer: object = ...) -> None: ... - -class AutogradContext: - def __init__(self, *args, **kwargs) -> None: ... - @overload - def constant(self, arg0: int) -> pybuda._C.graph.NodeContext: ... - @overload - def constant(self, arg0: float) -> pybuda._C.graph.NodeContext: ... - def create_optimizer_op(self, type: str, operands: list[pybuda._C.graph.NodeContext], attributes=...) -> pybuda._C.graph.NodeContext: ... - def get_operands(self, arg0: pybuda._C.graph.NodeContext) -> list[pybuda._C.graph.NodeContext]: ... - def get_pytorch_tensor(self, arg0: pybuda._C.graph.NodeContext) -> object: ... - def get_shape(self, arg0: pybuda._C.graph.NodeContext) -> list[int]: ... - def input(self, *args, **kwargs): ... - def loopback(self, arg0: pybuda._C.graph.NodeContext, arg1: pybuda._C.graph.NodeContext) -> None: ... - def op(self, type: str | object, operands: list[pybuda._C.graph.NodeContext], attributes=...) -> pybuda._C.graph.NodeContext: ... - def tensor(self, arg0: object) -> pybuda._C.graph.NodeContext: ... - -class AutogradEngine: - def __init__(self, arg0: pybuda._C.graph.Graph, arg1: AutogradConfig) -> None: ... - def run(self) -> pybuda._C.graph.Graph: ... diff --git a/pybuda/test/backend/test_random_grids.py b/pybuda/test/backend/test_random_grids.py deleted file mode 100644 index bece62f72..000000000 --- a/pybuda/test/backend/test_random_grids.py +++ /dev/null @@ -1,69 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -import pybuda -import pytest -from pybuda.verify import verify_module, VerifyConfig - -microbatch_size = 8 - -class MatmulSimple(pybuda.PyBudaModule): - shape = (256, 256) - def __init__(self, name: str): - super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) - - def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = pybuda.op.Matmul("matmul2", act2, self.weights2) - return pybuda.op.Add("add", m1, m2) - -class MatmulDramFork(pybuda.PyBudaModule): - shape = (256, 256) - def __init__(self, name: str): - super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) - - def forward(self, act1, act2): - m1 = pybuda.op.Matmul("matmul1", act1, self.weights1) - m2 = pybuda.op.Matmul("matmul2", act1, self.weights2) - add = pybuda.op.Add("add", m1, m2) - return pybuda.op.Add("add_final", add, act2) - -class EltwiseFork(pybuda.PyBudaModule): - shape = (256, 256) - def __init__(self, name: str): - super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) - - def forward(self, act1, act2): - add = pybuda.op.Add("first_add", act1, act2) - m1 = pybuda.op.Matmul("matmul1", add, self.weights1) - m2 = pybuda.op.Matmul("matmul2", add, self.weights2) - return pybuda.op.Add("add", m1, 
m2) - -class DoubleFork(pybuda.PyBudaModule): - shape = (256, 256) - def __init__(self, name: str): - super().__init__(name) - self.weights1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.weights2 = pybuda.Parameter(*self.shape, requires_grad=True) - - def forward(self, act1, act2): - add = pybuda.op.Add("first_add", act1, act2) - weight_add = pybuda.op.Add("weight_add", self.weights1, self.weights2) - m1 = pybuda.op.Matmul("matmul1", add, weight_add) - m2 = pybuda.op.Matmul("matmul2", add, weight_add) - return pybuda.op.Add("add", m1, m2) - -@pytest.mark.parametrize("model", [MatmulSimple, MatmulDramFork, EltwiseFork, DoubleFork]) -def test(test_kind, test_device, model): - pybuda.set_configuration_options(balancer_policy="Random") - - verify_module(model("random_grid"), [(microbatch_size, *model.shape), (microbatch_size, *model.shape)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch)) - - diff --git a/pybuda/test/benchmark/run_benchmark_debug b/pybuda/test/benchmark/run_benchmark_debug deleted file mode 100644 index 17a0d5706..000000000 --- a/pybuda/test/benchmark/run_benchmark_debug +++ /dev/null @@ -1,8 +0,0 @@ -rm perf.json - -# emulate runs on harvested machines -export PYBUDA_FORCE_EMULATE_HARVESTED=1 - -pybuda/test/benchmark/benchmark.py -m simple_add -c default -opt 4 -o perf.json --auto_transpose -pybuda/test/benchmark/benchmark.py -m simple_add -c default1 -opt 4 -o perf.json --auto_transpose -pybuda/test/benchmark/benchmark.py -m simple_add -c default2 -opt 4 -o perf.json --auto_transpose \ No newline at end of file diff --git a/pybuda/test/benchmark/run_benchmark_gs_e150_df_bfp8 b/pybuda/test/benchmark/run_benchmark_gs_e150_df_bfp8 deleted file mode 100644 index 27b22ec72..000000000 --- a/pybuda/test/benchmark/run_benchmark_gs_e150_df_bfp8 +++ /dev/null @@ -1,49 +0,0 @@ -# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # -# Data Format Bfp8_b -# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # - -# ------------------------------------------------------- # -# Grayskull e150, unharvested chip, grid size: 10x12 -# ------------------------------------------------------- # - -# Resnet -pybuda/test/benchmark/benchmark.py -m resnet -c resnet50 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Mobilenet v1 -pybuda/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mf HiFi2 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Mobilenet v2 -pybuda/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Mobilenet v3 -pybuda/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Vovnet v2 -pybuda/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Openpose Body -pybuda/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Openpose Hand -pybuda/test/benchmark/benchmark.py -m openpose_hand -c basic -o pybuda-silicon-gs-e150-perf-all-perf.json - -# YOLOv3 -pybuda/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# YOLOv5 -pybuda/test/benchmark/benchmark.py -m yolo_v5 -c s -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Inception v4 -pybuda/test/benchmark/benchmark.py -m 
inception_v4 -c 224 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Unet -pybuda/test/benchmark/benchmark.py -m unet -c 256 -mb 64 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Whisper -pybuda/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 --single-thread --generative -o pybuda-silicon-gs-e150-perf-all-perf.json - -# T5 -pybuda/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 --single-thread --generative -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Flan-T5 -pybuda/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -mf HiFi2 --single-thread --generative -o pybuda-silicon-gs-e150-perf-all-perf.json \ No newline at end of file diff --git a/pybuda/test/benchmark/run_benchmark_gs_e150_release b/pybuda/test/benchmark/run_benchmark_gs_e150_release deleted file mode 100644 index e94f4f55a..000000000 --- a/pybuda/test/benchmark/run_benchmark_gs_e150_release +++ /dev/null @@ -1,61 +0,0 @@ -# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # -# Models with data Formats that have good accuracy on Grayskull and that we release as official numbers -# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # - -# ------------------------------------------------------- # -# Grayskull e150, unharvested chip, grid size: 10x12 -# ------------------------------------------------------- # - -# ResNet fp16_b -pybuda/test/benchmark/benchmark.py -m resnet -c resnet50 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Mobilenet v1 fp16_b -pybuda/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Mobilenet v2 fp16_b -pybuda/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Mobilenet v3 fp16_b -pybuda/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Vit bfp8_b -pybuda/test/benchmark/benchmark.py -m vit -c base -mb 64 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Deit bfp8_b -pybuda/test/benchmark/benchmark.py -m deit -c base -mb 64 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# VoVNet v2 fp16_b -pybuda/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# OpenPose Body fp16 -pybuda/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -mb 64 -df Fp16 -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# OpenPose Hand fp16_b -pybuda/test/benchmark/benchmark.py -m openpose_hand -c basic -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# HRNet bfp8_b -pybuda/test/benchmark/benchmark.py -m hrnet -c v2_w64 -mb 64 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# YOLOv3 fp16_b -pybuda/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# YOLOv5 fp16_b -pybuda/test/benchmark/benchmark.py -m yolo_v5 -c s -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Inception v4 fp16_b -pybuda/test/benchmark/benchmark.py -m inception_v4 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# UNet fp16_b -pybuda/test/benchmark/benchmark.py -m unet -c 256 -mb 64 
-df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Bert large bfp8_b -pybuda/test/benchmark/benchmark.py -m bert -c large_tc -mb 64 -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Whisper fp16_b -pybuda/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o pybuda-silicon-gs-e150-perf-all-perf.json - -# T5 fp16_b -pybuda/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o pybuda-silicon-gs-e150-perf-all-perf.json - -# Flan-T5 fp16_b -pybuda/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o pybuda-silicon-gs-e150-perf-all-perf.json diff --git a/pybuda/test/benchmark/run_benchmark_gs_e75_df_bfp8 b/pybuda/test/benchmark/run_benchmark_gs_e75_df_bfp8 deleted file mode 100644 index 2cc788587..000000000 --- a/pybuda/test/benchmark/run_benchmark_gs_e75_df_bfp8 +++ /dev/null @@ -1,51 +0,0 @@ -# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # -# Data Format Bfp8_b -# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # - -# ------------------------------------------------------- # -# Grayskull e75, two-row harvested chip, grid size: 8x12 -# ------------------------------------------------------- # - -export PYBUDA_FORCE_EMULATE_HARVESTED=2 - -# Resnet -pybuda/test/benchmark/benchmark.py -m resnet -c resnet50 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Mobilenet v1 -pybuda/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mf HiFi2 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Mobilenet v2 -pybuda/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Mobilenet v3 -pybuda/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Vovnet v2 -pybuda/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Openpose Body -pybuda/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Openpose Hand -pybuda/test/benchmark/benchmark.py -m openpose_hand -c basic -o pybuda-silicon-gs-e75-perf-all-perf.json - -# YOLOv3 -pybuda/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# YOLOv5 -pybuda/test/benchmark/benchmark.py -m yolo_v5 -c s -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Inception v4 -pybuda/test/benchmark/benchmark.py -m inception_v4 -c 224 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Unet -pybuda/test/benchmark/benchmark.py -m unet -c 256 -mb 64 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Whisper -pybuda/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 --single-thread --generative -o pybuda-silicon-gs-e75-perf-all-perf.json - -# T5 -pybuda/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 --single-thread --generative -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Flan-T5 -pybuda/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -mf HiFi2 --single-thread --generative -o pybuda-silicon-gs-e75-perf-all-perf.json \ No newline at end of file diff --git a/pybuda/test/benchmark/run_benchmark_gs_e75_release b/pybuda/test/benchmark/run_benchmark_gs_e75_release deleted 
file mode 100644 index 264a62091..000000000 --- a/pybuda/test/benchmark/run_benchmark_gs_e75_release +++ /dev/null @@ -1,63 +0,0 @@ -# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # -# Models with data Formats that have good accuracy on Grayskull and that we release as official numbers -# ------------------------------------------------------------------------------------------------------------------------------------------------------------ # - -# ------------------------------------------------------- # -# Grayskull e75, two-row harvested chip, grid size: 8x12 -# ------------------------------------------------------- # - -export PYBUDA_FORCE_EMULATE_HARVESTED=2 - -# ResNet fp16_b -pybuda/test/benchmark/benchmark.py -m resnet -c resnet50 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Mobilenet v1 fp16_b -pybuda/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Mobilenet v2 fp16_b -pybuda/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Mobilenet v3 fp16_b -pybuda/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Vit bfp8_b -pybuda/test/benchmark/benchmark.py -m vit -c base -mb 64 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Deit bfp8_b -pybuda/test/benchmark/benchmark.py -m deit -c base -mb 64 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# VoVNet v2 fp16_b -pybuda/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# OpenPose Body fp16 -pybuda/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -mb 64 -df Fp16 -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# OpenPose Hand fp16_b -pybuda/test/benchmark/benchmark.py -m openpose_hand -c basic -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# HRNet bfp8_b -pybuda/test/benchmark/benchmark.py -m hrnet -c v2_w64 -mb 64 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# YOLOv3 fp16_b -pybuda/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# YOLOv5 fp16_b -pybuda/test/benchmark/benchmark.py -m yolo_v5 -c s -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Inception v4 fp16_b -pybuda/test/benchmark/benchmark.py -m inception_v4 -c 224 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# UNet fp16_b -pybuda/test/benchmark/benchmark.py -m unet -c 256 -mb 64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Bert large bfp8_b -pybuda/test/benchmark/benchmark.py -m bert -c large_tc -mb 64 -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Whisper fp16_b -pybuda/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o pybuda-silicon-gs-e75-perf-all-perf.json - -# T5 fp16_b -pybuda/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o pybuda-silicon-gs-e75-perf-all-perf.json - -# Flan-T5 fp16_b -pybuda/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o pybuda-silicon-gs-e75-perf-all-perf.json diff --git 
a/pybuda/test/benchmark/run_benchmark_tti b/pybuda/test/benchmark/run_benchmark_tti
deleted file mode 100644
index 247ba4cc8..000000000
--- a/pybuda/test/benchmark/run_benchmark_tti
+++ /dev/null
@@ -1,11 +0,0 @@
-# emulate runs on harvested machines
-export PYBUDA_FORCE_EMULATE_HARVESTED=1
-unset PYBUDA_CI_DIR
-
-# TTI Save
-pybuda/test/benchmark/benchmark.py -m bert -c tiny -opt 4 -o perf.json --env "PYBUDA_EXP_APPROX=1 PYBUDA_DISABLE_DYNAMIC_DRAM=1 PYBUDA_FORCE_INTERMED_TO_OUTPUT_DF=1" --auto_transpose --save_tti device_images/bert_tiny.tti
-pybuda/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -opt 4 --loop_count 32 -mb 64 -bp Ribbon -df Fp16_b -mf HiFi2 --env "PYBUDA_RIBBON2=1 PYBUDA_LEGACY_UBLOCK_SHAPE=1 PYBUDA_MAXIMIZE_SPARSE_UBLOCK=1 PYBUDA_ENABLE_L1_ACCUMULATE=1 PYBUDA_EXTRA_L1_MARGIN=65536 PYBUDA_FUSED_OP_MULTIPLIER=20 PYBUDA_ENABLE_DEPTHWISE=1" -o perf.json --auto_transpose --save_tti device_images/mobilenet_v1.tti
-
-# TTI Load
-pybuda/test/benchmark/benchmark.py -m bert -c tiny -opt 4 -o perf.json --env "PYBUDA_EXP_APPROX=1 PYBUDA_DISABLE_DYNAMIC_DRAM=1 PYBUDA_FORCE_INTERMED_TO_OUTPUT_DF=1" --auto_transpose --load_tti device_images/bert_tiny.tti
-pybuda/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -opt 4 --loop_count 32 -mb 64 -bp Ribbon -df Fp16_b -mf HiFi2 --env "PYBUDA_RIBBON2=1 PYBUDA_LEGACY_UBLOCK_SHAPE=1 PYBUDA_MAXIMIZE_SPARSE_UBLOCK=1 PYBUDA_ENABLE_L1_ACCUMULATE=1 PYBUDA_EXTRA_L1_MARGIN=65536 PYBUDA_FUSED_OP_MULTIPLIER=20 PYBUDA_ENABLE_DEPTHWISE=1" -o perf.json --auto_transpose --load_tti device_images/mobilenet_v1.tti
diff --git a/pybuda/test/benchmark/run_benchmark_wh_df_bfp8 b/pybuda/test/benchmark/run_benchmark_wh_df_bfp8
deleted file mode 100644
index 9a731bf0f..000000000
--- a/pybuda/test/benchmark/run_benchmark_wh_df_bfp8
+++ /dev/null
@@ -1,25 +0,0 @@
-# ------------------------------------------------------------------------------------------------------------------------------------------------------------ #
-# Data Format Bfp8_b
-# ------------------------------------------------------------------------------------------------------------------------------------------------------------ #
-
-# Default data format (-df) is Bfp8_b, default math fidelity (-mf) is LoFi
-
-# Mobilenet v3
-pybuda/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -o pybuda-silicon-wh-b0-perf-all-bfp8_b-perf.json
-
-# OpenPose Body
-pybuda/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -o pybuda-silicon-wh-b0-perf-all-bfp8_b-perf.json
-
-# YOLOv5
-pybuda/test/benchmark/benchmark.py -m yolo_v5 -c s -o pybuda-silicon-wh-b0-perf-all-bfp8_b-perf.json
-
-# Whisper
-pybuda/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 --single-thread --generative -o pybuda-silicon-wh-b0-perf-all-bfp8_b-perf.json
-
-# T5
-# Low accuracy.
-pybuda/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 --single-thread --generative -o pybuda-silicon-wh-b0-perf-all-bfp8_b-perf.json
-
-# Flan-T5
-# Low accuracy.
-pybuda/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -mf HiFi2 --single-thread --generative -o pybuda-silicon-wh-b0-perf-all-bfp8_b-perf.json
diff --git a/pybuda/test/benchmark/run_benchmark_wh_df_fp16 b/pybuda/test/benchmark/run_benchmark_wh_df_fp16
deleted file mode 100644
index 385a366ce..000000000
--- a/pybuda/test/benchmark/run_benchmark_wh_df_fp16
+++ /dev/null
@@ -1,41 +0,0 @@
-# ------------------------------------------------------------------------------------------------------------------------------------------------------------ #
-# Data Format Fp16, Fp16_b
-# ------------------------------------------------------------------------------------------------------------------------------------------------------------ #
-
-# ResNet
-pybuda/test/benchmark/benchmark.py -m resnet -c resnet50 -df Fp16_b -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# Mobilenet v1
-pybuda/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -df Fp16_b -mf HiFi2 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# Mobilenet v2
-pybuda/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -df Fp16_b -mf HiFi2 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# Vit
-pybuda/test/benchmark/benchmark.py -m vit -c base -df Fp16_b -mf HiFi2 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# Deit
-pybuda/test/benchmark/benchmark.py -m deit -c base -df Fp16_b -mf HiFi2 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# VoVNet v2
-pybuda/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -df Fp16_b -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# OpenPose Hand
-pybuda/test/benchmark/benchmark.py -m openpose_hand -c basic -df Fp16_b -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# HRNet
-pybuda/test/benchmark/benchmark.py -m hrnet -c v2_w64 -df Fp16_b -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# YOLOv3
-# Issue to make it run with mb 64 tenstorrent/pybuda#1298
-# Issue to remove PYBUDA_OVERRIDE_INPUT_QUEUE_ENTRIES=32 tenstorrent/pybuda#1299
-pybuda/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -df Fp16_b -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# Inception v4
-pybuda/test/benchmark/benchmark.py -m inception_v4 -c 224 -df Fp16_b -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# UNet
-pybuda/test/benchmark/benchmark.py -m unet -c 256 -df Fp16_b -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
-
-# Bert large
-pybuda/test/benchmark/benchmark.py -m bert -c large_tc -df Fp16_b -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-fp16-perf.json
diff --git a/pybuda/test/benchmark/run_benchmark_wh_release b/pybuda/test/benchmark/run_benchmark_wh_release
deleted file mode 100644
index 13c1cd319..000000000
--- a/pybuda/test/benchmark/run_benchmark_wh_release
+++ /dev/null
@@ -1,62 +0,0 @@
-# ------------------------------------------------------------------------------------------------------------------------------------------------------------ #
-# Models with data Formats that have good accuracy on Wormhole B0 and that we release as official numbers
-# ------------------------------------------------------------------------------------------------------------------------------------------------------------ #
-
-
-# ResNet bfp8_b
-pybuda/test/benchmark/benchmark.py -m resnet -c resnet50 -mb 256 -o pybuda-silicon-wh-b0-perf-all-perf.json
-
-#ResNet quant fp32
-pybuda/test/benchmark/benchmark.py -m resnet_quant -c resnet50 -df Fp32 -mf HiFi4 -mb 64 -o pybuda-silicon-wh-b0-perf-all-perf.json
-
-# 
Mobilenet v1 bfp8_b -pybuda/test/benchmark/benchmark.py -m mobilenet_v1 -c 224 -mb 256 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# Mobilenet v2 bfp8_b -pybuda/test/benchmark/benchmark.py -m mobilenet_v2 -c 224 -mb 256 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# Mobilenet v3 fp16_b -pybuda/test/benchmark/benchmark.py -m mobilenet_v3_timm -c large -df Fp16_b -mf HiFi2 -mb 64 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# Vit bfp8_b -pybuda/test/benchmark/benchmark.py -m vit -c base -mb 256 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# Deit bfp8_b -pybuda/test/benchmark/benchmark.py -m deit -c base -mb 256 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# VoVNet v2 bfp8_b -pybuda/test/benchmark/benchmark.py -m vovnet_v2 -c 39 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# OpenPose Body fp16 -pybuda/test/benchmark/benchmark.py -m openpose_osmr_body -c 2d -df Fp16 -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# OpenPose Hand bfp8_b -pybuda/test/benchmark/benchmark.py -m openpose_hand -c basic -o pybuda-silicon-wh-b0-perf-all-perf.json - -# HRNet bfp8_b -pybuda/test/benchmark/benchmark.py -m hrnet -c v2_w64 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# YOLOv3 bfp8_b -# Issue to make it run with mb 64 tenstorrent/pybuda#1298 -pybuda/test/benchmark/benchmark.py -m yolo_v3 -c default -mb 32 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# YOLOv5 fp16_b -pybuda/test/benchmark/benchmark.py -m yolo_v5 -c s -df Fp16_b -mf HiFi3 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# Inception v4 bfp8_b -pybuda/test/benchmark/benchmark.py -m inception_v4 -c 224 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# UNet bfp8_b -pybuda/test/benchmark/benchmark.py -m unet -c 256 -mb 64 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# Bert large bfp8_b -pybuda/test/benchmark/benchmark.py -m bert -c large_tc -mb 64 -o pybuda-silicon-wh-b0-perf-all-perf.json - -# Whisper fp16_b -pybuda/test/benchmark/benchmark.py -m whisper -c small --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o pybuda-silicon-wh-b0-perf-all-perf.json - -# T5 fp16_b -pybuda/test/benchmark/benchmark.py -m t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o pybuda-silicon-wh-b0-perf-all-perf.json - -# Flan-T5 fp16_b -pybuda/test/benchmark/benchmark.py -m flan_t5 -c large --loop_count 1 -mb 1 -df Fp16_b -mf HiFi3 --single-thread --generative -o pybuda-silicon-wh-b0-perf-all-perf.json diff --git a/pybuda/test/operators/eltwise_unary/test_command.sh b/pybuda/test/operators/eltwise_unary/test_command.sh deleted file mode 100644 index 8b22080c2..000000000 --- a/pybuda/test/operators/eltwise_unary/test_command.sh +++ /dev/null @@ -1,37 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Commands for running element-wise unary tests -# - -# Run single test -# -# To run using default parameters -# model, --un_model --> model_1, Note: for binary ops we have 10 models, model_[1-10] -# training, --un_train --> True -# recompute, --un_recompute --> True -# shape, --un_shape --> [1, 16, 32, 64] -# operation, --un_op --> Exp -pytest -svv test_eltwise_unary_single.py - -# Few examples with passed arguments -pytest -svv test_eltwise_unary_single.py --un_model model_3 --un_train True --un_recompute True --un_shape '[1, 32, 96, 128]' --un_op 'Log' -pytest -svv test_eltwise_unary_single.py --un_model model_1 --un_train False --un_recompute True --un_shape '[1, 32, 256, 128]' -pytest -svv test_eltwise_unary_single.py --un_model model_2 
--un_train True --un_recompute False -pytest -svv test_eltwise_unary_single.py --un_model model_5 --un_train False --un_op 'Gelu' -pytest -svv test_eltwise_unary_single.py --un_model model_4 --un_shape '[1, 32, 256, 2048]' - -pytest -svv pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_3 --un_train False --un_recompute False --un_shape '[1, 32, 96, 128]' --un_op 'Clip' --un_kwargs_json='{"min": 0.234, "max": 0.982}' -pytest -svv pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_3 --un_train False --un_recompute False --un_shape '[1, 32, 96, 128]' --un_op 'LogicalNot' -pytest -svv pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_3 --un_train False --un_recompute False --un_shape '[1, 32, 96, 128]' --un_op 'CumSum' --un_kwargs_json='{"axis": 2}' -pytest -svv pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_5 --un_train False --un_recompute False --un_shape '[19, 20, 16]' --un_op 'Pow' --un_kwargs_json='{"exponent": 0.54881352186203}' -pytest -svv pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_5 --un_train False --un_recompute False --un_shape '[1, 1, 24, 9]' --un_op 'Pow' --un_kwargs_json='{"exponent": 0.5488135039273248}' -pytest -svv pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_5 --un_train False --un_recompute False --un_shape '[1, 1, 24, 9]' --un_op 'Tilize' - -# Issues -pytest -svv test_eltwise_unary_single.py --un_model model_4 --un_train True --un_recompute False --un_op 'Exp' --un_shape '[21, 127, 102, 19]' - - -# pytest -svv pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_6 --un_train True --un_recompute False --un_op 'Relu' --un_shape '[1, 12, 13]' -# pytest -svv pybuda/test/operators/eltwise_unary/test_eltwise_unary_single.py --un_model model_7 --un_train True --un_recompute True --un_op 'Exp' --un_shape '[1, 12, 13]' \ No newline at end of file diff --git a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_3.py b/pybuda/test/operators/eltwise_unary_attr/clip/models/model_3.py deleted file mode 100644 index 189f60a4b..000000000 --- a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_3.py +++ /dev/null @@ -1,104 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 3 -# Clip operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch -from torch.distributions import Normal - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaClipTest(PyBudaModule): - """ - Buda Test 3 - - """ - - INPUTS_RANGE_MIN = -1.0 - INPUTS_RANGE_MAX = 1.0 - INPUTS_DISTRIBUTION = Normal - - WEIGHTS_RANGE_MIN = -1.0 - WEIGHTS_RANGE_MAX = 1.0 - WEIGHTS_DISTRIBUTION = Normal - - def __init__( - self, - shape, - min_value, - max_value - ): - super().__init__("Buda Test 3") - - self.testname = "Operator Clip, Test 3" - self.shape = shape - self.min_value = min_value - self.max_value = max_value - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [] - for i in range(2): - input = BudaClipTest.INPUTS_DISTRIBUTION( - BudaClipTest.INPUTS_RANGE_MIN, - BudaClipTest.INPUTS_RANGE_MAX).sample(self.shape) - 
self.inputs.append(Tensor.create_from_torch(input)) - - for i in range(1, 3): - weights = BudaClipTest.WEIGHTS_DISTRIBUTION( - BudaClipTest.WEIGHTS_RANGE_MIN, - BudaClipTest.WEIGHTS_RANGE_MAX).sample(self.shape) - weights.requires_grad = True - self.set_parameter("train_param" + str(i), weights) - - - def forward(self, x1, x2): - - # Layer 2 - clip1 = pybuda.op.Clip("clip1", x1, min=self.min_value, max=self.max_value) - clip2 = pybuda.op.Clip("clip2", self.train_param1, min=self.min_value, max=self.max_value) - clip3 = pybuda.op.Clip("clip3", x2, min=self.min_value, max=self.max_value) - clip4 = pybuda.op.Clip("clip4", self.train_param2, min=self.min_value, max=self.max_value) - - # Layer 3 - mul1 = pybuda.op.Multiply("mul1", clip1, clip2) - mul2 = pybuda.op.Multiply("mul2", clip2, clip3) - mul3 = pybuda.op.Multiply("mul3", clip3, clip4) - - # Layer 4 - clip5 = pybuda.op.Clip("clip5", mul1, min=self.min_value, max=self.max_value) - clip6 = pybuda.op.Clip("clip6", mul2, min=self.min_value, max=self.max_value) - clip7 = pybuda.op.Clip("clip7", mul3, min=self.min_value, max=self.max_value) - - # Layer 5 - mul4 = pybuda.op.Multiply("mul4", clip5, self.train_param1) - mul5 = pybuda.op.Multiply("mul5", clip6, x2) - mul6 = pybuda.op.Multiply("mul6", clip7, clip4) - - # Layer 6 - clip8 = pybuda.op.Clip("clip8", mul4, min=self.min_value, max=self.max_value) - clip9 = pybuda.op.Clip("clip9", mul5, min=self.min_value, max=self.max_value) - clip10 = pybuda.op.Clip("clip10", mul6, min=self.min_value, max=self.max_value) - - # Layer 7 - add1 = pybuda.op.Add("add1", clip8, mul2) - add2 = pybuda.op.Add("add2", clip4, clip10) - mul7 = pybuda.op.Multiply("mul7", clip9, mul3) - - # Layer 8 - clip11 = pybuda.op.Clip("clip11", add1, min=self.min_value, max=self.max_value) - clip12 = pybuda.op.Clip("clip12", mul7, min=self.min_value, max=self.max_value) - clip13 = pybuda.op.Clip("clip13", add2, min=self.min_value, max=self.max_value) - - return clip11, clip12, clip13 diff --git a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_4.py b/pybuda/test/operators/eltwise_unary_attr/clip/models/model_4.py deleted file mode 100644 index 06ef447ae..000000000 --- a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_4.py +++ /dev/null @@ -1,132 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 4 -# Clip operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch -from torch.distributions import Normal - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaClipTest(PyBudaModule): - """ - Buda Test 4 - - """ - - INPUTS_RANGE_MIN = -1.0 - INPUTS_RANGE_MAX = 1.0 - INPUTS_DISTRIBUTION = Normal - - WEIGHTS_RANGE_MIN = -1.0 - WEIGHTS_RANGE_MAX = 1.0 - WEIGHTS_DISTRIBUTION = Normal - - def __init__( - self, - shape, - min_value, - max_value - ): - super().__init__("Buda Test 4") - - self.testname = "Operator Clip, Test 4" - self.shape = shape - self.min_value = min_value - self.max_value = max_value - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [] - for i in range(3): - input = BudaClipTest.INPUTS_DISTRIBUTION( - BudaClipTest.INPUTS_RANGE_MIN, - BudaClipTest.INPUTS_RANGE_MAX).sample(self.shape) - 
self.inputs.append(Tensor.create_from_torch(input)) - - for i in range(1, 4): - weights = BudaClipTest.WEIGHTS_DISTRIBUTION( - BudaClipTest.WEIGHTS_RANGE_MIN, - BudaClipTest.WEIGHTS_RANGE_MAX).sample(self.shape) - weights.requires_grad = True - self.set_parameter("train_param" + str(i), weights) - - - - def forward(self, x1, x2, x3): - - # Layer 2 - add1 = pybuda.op.Add("add1", x1, self.train_param1) - add2 = pybuda.op.Add("add2", x1, x2) - add3 = pybuda.op.Add("add3", x2, self.train_param3) - add4 = pybuda.op.Add("add4", x3, self.train_param2) - - # Layer 3 - clip1 = pybuda.op.Clip("clip1", add1, min=self.min_value, max=self.max_value) - clip2 = pybuda.op.Clip("clip2", add2, min=self.min_value, max=self.max_value) - clip3 = pybuda.op.Clip("clip3", add3, min=self.min_value, max=self.max_value) - clip4 = pybuda.op.Clip("clip4", add4, min=self.min_value, max=self.max_value) - - # Layer 4 - clip5 = pybuda.op.Clip("clip5", self.train_param1, min=self.min_value, max=self.max_value) - clip6 = pybuda.op.Clip("clip6", self.train_param2, min=self.min_value, max=self.max_value) - clip7 = pybuda.op.Clip("clip7", self.train_param3, min=self.min_value, max=self.max_value) - - # Layer 5 - mul1 = pybuda.op.Multiply("mul1", clip1, clip5) - mul2 = pybuda.op.Multiply("mul2", clip2, clip3) - mul3 = pybuda.op.Multiply("mul3", clip5, clip4) - mul4 = pybuda.op.Multiply("mul4", clip6, clip7) - - # Layer 6 - clip8 = pybuda.op.Clip("clip8", mul1, min=self.min_value, max=self.max_value) - clip9 = pybuda.op.Clip("clip9", mul2, min=self.min_value, max=self.max_value) - clip10 = pybuda.op.Clip("clip10", mul3, min=self.min_value, max=self.max_value) - clip11 = pybuda.op.Clip("clip11", mul4, min=self.min_value, max=self.max_value) - - # Layer 7 - add5 = pybuda.op.Add("add5", clip8, clip5) - add6 = pybuda.op.Add("add6", clip9, clip6) - add7 = pybuda.op.Add("add7", clip10, clip7) - add8 = pybuda.op.Add("add8", clip4, clip11) - - # Layer 8 - clip12 = pybuda.op.Clip("clip12", add5, min=self.min_value, max=self.max_value) - clip13 = pybuda.op.Clip("clip13", add6, min=self.min_value, max=self.max_value) - clip14 = pybuda.op.Clip("clip14", add7, min=self.min_value, max=self.max_value) - clip15 = pybuda.op.Clip("clip15", add8, min=self.min_value, max=self.max_value) - - # Layer 9 - mul5 = pybuda.op.Multiply("mul5", clip1, clip12) - mul6 = pybuda.op.Multiply("mul6", mul2, clip13) - mul7 = pybuda.op.Multiply("mul7", clip6, clip14) - mul8 = pybuda.op.Multiply("mul8", clip15, clip7) - - # Layer 10 - clip16 = pybuda.op.Clip("clip16", mul5, min=self.min_value, max=self.max_value) - clip17 = pybuda.op.Clip("clip17", mul6, min=self.min_value, max=self.max_value) - clip18 = pybuda.op.Clip("clip18", mul7, min=self.min_value, max=self.max_value) - clip19 = pybuda.op.Clip("clip19", mul8, min=self.min_value, max=self.max_value) - - # Layer 11 - mul9 = pybuda.op.Multiply("mul9", clip16, clip17) - mul10 = pybuda.op.Multiply("mul10", clip17, clip18) - mul11 = pybuda.op.Multiply("mul11", clip18, clip19) - - # Layer 12 - mul12 = pybuda.op.Multiply("mul12", mul9, clip9) - mul13 = pybuda.op.Multiply("mul13", mul10, mul11) - - return mul12, mul13 diff --git a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_5.py b/pybuda/test/operators/eltwise_unary_attr/clip/models/model_5.py deleted file mode 100644 index c1e5f3a67..000000000 --- a/pybuda/test/operators/eltwise_unary_attr/clip/models/model_5.py +++ /dev/null @@ -1,145 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 5 -# 
Clip operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch -from torch.distributions import Normal - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaClipTest(PyBudaModule): - """ - Buda Test 5 - - """ - - INPUTS_RANGE_MIN = -1.0 - INPUTS_RANGE_MAX = 1.0 - INPUTS_DISTRIBUTION = Normal - - WEIGHTS_RANGE_MIN = -1.0 - WEIGHTS_RANGE_MAX = 1.0 - WEIGHTS_DISTRIBUTION = Normal - - def __init__( - self, - shape, - min_value, - max_value - ): - super().__init__("Buda Test 5") - - self.testname = "Operator Clip, Test 5" - self.shape = shape - self.min_value = min_value - self.max_value = max_value - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [] - for i in range(3): - input = BudaClipTest.INPUTS_DISTRIBUTION( - BudaClipTest.INPUTS_RANGE_MIN, - BudaClipTest.INPUTS_RANGE_MAX).sample(self.shape) - self.inputs.append(Tensor.create_from_torch(input)) - - for i in range(1, 4): - weights = BudaClipTest.WEIGHTS_DISTRIBUTION( - BudaClipTest.WEIGHTS_RANGE_MIN, - BudaClipTest.WEIGHTS_RANGE_MAX).sample(self.shape) - weights.requires_grad = True - self.set_parameter("train_param" + str(i), weights) - - - - def forward(self, x1, x2, x3): - - # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) - - # Layer 3 - mul4 = pybuda.op.Multiply("mul4", x2, self.train_param1) - mul5 = pybuda.op.Multiply("mul5", x3, self.train_param2) - clip1 = pybuda.op.Clip("clip1", mul1, min=self.min_value, max=self.max_value) - clip2 = pybuda.op.Clip("clip2", mul2, min=self.min_value, max=self.max_value) - clip3 = pybuda.op.Clip("clip3", mul3, min=self.min_value, max=self.max_value) - - # Layer 4 - clip4 = pybuda.op.Clip("clip4", mul4, min=self.min_value, max=self.max_value) - clip5 = pybuda.op.Clip("clip5", mul5, min=self.min_value, max=self.max_value) - - # Layer 5 - add1 = pybuda.op.Add("add1", clip1, self.train_param1) - add2 = pybuda.op.Add("add2", clip4, x2) - add3 = pybuda.op.Add("add3", clip2, self.train_param2) - add4 = pybuda.op.Add("add4", clip5, x3) - add5 = pybuda.op.Add("add5", clip3, self.train_param3) - - # Layer 6 - clip6 = pybuda.op.Clip("clip6", add1, min=self.min_value, max=self.max_value) - clip7 = pybuda.op.Clip("clip7", add2, min=self.min_value, max=self.max_value) - clip8 = pybuda.op.Clip("clip8", add3, min=self.min_value, max=self.max_value) - clip9 = pybuda.op.Clip("clip9", add4, min=self.min_value, max=self.max_value) - clip10 = pybuda.op.Clip("clip10", add5, min=self.min_value, max=self.max_value) - - # Layer 7 - mul6 = pybuda.op.Multiply("mul6", clip6, clip4) - mul7 = pybuda.op.Multiply("mul7", mul1, clip7) - mul8 = pybuda.op.Multiply("mul8", mul2, clip8) - mul9 = pybuda.op.Multiply("mul9", clip3, clip9) - mul10 = pybuda.op.Multiply("mul10", add3, clip10) - - # Layer 8 - clip11 = pybuda.op.Clip("clip11", mul6, min=self.min_value, max=self.max_value) - clip12 = pybuda.op.Clip("clip12", mul7, min=self.min_value, max=self.max_value) - clip13 = pybuda.op.Clip("clip13", mul8, min=self.min_value, max=self.max_value) - clip14 = pybuda.op.Clip("clip14", mul9, min=self.min_value, max=self.max_value) - clip15 = pybuda.op.Clip("clip15", 
mul10, min=self.min_value, max=self.max_value) - - # Layer 9 - mul11 = pybuda.op.Multiply("mul11", clip11, clip8) - mul12 = pybuda.op.Multiply("mul12", clip12, clip5) - mul13 = pybuda.op.Multiply("mul13", clip13, clip7) - mul14 = pybuda.op.Multiply("mul14", clip14, add5) - mul15 = pybuda.op.Multiply("mul15", clip13, mul5) - - # Layer 10 - clip16 = pybuda.op.Clip("clip16", mul11, min=self.min_value, max=self.max_value) - clip17 = pybuda.op.Clip("clip17", mul12, min=self.min_value, max=self.max_value) - clip18 = pybuda.op.Clip("clip18", mul13, min=self.min_value, max=self.max_value) - clip19 = pybuda.op.Clip("clip19", mul14, min=self.min_value, max=self.max_value) - clip20 = pybuda.op.Clip("clip20", mul15, min=self.min_value, max=self.max_value) - - # Layer 11 - mul16 = pybuda.op.Multiply("mul16", clip16, clip12) - mul17 = pybuda.op.Multiply("mul17", clip17, clip13) - mul18 = pybuda.op.Multiply("mul18", clip18, clip19) - mul19 = pybuda.op.Multiply("mul19", clip13, clip20) - - # Layer 12 - clip21 = pybuda.op.Clip("clip21", mul16, min=self.min_value, max=self.max_value) - clip22 = pybuda.op.Clip("clip22", mul17, min=self.min_value, max=self.max_value) - clip23 = pybuda.op.Clip("clip23", mul18, min=self.min_value, max=self.max_value) - clip24 = pybuda.op.Clip("clip24", mul19, min=self.min_value, max=self.max_value) - - # Layer 13 - mul20 = pybuda.op.Multiply("mul20", clip21, mul12) - mul21 = pybuda.op.Multiply("mul21", clip22, clip18) - mul22 = pybuda.op.Multiply("mul22", clip23, clip24) - - return mul20, mul21, mul22 diff --git a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_3.py b/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_3.py deleted file mode 100644 index c1891d29b..000000000 --- a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_3.py +++ /dev/null @@ -1,101 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 3 -# LeakyRelu operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch -from torch.distributions import Normal - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaLeakyReluTest(PyBudaModule): - """ - Buda Test 3 - - """ - - INPUTS_RANGE_MIN = -1.0 - INPUTS_RANGE_MAX = 1.0 - INPUTS_DISTRIBUTION = Normal - - WEIGHTS_RANGE_MIN = -1.0 - WEIGHTS_RANGE_MAX = 1.0 - WEIGHTS_DISTRIBUTION = Normal - - def __init__( - self, - shape, - alpha - ): - super().__init__("Buda Test 3") - - self.testname = "Operator LeakyRelu, Test 3" - self.shape = shape - self.alpha = alpha - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [] - for i in range(2): - input = BudaLeakyReluTest.INPUTS_DISTRIBUTION( - BudaLeakyReluTest.INPUTS_RANGE_MIN, - BudaLeakyReluTest.INPUTS_RANGE_MAX).sample(self.shape) - self.inputs.append(Tensor.create_from_torch(input)) - - for i in range(1, 3): - weights = BudaLeakyReluTest.WEIGHTS_DISTRIBUTION( - BudaLeakyReluTest.WEIGHTS_RANGE_MIN, - BudaLeakyReluTest.WEIGHTS_RANGE_MAX).sample(self.shape) - weights.requires_grad = True - self.set_parameter("train_param" + str(i), weights) - - - def forward(self, x1, x2): - - # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param1) - mul3 = pybuda.op.Multiply("mul3", x2, 
self.train_param2) - - # Layer 3 - lrelu1 = pybuda.op.LeakyRelu("lrelu1", mul1, alpha=self.alpha) - lrelu2 = pybuda.op.LeakyRelu("lrelu2", mul2, alpha=self.alpha) - lrelu3 = pybuda.op.LeakyRelu("lrelu3", mul3, alpha=self.alpha) - - # Layer 4 - mul4 = pybuda.op.Multiply("mul4", lrelu1, self.train_param1) - mul5 = pybuda.op.Multiply("mul5", lrelu3, self.train_param2) - add1 = pybuda.op.Add("add1", lrelu2, x2) - - # Layer 5 - lrelu4 = pybuda.op.LeakyRelu("lrelu4", mul4, alpha=self.alpha) - lrelu5 = pybuda.op.LeakyRelu("lrelu5", add1, alpha=self.alpha) - lrelu6 = pybuda.op.LeakyRelu("lrelu6", mul5, alpha=self.alpha) - - # Layer 6 - mul6 = pybuda.op.Multiply("mul6", lrelu4, lrelu2) - mul7 = pybuda.op.Multiply("mul7", mul2, lrelu6) - add2 = pybuda.op.Add("add2", lrelu5, mul3) - - # Layer 7 - lrelu7 = pybuda.op.LeakyRelu("lrelu7", mul6, alpha=self.alpha) - lrelu8 = pybuda.op.LeakyRelu("lrelu8", add2, alpha=self.alpha) - lrelu9 = pybuda.op.LeakyRelu("lrelu9", mul7, alpha=self.alpha) - - # Layer 8 - mul8 = pybuda.op.Multiply("mul8", lrelu7, mul4) - mul9 = pybuda.op.Multiply("mul9", lrelu8, lrelu6) - mul10 = pybuda.op.Multiply("mul10", lrelu9, lrelu3) - - return mul8, mul9, mul10 \ No newline at end of file diff --git a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_4.py b/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_4.py deleted file mode 100644 index cb619a614..000000000 --- a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_4.py +++ /dev/null @@ -1,140 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 4 -# LeakyRelu operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch -from torch.distributions import Normal - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaLeakyReluTest(PyBudaModule): - """ - Buda Test 4 - - """ - - INPUTS_RANGE_MIN = -1.0 - INPUTS_RANGE_MAX = 1.0 - INPUTS_DISTRIBUTION = Normal - - WEIGHTS_RANGE_MIN = -1.0 - WEIGHTS_RANGE_MAX = 1.0 - WEIGHTS_DISTRIBUTION = Normal - - def __init__( - self, - shape, - alpha - ): - super().__init__("Buda Test 4") - - self.testname = "Operator LeakyRelu, Test 4" - self.shape = shape - self.alpha = alpha - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [] - for i in range(3): - input = BudaLeakyReluTest.INPUTS_DISTRIBUTION( - BudaLeakyReluTest.INPUTS_RANGE_MIN, - BudaLeakyReluTest.INPUTS_RANGE_MAX).sample(self.shape) - self.inputs.append(Tensor.create_from_torch(input)) - - for i in range(1, 4): - weights = BudaLeakyReluTest.WEIGHTS_DISTRIBUTION( - BudaLeakyReluTest.WEIGHTS_RANGE_MIN, - BudaLeakyReluTest.WEIGHTS_RANGE_MAX).sample(self.shape) - weights.requires_grad = True - self.set_parameter("train_param" + str(i), weights) - - - - def forward(self, x1, x2, x3): - - # Layer 2 - add1 = pybuda.op.Add("add1", x1, self.train_param1) - add2 = pybuda.op.Add("add2", x2, self.train_param1) - add3 = pybuda.op.Add("add3", x3, self.train_param2) - mul1 = pybuda.op.Multiply("mul1", x2, self.train_param2) - mul2 = pybuda.op.Multiply("mul2", x3, self.train_param3) - - # Layer 3 - lrelu1 = pybuda.op.LeakyRelu("lrelu1", add1, alpha=self.alpha) - lrelu2 = pybuda.op.LeakyRelu("lrelu2", add2, 
alpha=self.alpha) - lrelu3 = pybuda.op.LeakyRelu("lrelu3", mul1, alpha=self.alpha) - lrelu4 = pybuda.op.LeakyRelu("lrelu4", add3, alpha=self.alpha) - lrelu5 = pybuda.op.LeakyRelu("lrelu5", mul2, alpha=self.alpha) - - # Layer 4 - mul3 = pybuda.op.Multiply("mul3", lrelu1, self.train_param1) - mul4 = pybuda.op.Multiply("mul4", lrelu2, x2) - mul5 = pybuda.op.Multiply("mul5", lrelu3, self.train_param2) - mul6 = pybuda.op.Multiply("mul6", lrelu4, x3) - add4 = pybuda.op.Add("add4", lrelu5, self.train_param3) - - # Layer 5 - lrelu6 = pybuda.op.LeakyRelu("lrelu6", mul3, alpha=self.alpha) - lrelu7 = pybuda.op.LeakyRelu("lrelu7", mul4, alpha=self.alpha) - lrelu8 = pybuda.op.LeakyRelu("lrelu8", mul5, alpha=self.alpha) - lrelu9 = pybuda.op.LeakyRelu("lrelu9", mul6, alpha=self.alpha) - lrelu10 = pybuda.op.LeakyRelu("lrelu10", add4, alpha=self.alpha) - - # Layer 6 - mul7 = pybuda.op.Multiply("mul7", lrelu6, add2) - mul8 = pybuda.op.Multiply("mul8", lrelu8, lrelu4) - mul9 = pybuda.op.Multiply("mul9", lrelu9, lrelu5) - mul10 = pybuda.op.Multiply("mul10", lrelu10, self.train_param3) - add5 = pybuda.op.Add("add5", lrelu7, lrelu3) - - # Layer 7 - lrelu11 = pybuda.op.LeakyRelu("lrelu11", mul7, alpha=self.alpha) - lrelu12 = pybuda.op.LeakyRelu("lrelu12", add5, alpha=self.alpha) - lrelu13 = pybuda.op.LeakyRelu("lrelu13", mul8, alpha=self.alpha) - lrelu14 = pybuda.op.LeakyRelu("lrelu14", mul9, alpha=self.alpha) - lrelu15 = pybuda.op.LeakyRelu("lrelu15", mul10, alpha=self.alpha) - - # Layer 8 - add6 = pybuda.op.Add("add6", lrelu11, mul3) - add7 = pybuda.op.Add("add7", lrelu12, mul8) - mul11 = pybuda.op.Multiply("mul11", lrelu13, mul5) - mul12 = pybuda.op.Multiply("mul12", lrelu14, add4) - mul13 = pybuda.op.Multiply("mul13", mul5, lrelu15) - - # Layer 9 - lrelu16 = pybuda.op.LeakyRelu("lrelu16", add6, alpha=self.alpha) - lrelu17 = pybuda.op.LeakyRelu("lrelu17", add7, alpha=self.alpha) - lrelu18 = pybuda.op.LeakyRelu("lrelu18", mul11, alpha=self.alpha) - lrelu19 = pybuda.op.LeakyRelu("lrelu19", mul12, alpha=self.alpha) - lrelu20 = pybuda.op.LeakyRelu("lrelu20", mul13, alpha=self.alpha) - - # Layer 10 - mul14 = pybuda.op.Multiply("mul14", lrelu16, mul7) - mul15 = pybuda.op.Multiply("mul15", lrelu17, mul8) - mul16 = pybuda.op.Multiply("mul16", lrelu18, lrelu19) - mul17 = pybuda.op.Multiply("mul17", add5, lrelu20) - - # Layer 11 - lrelu21 = pybuda.op.LeakyRelu("lrelu21", mul14, alpha=self.alpha) - lrelu22 = pybuda.op.LeakyRelu("lrelu22", mul15, alpha=self.alpha) - lrelu23 = pybuda.op.LeakyRelu("lrelu23", mul16, alpha=self.alpha) - lrelu24 = pybuda.op.LeakyRelu("lrelu24", mul17, alpha=self.alpha) - - # Layer 12 - add8 = pybuda.op.Add("add8", lrelu21, lrelu23) - add9 = pybuda.op.Add("add9", lrelu22, lrelu24) - - return add8, add9 diff --git a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_5.py b/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_5.py deleted file mode 100644 index 0cae631f1..000000000 --- a/pybuda/test/operators/eltwise_unary_attr/leaky_relu/models/model_5.py +++ /dev/null @@ -1,121 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 4 -# LeakyRelu operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch -from torch.distributions import Normal - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaLeakyReluTest(PyBudaModule): - """ - Buda Test 4 - - """ - - 
INPUTS_RANGE_MIN = -1.0 - INPUTS_RANGE_MAX = 1.0 - INPUTS_DISTRIBUTION = Normal - - WEIGHTS_RANGE_MIN = -1.0 - WEIGHTS_RANGE_MAX = 1.0 - WEIGHTS_DISTRIBUTION = Normal - - def __init__( - self, - shape, - alpha - ): - super().__init__("Buda Test 4") - - self.testname = "Operator LeakyRelu, Test 4" - self.shape = shape - self.alpha = alpha - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [] - for i in range(3): - input = BudaLeakyReluTest.INPUTS_DISTRIBUTION( - BudaLeakyReluTest.INPUTS_RANGE_MIN, - BudaLeakyReluTest.INPUTS_RANGE_MAX).sample(self.shape) - self.inputs.append(Tensor.create_from_torch(input)) - - for i in range(1, 4): - weights = BudaLeakyReluTest.WEIGHTS_DISTRIBUTION( - BudaLeakyReluTest.WEIGHTS_RANGE_MIN, - BudaLeakyReluTest.WEIGHTS_RANGE_MAX).sample(self.shape) - weights.requires_grad = True - self.set_parameter("train_param" + str(i), weights) - - - - def forward(self, x1, x2, x3): - - # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) - - # Layer 3 - lrelu1 = pybuda.op.LeakyRelu("lrelu1", mul1, alpha=self.alpha) - lrelu2 = pybuda.op.LeakyRelu("lrelu2", mul2, alpha=self.alpha) - lrelu3 = pybuda.op.LeakyRelu("lrelu3", mul3, alpha=self.alpha) - - # Layer 4 - mul4 = pybuda.op.Multiply("mul4", lrelu1, x2) - mul5 = pybuda.op.Multiply("mul5", lrelu2, x3) - mul6 = pybuda.op.Multiply("mul6", self.train_param2, lrelu3) - - # Layer 5 - lrelu4 = pybuda.op.LeakyRelu("lrelu4", mul4, alpha=self.alpha) - lrelu5 = pybuda.op.LeakyRelu("lrelu5", mul5, alpha=self.alpha) - lrelu6 = pybuda.op.LeakyRelu("lrelu6", mul6, alpha=self.alpha) - - # Layer 6 - mul7 = pybuda.op.Multiply("mul7", lrelu4, mul2) - mul8 = pybuda.op.Multiply("mul8", lrelu5, mul3) - mul9 = pybuda.op.Multiply("mul9", lrelu6, mul1) - mul10 = pybuda.op.Multiply("mul10", lrelu4, lrelu5) - - # Layer 7 - lrelu7 = pybuda.op.LeakyRelu("lrelu7", mul10, alpha=self.alpha) - lrelu8 = pybuda.op.LeakyRelu("lrelu8", mul8, alpha=self.alpha) - lrelu9 = pybuda.op.LeakyRelu("lrelu9", mul9, alpha=self.alpha) - - # Layer 8 - mul11 = pybuda.op.Multiply("mul11", mul7, lrelu7) - mul12 = pybuda.op.Multiply("mul12", lrelu8, mul6) - mul13 = pybuda.op.Multiply("mul13", mul5, lrelu9) - - # Layer 9 - lrelu10 = pybuda.op.LeakyRelu("lrelu10", mul11, alpha=self.alpha) - lrelu11 = pybuda.op.LeakyRelu("lrelu11", mul12, alpha=self.alpha) - lrelu12 = pybuda.op.LeakyRelu("lrelu12", mul13, alpha=self.alpha) - - # Layer 10 - mul14 = pybuda.op.Multiply("mul14", lrelu10, mul8) - mul15 = pybuda.op.Multiply("mul15", lrelu11, mul9) - mul16 = pybuda.op.Multiply("mul16", lrelu12, lrelu6) - - # Layer 11 - mul17 = pybuda.op.Multiply("mul17", mul14, lrelu8) - mul18 = pybuda.op.Multiply("mul18", mul15, mul16) - - # Layer 12 - lrelu13 = pybuda.op.LeakyRelu("lrelu13", mul18, alpha=self.alpha) - - return mul17, lrelu13 diff --git a/pybuda/test/operators/matmul/models/generic/model_10.py b/pybuda/test/operators/matmul/models/generic/model_10.py deleted file mode 100644 index ce619fd0f..000000000 --- a/pybuda/test/operators/matmul/models/generic/model_10.py +++ /dev/null @@ -1,101 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 10 -# Matmul operator defined by PyBuda API -# These kinds of tests test 
only single specific operator through different PyBuda architectures -# - - -import torch - -import pybuda - -from pybuda import PyBudaModule, Tensor - - -class BudaMatmulTest(PyBudaModule): - """ - Buda Test 10 - - In this test we have 22 operations, and 3 input tensors and 9 trainable variables. - One operand represents input and the other one is trainable paramater. - """ - - def __init__(self, shape): - super().__init__("Buda Test 10") - self.testname = "Operator Matmul Test 10" - self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param5 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param6 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.train_param7 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param8 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param9 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] - for i in range(9): - self.set_parameter("train_param" + str(i + 1), torch.rand(*self.shape, requires_grad=True)) - - def forward(self, x1, x2, x3): - - # Layer 2 - tr1 = pybuda.op.Transpose("tr1", self.train_param1, -1, -2) - mm1 = pybuda.op.Matmul("mm1", x1, tr1) - tr2 = pybuda.op.Transpose("tr2", self.train_param2, -1, -2) - mm2 = pybuda.op.Matmul("mm2", x2, tr2) - tr3 = pybuda.op.Transpose("tr3", self.train_param3, -1, -2) - mm3 = pybuda.op.Matmul("mm3", x3, tr3) - - # Layer 3 - mm4 = pybuda.op.Matmul("mm4", mm1, x2) - mm5 = pybuda.op.Matmul("mm5", mm2, x3) - mm6 = pybuda.op.Matmul("mm6", mm3, x2) - - # Layer 4 - tr4 = pybuda.op.Transpose("tr4", self.train_param4, -1, -2) - mm7 = pybuda.op.Matmul("mm7", mm4, tr4) - tr5 = pybuda.op.Transpose("tr5", self.train_param5, -1, -2) - mm8 = pybuda.op.Matmul("mm8", mm5, tr5) - tr6 = pybuda.op.Transpose("tr6", self.train_param6, -1, -2) - mm9 = pybuda.op.Matmul("mm9", mm6, tr6) - - # Layer - mm10 = pybuda.op.Matmul("mm10", mm2, self.train_param4) - - # Layer 6 - mm11 = pybuda.op.Matmul("mm11", mm1, self.train_param5) - mm12 = pybuda.op.Matmul("mm12", mm2, self.train_param6) - - # Layer 7 - tr7 = pybuda.op.Transpose("tr7", mm10, -1, -2) - mm13 = pybuda.op.Matmul("mm13", mm11, tr7) - mm14 = pybuda.op.Matmul("mm14", mm7, mm9) - mm15 = pybuda.op.Matmul("mm15", mm8, mm12) - - # Layer 8 - mm16 = pybuda.op.Matmul("mm16", mm13, self.train_param9) - mm17 = pybuda.op.Matmul("mm17", mm14, self.train_param7) - tr8 = pybuda.op.Transpose("tr8", self.train_param8, -1, -2) - mm18 = pybuda.op.Matmul("mm18", mm15, tr8) - - # Layer 9 - mm19 = pybuda.op.Matmul("mm19", mm16, tr1) - mm20 = pybuda.op.Matmul("mm20", mm17, tr4) - - # Layer 10 - mm21 = pybuda.op.Matmul("mm21", mm19, mm20) - - # Layer 11 - mm22 = pybuda.op.Matmul("mm22", mm21, mm18) - - return mm22 - - def values(self): - return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/pybuda/test/operators/matmul/models/generic/model_3.py b/pybuda/test/operators/matmul/models/generic/model_3.py deleted file mode 100644 index f4bf9db9f..000000000 --- a/pybuda/test/operators/matmul/models/generic/model_3.py +++ /dev/null @@ -1,67 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 3 -# Matmul 
operator defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch - -import pybuda - -from pybuda import PyBudaModule, Tensor - - -class BudaMatmulTest(PyBudaModule): - """ - Buda Test 3 - - In this test we have 10 operations, and three input tensors and three trainable variables. - One operand represents input and the other one is trainable paramater. - """ - - def __init__(self, shape): - super().__init__("Buda Test 3") - self.testname = "Operator Matmul Test 3" - self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(3)] - for i in range(3): - self.set_parameter("train_param" + str(i + 1), torch.rand(*self.shape, requires_grad=True)) - - def forward(self, x1, x2, x3): - - # Layer 2 - tr1 = pybuda.op.Transpose("tr1", self.train_param1, -1, -2) - mm1 = pybuda.op.Matmul("mm1", x1, tr1) - tr2 = pybuda.op.Transpose("tr2", self.train_param2, -1, -2) - mm2 = pybuda.op.Matmul("mm2", x2, tr2) - tr3 = pybuda.op.Transpose("tr3", x3, -1, -2) - mm3 = pybuda.op.Matmul("mm3", tr3, self.train_param3) - - # Layer 3 - mm4 = pybuda.op.Matmul("mm4", mm1, x2) - mm5 = pybuda.op.Matmul("mm5", self.train_param2, mm3) - mm6 = pybuda.op.Matmul("mm6", mm3, tr3) - - # Layer 4 - mm7 = pybuda.op.Matmul("mm7", mm2, mm5) - mm8 = pybuda.op.Matmul("mm8", mm6, x3) - - # Layer 5 - mm9 = pybuda.op.Matmul("mm9", mm7, mm8) - - # Layer 6 - tr4 = pybuda.op.Transpose("tr4", mm4, -1, -2) - mm10 = pybuda.op.Matmul("mm10", tr4, mm9) - - return mm10 - - def values(self): - return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/pybuda/test/operators/matmul/models/generic/model_6.py b/pybuda/test/operators/matmul/models/generic/model_6.py deleted file mode 100644 index bba0c1ff9..000000000 --- a/pybuda/test/operators/matmul/models/generic/model_6.py +++ /dev/null @@ -1,75 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 6 -# Matmul operator defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch - -import pybuda - -from pybuda import PyBudaModule, Tensor - - -class BudaMatmulTest(PyBudaModule): - """ - Buda Test 6 - - In this test we have 13 operations, and 4 input tensors and 4 trainable variables. - One operand represents input and the other one is trainable paramater. 
- """ - - def __init__(self, shape): - super().__init__("Buda Test 6") - self.testname = "Operator Matmul Test 6" - self.shape = shape - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(4)] - for i in range(4): - self.set_parameter("train_param" + str(i + 1), torch.rand(*self.shape, requires_grad=True)) - - def forward(self, x1, x2, x3, x4): - - # Layer 2 - tr1 = pybuda.op.Transpose("tr1", self.train_param1, -1, -2) - mm1 = pybuda.op.Matmul("mm1", x1, tr1) - - tr2 = pybuda.op.Transpose("tr2", self.train_param2, -1, -2) - mm2 = pybuda.op.Matmul("mm2", x2, tr2) - - tr3 = pybuda.op.Transpose("tr3", x3, -1, -2) - mm3 = pybuda.op.Matmul("mm3", tr3, self.train_param3) - - tr4 = pybuda.op.Transpose("tr4", x4, -1, -2) - mm4 = pybuda.op.Matmul("mm4", tr4, self.train_param4) - - # Layer 3 - mm5 = pybuda.op.Matmul("mm5", mm1, mm2) - mm6 = pybuda.op.Matmul("mm6", x3, mm3) - mm7 = pybuda.op.Matmul("mm7", mm3, mm4) - - # Layer 4 - mm8 = pybuda.op.Matmul("mm8", mm5, mm6) - mm9 = pybuda.op.Matmul("mm9", mm3, mm7) - mm10 = pybuda.op.Matmul("mm10", mm6, mm7) - - # Layer 5 - mm11 = pybuda.op.Matmul("mm11", mm8, mm9) - tr5 = pybuda.op.Transpose("tr5", mm10, -1, -2) - mm12 = pybuda.op.Matmul("mm12", mm9, tr5) - - # Layer 6 - mm13 = pybuda.op.Matmul("mm13", mm11, mm12) - - return mm13 - - def values(self): - return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/pybuda/test/operators/tm/pad/models/model_2.py b/pybuda/test/operators/tm/pad/models/model_2.py deleted file mode 100644 index 64b29c370..000000000 --- a/pybuda/test/operators/tm/pad/models/model_2.py +++ /dev/null @@ -1,72 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 2 -# Pad operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaPadTest(PyBudaModule): - """ - Buda Test 2 - - """ - - def __init__( - self, - shape, - pad - ): - super().__init__("Buda Test 2") - - - self.testname = "Operator Pad, Test 2" - self.shape = shape - self.pad = pad - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] - - self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) - self.set_parameter("train_param2", torch.rand(*self.shape, requires_grad=True)) - - def forward(self, x1, x2): - - # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - - # Layer 3 - pad1 = pybuda.op.Pad("pad1", mul1, self.pad) - pad2 = pybuda.op.Pad("pad2", mul2, self.pad) - - # Layer 4 - mul3 = pybuda.op.Multiply("mul3", pad1, pad2) - pad3 = pybuda.op.Pad("pad3", x1, self.pad) - pad4 = pybuda.op.Pad("pad4", self.train_param2, self.pad) - - # Layer 5 - mul4 = pybuda.op.Multiply("mul4", pad3, mul3) - mul5 = pybuda.op.Multiply("mul5", mul3, pad4) - - # Layer 6 - pad5 = pybuda.op.Pad("pad5", mul4, 
self.pad) - pad6 = pybuda.op.Pad("pad6", mul5, self.pad) - - # Layer 7 - mul6 = pybuda.op.Multiply("mul6", pad5, pad6) - - return mul6 \ No newline at end of file diff --git a/pybuda/test/operators/tm/pad/models/model_3.py b/pybuda/test/operators/tm/pad/models/model_3.py deleted file mode 100644 index 15c0b086a..000000000 --- a/pybuda/test/operators/tm/pad/models/model_3.py +++ /dev/null @@ -1,86 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 3 -# Pad operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaPadTest(PyBudaModule): - """ - Buda Test 3 - - """ - - def __init__( - self, - shape, - pad - ): - super().__init__("Buda Test 3") - - - self.testname = "Operator Pad, Test 3" - self.shape = shape - self.pad = pad - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(2)] - - self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) - self.set_parameter("train_param2", torch.rand(*self.shape, requires_grad=True)) - - def forward(self, x1, x2): - - # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", self.train_param1, x2) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param2) - - # Layer 3 - pad1 = pybuda.op.Pad("pad1", mul1, self.pad) - pad2 = pybuda.op.Pad("pad2", mul2, self.pad) - pad3 = pybuda.op.Pad("pad3", mul3, self.pad) - - # Layer 4 - mul4 = pybuda.op.Multiply("mul4", self.train_param1, x2) - add1 = pybuda.op.Add("add1", x2, self.train_param2) - - # Layer 5 - pad4 = pybuda.op.Pad("pad4", mul4, self.pad) - pad5 = pybuda.op.Pad("pad5", add1, self.pad) - - # Layer 6 - mul5 = pybuda.op.Multiply("mul5", pad1, pad4) - mul6 = pybuda.op.Multiply("mul6", pad2, pad3) - add2 = pybuda.op.Add("add2", pad3, pad5) - - # Layer 7 - pad6 = pybuda.op.Pad("pad6", mul5, self.pad) - pad7 = pybuda.op.Pad("pad7", mul6, self.pad) - pad8 = pybuda.op.Pad("pad8", add2, self.pad) - - # Layer 8 - add4 = pybuda.op.Add("add4", pad6, pad7) - add5 = pybuda.op.Add("add5", pad6, pad8) - add6 = pybuda.op.Add("add6", pad7, pad8) - - # Layer 9 - pad9 = pybuda.op.Pad("pad9", add4, self.pad) - pad10 = pybuda.op.Pad("pad10", add5, self.pad) - pad11 = pybuda.op.Pad("pad11", add6, self.pad) - - return pad9, pad10, pad11 \ No newline at end of file diff --git a/pybuda/test/operators/tm/pad/models/model_4.py b/pybuda/test/operators/tm/pad/models/model_4.py deleted file mode 100644 index 1d0eb066f..000000000 --- a/pybuda/test/operators/tm/pad/models/model_4.py +++ /dev/null @@ -1,101 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 4 -# Pad operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaPadTest(PyBudaModule): - """ - Buda Test 4 - - """ - - def __init__( - self, - shape, - pad - ): - super().__init__("Buda Test 4") - - - self.testname = "Operator Pad, Test 4" - self.shape = shape - self.pad = pad - - self.train_param1 
= pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(3)] - - self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) - self.set_parameter("train_param2", torch.rand(*self.shape, requires_grad=True)) - self.set_parameter("train_param3", torch.rand(*self.shape, requires_grad=True)) - - def forward(self, x1, x2, x3): - - # Layer 2 - pad1 = pybuda.op.Pad("pad1", x1, self.pad) - pad2 = pybuda.op.Pad("pad2", self.train_param1, self.pad) - pad3 = pybuda.op.Pad("pad3", x2, self.pad) - pad4 = pybuda.op.Pad("pad4", self.train_param2, self.pad) - pad5 = pybuda.op.Pad("pad5", x3, self.pad) - pad6 = pybuda.op.Pad("pad6", self.train_param3, self.pad) - - # Layer 3 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) - - # Layer 4 - pad7 = pybuda.op.Pad("pad7", mul1, self.pad) - pad8 = pybuda.op.Pad("pad8", mul2, self.pad) - pad9 = pybuda.op.Pad("pad9", mul3, self.pad) - - # Layer 5 - mul4 = pybuda.op.Multiply("mul4", pad7, pad1) - mul5 = pybuda.op.Multiply("mul5", pad2, pad8) - mul6 = pybuda.op.Multiply("mul6", pad8, pad4) - mul7 = pybuda.op.Multiply("mul7", pad3, pad9) - mul8 = pybuda.op.Multiply("mul8", pad5, pad6) - - # Layer 6 - pad10 = pybuda.op.Pad("pad10", pad7, self.pad) - pad11 = pybuda.op.Pad("pad11", mul4, self.pad) - pad12 = pybuda.op.Pad("pad12", mul5, self.pad) - pad13 = pybuda.op.Pad("pad13", mul6, self.pad) - pad14 = pybuda.op.Pad("pad14", mul7, self.pad) - pad15 = pybuda.op.Pad("pad15", mul8, self.pad) - pad16 = pybuda.op.Pad("pad16", pad6, self.pad) - - # Layer 7 - mul9 = pybuda.op.Multiply("mul9", pad10, pad12) - mul10 = pybuda.op.Multiply("mul10", pad11, pad14) - mul11 = pybuda.op.Multiply("mul11", pad13, pad15) - mul12 = pybuda.op.Multiply("mul12", pad15, pad16) - - # Layer 8 - pad17 = pybuda.op.Pad("pad17", mul9, self.pad) - pad18 = pybuda.op.Pad("pad18", mul10, self.pad) - pad19 = pybuda.op.Pad("pad19", mul11, self.pad) - pad20 = pybuda.op.Pad("pad20", mul12, self.pad) - - # Layer 9 - mul13 = pybuda.op.Multiply("mul13", pad17, pad18) - mul14 = pybuda.op.Multiply("mul14", pad18, pad19) - mul15 = pybuda.op.Multiply("mul15", pad19, pad20) - - return mul13, mul14, mul15 \ No newline at end of file diff --git a/pybuda/test/operators/tm/pad/models/model_5.py b/pybuda/test/operators/tm/pad/models/model_5.py deleted file mode 100644 index f79cb7561..000000000 --- a/pybuda/test/operators/tm/pad/models/model_5.py +++ /dev/null @@ -1,123 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 5 -# Pad operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaPadTest(PyBudaModule): - """ - Buda Test 5 - - """ - - def __init__( - self, - shape, - pad - ): - super().__init__("Buda Test 5") - - - self.testname = "Operator Pad, Test 5" - self.shape = shape - self.pad = pad - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param3 = 
pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for _ in range(3)] - - self.set_parameter("train_param1", torch.rand(*self.shape, requires_grad=True)) - self.set_parameter("train_param2", torch.rand(*self.shape, requires_grad=True)) - self.set_parameter("train_param3", torch.rand(*self.shape, requires_grad=True)) - - def forward(self, x1, x2, x3): - - # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", x3, self.train_param3) - - # Layer 3 - pad1 = pybuda.op.Pad("pad1", x1, self.pad) - pad2 = pybuda.op.Pad("pad2", mul1, self.pad) - pad3 = pybuda.op.Pad("pad3", self.train_param1, self.pad) - pad4 = pybuda.op.Pad("pad4", x2, self.pad) - pad5 = pybuda.op.Pad("pad5", mul2, self.pad) - pad6 = pybuda.op.Pad("pad6", self.train_param2, self.pad) - pad7 = pybuda.op.Pad("pad7", x3, self.pad) - pad8 = pybuda.op.Pad("pad8", mul3, self.pad) - pad9 = pybuda.op.Pad("pad9", self.train_param3, self.pad) - - # Layer 4 - pad10 = pybuda.op.Pad("pad10", x1, self.pad) - mul4 = pybuda.op.Multiply("mul4", pad1, pad2) - mul5 = pybuda.op.Multiply("mul5", pad2, pad3) - mul6 = pybuda.op.Multiply("mul6", pad4, pad5) - mul7 = pybuda.op.Multiply("mul7", pad5, pad6) - mul8 = pybuda.op.Multiply("mul8", pad7, pad8) - mul9 = pybuda.op.Multiply("mul9", pad8, pad9) - - # Layer 5 - mul10 = pybuda.op.Multiply("mul10", pad10, mul4) - pad11 = pybuda.op.Pad("pad11", x2, self.pad) - mul11 = pybuda.op.Multiply("mul11", mul5, pad11) - pad12 = pybuda.op.Pad("pad12", x3, self.pad) - mul12 = pybuda.op.Multiply("mul12", mul7, pad12) - pad13 = pybuda.op.Pad("pad13", self.train_param3, self.pad) - mul13 = pybuda.op.Multiply("mul13", mul9, pad13) - - # Layer 6 - pad14 = pybuda.op.Pad("pad14", mul10, self.pad) - pad15 = pybuda.op.Pad("pad15", mul11, self.pad) - pad16 = pybuda.op.Pad("pad16", mul6, self.pad) - pad17 = pybuda.op.Pad("pad17", mul12, self.pad) - pad18 = pybuda.op.Pad("pad18", mul8, self.pad) - pad19 = pybuda.op.Pad("pad19", mul13, self.pad) - - # Layer 7 - mul14 = pybuda.op.Multiply("mul14", pad14, pad15) - mul15 = pybuda.op.Multiply("mul15", pad16, pad17) - mul16 = pybuda.op.Multiply("mul16", pad18, pad19) - - # Layer 8 - pad20 = pybuda.op.Pad("pad20", pad14, self.pad) - pad21 = pybuda.op.Pad("pad21", mul14, self.pad) - pad22 = pybuda.op.Pad("pad22", pad16, self.pad) - pad23 = pybuda.op.Pad("pad23", mul15, self.pad) - pad24 = pybuda.op.Pad("pad24", pad19, self.pad) - pad25 = pybuda.op.Pad("pad25", mul16, self.pad) - - # Layer 9 - mul17 = pybuda.op.Multiply("mul17", pad20, pad23) - mul18 = pybuda.op.Multiply("mul18", pad22, pad25) - mul19 = pybuda.op.Multiply("mul19", pad21, pad24) - - # Layer 10 - pad26 = pybuda.op.Pad("pad26", mul17, self.pad) - pad27 = pybuda.op.Pad("pad27", mul18, self.pad) - pad28 = pybuda.op.Pad("pad28", mul19, self.pad) - - # Layer 11 - add1 = pybuda.op.Add("add1", pad26, pad27) - add2 = pybuda.op.Add("add2", pad27, pad28) - - # Layer 12 - pad29 = pybuda.op.Pad("pad29", add1, self.pad) - pad30 = pybuda.op.Pad("pad30", add2, self.pad) - - return pad29, pad30 \ No newline at end of file diff --git a/pybuda/test/operators/tm/reshape/models/model_3.py b/pybuda/test/operators/tm/reshape/models/model_3.py deleted file mode 100644 index 5d289f114..000000000 --- a/pybuda/test/operators/tm/reshape/models/model_3.py +++ /dev/null @@ -1,73 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: 
Apache-2.0 -# -# Test 3 -# Reshape operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch -import numpy as np - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaReshapeTest(PyBudaModule): - """ - Buda Test 3 - - """ - - def __init__( - self, - old_shape, - new_shape): - super().__init__("Buda Test 3") - - assert np.prod(old_shape) == np.prod(new_shape), "Size of a tensor should stay the same" - - self.testname = "Operator reshape Test 3" - self.old_shape = old_shape - self.new_shape = new_shape - - self.train_param1 = pybuda.Parameter(*self.old_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.old_shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.old_shape)) for i in range(2)] - for i in range(1, 3): - self.set_parameter("train_param" + str(i), torch.rand(*self.old_shape, requires_grad=True)) - - def forward(self, x1, x2): - - # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", self.train_param1, x2) - mul3 = pybuda.op.Multiply("mul3", x2, self.train_param2) - - # Layer 3 - rsh1 = pybuda.op.Reshape("rsh1", mul1, self.new_shape) - rsh2 = pybuda.op.Reshape("rsh2", mul2, self.new_shape) - rsh3 = pybuda.op.Reshape("rsh3", mul3, self.new_shape) - - # Layer 4 - mul4 = pybuda.op.Multiply("mul4", rsh1, rsh2) - mul5 = pybuda.op.Multiply("mul5", rsh2, rsh3) - - # Layer 5 - rsh4 = pybuda.op.Reshape("rsh4", mul4, self.old_shape) - rsh5 = pybuda.op.Reshape("rsh5", mul5, self.old_shape) - - # Layer 6 - mul6 = pybuda.op.Multiply("mul6", rsh4, self.train_param1) - mul7 = pybuda.op.Multiply("mul7", rsh5, self.train_param2) - - return mul6, mul7 - - def values(self): - return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/pybuda/test/operators/tm/reshape/models/model_4.py b/pybuda/test/operators/tm/reshape/models/model_4.py deleted file mode 100644 index aa5ca413b..000000000 --- a/pybuda/test/operators/tm/reshape/models/model_4.py +++ /dev/null @@ -1,84 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 4 -# Reshape operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch -import numpy as np - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaReshapeTest(PyBudaModule): - """ - Buda Test 4 - - """ - - def __init__( - self, - old_shape, - new_shape): - super().__init__("Buda Test 4") - - assert np.prod(old_shape) == np.prod(new_shape), "Size of a tensor should stay the same" - - self.testname = "Operator reshape Test 4" - self.old_shape = old_shape - self.new_shape = new_shape - - self.train_param1 = pybuda.Parameter(*self.old_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.old_shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.old_shape)) for i in range(2)] - for i in range(1, 3): - self.set_parameter("train_param" + str(i), torch.rand(*self.old_shape, requires_grad=True)) - - def forward(self, x1, x2): - - # Layer 2 - mul1 = pybuda.op.Multiply("mul1", x1, self.train_param1) - mul2 = pybuda.op.Multiply("mul2", x2, self.train_param2) - mul3 = pybuda.op.Multiply("mul3", mul1, mul2) - - # Layer 3 - rsh1 = 
pybuda.op.Reshape("rsh1", x1, self.new_shape) - rsh2 = pybuda.op.Reshape("rsh2", self.train_param1, self.new_shape) - rsh3 = pybuda.op.Reshape("rsh3", mul3, self.new_shape) - rsh4 = pybuda.op.Reshape("rsh4", x2, self.new_shape) - rsh5 = pybuda.op.Reshape("rsh5", self.train_param2, self.new_shape) - - # Layer 4 - mul4 = pybuda.op.Multiply("mul4", rsh1, rsh2) - mul5 = pybuda.op.Multiply("mul5", self.train_param1, mul3) - mul6 = pybuda.op.Multiply("mul6", rsh3, rsh4) - mul7 = pybuda.op.Multiply("mul7", rsh5, rsh5) - - # Layer 5 - rsh6 = pybuda.op.Reshape("rsh6", mul4, self.old_shape) - rsh7 = pybuda.op.Reshape("rsh7", mul5, self.old_shape) - rsh8 = pybuda.op.Reshape("rsh8", mul6, self.old_shape) - rsh9 = pybuda.op.Reshape("rsh9", mul7, self.old_shape) - - # Layer 6 - add1 = pybuda.op.Add("add1", rsh6, rsh7) - - # Layer 7 - add2 = pybuda.op.Add("add2", add1, rsh8) - - # Layer 8 - add3 = pybuda.op.Add("add3", add2, rsh9) - - return add3 - - def values(self): - return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/pybuda/test/operators/tm/reshape/models/model_5.py b/pybuda/test/operators/tm/reshape/models/model_5.py deleted file mode 100644 index c03e529bb..000000000 --- a/pybuda/test/operators/tm/reshape/models/model_5.py +++ /dev/null @@ -1,72 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 5 -# Reshape operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch -import numpy as np - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaReshapeTest(PyBudaModule): - """ - Buda Test 5 - - """ - - def __init__( - self, - old_shape, - new_shape): - super().__init__("Buda Test 5") - - assert np.prod(old_shape) == np.prod(new_shape), "Size of a tensor should stay the same" - - self.testname = "Operator reshape Test 5" - self.old_shape = old_shape - self.new_shape = new_shape - - self.train_param1 = pybuda.Parameter(*self.old_shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.old_shape, requires_grad=True) - self.train_param3 = pybuda.Parameter(*self.old_shape, requires_grad=True) - self.train_param4 = pybuda.Parameter(*self.old_shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.old_shape)) for i in range(2)] - for i in range(1, 5): - self.set_parameter("train_param" + str(i), torch.rand(*self.old_shape, requires_grad=True)) - - def forward(self, x1, x2): - - # Layer 2 - add1 = pybuda.op.Add("add1", x1, self.train_param1) - add2 = pybuda.op.Add("add2", x2, self.train_param2) - - # Layer 3 - mul1 = pybuda.op.Multiply("mul1", add1, add2) - - # Layer 4 - rsh1 = pybuda.op.Reshape("rsh1", add1, self.new_shape) - rsh2 = pybuda.op.Reshape("rsh2", add2, self.new_shape) - - # Layer 5 - mul2 = pybuda.op.Multiply("mul2", rsh1, rsh2) - - # Layer 6 - mul3 = pybuda.op.Multiply("mul3", mul1, self.train_param3) - rsh3 = pybuda.op.Reshape("rsh3", self.train_param4, self.new_shape) - mul4 = pybuda.op.Multiply("mul4", mul2, rsh3) - - return mul3, mul4 - - def values(self): - return [item.value() for item in self.inputs] \ No newline at end of file diff --git a/pybuda/test/operators/tm/vstack_vslice/models/model_3.py b/pybuda/test/operators/tm/vstack_vslice/models/model_3.py deleted file mode 100644 index f5bdf9c05..000000000 --- a/pybuda/test/operators/tm/vstack_vslice/models/model_3.py +++ /dev/null @@ -1,95 +0,0 
@@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -# -# Test 3 -# VStack, VSlice operators defined by PyBuda API -# These kinds of tests test only single specific operator through different PyBuda architectures -# - - -import torch - -import pybuda -import pybuda.op -import pybuda.op.nn as nn - -from pybuda import PyBudaModule, Tensor - - -class BudaVStackVSliceTest(PyBudaModule): - """ - Buda Test 3 - - """ - - def __init__( - self, - shape, - slice): - super().__init__("Buda Test 3") - - assert hasattr(shape, '__iter__'), "Shape must be iterable" - assert len(shape) == 4, "Shape must be 4" - assert shape[1] > 1, "Z dimension must be bigger than 1" - assert shape[-2] % slice == 0, "The last dimension must be divisible by slice" - - self.testname = "Operator VStack, VSLice, Test 3" - self.shape = shape - self.slice = slice - - if type(self.shape) == tuple: - self.shape = list(self.shape) - self.shape[1] *= self.slice - self.shape[-2] *= self.slice - - print(f"SHAPE: {self.shape}") - print(f"SLICE: {self.slice}") - - self.train_param1 = pybuda.Parameter(*self.shape, requires_grad=True) - self.train_param2 = pybuda.Parameter(*self.shape, requires_grad=True) - - self.inputs = [Tensor.create_from_torch(torch.rand(*self.shape)) for i in range(2)] - for i in range(1, 3): - self.set_parameter("train_param" + str(i), torch.rand(*self.shape, requires_grad=True)) - - def forward(self, x1, x2): - - # Layer 2 - vst1 = pybuda.op.VStack("vst1", x1, self.slice) - vst2 = pybuda.op.VStack("vst2", self.train_param1, self.slice) - vst3 = pybuda.op.VStack("vst3", x2, self.slice) - vst4 = pybuda.op.VStack("vst4", self.train_param2, self.slice) - - # Layer 3 - mul1 = pybuda.op.Multiply("mul1", vst1, vst2) - mul2 = pybuda.op.Multiply("mul2", vst3, vst4) - - # Layer 4 - vsl1 = pybuda.op.VSlice("vsl1", mul1, self.slice) - mul3 = pybuda.op.Multiply("mul3", vst2, mul2) - - # Layer 5 - mul4 = pybuda.op.Multiply("mul4", vsl1, x2) - - # Layer 6 - vsl2 = pybuda.op.VSlice("vsl2", mul4, self.slice) - vsl3 = pybuda.op.VSlice("vsl3", mul3, self.slice) - vst5 = pybuda.op.VStack("vst5", self.train_param1, self.slice) - vst6 = pybuda.op.VStack("vst6", self.train_param2, self.slice) - - # Layer 7 - add1 = pybuda.op.Add("add1", vsl2, pybuda.op.VSlice("hsl5", vsl3, self.slice)) - mul5 = pybuda.op.Multiply("mul5", vst5, vst6) - - # Layer 8 - vst8 = pybuda.op.VStack("vst8", add1, self.slice) - vst9 = pybuda.op.VStack("vst9", vst8, self.slice) - - # Layer 9 - sub1 = pybuda.op.Subtract("sub1", vst9, mul5) - - # Layer 10 - vst10 = pybuda.op.VStack("vst10", sub1, self.slice) - - return vst10 \ No newline at end of file diff --git a/pybuda/test/test_fork_join.py b/pybuda/test/test_fork_join.py deleted file mode 100644 index 1059e6384..000000000 --- a/pybuda/test/test_fork_join.py +++ /dev/null @@ -1,860 +0,0 @@ -# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC - -# SPDX-License-Identifier: Apache-2.0 -from typing import List - -import pytest -import torch - -import pybuda -from pybuda.verify import verify_module, VerifyConfig -from pybuda import DataFormat, PyBudaModule - -shape = (128, 768) - -def get_relaxed_atol_pcc(test_kind, test_device): - """ - Figure out reasonable pcc/atol for training on silicon - """ - training_atol = 0.3 - training_pcc = 0.95 - if test_device.is_silicon(): - training_pcc = 0.85 - inference_atol = 0.1 - inference_pcc = 0.95 - relative_atol = training_atol if test_kind.is_training() else inference_atol - if test_device.is_silicon() and test_kind.is_training(): 
- relative_atol *= 3.5 - pcc = training_pcc if test_kind.is_training() else inference_pcc - - return relative_atol, pcc - -class ForkJoinVariant(pybuda.PyBudaModule): - - def __init__(self, name, input_shape, config): - super().__init__(name) - self.weights1 = pybuda.Parameter(1, input_shape[1], input_shape[1], requires_grad=True) - self.input_shape = input_shape - self.config = config - - def forward(self, act1): - - # fork - if self.config[0] == "e": - fork = pybuda.op.Gelu("gelu_fork", act1) - elif self.config[0] == "m": - fork = pybuda.op.Matmul("matmul_fork", act1, act1) - else: - raise TypeError("Unexpected value in configuration of fork-join test") - - # right - if self.config[1] == "e": - right = pybuda.op.Add("add_long_path", fork, self.weights1) - elif self.config[1] == "m": - right = pybuda.op.Matmul("matmul_long_path", fork, self.weights1) - else: - raise TypeError("Unexpected value in configuration of fork-join test") - - # join - if self.config[2] == "e": - join = pybuda.op.Add("add_join", fork, right) - elif self.config[2] == "m": - join = pybuda.op.Matmul("matmul_join", fork, right) - else: - raise TypeError("Unexpected value in configuration of fork-join test") - - return join - -@pytest.mark.parametrize("input_shape", [(128,128), (256,256), (512,512)], ids=["128","256","512"]) -@pytest.mark.parametrize("config", ["mem", "mmm", "eme", "emm"], ids=["mem", "mmm", "eme", "emm"]) -def test_fork_join_variant(test_kind, test_device, input_shape, config): - """ - input_shape: input shape of the tensor in the fork-join. - config: string describing the type of each op in the simple fork-join. The first character describes the fork node, the second the op on the longer path, and the third the join node. - If a character is "m", the corresponding node is a matmul; if it is "e", the node is an element-wise op.
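For example, given the mapping in ForkJoinVariant above, config "mem" builds a Matmul fork ("matmul_fork"), an element-wise Add on the longer path ("add_long_path"), and a Matmul join ("matmul_join"), while "eme" builds a Gelu fork ("gelu_fork"), a Matmul on the longer path ("matmul_long_path"), and an element-wise Add join ("add_join").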
- """ - num_in_channels = 1 - relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - verify_module(ForkJoinVariant("test_fork_join_variant", input_shape, config), [(1, num_in_channels, input_shape[0], input_shape[1])], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol)) - -class ForkJoin(pybuda.PyBudaModule): - - def __init__(self, name, stack_factor: int = 12): - super().__init__(name) - self.weights1 = pybuda.Parameter(stack_factor, shape[1] // stack_factor, shape[1] // stack_factor, requires_grad=True) - self.weights2 = pybuda.Parameter(1, shape[1], shape[1], requires_grad=True) - self.weights3 = pybuda.Parameter(stack_factor, shape[1] // stack_factor, shape[1] // stack_factor, requires_grad=True) - self.stack_factor = stack_factor - - def forward(self, act1): - - # input slice - sliced = pybuda.op.HSlice("slice", act1, self.stack_factor) - - # fork, t=stack_factor - fork = pybuda.op.Gelu("gelu", sliced) - - # right - right = pybuda.op.Matmul("matmul_1", fork, self.weights1) - right = pybuda.op.HStack("stack_branch", right) - right = pybuda.op.Matmul("matmul_2a_t1", right, self.weights2) - right = pybuda.op.Matmul("matmul_2b_t1", right, self.weights2) - right = pybuda.op.HSlice("slice_branch", right, self.stack_factor) - right = pybuda.op.Matmul("matmul_3", right, self.weights3) - - # join - join = pybuda.op.Add("join", fork, right) - return join - -@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) -def test_fork_join(test_kind, test_device, format): - if test_device.arch == pybuda.BackendDevice.Blackhole: - pytest.skip("Skip until BudaBackend#2628 is consumed.") - - microbatch_count = 16 - - relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - verify_module(ForkJoin("fork_join"), [(microbatch_count, *shape)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, - fp32_fallback=format)) - -class ForkJoinWithBuffQueueLongPath(pybuda.PyBudaModule): - def __init__(self, name, stack_factor: int = 12): - super().__init__(name) - self.in0_mm_1 = pybuda.Parameter(16, 60 * 32, 60 * 32, requires_grad=False) - self.in1_mm_2 = pybuda.Parameter(1, 32 * 32, 1 * 32, requires_grad=False) - self.in1_mm_3 = pybuda.Parameter(1, 1 * 32, 32 * 32, requires_grad=False) - # in original graph in1_mm_3 has dimension 3 equal to 1 * 32. But mm_3 has broadcast on dimension 3 for 32. - # pytorch doesn't allow for broadcast if dimension is greater than 1. So we can't broadcast here. - def forward(self, act1, act2): - # Longer path of fork join contains buffering queue, - # which has to be taken into consideration when buffering fork-join. - # fork, - fork = pybuda.op.Concatenate("concatenate", act1, act2, axis=2) - # right - right = pybuda.op.Matmul("matmul_1", self.in0_mm_1, fork) - pybuda.config._get_global_compiler_config().insert_queues = [("matmul_1", "matmul_2", 0)] - right = pybuda.op.HStack("hstack", right) - right = pybuda.op.Matmul("matmul_2", right, self.in1_mm_2) - right = pybuda.op.Matmul("matmul_3", right, self.in1_mm_3) - right = pybuda.op.HSlice("vslice", right, 16) - # join - join = pybuda.op.Subtract("join", fork, right) - return join -# This test will hang on silicon if fork-join is not buffered properly. Longer path of fork join contains buffering queue, -# which has to be taken into consideration when buffering fork-join. 
-@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) -def test_fork_join_with_buff_queue_long_path(test_kind, test_device, format): - relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.balancer_policy = "Ribbon" - verify_module(ForkJoinWithBuffQueueLongPath("test_fork_join_with_buff_queue_long_path"), [(1, 16, 40 * 32, 2 * 32), (1, 16, 20 * 32, 2 * 32)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, - fp32_fallback=format)) - -class MultilevelForkJoin(pybuda.PyBudaModule): - def __init__(self, name,): - super().__init__(name) - self.add_parameter("stages.2.blocks.1.conv_mid.0.conv.weight", pybuda.Parameter(*(192, 768, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.0.bn.weight", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.0.bn.bias", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.1.conv.weight", pybuda.Parameter(*(192, 192, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.1.bn.weight", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.1.bn.bias", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.2.conv.weight", pybuda.Parameter(*(192, 192, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.2.bn.weight", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.2.bn.bias", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.3.conv.weight", pybuda.Parameter(*(192, 192, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.3.bn.weight", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.3.bn.bias", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.4.conv.weight", pybuda.Parameter(*(192, 192, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.4.bn.weight", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_mid.4.bn.bias", pybuda.Parameter(*(192,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("stages.2.blocks.1.conv_concat.conv.weight", pybuda.Parameter(*(768, 1728, 1, 1), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - - self.add_constant("stages.2.blocks.1.conv_mid.0.bn.running_var") - self.add_constant("stages.2.blocks.1.conv_mid.0.bn.running_mean") - self.add_constant("stages.2.blocks.1.conv_mid.1.bn.running_var") - self.add_constant("stages.2.blocks.1.conv_mid.1.bn.running_mean") - 
self.add_constant("stages.2.blocks.1.conv_mid.2.bn.running_var") - self.add_constant("stages.2.blocks.1.conv_mid.2.bn.running_mean") - self.add_constant("stages.2.blocks.1.conv_mid.3.bn.running_var") - self.add_constant("stages.2.blocks.1.conv_mid.3.bn.running_mean") - self.add_constant("stages.2.blocks.1.conv_mid.4.bn.running_var") - self.add_constant("stages.2.blocks.1.conv_mid.4.bn.running_mean") - self.add_constant("const_67322") - self.add_constant("const_68322") - self.add_constant("const_69322") - self.add_constant("const_70322") - self.add_constant("const_71322") - self.add_constant("const_72322") - self.add_constant("const_73322") - self.add_constant("const_74322") - self.add_constant("const_75322") - self.add_constant("const_76322") - self.add_constant("const_77322") - self.add_constant("const_78322") - self.add_constant("const_79322") - self.add_constant("const_80322") - self.add_constant("const_81322") - - self.set_constant("stages.2.blocks.1.conv_mid.0.bn.running_var", torch.rand(1, 192)) - self.set_constant("stages.2.blocks.1.conv_mid.0.bn.running_mean", torch.rand(1, 192)) - self.set_constant("stages.2.blocks.1.conv_mid.1.bn.running_var", torch.rand(1, 192)) - self.set_constant("stages.2.blocks.1.conv_mid.1.bn.running_mean", torch.rand(1, 192)) - self.set_constant("stages.2.blocks.1.conv_mid.2.bn.running_var", torch.rand(1, 192)) - self.set_constant("stages.2.blocks.1.conv_mid.2.bn.running_mean", torch.rand(1, 192)) - self.set_constant("stages.2.blocks.1.conv_mid.3.bn.running_var", torch.rand(1, 192)) - self.set_constant("stages.2.blocks.1.conv_mid.3.bn.running_mean", torch.rand(1, 192)) - self.set_constant("stages.2.blocks.1.conv_mid.4.bn.running_var", torch.rand(1, 192)) - self.set_constant("stages.2.blocks.1.conv_mid.4.bn.running_mean", torch.rand(1, 192)) - self.set_constant("const_67322", torch.rand(1, 1)) - self.set_constant("const_68322", torch.rand(1, 1)) - self.set_constant("const_69322", torch.rand(1, 1)) - self.set_constant("const_70322", torch.rand(1, 1)) - self.set_constant("const_71322", torch.rand(1, 1)) - self.set_constant("const_72322", torch.rand(1, 1)) - self.set_constant("const_73322", torch.rand(1, 1)) - self.set_constant("const_74322", torch.rand(1, 1)) - self.set_constant("const_75322", torch.rand(1, 1)) - self.set_constant("const_76322", torch.rand(1, 1)) - self.set_constant("const_77322", torch.rand(1, 1)) - self.set_constant("const_78322", torch.rand(1, 1)) - self.set_constant("const_79322", torch.rand(1, 1)) - self.set_constant("const_80322", torch.rand(1, 1)) - self.set_constant("const_81322", torch.rand(1, 1)) - - def forward(self, act_0): - - conv2d_586 = pybuda.op.Conv2d("", act_0, self.get_parameter("stages.2.blocks.1.conv_mid.0.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - add_589 = pybuda.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.0.bn.running_var"), self.get_constant("const_68322")) - sqrt_590 = pybuda.op.Sqrt("", add_589) - reciprocal_591 = pybuda.op.Reciprocal("", sqrt_590) - multiply_592 = pybuda.op.Multiply("", self.get_constant("const_67322"), reciprocal_591) - multiply_593 = pybuda.op.Multiply("", multiply_592, self.get_parameter("stages.2.blocks.1.conv_mid.0.bn.weight")) - reshape_594 = pybuda.op.Reshape("", multiply_593, shape=(192, 1, 1)) - multiply_595 = pybuda.op.Multiply("", conv2d_586, reshape_594) - multiply_597 = pybuda.op.Multiply("", self.get_constant("stages.2.blocks.1.conv_mid.0.bn.running_mean"), self.get_constant("const_69322")) - multiply_598 = 
pybuda.op.Multiply("", multiply_597, multiply_593) - add_599 = pybuda.op.Add("", multiply_598, self.get_parameter("stages.2.blocks.1.conv_mid.0.bn.bias")) - reshape_600 = pybuda.op.Reshape("", add_599, shape=(192, 1, 1)) - add_601 = pybuda.op.Add("", multiply_595, reshape_600) - relu_602 = pybuda.op.Relu("", add_601) - conv2d_603 = pybuda.op.Conv2d("", relu_602, self.get_parameter("stages.2.blocks.1.conv_mid.1.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - add_606 = pybuda.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.1.bn.running_var"), self.get_constant("const_71322")) - sqrt_607 = pybuda.op.Sqrt("", add_606) - reciprocal_608 = pybuda.op.Reciprocal("", sqrt_607) - multiply_609 = pybuda.op.Multiply("", self.get_constant("const_70322"), reciprocal_608) - multiply_610 = pybuda.op.Multiply("", multiply_609, self.get_parameter("stages.2.blocks.1.conv_mid.1.bn.weight")) - reshape_611 = pybuda.op.Reshape("", multiply_610, shape=(192, 1, 1)) - multiply_612 = pybuda.op.Multiply("", conv2d_603, reshape_611) - multiply_614 = pybuda.op.Multiply("", self.get_constant("stages.2.blocks.1.conv_mid.1.bn.running_mean"), self.get_constant("const_72322")) - multiply_615 = pybuda.op.Multiply("", multiply_614, multiply_610) - add_616 = pybuda.op.Add("", multiply_615, self.get_parameter("stages.2.blocks.1.conv_mid.1.bn.bias")) - reshape_617 = pybuda.op.Reshape("", add_616, shape=(192, 1, 1)) - add_618 = pybuda.op.Add("", multiply_612, reshape_617) - relu_619 = pybuda.op.Relu("", add_618) - conv2d_620 = pybuda.op.Conv2d("", relu_619, self.get_parameter("stages.2.blocks.1.conv_mid.2.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - add_623 = pybuda.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.2.bn.running_var"), self.get_constant("const_74322")) - sqrt_624 = pybuda.op.Sqrt("", add_623) - reciprocal_625 = pybuda.op.Reciprocal("", sqrt_624) - multiply_626 = pybuda.op.Multiply("", self.get_constant("const_73322"), reciprocal_625) - multiply_627 = pybuda.op.Multiply("", multiply_626, self.get_parameter("stages.2.blocks.1.conv_mid.2.bn.weight")) - reshape_628 = pybuda.op.Reshape("", multiply_627, shape=(192, 1, 1)) - multiply_629 = pybuda.op.Multiply("", conv2d_620, reshape_628) - multiply_631 = pybuda.op.Multiply("", self.get_constant("stages.2.blocks.1.conv_mid.2.bn.running_mean"), self.get_constant("const_75322")) - multiply_632 = pybuda.op.Multiply("", multiply_631, multiply_627) - add_633 = pybuda.op.Add("", multiply_632, self.get_parameter("stages.2.blocks.1.conv_mid.2.bn.bias")) - reshape_634 = pybuda.op.Reshape("", add_633, shape=(192, 1, 1)) - add_635 = pybuda.op.Add("", multiply_629, reshape_634) - relu_636 = pybuda.op.Relu("", add_635) - conv2d_637 = pybuda.op.Conv2d("", relu_636, self.get_parameter("stages.2.blocks.1.conv_mid.3.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - add_640 = pybuda.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.3.bn.running_var"), self.get_constant("const_77322")) - sqrt_641 = pybuda.op.Sqrt("", add_640) - reciprocal_642 = pybuda.op.Reciprocal("", sqrt_641) - multiply_643 = pybuda.op.Multiply("", self.get_constant("const_76322"), reciprocal_642) - multiply_644 = pybuda.op.Multiply("", multiply_643, self.get_parameter("stages.2.blocks.1.conv_mid.3.bn.weight")) - reshape_645 = pybuda.op.Reshape("", multiply_644, shape=(192, 1, 1)) - multiply_646 = pybuda.op.Multiply("", conv2d_637, reshape_645) - multiply_648 = 
pybuda.op.Multiply("", self.get_constant("stages.2.blocks.1.conv_mid.3.bn.running_mean"), self.get_constant("const_78322")) - multiply_649 = pybuda.op.Multiply("", multiply_648, multiply_644) - add_650 = pybuda.op.Add("", multiply_649, self.get_parameter("stages.2.blocks.1.conv_mid.3.bn.bias")) - reshape_651 = pybuda.op.Reshape("", add_650, shape=(192, 1, 1)) - add_652 = pybuda.op.Add("", multiply_646, reshape_651) - relu_653 = pybuda.op.Relu("", add_652) - conv2d_654 = pybuda.op.Conv2d("", relu_653, self.get_parameter("stages.2.blocks.1.conv_mid.4.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - add_657 = pybuda.op.Add("", self.get_constant("stages.2.blocks.1.conv_mid.4.bn.running_var"), self.get_constant("const_80322")) - sqrt_658 = pybuda.op.Sqrt("", add_657) - reciprocal_659 = pybuda.op.Reciprocal("", sqrt_658) - multiply_660 = pybuda.op.Multiply("", self.get_constant("const_79322"), reciprocal_659) - multiply_661 = pybuda.op.Multiply("", multiply_660, self.get_parameter("stages.2.blocks.1.conv_mid.4.bn.weight")) - reshape_662 = pybuda.op.Reshape("", multiply_661, shape=(192, 1, 1)) - multiply_663 = pybuda.op.Multiply("", conv2d_654, reshape_662) - multiply_665 = pybuda.op.Multiply("", self.get_constant("stages.2.blocks.1.conv_mid.4.bn.running_mean"), self.get_constant("const_81322")) - multiply_666 = pybuda.op.Multiply("", multiply_665, multiply_661) - add_667 = pybuda.op.Add("", multiply_666, self.get_parameter("stages.2.blocks.1.conv_mid.4.bn.bias")) - reshape_668 = pybuda.op.Reshape("", add_667, shape=(192, 1, 1)) - add_669 = pybuda.op.Add("", multiply_663, reshape_668) - relu_670 = pybuda.op.Relu("", add_669) - concatenate_671 = pybuda.op.Concatenate("", act_0, relu_602, relu_619, relu_636, relu_653, relu_670, axis=-3) - conv2d_672 = pybuda.op.Conv2d("", concatenate_671, self.get_parameter("stages.2.blocks.1.conv_concat.conv.weight"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) - - return conv2d_672 - -# This test will hang on silicon if fork-join is not buffered properly. This test is from vovnet_v2 benchmark. 
-# This test will hang without fork-join multilevel feature fec3b1879941dde87fa7f1d460ba5ff1bbb751f4 -@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) -def test_multilevel_fork_join_vovnet(test_kind, test_device, format): - if test_kind.is_training(): - pytest.skip() - try: - import os - os.environ["PYBUDA_MAXIMIZE_SPARSE_UBLOCK"] = "1" - os.environ["PYBUDA_RIBBON2"] = "1" - - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = format - # Op overrides - pybuda.config.override_op_size("conv2d_0.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 4)) - pybuda.config.override_op_size("conv2d_14.dc.matmul.11", (1, 2)) - pybuda.config.override_op_size("conv2d_14.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 3)) - pybuda.config.override_op_size("conv2d_28.dc.matmul.11", (1, 2)) - pybuda.config.override_op_size("conv2d_28.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 3)) - pybuda.config.override_op_size("conv2d_42.dc.matmul.11", (1, 2)) - pybuda.config.override_op_size("conv2d_42.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 3)) - pybuda.config.override_op_size("conv2d_56.dc.matmul.11", (1, 2)) - pybuda.config.override_op_size("conv2d_56.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 3)) - pybuda.config.override_op_size("concatenate_70.dc.concatenate.0", (1, 1)) - - relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - verify_module(MultilevelForkJoin("test_multilevel_fork_join_vovnet"),[(1, 768, 14, 14)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, - fp32_fallback=format)) - finally: - # unset env variables - os.environ.pop('PYBUDA_MAXIMIZE_SPARSE_UBLOCK', None) - os.environ.pop('PYBUDA_RIBBON2', None) - -class BertGeluFork(pybuda.PyBudaModule): - - def __init__(self, name, seq_len=128, hidden_dim=784): - super().__init__(name) - self.seq_len = seq_len - self.hidden_dim = hidden_dim - self.weights1 = pybuda.Parameter(hidden_dim, hidden_dim*4); - self.weights2 = pybuda.Parameter(hidden_dim*4, hidden_dim); - - def forward(self, act): - - # fork - fork = pybuda.op.Buffer("fork", act) - - # right - right = pybuda.op.Matmul("ff1", fork, self.weights1) - right = pybuda.op.Gelu("gelu", right) - right = pybuda.op.Matmul("ff2", right, self.weights2) - - # join - join = pybuda.op.Add("join", fork, right) - return join - -@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) -@pytest.mark.skip(reason="too slow for CI") -def test_bert_gelu_fork(test_kind, test_device, format): - microbatch_count = 256 - seq_len = 128 - hidden_dim = 768 - - relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - pybuda.config._get_global_compiler_config().performance_trace = pybuda.config.PerfTraceLevel.VERBOSE - verify_module(BertGeluFork("bert_gelu_fork", seq_len, hidden_dim), [(microbatch_count, seq_len, hidden_dim)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, - fp32_fallback=format), params_centered_on_zero=True) - -class BertReduceFork(pybuda.PyBudaModule): - - def __init__(self, name, seq_len=128, hidden_dim=784): - super().__init__(name) - self.seq_len = seq_len - self.hidden_dim = hidden_dim - self.weights1 = pybuda.Parameter(seq_len, hidden_dim); - - def forward(self, act): - - # fork - fork = pybuda.op.Buffer("fork", act) - - # right - right = 
pybuda.op.Add("add", fork, self.weights1) - right = pybuda.op.ReduceAvg("reduce", right, dim=-1) - - # join - join = pybuda.op.Add("join", fork, right) - return join - -@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) -@pytest.mark.skip(reason="too slow for CI") -def test_bert_reduce_fork(test_kind, test_device, format): - microbatch_count = 256 - seq_len = 384 - hidden_dim = 1024 - - relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - pybuda.config._get_global_compiler_config().performance_trace = pybuda.config.PerfTraceLevel.VERBOSE - verify_module(BertReduceFork("bert_reduce_fork", seq_len, hidden_dim), [(microbatch_count, seq_len, hidden_dim)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, - fp32_fallback=format), params_centered_on_zero=True) - - -class PipelineStuck(pybuda.PyBudaModule): - - def __init__(self, name): - super().__init__(name) - - def forward(self, act): - - # fork - #act = pybuda.op.ReduceAvg("reduce", act, dim=-1) - act = pybuda.op.Sqrt("sqrt", act) - act = pybuda.op.Exp("exp", act) - act = pybuda.op.Buffer("nop2", act) - - return act - -@pytest.mark.parametrize("format", [DataFormat.Bfp8_b, DataFormat.Float16_b], ids=["bfp8", "fp16"]) -@pytest.mark.skip(reason="too slow for CI") -def test_pipeline_stuck(test_kind, test_device, format): - microbatch_count = 256 - seq_len = 128 - hidden_dim = 768 - - relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - pybuda.config._get_global_compiler_config().performance_trace = pybuda.config.PerfTraceLevel.VERBOSE - verify_module(PipelineStuck("pipeline_stuck"), [(microbatch_count, seq_len, hidden_dim)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol, - fp32_fallback=format), params_centered_on_zero=True) - - -class NestedForks(pybuda.PyBudaModule): - - def __init__(self, name): - super().__init__(name) - - def forward(self, act): - - # main fork - fork = pybuda.op.Buffer("main_fork", act) - - left_1 = pybuda.op.Buffer("left_1", fork) - left_2 = pybuda.op.Buffer("left_2", left_1) - fork_2 = pybuda.op.Buffer("fork_2", left_2) - right_2_1 = pybuda.op.Buffer("right_2_1", fork_2) - join_2 = pybuda.op.Add("join_2", fork_2, right_2_1) - - right_1 = pybuda.op.Buffer("right_1", fork) - join_3 = pybuda.op.Add("join_3", right_1, join_2) - - left_4 = pybuda.op.Buffer("left_4", join_3) - - join = pybuda.op.Add("join", fork, left_4) - - return join - -def test_nested_forks(test_kind, test_device): - microbatch_count = 1 - seq_len = 128 - hidden_dim = 768 - - relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - #pybuda.config._get_global_compiler_config().performance_trace = pybuda.config.PerfTraceLevel.VERBOSE - verify_module(NestedForks("netsted_forks"), [(microbatch_count, seq_len, hidden_dim)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol), params_centered_on_zero=True) - -class YoloV3ForkJoin(PyBudaModule): - def __init__(self, name): - super().__init__(name) - self.add_parameter("backbone.base.conv.weight", pybuda.Parameter(*(32, 3, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.base.bn.weight", pybuda.Parameter(*(32,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.base.bn.bias", pybuda.Parameter(*(32,), 
requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.darknet_0.0.conv.weight", pybuda.Parameter(*(64, 32, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.darknet_0.0.bn.weight", pybuda.Parameter(*(64,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.darknet_0.0.bn.bias", pybuda.Parameter(*(64,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.darknet_0.1.conv1.conv.weight", pybuda.Parameter(*(32, 64, 1, 1), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.darknet_0.1.conv1.bn.weight", pybuda.Parameter(*(32,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.darknet_0.1.conv1.bn.bias", pybuda.Parameter(*(32,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.darknet_0.1.conv2.conv.weight", pybuda.Parameter(*(64, 32, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.darknet_0.1.conv2.bn.weight", pybuda.Parameter(*(64,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("backbone.darknet_0.1.conv2.bn.bias", pybuda.Parameter(*(64,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_constant("backbone.base.bn.running_var", shape=(32,)) - self.set_constant("backbone.base.bn.running_var", torch.rand(32, )) - self.add_constant("backbone.base.bn.running_mean", shape=(32,)) - self.set_constant("backbone.base.bn.running_mean", torch.rand(32, )) - self.add_constant("backbone.darknet_0.0.bn.running_var", shape=(64,)) - self.set_constant("backbone.darknet_0.0.bn.running_var", torch.rand(64, )) - self.add_constant("backbone.darknet_0.0.bn.running_mean", shape=(64,)) - self.set_constant("backbone.darknet_0.0.bn.running_mean", torch.rand(64, )) - self.add_constant("backbone.darknet_0.1.conv1.bn.running_var", shape=(32,)) - self.set_constant("backbone.darknet_0.1.conv1.bn.running_var", torch.rand(32, )) - self.add_constant("backbone.darknet_0.1.conv1.bn.running_mean", shape=(32,)) - self.set_constant("backbone.darknet_0.1.conv1.bn.running_mean", torch.rand(32, )) - self.add_constant("backbone.darknet_0.1.conv2.bn.running_var", shape=(64,)) - self.set_constant("backbone.darknet_0.1.conv2.bn.running_var", torch.rand(64, )) - self.add_constant("backbone.darknet_0.1.conv2.bn.running_mean", shape=(64,)) - self.set_constant("backbone.darknet_0.1.conv2.bn.running_mean", torch.rand(64, )) - self.add_constant("const_0578", shape=(1, 1)) - self.set_constant("const_0578", torch.rand(1, 1)) - self.add_constant("const_1578", shape=(1, 1)) - self.set_constant("const_1578", torch.rand(1, 1)) - self.add_constant("const_2578", shape=(1, 1)) - self.set_constant("const_2578", torch.rand(1, 1)) - self.add_constant("const_3578", shape=(1, 1)) - self.set_constant("const_3578", torch.rand(1, 1)) - self.add_constant("const_4578", shape=(1, 1)) - self.set_constant("const_4578", torch.rand(1, 1)) - self.add_constant("const_5578", shape=(1, 1)) - self.set_constant("const_5578", torch.rand(1, 1)) - self.add_constant("const_6578", shape=(1, 1)) - self.set_constant("const_6578", torch.rand(1, 1)) - self.add_constant("const_7578", shape=(1, 1)) - self.set_constant("const_7578", torch.rand(1, 1)) - self.add_constant("const_8578", shape=(1, 1)) - self.set_constant("const_8578", torch.rand(1, 1)) - 
self.add_constant("const_9578", shape=(1, 1)) - self.set_constant("const_9578", torch.rand(1, 1)) - self.add_constant("const_10578", shape=(1, 1)) - self.set_constant("const_10578", torch.rand(1, 1)) - self.add_constant("const_11578", shape=(1, 1)) - self.set_constant("const_11578", torch.rand(1, 1)) - - # Input shapes: - # x_1 -> (1, 3, 512, 512) - def forward(self, x_1): - conv2d_367 = pybuda.op.Conv2d("conv2d_0", x_1, self.get_parameter("backbone.base.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - add_370 = pybuda.op.Add("add_1", self.get_constant("backbone.base.bn.running_var"), self.get_constant("const_1578")) - sqrt_371 = pybuda.op.Sqrt("sqrt_2", add_370) - reciprocal_372 = pybuda.op.Reciprocal("reciprocal_3", sqrt_371) - multiply_373 = pybuda.op.Multiply("multiply_4", self.get_constant("const_0578"), reciprocal_372) - multiply_374 = pybuda.op.Multiply("multiply_5", multiply_373, self.get_parameter("backbone.base.bn.weight")) - reshape_375 = pybuda.op.Reshape("reshape_6", multiply_374, shape=(32, 1, 1)) - multiply_376 = pybuda.op.Multiply("multiply_7", conv2d_367, reshape_375) - multiply_378 = pybuda.op.Multiply("multiply_8", self.get_constant("backbone.base.bn.running_mean"), self.get_constant("const_2578")) - multiply_379 = pybuda.op.Multiply("multiply_9", multiply_378, multiply_374) - add_380 = pybuda.op.Add("add_10", multiply_379, self.get_parameter("backbone.base.bn.bias")) - reshape_381 = pybuda.op.Reshape("reshape_11", add_380, shape=(32, 1, 1)) - add_382 = pybuda.op.Add("add_12", multiply_376, reshape_381) - leaky_relu_383 = pybuda.op.LeakyRelu("leaky_relu_13", add_382, alpha=0.10000000000000001) - conv2d_384 = pybuda.op.Conv2d("conv2d_14", leaky_relu_383, self.get_parameter("backbone.darknet_0.0.conv.weight"), stride=[2, 2], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - add_387 = pybuda.op.Add("add_15", self.get_constant("backbone.darknet_0.0.bn.running_var"), self.get_constant("const_4578")) - sqrt_388 = pybuda.op.Sqrt("sqrt_16", add_387) - reciprocal_389 = pybuda.op.Reciprocal("reciprocal_17", sqrt_388) - multiply_390 = pybuda.op.Multiply("multiply_18", self.get_constant("const_3578"), reciprocal_389) - multiply_391 = pybuda.op.Multiply("multiply_19", multiply_390, self.get_parameter("backbone.darknet_0.0.bn.weight")) - reshape_392 = pybuda.op.Reshape("reshape_20", multiply_391, shape=(64, 1, 1)) - multiply_393 = pybuda.op.Multiply("multiply_21", conv2d_384, reshape_392) - multiply_395 = pybuda.op.Multiply("multiply_22", self.get_constant("backbone.darknet_0.0.bn.running_mean"), self.get_constant("const_5578")) - multiply_396 = pybuda.op.Multiply("multiply_23", multiply_395, multiply_391) - add_397 = pybuda.op.Add("add_24", multiply_396, self.get_parameter("backbone.darknet_0.0.bn.bias")) - reshape_398 = pybuda.op.Reshape("reshape_25", add_397, shape=(64, 1, 1)) - add_399 = pybuda.op.Add("add_26", multiply_393, reshape_398) - leaky_relu_400 = pybuda.op.LeakyRelu("leaky_relu_27", add_399, alpha=0.10000000000000001) - conv2d_401 = pybuda.op.Conv2d("conv2d_28", leaky_relu_400, self.get_parameter("backbone.darknet_0.1.conv1.conv.weight"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) - add_404 = pybuda.op.Add("add_29", self.get_constant("backbone.darknet_0.1.conv1.bn.running_var"), self.get_constant("const_7578")) - sqrt_405 = pybuda.op.Sqrt("sqrt_30", add_404) - reciprocal_406 = pybuda.op.Reciprocal("reciprocal_31", sqrt_405) - multiply_407 = pybuda.op.Multiply("multiply_32", 
self.get_constant("const_6578"), reciprocal_406) - multiply_408 = pybuda.op.Multiply("multiply_33", multiply_407, self.get_parameter("backbone.darknet_0.1.conv1.bn.weight")) - reshape_409 = pybuda.op.Reshape("reshape_34", multiply_408, shape=(32, 1, 1)) - multiply_410 = pybuda.op.Multiply("multiply_35", conv2d_401, reshape_409) - multiply_412 = pybuda.op.Multiply("multiply_36", self.get_constant("backbone.darknet_0.1.conv1.bn.running_mean"), self.get_constant("const_8578")) - multiply_413 = pybuda.op.Multiply("multiply_37", multiply_412, multiply_408) - add_414 = pybuda.op.Add("add_38", multiply_413, self.get_parameter("backbone.darknet_0.1.conv1.bn.bias")) - reshape_415 = pybuda.op.Reshape("reshape_39", add_414, shape=(32, 1, 1)) - add_416 = pybuda.op.Add("add_40", multiply_410, reshape_415) - leaky_relu_417 = pybuda.op.LeakyRelu("leaky_relu_41", add_416, alpha=0.10000000000000001) - conv2d_418 = pybuda.op.Conv2d("conv2d_42", leaky_relu_417, self.get_parameter("backbone.darknet_0.1.conv2.conv.weight"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - add_421 = pybuda.op.Add("add_43", self.get_constant("backbone.darknet_0.1.conv2.bn.running_var"), self.get_constant("const_10578")) - sqrt_422 = pybuda.op.Sqrt("sqrt_44", add_421) - reciprocal_423 = pybuda.op.Reciprocal("reciprocal_45", sqrt_422) - multiply_424 = pybuda.op.Multiply("multiply_46", self.get_constant("const_9578"), reciprocal_423) - multiply_425 = pybuda.op.Multiply("multiply_47", multiply_424, self.get_parameter("backbone.darknet_0.1.conv2.bn.weight")) - reshape_426 = pybuda.op.Reshape("reshape_48", multiply_425, shape=(64, 1, 1)) - multiply_427 = pybuda.op.Multiply("multiply_49", conv2d_418, reshape_426) - multiply_429 = pybuda.op.Multiply("multiply_50", self.get_constant("backbone.darknet_0.1.conv2.bn.running_mean"), self.get_constant("const_11578")) - multiply_430 = pybuda.op.Multiply("multiply_51", multiply_429, multiply_425) - add_431 = pybuda.op.Add("add_52", multiply_430, self.get_parameter("backbone.darknet_0.1.conv2.bn.bias")) - reshape_432 = pybuda.op.Reshape("reshape_53", add_431, shape=(64, 1, 1)) - add_433 = pybuda.op.Add("add_54", multiply_427, reshape_432) - leaky_relu_434 = pybuda.op.LeakyRelu("leaky_relu_55", add_433, alpha=0.10000000000000001) - add_435 = pybuda.op.Add("add_56", leaky_relu_434, leaky_relu_400) - reshape_436 = pybuda.op.Reshape("reshape_final", add_435, shape=(1, 1, 64, 65536)) - return reshape_436 - - @staticmethod - def add_op_overrides(): - pybuda.config.override_op_size("_fused_op_2", (2, 2)) - pybuda.config.override_t_stream_shape("_fused_op_2", (128, 1)) - pybuda.config.override_t_stream_dir("_fused_op_2", "r") - pybuda.config.override_op_size("conv2d_42.dc.conv2d.1.dc.matmul.11", (2, 2)) - pybuda.config.override_t_stream_shape("conv2d_42.dc.conv2d.1.dc.matmul.11", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_42.dc.conv2d.1.dc.matmul.11", "r") - pybuda.config.override_u_kt("conv2d_42.dc.conv2d.1.dc.matmul.11", 1) - pybuda.config.override_op_size("conv2d_42.dc.conv2d.3.dc.matmul.11", (2, 2)) - pybuda.config.override_t_stream_shape("conv2d_42.dc.conv2d.3.dc.matmul.11", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_42.dc.conv2d.3.dc.matmul.11", "r") - pybuda.config.override_u_kt("conv2d_42.dc.conv2d.3.dc.matmul.11", 1) - pybuda.config.override_op_size("conv2d_42.dc.conv2d.5.dc.matmul.11", (2, 2)) - pybuda.config.override_t_stream_shape("conv2d_42.dc.conv2d.5.dc.matmul.11", (128, 1)) - 
pybuda.config.override_t_stream_dir("conv2d_42.dc.conv2d.5.dc.matmul.11", "r") - pybuda.config.override_u_kt("conv2d_42.dc.conv2d.5.dc.matmul.11", 1) - pybuda.config.override_op_size("_fused_op_1", (2, 2)) - pybuda.config.override_t_stream_shape("_fused_op_1", (128, 1)) - pybuda.config.override_t_stream_dir("_fused_op_1", "r") - pybuda.config.override_op_size("conv2d_42.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_42.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_42.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") - pybuda.config.override_op_size("conv2d_42.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_42.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_42.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") - pybuda.config.override_op_size("conv2d_42.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_42.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_42.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") - pybuda.config.override_op_size("conv2d_14.dc.conv2d.1.dc.matmul.11", (2, 2)) - pybuda.config.override_t_stream_shape("conv2d_14.dc.conv2d.1.dc.matmul.11", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_14.dc.conv2d.1.dc.matmul.11", "r") - pybuda.config.override_u_kt("conv2d_14.dc.conv2d.1.dc.matmul.11", 1) - pybuda.config.override_op_size("conv2d_14.dc.conv2d.3.dc.matmul.11", (2, 2)) - pybuda.config.override_t_stream_shape("conv2d_14.dc.conv2d.3.dc.matmul.11", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_14.dc.conv2d.3.dc.matmul.11", "r") - pybuda.config.override_u_kt("conv2d_14.dc.conv2d.3.dc.matmul.11", 1) - pybuda.config.override_op_size("conv2d_14.dc.conv2d.5.dc.matmul.11", (2, 2)) - pybuda.config.override_t_stream_shape("conv2d_14.dc.conv2d.5.dc.matmul.11", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_14.dc.conv2d.5.dc.matmul.11", "r") - pybuda.config.override_u_kt("conv2d_14.dc.conv2d.5.dc.matmul.11", 1) - pybuda.config.override_op_size("leaky_relu_41", (2, 1)) - pybuda.config.override_t_stream_shape("leaky_relu_41", (128, 1)) - pybuda.config.override_t_stream_dir("leaky_relu_41", "r") - pybuda.config.override_op_size("conv2d_14.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_14.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_14.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") - pybuda.config.override_op_size("conv2d_14.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_14.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_14.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") - pybuda.config.override_op_size("conv2d_14.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_14.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_14.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") - pybuda.config.override_op_size("conv2d_28.dc.matmul.8", (2, 1)) - 
pybuda.config.override_t_stream_shape("conv2d_28.dc.matmul.8", (128, 1)) - pybuda.config.override_t_stream_dir("conv2d_28.dc.matmul.8", "r") - pybuda.config.override_u_kt("conv2d_28.dc.matmul.8", 2) - pybuda.config.override_op_size("_fused_op_0", (2, 1)) - pybuda.config.override_t_stream_shape("_fused_op_0", (256, 1)) - pybuda.config.override_t_stream_dir("_fused_op_0", "r") - pybuda.config.override_op_size("conv2d_0.dc.conv2d.1.dc.matmul.11", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_0.dc.conv2d.1.dc.matmul.11", (256, 1)) - pybuda.config.override_t_stream_dir("conv2d_0.dc.conv2d.1.dc.matmul.11", "r") - pybuda.config.override_u_kt("conv2d_0.dc.conv2d.1.dc.matmul.11", 1) - pybuda.config.override_op_size("conv2d_0.dc.conv2d.3.dc.matmul.11", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_0.dc.conv2d.3.dc.matmul.11", (256, 1)) - pybuda.config.override_t_stream_dir("conv2d_0.dc.conv2d.3.dc.matmul.11", "r") - pybuda.config.override_u_kt("conv2d_0.dc.conv2d.3.dc.matmul.11", 1) - pybuda.config.override_op_size("conv2d_0.dc.conv2d.5.dc.matmul.11", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_0.dc.conv2d.5.dc.matmul.11", (256, 1)) - pybuda.config.override_t_stream_dir("conv2d_0.dc.conv2d.5.dc.matmul.11", "r") - pybuda.config.override_u_kt("conv2d_0.dc.conv2d.5.dc.matmul.11", 1) - pybuda.config.override_op_size("conv2d_0.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_0.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (256, 1)) - pybuda.config.override_t_stream_dir("conv2d_0.dc.conv2d.1.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") - pybuda.config.override_op_size("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (256, 1)) - pybuda.config.override_t_stream_dir("conv2d_0.dc.conv2d.3.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") - pybuda.config.override_op_size("conv2d_0.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (2, 1)) - pybuda.config.override_t_stream_shape("conv2d_0.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (256, 1)) - pybuda.config.override_t_stream_dir("conv2d_0.dc.conv2d.5.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", "rz") - -def test_fork_join_yolo_v3(test_kind, test_device): - """ - This test is extracted from yolo_v3 benchmark model. - - Fork-join which causes hang is the one from _fused_op_1 to _fused_op_2. - PYBUDA_FORK_JOIN_EXPAND_OUTPUT_BUFFERS=1 fixes the hang. 
- """ - - if test_kind.is_training(): - pytest.skip("Skipping training due to op overrides.") - - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.balancer_policy = "Ribbon" - compiler_cfg.default_df_override = DataFormat.Float16_b - compiler_cfg.enable_auto_transposing_placement = True - - YoloV3ForkJoin.add_op_overrides() - import os - os.environ["PYBUDA_RIBBON2"] = "1" - os.environ["PYBUDA_FORCE_SEQUENTIAL"] = "1" # TODO: Figure out why this is needed, segfaults otherwise: tenstorrent/pybuda#1935 - os.environ["PYBUDA_OVERRIDE_INPUT_QUEUE_ENTRIES"] = "32" - os.environ["PYBUDA_MAXIMIZE_SPARSE_UBLOCK"] = "1" - os.environ["PYBUDA_DISABLE_CAP_SPARSE_MM_FIDELITY"] = "1" - os.environ["PYBUDA_DISABLE_EXPLICIT_DRAM_IO"] = "1" - - # Fixes hang - os.environ["PYBUDA_FORK_JOIN_EXPAND_OUTPUT_BUFFERS"] = "1" - - relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device) - verify_module(YoloV3ForkJoin("test_fork_join_yolo_v3"), [(32, 3, 512, 512)], - VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol)) - -class HRNetForkJoin(pybuda.PyBudaModule): - - def __init__(self, name): - super().__init__(name) - self.add_parameter("features.init_block.conv1.conv.weight", pybuda.Parameter(*(64, 3, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("features.init_block.conv1.bn.weight", pybuda.Parameter(*(64,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("features.init_block.conv1.bn.bias", pybuda.Parameter(*(64,), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("features.init_block.conv2.conv.weight", pybuda.Parameter(*(64, 64, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("features.init_block.subblocks.block1.body.conv1.conv.weight", pybuda.Parameter(*(64, 64, 1, 1), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("features.init_block.subblocks.block1.body.conv2.conv.weight", pybuda.Parameter(*(64, 64, 3, 3), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("features.init_block.subblocks.block1.body.conv3.conv.weight", pybuda.Parameter(*(256, 64, 1, 1), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("features.init_block.subblocks.block1.identity_conv.conv.weight", pybuda.Parameter(*(256, 64, 1, 1), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("features.init_block.subblocks.block2.body.conv1.conv.weight", pybuda.Parameter(*(64, 256, 1, 1), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("bla", pybuda.Parameter(*(256, 56, 56), requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("bias1", pybuda.Parameter(64, requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("bias2", pybuda.Parameter(64, requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("bias3", pybuda.Parameter(64, requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("bias4", pybuda.Parameter(256, requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_parameter("bias5", pybuda.Parameter(256, requires_grad=True, dev_data_format=pybuda.DataFormat.Float32)) - self.add_constant("features.init_block.conv1.bn.running_var") - 
self.add_constant("features.init_block.conv1.bn.running_mean") - self.add_constant("const_12602") - self.add_constant("const_02602") - self.add_constant("const_22602") - - self.set_constant("features.init_block.conv1.bn.running_var", torch.rand(1, 1)) - self.set_constant("features.init_block.conv1.bn.running_mean", torch.rand(1, 1)) - self.set_constant("const_12602", torch.rand(1, 1)) - self.set_constant("const_02602", torch.rand(1, 1)) - self.set_constant("const_22602", torch.rand(1, 1)) - - for param in self.get_parameters(): - self.set_parameter(param.get_name(), torch.rand(size = param.shape.get_pytorch_shape())) - - def forward(self, act): - - conv2d_1632 = pybuda.op.Conv2d("", act, self.get_parameter("features.init_block.conv1.conv.weight"), stride=[2, 2], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - add_1635 = pybuda.op.Add("", self.get_constant("features.init_block.conv1.bn.running_var"), self.get_constant("const_12602")) - sqrt_1636 = pybuda.op.Sqrt("", add_1635) - reciprocal_1637 = pybuda.op.Reciprocal("", sqrt_1636) - multiply_1638 = pybuda.op.Multiply("", self.get_constant("const_02602"), reciprocal_1637) - multiply_1639 = pybuda.op.Multiply("", multiply_1638, self.get_parameter("features.init_block.conv1.bn.weight")) - reshape_1640 = pybuda.op.Reshape("", multiply_1639, shape=(64, 1, 1)) - multiply_1641 = pybuda.op.Multiply("", conv2d_1632, reshape_1640) - multiply_1643 = pybuda.op.Multiply("", self.get_constant("features.init_block.conv1.bn.running_mean"), self.get_constant("const_22602")) - multiply_1644 = pybuda.op.Multiply("", multiply_1643, multiply_1639) - add_1645 = pybuda.op.Add("", multiply_1644, self.get_parameter("features.init_block.conv1.bn.bias")) - reshape_1646 = pybuda.op.Reshape("", add_1645, shape=(64, 1, 1)) - add_1647 = pybuda.op.Add("", multiply_1641, reshape_1646) - relu_1648 = pybuda.op.Relu("", add_1647) - - conv2d_1649 = pybuda.op.Conv2d("", relu_1648, self.get_parameter("features.init_block.conv2.conv.weight"), self.get_parameter("bias1"), stride=[2, 2], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - relu_1665 = pybuda.op.Relu("", conv2d_1649) - conv2d_1666 = pybuda.op.Conv2d("", relu_1665, self.get_parameter("features.init_block.subblocks.block1.body.conv1.conv.weight"), self.get_parameter("bias2"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) - relu_1682 = pybuda.op.Relu("", conv2d_1666) - conv2d_1683 = pybuda.op.Conv2d("", relu_1682, self.get_parameter("features.init_block.subblocks.block1.body.conv2.conv.weight"), self.get_parameter("bias3"), stride=[1, 1], padding=[1, 1, 1, 1], dilation=1, groups=1, channel_last=0) - relu_1699 = pybuda.op.Relu("", conv2d_1683) - conv2d_1700 = pybuda.op.Conv2d("", relu_1699, self.get_parameter("features.init_block.subblocks.block1.body.conv3.conv.weight"), self.get_parameter("bias4"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) - - # Left side fork - conv2d_1716 = pybuda.op.Conv2d("", relu_1665, self.get_parameter("features.init_block.subblocks.block1.identity_conv.conv.weight"), self.get_parameter("bias5"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) - - # Join - add_1732 = pybuda.op.Add("", conv2d_1700, conv2d_1716) - relu_1733 = pybuda.op.Relu("", add_1732) - - conv2d_1734 = pybuda.op.Conv2d("", relu_1733, self.get_parameter("features.init_block.subblocks.block2.body.conv1.conv.weight"), stride=[1, 1], padding=[0, 0, 0, 0], dilation=1, groups=1, channel_last=0) - - return conv2d_1734 - - 
-    @staticmethod
-    def add_overrides():
-        pybuda.config.override_op_size("conv2d_14.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (7, 1))
-        pybuda.config.override_t_stream_shape("conv2d_14.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (7, 1))
-        pybuda.config.override_u_kt("conv2d_14.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", 28)
-
-        # Fork node
-        pybuda.config.override_op_size("conv2d_14.dc.matmul.11", (2, 2))
-        pybuda.config.override_t_stream_shape("conv2d_14.dc.matmul.11", (7, 1))
-        pybuda.config.override_u_kt("conv2d_14.dc.matmul.11", 18)
-
-        # Short path
-        pybuda.config.override_op_size("conv2d_21.dc.matmul.8", (2, 4))
-        pybuda.config.override_t_stream_shape("conv2d_21.dc.matmul.8", (7, 1))
-        pybuda.config.override_u_kt("conv2d_21.dc.matmul.8", 1)
-
-        # Long path
-        pybuda.config.override_op_size("conv2d_16.dc.matmul.8", (2, 1))
-        pybuda.config.override_t_stream_shape("conv2d_16.dc.matmul.8", (7, 1))
-        pybuda.config.override_u_kt("conv2d_16.dc.matmul.8", 1)
-        pybuda.config.override_op_size("conv2d_18.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (6, 2))
-        pybuda.config.override_t_stream_shape("conv2d_18.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", (1, 1))
-        pybuda.config.override_u_kt("conv2d_18.dc.sparse_matmul.9.dc.sparse_matmul.1.lc2", 7)
-        pybuda.config.override_op_size("conv2d_18.dc.matmul.11", (1, 1))
-        pybuda.config.override_t_stream_shape("conv2d_18.dc.matmul.11", (1, 1))
-        pybuda.config.override_u_kt("conv2d_18.dc.matmul.11", 1)
-        pybuda.config.override_op_size("conv2d_20.dc.matmul.8", (2, 4))
-        pybuda.config.override_t_stream_shape("conv2d_20.dc.matmul.8", (7, 1))
-        pybuda.config.override_u_kt("conv2d_20.dc.matmul.8", 1)
-
-        # Join
-        pybuda.config.override_op_size("add_22", (2, 1))
-        pybuda.config.override_t_stream_shape("add_22", (7, 1))
-
-def test_fork_join_hrnet(test_kind, test_device):
-    if test_kind.is_training():
-        pytest.skip("Skipping training test")
-
-    if test_device.arch == pybuda.BackendDevice.Grayskull:
-        pytest.skip("There is not enough L1 memory on Grayskull to fit some of these ops.")
-
-    channels = 3
-    height = 224
-    width = 224
-
-    compiler_cfg = pybuda.config._get_global_compiler_config()
-    compiler_cfg.balancer_policy = "Ribbon"
-    compiler_cfg.default_df_override = DataFormat.Float16_b
-
-    import os
-    os.environ["PYBUDA_RIBBON2"] = "1"
-
-    HRNetForkJoin.add_overrides()
-
-    relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device)
-    verify_module(HRNetForkJoin("test_fork_join_hrnet"), [(1, channels, height, width)],
-            VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol))
-
-class ForkJoinExpandOutputBuffer(pybuda.PyBudaModule):
-    def __init__(self, name):
-        super().__init__(name)
-        self.weights0 = pybuda.Parameter(1, 64, 128, requires_grad=False)
-
-    def forward(self, act1):
-        fork = pybuda.op.Matmul("matmul", act1, self.weights0)
-        left = pybuda.op.Exp("exp", fork)
-        right = pybuda.op.Buffer("buffer", fork)
-        join = pybuda.op.Add("add", left, right)
-        return join
-
-# Test implementation of Backend constrains for buf_size_mb.
-def test_fork_join_expand_output_buffer_constraints(test_kind, test_device):
-    if test_kind.is_training():
-        pytest.skip("Skipping training test")
-
-    pybuda.config.override_op_size("matmul", (2, 1))
-    pybuda.config.override_op_size("exp", (2, 4))
-    pybuda.config.override_t_stream_shape("matmul", (10, 1))
-    pybuda.config.override_t_stream_shape("exp", (1, 1))
-
-    relative_atol, pcc = get_relaxed_atol_pcc(test_kind, test_device)
-    verify_module(ForkJoinExpandOutputBuffer("test_fork_join_expand_output_buffer_constraints"), [(1, 1, 6400, 64)],
-            VerifyConfig(test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch, pcc=pcc, relative_atol=relative_atol))
diff --git a/pybuda/test/test_perf_simulator.py b/pybuda/test/test_perf_simulator.py
deleted file mode 100644
index 94b44699f..000000000
--- a/pybuda/test/test_perf_simulator.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
-
-# SPDX-License-Identifier: Apache-2.0
-import os
-
-import pybuda
-from pybuda.verify import verify_module, VerifyConfig, TestKind
-from pybuda import PyTorchModule
-from transformers import BertModel, BertConfig
-from test.utils import download_model
-
-
-def test_bert_encoder():
-    pybuda.config._get_global_compiler_config().compile_depth = pybuda.config.CompileDepth.GENERATE_NETLIST
-
-    model_name = "bert-base-uncased"
-    seq_len = 128
-
-    config = download_model(BertConfig.from_pretrained, model_name)
-    config.num_hidden_layers = 1
-
-    model = BertModel(config=config)
-    encoder = PyTorchModule("bert_encoder", model.encoder)
-    microbatch = 1
-
-    os.environ["PYBUDA_PERF_SIMULATOR"] = "1"
-    try:
-        verify_module(encoder, [(microbatch, seq_len, config.hidden_size), (microbatch, 1, seq_len, seq_len)],
-                VerifyConfig(test_kind=TestKind.INFERENCE, skip_shutdown=True, fp32_fallback=pybuda.DataFormat.Bfp8_b))
-
-        perf_results = pybuda.pybudaglobal.get_devices()[0]._compile_output.perf_model_results
-        print(perf_results)
-
-    finally:
-        del os.environ["PYBUDA_PERF_SIMULATOR"]
-
-class LayernormFork(pybuda.PyBudaModule):
-    """
-    Module with a layernorm, and some matmuls
-    """
-
-    shape = (1, 1, 128, 512)
-
-    def __init__(self, name):
-        super().__init__(name)
-        #self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True)
-        #self.weights2 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True)
-        self.ln_weights = pybuda.Parameter(1, self.shape[-1], requires_grad=True)
-        self.ln_bias = pybuda.Parameter(1, self.shape[-1], requires_grad=True)
-
-    def forward(self, act1):
-        #a1 = pybuda.op.Matmul("matmul1", act1, self.weights1)
-        a1 = act1
-        a2 = pybuda.op.Layernorm("layernorm", a1, self.ln_weights, self.ln_bias)
-        #a3 = pybuda.op.Matmul("matmul2", a2, self.weights2)
-        a3 = a2
-        return a3
-
-def test_layernorm_fork(test_device):
-    #pybuda.config._get_global_compiler_config().compile_depth = pybuda.config.CompileDepth.GENERATE_NETLIST
-
-    microbatch = 64
-
-    os.environ["PYBUDA_PERF_SIMULATOR"] = "1"
-    try:
-        pybuda.config.set_configuration_options(performance_trace=pybuda.PerfTraceLevel.VERBOSE)
-        verify_module(LayernormFork("layernorm_fork"), [(microbatch, LayernormFork.shape[-2], LayernormFork.shape[-1])],
-            VerifyConfig(
-                test_kind=TestKind.INFERENCE,
-                devtype=test_device.devtype,
-                arch=test_device.arch,
-                skip_shutdown=True,
-                fp32_fallback=pybuda.DataFormat.Bfp8_b))
-
-        perf_results = pybuda.pybudaglobal.get_devices()[0]._compile_output.perf_model_results
-        print(perf_results)
-
-    finally:
-        del os.environ["PYBUDA_PERF_SIMULATOR"]
-
-class MHALikeFork(pybuda.PyBudaModule):
-    """
-    Module with a layernorm, and some matmuls
-    """
-
-    shape = (1, 1, 128, 768)
-
-    def __init__(self, name):
-        super().__init__(name)
-        self.weights1 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True)
-        self.weights2 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True)
-        self.weights3 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True)
-        self.weights4 = pybuda.Parameter(self.shape[-1], self.shape[-1], requires_grad=True)
-        self.bias = pybuda.Parameter(1, self.shape[-1], requires_grad=True)
-
-    def forward(self, act1, act2):
-        # don't for out of a queue, since that doesn't need buffering
-        in1 = act1 - act2
-
-        # fork
-        a1 = pybuda.op.Matmul("matmul1", in1, self.weights1)
-        a2 = pybuda.op.Matmul("matmul2", in1, self.weights2)
-        a3 = pybuda.op.Matmul("matmul3", in1, self.weights3)
-
-        a23 = a2+a3
-        a23 = pybuda.op.Matmul("matmul_a23_1", a23, self.weights4)
-        a23 = pybuda.op.Matmul("matmul_a23_2", a23, self.weights4)
-        a23 = pybuda.op.Matmul("matmul_a23_3", a23, self.weights4)
-
-        return a1 + a23 # join
-
-def test_mha_fork(test_device):
-
-    microbatch = 64
-    seq_len = MHALikeFork.shape[-2]
-    hidden_size = MHALikeFork.shape[-1]
-    os.environ["PYBUDA_PERF_SIMULATOR"] = "1"
-    try:
-        pybuda.config.set_configuration_options(performance_trace=pybuda.PerfTraceLevel.VERBOSE)
-        verify_module(MHALikeFork("mha_like_fork"), [(microbatch, seq_len, hidden_size), (microbatch, seq_len, hidden_size)],
-            VerifyConfig(
-                test_kind=TestKind.INFERENCE,
-                devtype=test_device.devtype,
-                arch=test_device.arch,
-                skip_shutdown=True,
-                fp32_fallback=pybuda.DataFormat.Bfp8_b))
-
-        perf_results = pybuda.pybudaglobal.get_devices()[0]._compile_output.perf_model_results
-        print(perf_results)
-
-    finally:
-        del os.environ["PYBUDA_PERF_SIMULATOR"]
diff --git a/pybuda/test/test_streaming.py b/pybuda/test/test_streaming.py
deleted file mode 100644
index aedb576a0..000000000
--- a/pybuda/test/test_streaming.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
-
-# SPDX-License-Identifier: Apache-2.0
-import pybuda
-import pytest
-import torch
-
-from pybuda.config import CompileDepth, _get_global_compiler_config
-from .common import run
-
-
-def test_stream_transpose(test_kind, test_device):
-    if test_kind.is_training():
-        pytest.skip()
-
-    @run(
-        pybuda.VerifyConfig(
-            test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch
-        ),
-    )
-    def stream_transpose(a, b, param=None):
-        x = pybuda.op.Add("add0", a, b)
-        x = pybuda.op.Transpose("transpose0", x, 2, 3)
-        x = pybuda.op.Matmul("mm0", x, param)
-        return x
-
-    compiler_cfg = pybuda.config._get_global_compiler_config()
-
-    pybuda.config.override_op_size("add0", (1, 1))
-    pybuda.config.override_op_size("transpose0", (1, 1))
-    pybuda.config.override_op_size("mm0", (1, 1))
-
-    shape = (1, 1, 32, 16384)
-    a = pybuda.Tensor.create_from_torch(
-        torch.rand(*shape, requires_grad=test_kind.is_training())
-    )
-    b = pybuda.Tensor.create_from_torch(
-        torch.rand(*shape, requires_grad=test_kind.is_training())
-    )
-    c = pybuda.Tensor.create_from_torch(torch.rand(1, 1, 32, 32), constant=True)
-    stream_transpose(a, b, param=c)
-
-
-def test_stream_to_slice(test_kind, test_device):
-    if test_kind.is_training():
-        pytest.skip()
-
-    @run(
-        pybuda.VerifyConfig(
-            test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch
-        ),
-    )
-    def stream_to_slice(x):
-        x = pybuda.op.Buffer("buf0", x)
-        x = pybuda.op.VSlice("vslice0", x, 512)
- x = pybuda.op.Buffer("buf1", x) - return x - - compiler_cfg = pybuda.config._get_global_compiler_config() - - pybuda.config.override_op_size("buf0", (1, 1)) - pybuda.config.override_op_size("buf1", (1, 1)) - - shape = (1, 1, 16384, 32) - a = pybuda.Tensor.create_from_torch( - torch.rand(*shape, requires_grad=test_kind.is_training()) - ) - stream_to_slice(a) - - -@pytest.mark.parametrize("mode", ["producer_streaming", "consumer_streaming", "both_streaming"]) -def test_stream_slice_transpose(test_kind, test_device, mode): - if test_kind.is_training(): - pytest.skip() - - @run( - pybuda.VerifyConfig( - test_kind=test_kind, devtype=test_device.devtype, arch=test_device.arch - ), - ) - def stream_slice_transpose(x): - x = pybuda.op.Buffer("producer", x) - x = pybuda.op.VSlice("vslice", x, 2) - x = pybuda.op.Transpose("consumer", x, 2, 3) - return x - - compiler_cfg = pybuda.config._get_global_compiler_config() - - if mode == "producer_streaming": - pybuda.config.override_t_stream_shape("producer", (2, 1)) - pybuda.config.override_t_stream_shape("consumer", (1, 1)) - elif mode == "consumer_streaming": - pybuda.config.override_t_stream_shape("producer", (1, 1)) - pybuda.config.override_t_stream_shape("consumer", (1, 2)) - elif mode == "both_streaming": - pybuda.config.override_t_stream_shape("producer", (2, 1)) - pybuda.config.override_t_stream_shape("consumer", (1, 2)) - - shape = (1, 1, 128, 32) - a = pybuda.Tensor.create_from_torch( - torch.rand(*shape, requires_grad=test_kind.is_training()) - ) - stream_slice_transpose(a) - - -@pytest.mark.parametrize("dir", ["r", "c"]) -def test_stream_interleave(test_device, dir): - pybuda.config.set_configuration_options(balancer_policy="MaximizeTMinimizeGrid") - pybuda.config.override_t_stream_dir("interleave", dir) - - @run(test_device) - def stream_interleave(a, b): - return pybuda.op.Interleave("interleave", a, b, axis=-3, stride=1) - - shape = (1, 4, 512, 512) - a = pybuda.Tensor.create_from_torch(torch.randn(*shape)) - b = pybuda.Tensor.create_from_torch(torch.randn(*shape)) - stream_interleave(a, b) - - -def test_manual_streaming(test_device): - - @run(test_device) - def manual_stream(x): - x = pybuda.op.Buffer("buf0", x) - x = pybuda.op.Buffer("buf1", x) - x = pybuda.op.Buffer("buf2", x) - return x - - compiler_cfg = pybuda.config._get_global_compiler_config() - compiler_cfg.manual_t_streaming = True - - pybuda.config.override_t_stream_shape("buf1", (4, 1)) - - shape = (1, 1, 128, 128) - a = pybuda.Tensor.create_from_torch(torch.rand(*shape)) - manual_stream(a) diff --git a/pytest.ini b/pytest.ini index 118f863c4..694708482 100644 --- a/pytest.ini +++ b/pytest.ini @@ -7,14 +7,14 @@ addopts = -svv --junit-xml=reports/report.xml # Where pytest should look for tests testpaths = # Ops - pybuda/test/mlir/test_ops.py + forge/test/mlir/test_ops.py # API - pybuda/test/test_api.py + forge/test/test_api.py # MNIST Linear - pybuda/test/mlir/mnist/test_inference.py - pybuda/test/mlir/test_training.py + forge/test/mlir/mnist/test_inference.py + forge/test/mlir/test_training.py filterwarnings = ignore::DeprecationWarning diff --git a/scripts/bisect.sh b/scripts/bisect.sh index df2aea38e..6f3c9d5f3 100644 --- a/scripts/bisect.sh +++ b/scripts/bisect.sh @@ -9,7 +9,7 @@ Script run command : bash ./scripts/bisect.sh INPUTS: Enter Pytest Command: -pytest --devtype golden pybuda/test/model_demos/high_prio/cnn/pytorch/test_xception.py::test_xception_timm[Golden-xception] --device-config gs_e150 +pytest --devtype golden 
 Enter Passing Commit Id:
 8e576abe7fdc250ba88775322b448fa05acf52d1 #passing commit id

 Enter Failing Commit Id:
@@ -28,9 +28,9 @@ set_evn_flags() {
    local arch=$1
    local runtype=$2
    local device_config=$3
-   export PYBUDA_VERIFY_POST_AUTOGRAD_PASSES=1
-   export PYBUDA_VERIFY_POST_PLACER=1
-   export PYBUDA_VERIFY_NET2PIPE=3
+   export FORGE_VERIFY_POST_AUTOGRAD_PASSES=1
+   export FORGE_VERIFY_POST_PLACER=1
+   export FORGE_VERIFY_NET2PIPE=3
    export PYTEST_ADDOPTS=" -svv"

    if [ "$arch" = "wormhole_b0" ] ; then
@@ -38,14 +38,14 @@ set_evn_flags() {
        export ARCH_NAME=wormhole_b0

        if [ "$device_config" = "no" ] ; then
-           export PYBUDA_FORCE_EMULATE_HARVESTED=1
+           export FORGE_FORCE_EMULATE_HARVESTED=1
        fi

        if [ "$runtype" = "compile" ] ; then
            export GOLDEN_WORMHOLE_B0=1
-           export PYBUDA_DEVMODE=1
-           export PYBUDA_EMULATE_SILICON_DEVICE=1
-           export PYBUDA_VERIFY_GOLDEN=1
+           export FORGE_DEVMODE=1
+           export FORGE_EMULATE_SILICON_DEVICE=1
+           export FORGE_VERIFY_GOLDEN=1
        else
            export PYTEST_ADDOPTS=" -svv --silicon-only"
        fi
@@ -56,13 +56,13 @@ set_evn_flags() {
        export ARCH_NAME=grayskull

        if [ "$device_config" = "e300" ] ; then
-           export PYBUDA_FORCE_EMULATE_HARVESTED=1
+           export FORGE_FORCE_EMULATE_HARVESTED=1
        fi

        if [ "$runtype" = "compile" ] ; then
-           export PYBUDA_DEVMODE=1
-           export PYBUDA_EMULATE_SILICON_DEVICE=1
-           export PYBUDA_VERIFY_GOLDEN=1
+           export FORGE_DEVMODE=1
+           export FORGE_EMULATE_SILICON_DEVICE=1
+           export FORGE_VERIFY_GOLDEN=1
        else
            export PYTEST_ADDOPTS=" -svv --silicon-only"
        fi
@@ -113,7 +113,7 @@ env_clean_and_build() {
    rm -rf .hlkc_cache
    rm -rf wheel_out/
    rm -rf wheel_env/
-   rm -rf pybuda.egg-info/
+   rm -rf forge.egg-info/
    rm -rf wheele_env/
    rm -rf generated_modules
    rm -rf tt_build
diff --git a/scripts/compare_perf.py b/scripts/compare_perf.py
index 83d39f4d0..bbfb25b07 100644
--- a/scripts/compare_perf.py
+++ b/scripts/compare_perf.py
@@ -25,7 +25,7 @@ def get_perf_from_es(es, build_id):
        },
        "size": 100
    }
-    perf_res = es.search(index="pybuda-perf-ci", body=query)
+    perf_res = es.search(index="forge-perf-ci", body=query)
     hits = perf_res['hits']['hits']
     return [{
         "build_id": h["_source"]["build_id"],
diff --git a/setup.py b/setup.py
index fcd0a0af0..ba6d50ca9 100644
--- a/setup.py
+++ b/setup.py
@@ -14,9 +14,9 @@ from setuptools.command.build_ext import build_ext


-pybuda_files = {
+forge_files = {
     "test" : {
-        "path": "pybuda/test",
+        "path": "forge/test",
         "files": [
             "conftest.py",
             "__init__.py",
@@ -52,16 +52,16 @@ def run(self):
         env = os.environ.copy()
         env.update(additional_env_variables)
         nproc = os.cpu_count()
-        subprocess.check_call(["make", f"-j{nproc}", "pybuda", r'DEVICE_VERSIM_INSTALL_ROOT=\$$ORIGIN/../..'], env=env)
+        subprocess.check_call(["make", f"-j{nproc}", "forge", r'DEVICE_VERSIM_INSTALL_ROOT=\$$ORIGIN/../..'], env=env)

-        src = "build/lib/libpybuda_csrc.so"
+        src = "build/lib/libforge_csrc.so"
         self.copy_file(src, os.path.join(build_lib, filename))

-        self._copy_pybuda(build_lib)
+        self._copy_forge(build_lib)

-    def _copy_pybuda(self, target_path):
+    def _copy_forge(self, target_path):

-        for t, d in pybuda_files.items():
+        for t, d in forge_files.items():
             path = target_path + "/" + d["path"]
             os.makedirs(path, exist_ok=True)
@@ -86,13 +86,13 @@ def _copy_pybuda(self, target_path):
 with open("python_env/dist_requirements.txt", "r") as f:
     requirements += [r for r in f.read().splitlines() if not r.startswith("-r")]

-# pybuda._C
-pybuda_c = TTExtension("pybuda._C")
+# forge._C
+forge_c = TTExtension("forge._C")

-ext_modules = [pybuda_c]
+ext_modules = [forge_c]

-packages = [p for p in find_packages("pybuda") if not p.startswith("test")]
+packages = [p for p in find_packages("forge") if not p.startswith("test")]

 short_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()
 date = subprocess.check_output(['git', 'show', '-s', '--format=%cd', "--date=format:%y%m%d", 'HEAD']).decode('ascii').strip()
@@ -103,7 +103,7 @@ def _copy_pybuda(self, target_path):
 version = "0.1." + date + "+dev." + arch_code + "." + short_hash

 setup(
-    name='pybuda',
+    name='forge',
     version=version,
     author='Tenstorrent',
     url="http://www.tenstorrent.com",
@@ -111,8 +111,8 @@ def _copy_pybuda(self, target_path):
     description='AI/ML framework for Tenstorrent devices',
     python_requires='>=3.8',
     packages=packages,
-    package_data={"pybuda": ["tti/runtime_param_yamls/*.yaml"]},
-    package_dir={"pybuda": "pybuda/pybuda"},
+    package_data={"forge": ["tti/runtime_param_yamls/*.yaml"]},
+    package_dir={"forge": "forge/forge"},
     long_description=long_description,
     long_description_content_type="text/markdown",
     ext_modules=ext_modules,
@@ -120,7 +120,7 @@ def _copy_pybuda(self, target_path):
     zip_safe=False,
     install_requires=requirements,
     license="TBD",
-    keywords="pybuda machine learning tenstorrent",
+    keywords="forge machine learning tenstorrent",
     # PyPI
     classifiers=[
         "Development Status :: 3 - Alpha",
diff --git a/third_party/tvm b/third_party/tvm
index 2b488782a..79a8fb013 160000
--- a/third_party/tvm
+++ b/third_party/tvm
@@ -1 +1 @@
-Subproject commit 2b488782a61fbf2eafdc9c69f4cb8eafbfb5f0aa
+Subproject commit 79a8fb0131c4d00a4a441b609e74f40dfcc9e037
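Note on the net effect of this patch for downstream users: the Python package, its native extension, the test tree, and the CI environment variables all drop the pybuda name in favour of forge. The minimal sketch below is illustrative only; the names are taken from the hunks above (forge, forge._C, FORGE_DEVMODE, forge/test/mlir/test_ops.py), and the assumption that the op- and config-level API otherwise mirrors the old pybuda surface is not verified here.

    # Hypothetical post-rename usage; assumes the forge package mirrors the old pybuda API one-to-one.
    import os

    os.environ["FORGE_DEVMODE"] = "1"   # was PYBUDA_DEVMODE (see the scripts/bisect.sh hunk)

    import forge                        # was: import pybuda

    # Assumed to exist after the rename, mirroring pybuda.config._get_global_compiler_config().
    compiler_cfg = forge.config._get_global_compiler_config()

Test invocations move the same way, e.g. "pytest forge/test/mlir/test_ops.py" replaces "pytest pybuda/test/mlir/test_ops.py" (see the pytest.ini hunk).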