diff --git a/.github/check-spdx.yml b/.github/check-spdx.yml
new file mode 100644
index 000000000..9cded255d
--- /dev/null
+++ b/.github/check-spdx.yml
@@ -0,0 +1,13 @@
+DEFAULT:
+  perform_check: yes # Perform check for all files
+  allowed_licenses:
+    - Apache-2.0
+  license_for_new_files: Apache-2.0 # license to be used when inserting a new copyright notice
+  new_notice_c: |+ # notice for new C, CPP, H, HPP and LD files
+    // SPDX-FileCopyrightText: (c) {years} Tenstorrent AI ULC
+    //
+    // SPDX-License-Identifier: {license}
+  new_notice_python: |+ # notice for new python files
+    # SPDX-FileCopyrightText: (c) {years} Tenstorrent AI ULC
+    #
+    # SPDX-License-Identifier: {license}
\ No newline at end of file
diff --git a/.github/workflows/pull-request-workflow.yml b/.github/workflows/pull-request-workflow.yml
new file mode 100644
index 000000000..516fd2ce2
--- /dev/null
+++ b/.github/workflows/pull-request-workflow.yml
@@ -0,0 +1,13 @@
+name: Pull request workflow
+
+on:
+  workflow_dispatch:
+  workflow_call:
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  spdx:
+    uses: ./.github/workflows/spdx.yml
+    secrets: inherit
\ No newline at end of file
diff --git a/.github/workflows/spdx.yml b/.github/workflows/spdx.yml
new file mode 100644
index 000000000..a719cb2a0
--- /dev/null
+++ b/.github/workflows/spdx.yml
@@ -0,0 +1,15 @@
+name: spdx
+
+on:
+  workflow_dispatch:
+  workflow_call:
+
+jobs:
+  check-spdx-headers:
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout
+        uses: actions/checkout@v4
+      - uses: enarx/spdx@master
+        with:
+          licenses: Apache-2.0
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000..f39826aac
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,6 @@
+repos:
+  - repo: https://github.com/espressif/check-copyright/
+    rev: v1.0.3
+    hooks:
+      - id: check-copyright
+        args: ['--config', '.github/check-spdx.yml']
\ No newline at end of file
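Note on the notice templates in check-spdx.yml above: the {years} and {license} placeholders are filled in by the check-copyright hook when it inserts a header into a new file. A minimal Python sketch of what the rendered Python notice would look like, assuming simple str.format-style substitution (the hook performs the actual substitution itself; this only illustrates the template):

# Sketch only: illustrates the {years}/{license} placeholders from
# .github/check-spdx.yml; the real filling-in is done by the
# espressif/check-copyright hook, not by this snippet.
new_notice_python = (
    "# SPDX-FileCopyrightText: (c) {years} Tenstorrent AI ULC\n"
    "#\n"
    "# SPDX-License-Identifier: {license}\n"
)

print(new_notice_python.format(years="2024", license="Apache-2.0"))
# -> # SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC
#    #
#    # SPDX-License-Identifier: Apache-2.0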
ULC + +# SPDX-License-Identifier: Apache-2.0 + from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer import pybuda diff --git a/pybuda/test/mlir/mnist/training/mnist_linear_pybuda.py b/pybuda/test/mlir/mnist/training/mnist_linear_pybuda.py index eb0364aae..dba714365 100644 --- a/pybuda/test/mlir/mnist/training/mnist_linear_pybuda.py +++ b/pybuda/test/mlir/mnist/training/mnist_linear_pybuda.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 + import torch from torchvision import datasets, transforms from torch.utils.tensorboard import SummaryWriter diff --git a/pybuda/test/mlir/mnist/training/mnist_linear_pytorch.py b/pybuda/test/mlir/mnist/training/mnist_linear_pytorch.py index 7958745e4..a9a4b8f92 100644 --- a/pybuda/test/mlir/mnist/training/mnist_linear_pytorch.py +++ b/pybuda/test/mlir/mnist/training/mnist_linear_pytorch.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 + import torch from torchvision import datasets, transforms from torch.utils.tensorboard import SummaryWriter diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_command.sh b/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_command.sh index 8930a77ce..2b91989ac 100644 --- a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_command.sh +++ b/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/basic/test_command.sh @@ -1,4 +1,6 @@ -# +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 # Commands for running resnet basic block # diff --git a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_command.sh b/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_command.sh index 41aaa6352..f3a379c7f 100644 --- a/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_command.sh +++ b/pybuda/test/nn/architectures/cnn/resnet/resnet_blocks/bottleneck/test_command.sh @@ -1,4 +1,6 @@ -# +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 # Commands for running resnet bottleneck block # diff --git a/pybuda/test/operators/eltwise_binary/test_command.sh b/pybuda/test/operators/eltwise_binary/test_command.sh index bdd2eea83..3a0fdcf93 100644 --- a/pybuda/test/operators/eltwise_binary/test_command.sh +++ b/pybuda/test/operators/eltwise_binary/test_command.sh @@ -1,3 +1,6 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 # # Commands for running element-wise binary tests # diff --git a/pybuda/test/operators/eltwise_unary/test_command.sh b/pybuda/test/operators/eltwise_unary/test_command.sh index 76a061d44..8b22080c2 100644 --- a/pybuda/test/operators/eltwise_unary/test_command.sh +++ b/pybuda/test/operators/eltwise_unary/test_command.sh @@ -1,3 +1,6 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 # # Commands for running element-wise unary tests # diff --git a/pybuda/test/operators/matmul/test_command.sh b/pybuda/test/operators/matmul/test_command.sh index ecc5b18a9..34e211611 100644 --- a/pybuda/test/operators/matmul/test_command.sh +++ b/pybuda/test/operators/matmul/test_command.sh @@ -1,3 +1,6 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC + +# SPDX-License-Identifier: Apache-2.0 # # Commands for running matmul tests # diff --git a/pybuda/test/operators/reduce/test_command.sh 
diff --git a/pybuda/test/operators/reduce/test_command.sh b/pybuda/test/operators/reduce/test_command.sh
index 2a76f4033..75bfcddd3 100644
--- a/pybuda/test/operators/reduce/test_command.sh
+++ b/pybuda/test/operators/reduce/test_command.sh
@@ -1,4 +1,6 @@
-#
+# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
+
+# SPDX-License-Identifier: Apache-2.0
 # Commands for running reduce tests
 #
 
diff --git a/pybuda/test/operators/tm/hstack_hslice/test_command.sh b/pybuda/test/operators/tm/hstack_hslice/test_command.sh
index 67adcdbd5..0f8575230 100644
--- a/pybuda/test/operators/tm/hstack_hslice/test_command.sh
+++ b/pybuda/test/operators/tm/hstack_hslice/test_command.sh
@@ -1,3 +1,6 @@
+# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
+
+# SPDX-License-Identifier: Apache-2.0
 #
 # Commands for running hstack and hslice tests
 #
diff --git a/pybuda/test/operators/tm/reshape/test_command.sh b/pybuda/test/operators/tm/reshape/test_command.sh
index 0309156cf..a5dc9bfd2 100644
--- a/pybuda/test/operators/tm/reshape/test_command.sh
+++ b/pybuda/test/operators/tm/reshape/test_command.sh
@@ -1,4 +1,6 @@
-#
+# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
+
+# SPDX-License-Identifier: Apache-2.0
 # Commands for running reshape tests
 #
 
diff --git a/pybuda/test/tvm/cnn/pytorch/videopose/model.py b/pybuda/test/tvm/cnn/pytorch/videopose/model.py
deleted file mode 100644
index 7455d992b..000000000
--- a/pybuda/test/tvm/cnn/pytorch/videopose/model.py
+++ /dev/null
@@ -1,272 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2018-present, Facebook, Inc.
-#
-# SPDX-License-Identifier: Attribution-NonCommercial 4.0 International
-# https://github.com/facebookresearch/VideoPose3D
-
-
-import torch.nn as nn
-
-
-class TemporalModelBase(nn.Module):
-    """
-    Do not instantiate this class.
-    """
-
-    def __init__(
-        self,
-        num_joints_in,
-        in_features,
-        num_joints_out,
-        filter_widths,
-        causal,
-        dropout,
-        channels,
-    ):
-        super().__init__()
-
-        # Validate input
-        for fw in filter_widths:
-            assert fw % 2 != 0, "Only odd filter widths are supported"
-
-        self.num_joints_in = num_joints_in
-        self.in_features = in_features
-        self.num_joints_out = num_joints_out
-        self.filter_widths = filter_widths
-
-        self.drop = nn.Dropout(dropout)
-        self.relu = nn.ReLU(inplace=True)
-
-        self.pad = [filter_widths[0] // 2]
-        self.expand_bn = nn.BatchNorm1d(channels, momentum=0.1)
-        self.shrink = nn.Conv1d(channels, num_joints_out * 3, 1)
-
-    def set_bn_momentum(self, momentum):
-        self.expand_bn.momentum = momentum
-        for bn in self.layers_bn:
-            bn.momentum = momentum
-
-    def receptive_field(self):
-        """
-        Return the total receptive field of this model as # of frames.
-        """
-        frames = 0
-        for f in self.pad:
-            frames += f
-        return 1 + 2 * frames
-
-    def total_causal_shift(self):
-        """
-        Return the asymmetric offset for sequence padding.
-        The returned value is typically 0 if causal convolutions are disabled,
-        otherwise it is half the receptive field.
- """ - frames = self.causal_shift[0] - next_dilation = self.filter_widths[0] - for i in range(1, len(self.filter_widths)): - frames += self.causal_shift[i] * next_dilation - next_dilation *= self.filter_widths[i] - return frames - - def forward(self, x): - assert len(x.shape) == 4 - assert x.shape[-2] == self.num_joints_in - assert x.shape[-1] == self.in_features - - sz = x.shape[:3] - x = x.view(x.shape[0], x.shape[1], -1) - x = x.permute(0, 2, 1) - - x = self._forward_blocks(x) - - x = x.permute(0, 2, 1) - x = x.view(sz[0], -1, self.num_joints_out, 3) - - return x - - -class TemporalModel(TemporalModelBase): - """ - Reference 3D pose estimation model with temporal convolutions. - This implementation can be used for all use-cases. - """ - - def __init__( - self, - num_joints_in, - in_features, - num_joints_out, - filter_widths, - causal=False, - dropout=0.25, - channels=1024, - dense=False, - ): - """ - Initialize this model. - - Arguments: - num_joints_in -- number of input joints (e.g. 17 for Human3.6M) - in_features -- number of input features for each joint (typically 2 for 2D input) - num_joints_out -- number of output joints (can be different than input) - filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field - causal -- use causal convolutions instead of symmetric convolutions (for real-time applications) - dropout -- dropout probability - channels -- number of convolution channels - dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment) - """ - super().__init__( - num_joints_in, - in_features, - num_joints_out, - filter_widths, - causal, - dropout, - channels, - ) - - self.expand_conv = nn.Conv1d( - num_joints_in * in_features, channels, filter_widths[0], bias=False - ) - - layers_conv = [] - layers_bn = [] - - self.causal_shift = [(filter_widths[0]) // 2 if causal else 0] - next_dilation = filter_widths[0] - for i in range(1, len(filter_widths)): - self.pad.append((filter_widths[i] - 1) * next_dilation // 2) - self.causal_shift.append( - (filter_widths[i] // 2 * next_dilation) if causal else 0 - ) - - layers_conv.append( - nn.Conv1d( - channels, - channels, - filter_widths[i] if not dense else (2 * self.pad[-1] + 1), - dilation=next_dilation if not dense else 1, - bias=False, - ) - ) - layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1)) - layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False)) - layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1)) - - next_dilation *= filter_widths[i] - - self.layers_conv = nn.ModuleList(layers_conv) - self.layers_bn = nn.ModuleList(layers_bn) - - def _forward_blocks(self, x): - - x = self.drop(self.relu(self.expand_bn(self.expand_conv(x)))) - - for i in range(len(self.pad) - 1): - pad = self.pad[i + 1] - shift = self.causal_shift[i + 1] - res = x[:, :, pad + shift : x.shape[2] - pad + shift] - - x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x)))) - x = res + self.drop( - self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x))) - ) - - x = self.shrink(x) - return x - - -class TemporalModelOptimized1f(TemporalModelBase): - """ - 3D pose estimation model optimized for single-frame batching, i.e. - where batches have input length = receptive field, and output length = 1. - This scenario is only used for training when stride == 1. - - This implementation replaces dilated convolutions with strided convolutions - to avoid generating unused intermediate results. 
-    with the reference implementation.
-    """
-
-    def __init__(
-        self,
-        num_joints_in,
-        in_features,
-        num_joints_out,
-        filter_widths,
-        causal=False,
-        dropout=0.25,
-        channels=1024,
-    ):
-        """
-        Initialize this model.
-
-        Arguments:
-        num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
-        in_features -- number of input features for each joint (typically 2 for 2D input)
-        num_joints_out -- number of output joints (can be different than input)
-        filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
-        causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
-        dropout -- dropout probability
-        channels -- number of convolution channels
-        """
-        super().__init__(
-            num_joints_in,
-            in_features,
-            num_joints_out,
-            filter_widths,
-            causal,
-            dropout,
-            channels,
-        )
-
-        self.expand_conv = nn.Conv1d(
-            num_joints_in * in_features,
-            channels,
-            filter_widths[0],
-            stride=filter_widths[0],
-            bias=False,
-        )
-
-        layers_conv = []
-        layers_bn = []
-
-        self.causal_shift = [(filter_widths[0] // 2) if causal else 0]
-        next_dilation = filter_widths[0]
-        for i in range(1, len(filter_widths)):
-            self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
-            self.causal_shift.append((filter_widths[i] // 2) if causal else 0)
-
-            layers_conv.append(
-                nn.Conv1d(
-                    channels,
-                    channels,
-                    filter_widths[i],
-                    stride=filter_widths[i],
-                    bias=False,
-                )
-            )
-            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
-            layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
-            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
-            next_dilation *= filter_widths[i]
-
-        self.layers_conv = nn.ModuleList(layers_conv)
-        self.layers_bn = nn.ModuleList(layers_bn)
-
-    def _forward_blocks(self, x):
-        x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
-
-        for i in range(len(self.pad) - 1):
-            res = x[
-                :,
-                :,
-                self.causal_shift[i + 1]
-                + self.filter_widths[i + 1] // 2 :: self.filter_widths[i + 1],
-            ]
-
-            x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x))))
-            x = res + self.drop(
-                self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x)))
-            )
-
-        x = self.shrink(x)
-        return x
diff --git a/scripts/bisect.sh b/scripts/bisect.sh
index 65f1686c3..df2aea38e 100644
--- a/scripts/bisect.sh
+++ b/scripts/bisect.sh
@@ -1,4 +1,7 @@
 #!/bin/bash
+# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
+
+# SPDX-License-Identifier: Apache-2.0
 
 : << 'COMMENT'
 SAMPLE:
diff --git a/utils/pointers.hpp b/utils/pointers.hpp
index a1a306b51..f811c7d52 100644
--- a/utils/pointers.hpp
+++ b/utils/pointers.hpp
@@ -1,7 +1,8 @@
 // SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
 //
 // SPDX-License-Identifier: Apache-2.0
-///////////////////////////////////////////////////////////////////////////////
+//
+// This file incorporates work covered by the following copyright and permission notice:
 //
 // Copyright (c) 2015 Microsoft Corporation. All rights reserved.
 //
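For reference, the spdx.yml workflow above delegates the actual header check to the enarx/spdx action, and the pre-commit hook enforces the same headers locally. A hypothetical, self-contained Python sketch of what such a check amounts to (not part of this PR, and not the action's real implementation):

#!/usr/bin/env python3
# Hypothetical illustration only: scan files for an SPDX-License-Identifier
# tag and reject identifiers outside an allow-list, similar in spirit to the
# enarx/spdx CI job configured above.
import pathlib
import sys

ALLOWED_LICENSES = {"Apache-2.0"}


def has_allowed_spdx(path: pathlib.Path) -> bool:
    # SPDX tags conventionally sit near the top of a file, so only the
    # first few lines are scanned.
    for line in path.read_text(errors="ignore").splitlines()[:10]:
        if "SPDX-License-Identifier:" in line:
            identifier = line.split("SPDX-License-Identifier:", 1)[1].strip()
            return identifier in ALLOWED_LICENSES
    return False


if __name__ == "__main__":
    bad = [p for p in map(pathlib.Path, sys.argv[1:]) if not has_allowed_spdx(p)]
    for p in bad:
        print(f"missing or disallowed SPDX tag: {p}")
    sys.exit(1 if bad else 0)

Invoked as `python check_spdx.py <files...>` (the script name is made up here), it exits non-zero when any file lacks an allowed SPDX-License-Identifier tag, which is the same pass/fail contract the CI job applies to pull requests.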