
Commit

Golden function for tt and torch results
ddilbazTT committed Nov 7, 2024
1 parent 6503c70 commit ab2fe88
Showing 1 changed file with 6 additions and 2 deletions.
8 changes: 6 additions & 2 deletions tt_torch/dynamo/backend.py
@@ -110,12 +110,13 @@ def compile_process(receiver, sender):
 
 
 class Executor:
-    def __init__(self, gm, compiler_config=None):
+    def __init__(self, gm, compiler_config=None, required_atol=1e-2):
         self.gm = gm
         self.binary = None
         if compiler_config is None:
             compiler_config = CompilerConfig()
         self.compiler_config = compiler_config
+        self.required_atol = required_atol
 
     def set_binary(self, binary):
         self.binary = binary
@@ -298,7 +299,10 @@ def run_gm_op_by_op(self, *inputs):
                 == CompileDepth.EXECUTE_OP_BY_OP
                 and binary is not None
             ):
-                tensor = self.run_op(binary, *args)
+                tt_tensor = self.run_op(binary, *args)
+                golden_tensor = node.target(*args, **node.kwargs)
+                atol = torch.max(torch.abs(golden_tensor - tt_tensor)).item()
+                assert (atol <= self.required_atol), f"ATOL too high: {atol}"
                 op.compilation_status = OpCompilationStatus.EXECUTED
             else:
                 tensor = node.target(*args, **node.kwargs)
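For context: the check added in this commit compares each op's device result against the eager-torch ("golden") result of the same node, using the maximum absolute element-wise difference and the new required_atol threshold (default 1e-2). Below is a minimal, self-contained sketch of that comparison, assuming only torch; golden and device_result are hypothetical stand-ins for node.target(*args, **node.kwargs) and Executor.run_op(binary, *args).

import torch

required_atol = 1e-2  # default threshold introduced on Executor.__init__ in this commit

# Hypothetical stand-ins: `golden` for the eager-torch result of the op,
# `device_result` for the tensor that run_op would return from the device.
golden = torch.nn.functional.relu(torch.randn(4, 4))
device_result = golden + 1e-3 * torch.rand(4, 4)  # small simulated device error

# Maximum absolute element-wise difference, same formula as in the diff above.
atol = torch.max(torch.abs(golden - device_result)).item()
assert atol <= required_atol, f"ATOL too high: {atol}"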
