Add support for nan_to_num op
kamalrajkannan78 committed Jan 6, 2025
1 parent e405246 commit b02be0c
Showing 1 changed file with 19 additions and 0 deletions.
python/tvm/relay/frontend/pytorch.py (+19, −0)
@@ -4620,6 +4620,24 @@ def scaled_dot_product_attention(self, inputs, input_types):
attn_weight = _op.reshape(attn_weight, newshape=[-4, batch_size, -1, -2])

return attn_weight


def nan_to_num(self, inputs, input_types):
    data = inputs[0]
    nan_value = inputs[1]
    posinf = inputs[2]
    neginf = inputs[3]

    # Match torch.nan_to_num defaults when the optional arguments are None:
    # NaN -> 0.0, +inf -> float32 max, -inf -> float32 min. Constants are
    # emitted as float32, the dtype this converter assumes for the input.
    nan_tensor = tvm.relay.const(nan_value if nan_value is not None else 0.0, "float32")
    posinf_tensor = tvm.relay.const(
        posinf if posinf is not None else np.finfo(np.float32).max, "float32"
    )
    neginf_tensor = tvm.relay.const(
        neginf if neginf is not None else np.finfo(np.float32).min, "float32"
    )

    # Each where() tests the original tensor, so the three substitutions
    # (NaN, +inf, -inf) cannot interfere with one another.
    result = tvm.relay.where(tvm.relay.isnan(data), nan_tensor, data)
    result = tvm.relay.where(
        tvm.relay.equal(data, tvm.relay.const(np.inf, "float32")), posinf_tensor, result
    )
    result = tvm.relay.where(
        tvm.relay.equal(data, tvm.relay.const(-np.inf, "float32")), neginf_tensor, result
    )

    return result

# Operator mappings
def create_convert_map(self):
@@ -4920,6 +4938,7 @@ def create_convert_map(self):
"aten::linalg_vector_norm": self.linalg_vector_norm,
"aten::scaled_dot_product_attention": self.scaled_dot_product_attention,
"aten::lift_fresh": self.identity,
"aten::nan_to_num":self.nan_to_num,
}

def update_convert_map(self, custom_map):
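For context, a minimal end-to-end sketch of how this converter is exercised, assuming a standard TVM build with LLVM and PyTorch installed; the module, input name, and replacement values below are illustrative, not part of the commit:

import numpy as np
import torch
import tvm
from tvm import relay
from tvm.contrib import graph_executor

class NanToNum(torch.nn.Module):
    def forward(self, x):
        return torch.nan_to_num(x, nan=0.0, posinf=1e4, neginf=-1e4)

x = torch.tensor([float("nan"), float("inf"), float("-inf"), 1.5])
traced = torch.jit.trace(NanToNum().eval(), x)

# aten::nan_to_num is resolved through the converter registered above.
mod, params = relay.frontend.from_pytorch(traced, [("x", list(x.shape))])

with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm", params=params)

rt = graph_executor.GraphModule(lib["default"](tvm.cpu()))
rt.set_input("x", x.numpy())
rt.run()
print(rt.get_output(0).numpy())  # expected: [0.0, 10000.0, -10000.0, 1.5]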
