thunder/core/jit_ext.py (1 addition, 1 deletion)
@@ -1888,7 +1888,7 @@ def from_provenance(provenance, *, new_output=False):
try:
from_provenance(p.history)
except Exception as e:
- raise NotImplementedError(f"Exception occured unpacking object from {p.history}") from e
+ raise NotImplementedError(f"Exception occurred unpacking object from {p.history}") from e

already_unpacked[id(p)] = p

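The changed line wraps the original failure with `raise ... from e`. As a generic sketch of that chaining pattern (the `unpack` helper and the history tuple below are made up for illustration, not Thunder APIs), the original exception stays attached as `__cause__`, so the provenance that failed to unpack remains visible in the traceback:

```python
# Generic sketch of the `raise ... from e` pattern used in jit_ext.py.
# `unpack` and the history tuple are placeholders, not Thunder APIs.
def unpack(history):
    raise ValueError(f"bad history entry: {history!r}")  # stand-in for the real failure


history = ("GET_ATTR", "missing")
try:
    try:
        unpack(history)
    except Exception as e:
        raise NotImplementedError(f"Exception occurred unpacking object from {history}") from e
except NotImplementedError as err:
    # The original error is preserved as the cause of the wrapping exception.
    print(type(err.__cause__).__name__)  # ValueError
```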
thunder/core/prims.py (1 addition, 1 deletion)
@@ -3912,7 +3912,7 @@ def _reduction_meta(a: TensorProxy, /, dims: Sequence[int]) -> TensorProxy:
sum = make_prim(PrimIDs.SUM, "sum", meta=_reduction_meta, tags=(OpTags.REDUCTION_OP,))


- # Note: We have seperate meta function for `argmin/argmax` instead of
+ # Note: We have separate meta function for `argmin/argmax` instead of
# reusing `_reduction_meta` as these operations expect Optional[int] for `dim`
# and return output with integer dtype.
#
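For reference, the behavior that motivates a separate meta function can be checked with plain PyTorch (no Thunder required): `argmax` accepts an optional `dim` and always returns an integer dtype.

```python
import torch

a = torch.tensor([[1.0, 3.0],
                  [2.0, 0.0]])

flat_idx = torch.argmax(a)        # dim defaults to None: reduce over the flattened tensor
per_dim = torch.argmax(a, dim=0)  # reduce along a single dimension

print(flat_idx, flat_idx.dtype)   # tensor(1) torch.int64
print(per_dim, per_dim.dtype)     # tensor([1, 0]) torch.int64
```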
thunder/core/utils.py (1 addition, 1 deletion)
@@ -1141,7 +1141,7 @@ def find_producer_symbols(trace: TraceCtx, proxies: Sequence[Proxy], stop_proxie
if arg_name not in map(lambda x: x.name, stop_proxies) and arg_name not in seen:
queue.append(arg)
seen.add(arg_name)
- # original_order maps from bound_symbol to the index/order of its occurence in the trace. The order is
+ # original_order maps from bound_symbol to the index/order of its occurrence in the trace. The order is
# used to sort producer bound symbols to preserve the correctness of data dependency.
original_order = dict()
for i, bsym in enumerate(trace.bound_symbols):
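As an illustration of the ordering idea in the corrected comment, here is a minimal sketch, not Thunder's actual `find_producer_symbols`: `Op`, `find_producers`, and the toy trace are hypothetical, but the key step is the same as described above: record each operation's first occurrence index in the trace and sort the collected producers by it so data dependencies stay in order.

```python
from dataclasses import dataclass


@dataclass(frozen=True)
class Op:
    # Hypothetical stand-in for a bound symbol: a name plus input/output tensor names.
    name: str
    inputs: tuple[str, ...] = ()
    outputs: tuple[str, ...] = ()


def find_producers(trace: list[Op], wanted: set[str], stop: set[str]) -> list[Op]:
    """Walk backwards from `wanted` outputs and return their producers in trace order."""
    # original_order maps each op to the index of its occurrence in the trace.
    original_order = {op: i for i, op in enumerate(trace)}
    producers: set[Op] = set()
    queue, seen = list(wanted), set(wanted)
    while queue:
        name = queue.pop()
        for op in trace:
            if name in op.outputs:
                producers.add(op)
                for inp in op.inputs:
                    if inp not in stop and inp not in seen:
                        seen.add(inp)
                        queue.append(inp)
    # Sorting by the recorded position preserves data-dependency order in the result.
    return sorted(producers, key=lambda op: original_order[op])


trace = [Op("make_x", outputs=("x",)), Op("relu", ("x",), ("y",)), Op("sum", ("y",), ("z",))]
print([op.name for op in find_producers(trace, {"z"}, set())])  # ['make_x', 'relu', 'sum']
```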
thunder/distributed/transforms/ddp_v2.py (1 addition, 1 deletion)
@@ -56,7 +56,7 @@ def transform_module(self, model: ThunderModule):

# NOTE: Shared Parameters in Trace
# Shared parameters in PyTorch eager are parameters of module which have different name but share the underlying tensor.
- # For shared parameter, we replace all occurence shared parameter with it's corresponding `base` parameter.
+ # For shared parameter, we replace all occurrences shared parameter with it's corresponding `base` parameter.
# In our implementation `base` parameter is the parameter and corresponding name which we see the first time while
# iterating our parameters (see below). We track subsequent parameter which share the underlying Tensor with this `base` parameter
# in `shared_params_name` dictionary.
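The shared-parameter situation this comment describes can be reproduced with plain PyTorch. The sketch below (a made-up `Tied` module, not Thunder's transform) ties two parameter names to one tensor and builds the kind of alias-to-base mapping the comment calls `shared_params_name`, where the base is the first name seen while iterating parameters.

```python
import torch.nn as nn


class Tied(nn.Module):
    # Hypothetical module with weight tying: two parameter names, one underlying tensor.
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(10, 4)          # weight shape (10, 4)
        self.head = nn.Linear(4, 10, bias=False)  # weight shape (10, 4)
        self.head.weight = self.embed.weight      # share the underlying tensor


model = Tied()

base_name_by_tensor: dict[int, str] = {}
shared_params_name: dict[str, str] = {}  # alias name -> base name
for name, param in model.named_parameters(remove_duplicate=False):
    key = id(param)
    if key in base_name_by_tensor:
        shared_params_name[name] = base_name_by_tensor[key]
    else:
        base_name_by_tensor[key] = name  # first occurrence becomes the `base` parameter

print(shared_params_name)  # {'head.weight': 'embed.weight'}
```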
thunder/distributed/transforms/fsdp_v2.py (1 addition, 1 deletion)
@@ -144,7 +144,7 @@ def transform_module(

# NOTE: Shared Parameters in Trace
# Shared parameters in PyTorch eager are parameters of module which have different name but share the underlying tensor.
- # For shared parameter, we replace all occurence shared parameter with it's corresponding `base` parameter.
+ # For shared parameter, we replace all occurrences shared parameter with it's corresponding `base` parameter.
# In our implementation `base` parameter is the parameter and corresponding name which we see the first time while
# iterating our parameters (see below). We track subsequent parameter which share the underlying Tensor with this `base` parameter
# in `shared_params_name` dictionary.
thunder/tests/distributed/test_dtensor.py (1 addition, 1 deletion)
@@ -31,7 +31,7 @@
from thunder.dynamo import thunderfx


- # NOTE: We run all these similar functions seperately
+ # NOTE: We run all these similar functions separately
# as we want to avoid nvfuser issue (https://github.com/NVIDIA/Fuser/issues/4507)
# where trying to create FusionDefinition with same math operation can fail.
functions_to_test = {
thunder/tests/opinfos.py (2 additions, 2 deletions)
@@ -5964,8 +5964,8 @@ def make_t(shape):
if dtype is not torch.bool: # argmax is not supported on `bool`
# overload: torch_max(a: TensorLike, /, dim: int | tuple[int], keepdim: bool = False) -> TensorLike, TensorLike
# This overload corresponds to taking the max along the specified dimension `dim`.
- # It returns first occurence of the maximum value along the dimension and it's corresponding index.
- # NOTE: When same values are present, the first occurence of the `value` and corresponding index is returned
+ # It returns first occurrence of the maximum value along the dimension and it's corresponding index.
+ # NOTE: When same values are present, the first occurrence of the `value` and corresponding index is returned
yield SampleInput(make_t(shape), dim)
yield SampleInput(make_t(shape), dim, keepdim)

thunder/torch/__init__.py (1 addition, 1 deletion)
@@ -3197,7 +3197,7 @@ def torch_max(

# overload - torch_max(a: TensorLike, /, dim: int | tuple[int], keepdim: bool = False) -> TensorLike, TensorLike
# This overload corresponds to taking the max along the specified dimension `dim`.
- # NOTE: It returns first occurence of the maximum value along the dimension and it's corresponding index.
+ # NOTE: It returns first occurrence of the maximum value along the dimension and it's corresponding index.
utils.check_type(dim, NumberLike)
max_vals = amax(a, dim, keepdim)
argmax_vals = argmax(a, dim, keepdim)
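The tie-breaking described in the corrected note can be checked directly against plain `torch.max`; the toy tensor below has a tie in each row, and (at least on CPU) the index of the first maximum is returned.

```python
import torch

a = torch.tensor([[5, 1, 5],
                  [2, 7, 7]])

values, indices = torch.max(a, dim=1)
print(values)   # tensor([5, 7])
print(indices)  # tensor([0, 1]) -- the first occurrence of each row's maximum
```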
thunder/transforms/materialization.py (1 addition, 1 deletion)
@@ -33,7 +33,7 @@ def should_skip_materialization(n):
processed_names = set()

# Shared parameters in PyTorch eager are parameters of module which have different name but share the underlying tensor.
- # For shared parameter, we replace all occurence shared parameter with its corresponding `base` parameter.
+ # For shared parameter, we replace all occurrences shared parameter with its corresponding `base` parameter.
# In our implementation `base` parameter is the parameter and corresponding name which we see the first time while
# iterating our parameters (see below). We track subsequent parameter which share the underlying Tensor with this `base` parameter
# in `shared_params_name` dictionary.