Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ for more information.

## Installation

The preferred way to install `dtaianomaly` is via PyPi. See the [documentation](https://dtaianomaly.readthedocs.io/en/stable/index.html)
The preferred way to install `dtaianomaly` is via PyPI. See the [documentation](https://dtaianomaly.readthedocs.io/en/stable/getting_started/installation.html)
for more options.
```
pip install dtaianomaly
Expand Down
2 changes: 2 additions & 0 deletions docs/additional_information/changelog.rst
Original file line number Diff line number Diff line change
Expand Up @@ -42,12 +42,14 @@ Changed
- Replaced dependency on ``tslearn>=0.6.3`` by ``sktime[clustering]``, which includes the ``tslearn``
dependency. Before, ``tslearn`` was only used for ``KShapeAnomalyDetector`` to do the clustering. This
capability is also offered by ``sktime`` through a direct interface to ``tslearn``.
- Added an explicit dependency on ``tslearn>=0.6.3``, because ``sktime`` does not install it for Python >= 3.12.

Fixed
^^^^^

- ``np.datetime64`` values are now also considered valid by ``utils.is_valid_array_like``.
  This was especially necessary for time indices in a dataset.
- Added missing tests and removed temporary code to test the doctests.

[0.4.2] - 2025-07-03
--------------------
Expand Down
10 changes: 6 additions & 4 deletions docs/getting_started/installation.rst
Original file line number Diff line number Diff line change
Expand Up @@ -29,10 +29,12 @@ Currently, following subsets are available:
- ``docs``: Dependencies for generating the documentation.
- ``notebooks``: Dependencies for using jupyter notebooks.
- ``lint``: Dependencies for linting the code.
- ``chronos``: Install autogluon.timeseries, necessary for running Chronos.
- ``moment``: Install momentfm, necessary for running Chronos. **Warning:** Not included when
installing ``dtaianomaly[all]`` due to dependency conflicts!
- ``time_moe``: Install transformers==4.40.1, necessary for running MOMENT.
- ``chronos``: Install autogluon.timeseries, necessary for running Chronos. **Warning:** Not
included when installing ``dtaianomaly[all]``!
- ``moment``: Install momentfm, necessary for running MOMENT. **Warning:** Not included when
installing ``dtaianomaly[all]``!
- ``time_moe``: Install transformers==4.40.1, necessary for running Time-MoE. **Warning:** Not
included when installing ``dtaianomaly[all]``!
- ``in_time_ad``: Dependencies for running the demonstrator.

To install version ``X.Y.Z``, use the following command:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -198,11 +198,6 @@ def _evaluate_batch(self, batch: list[torch.Tensor]) -> torch.Tensor:
torch.abs(forecast - future), dim=tuple(range(1, forecast.ndim))
)

# Raise an error if invalid metric is given
raise ValueError(
f"Unknown error_metric '{self.error_metric}'. Valid options are ['mean-squared-error', 'mean-absolute-error']"
)

def _evaluate(self, data_loader: torch.utils.data.DataLoader) -> np.array:
decision_scores = super()._evaluate(data_loader)
return np.concatenate(
Expand Down
10 changes: 5 additions & 5 deletions dtaianomaly/anomaly_detection/_Chronos.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,11 +94,11 @@ class Chronos(BaseDetector):

Examples
--------
>>> from dtaianomaly.anomaly_detection import Chronos
>>> from dtaianomaly.data import demonstration_time_series
>>> x, y = demonstration_time_series()
>>> chronos = Chronos(10).fit(x)
>>> chronos.decision_function(x) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
>>> from dtaianomaly.anomaly_detection import Chronos # doctest: +SKIP
>>> from dtaianomaly.data import demonstration_time_series # doctest: +SKIP
>>> x, y = demonstration_time_series() # doctest: +SKIP
>>> chronos = Chronos(10).fit(x) # doctest: +SKIP
>>> chronos.decision_function(x) # doctest: +SKIP
array([0.00027719, 0.00027719, 0.00027719, ..., 0.00058781, 0.02628242,
0.00010728]...)
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ class HybridKNearestNeighbors(BaseDetector):
>>> hybrid_knn = HybridKNearestNeighbors(64, seed=0).fit(x)
>>> hybrid_knn.decision_function(x) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([0.12284644, 0.38202247, 0.43220974, ..., 0.83470662, 0.81722846,
0.85243446])
0.85243446]...)
"""

window_size: WINDOW_SIZE_TYPE
Expand Down
8 changes: 1 addition & 7 deletions dtaianomaly/anomaly_detection/_KShapeAnomalyDetector.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ class KShapeAnomalyDetector(BaseDetector):
>>> kshape = KShapeAnomalyDetector(window_size=50).fit(x)
>>> kshape.decision_function(x) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([7.07106781, 7.07106781, 7.07106781, ..., 7.07106781, 7.07106781,
7.07106781])
7.07106781]...)
"""

window_size: WINDOW_SIZE_TYPE
Expand Down Expand Up @@ -203,9 +203,3 @@ def _ncc_c(x: np.array, y: np.array) -> np.array:
cc = np.fft.ifft(np.fft.fft(x, fft_size) * np.conj(np.fft.fft(y, fft_size)))
cc = np.concatenate((cc[-(x.shape[0] - 1) :], cc[: x.shape[0]]))
return np.real(cc) / den


if __name__ == "__main__":
import doctest

doctest.testmod()
23 changes: 0 additions & 23 deletions dtaianomaly/anomaly_detection/_MatrixProfileDetector.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,26 +186,3 @@ def is_fitted(self) -> bool:
for attr in self.__annotations__
if attr.endswith("_") and attr != "X_reference_"
)

def requires_fitting(self) -> bool:
"""
Check whether this object requires fitting.

Check whether any of the attributes of this object ends with an
underscore ('_'), which indicates that the attribute is set when
the object is fitted. Note that this method does not check whether
the object is fitted, i.e., whether the attributes have been set.

Returns
-------
bool
True if and only if this object has attributes that end with '_'.
"""
if self.novelty:
return super().requires_fitting()
else:
return any(
attr.endswith("_")
for attr in self._all_annotations()
if attr != "X_reference_"
)
6 changes: 0 additions & 6 deletions dtaianomaly/anomaly_detection/_SpectralResidual.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,9 +112,3 @@ def _decision_function(self, X: np.ndarray) -> np.array:
saliency_map = np.sqrt(inverse_fourier.real**2 + inverse_fourier.imag**2)

return saliency_map


if __name__ == "__main__":
import doctest

doctest.testmod()
10 changes: 5 additions & 5 deletions dtaianomaly/anomaly_detection/_TimeMoE.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,11 +74,11 @@ class TimeMoE(BaseDetector):

Examples
--------
>>> from dtaianomaly.anomaly_detection import TimeMoE
>>> from dtaianomaly.data import demonstration_time_series
>>> x, y = demonstration_time_series()
>>> time_moe = TimeMoE(10).fit(x)
>>> time_moe.decision_function(x) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE +SKIP
>>> from dtaianomaly.anomaly_detection import TimeMoE # doctest: +SKIP
>>> from dtaianomaly.data import demonstration_time_series # doctest: +SKIP
>>> x, y = demonstration_time_series() # doctest: +SKIP
>>> time_moe = TimeMoE(10).fit(x) # doctest: +SKIP
>>> time_moe.decision_function(x) # doctest: +SKIP
array([6.34949149e-05, 6.34949149e-05, 6.34949149e-05, ...,
6.34949149e-05, 6.34949149e-05, 6.34949149e-05]...)
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ class AlwaysAnomalous(BaseDetector):
>>> x, y = demonstration_time_series()
>>> baseline = AlwaysAnomalous().fit(x)
>>> baseline.decision_function(x)
array([1., 1., 1., ..., 1., 1., 1.])
array([1., 1., 1., ..., 1., 1., 1.]...)
"""

def __init__(self):
Expand Down
2 changes: 1 addition & 1 deletion dtaianomaly/anomaly_detection/baselines/_AlwaysNormal.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ class AlwaysNormal(BaseDetector):
>>> x, y = demonstration_time_series()
>>> baseline = AlwaysNormal().fit(x)
>>> baseline.decision_function(x)
array([0., 0., 0., ..., 0., 0., 0.])
array([0., 0., 0., ..., 0., 0., 0.]...)
"""

def __init__(self):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ class MovingWindowVariance(BaseDetector):
>>> baseline = MovingWindowVariance(16).fit(x)
>>> baseline.decision_function(x)
array([0.06820711, 0.07130246, 0.07286874, ..., 0.01125165, 0.00984333,
0.00986772])
0.00986772]...)
"""

window_size: WINDOW_SIZE_TYPE
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ class RandomDetector(BaseDetector):
>>> baseline = RandomDetector(seed=0).fit(x)
>>> baseline.decision_function(x)
array([0.63696169, 0.26978671, 0.04097352, ..., 0.70724404, 0.90315986,
0.8944909 ])
0.8944909 ]...)
"""

seed: int | None
Expand Down
18 changes: 1 addition & 17 deletions dtaianomaly/anomaly_detection/baselines/_SquaredDifference.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ class SquaredDifference(BaseDetector):
>>> baseline = SquaredDifference().fit(x)
>>> baseline.decision_function(x)
array([0.00779346, 0.00779346, 0.00260361, ..., 0.00286662, 0.05578398,
0.02683475])
0.02683475]...)
"""

square_errors: bool
Expand All @@ -54,19 +54,3 @@ def _decision_function(self, X: np.ndarray) -> np.array:
if self.square_errors:
decision_scores = np.square(decision_scores)
return decision_scores


def main():
from dtaianomaly.data import demonstration_time_series
from dtaianomaly.visualization import plot_anomaly_scores

x, y = demonstration_time_series()
x = x.reshape(-1, 1)

baseline = SquaredDifference().fit(x)
y_pred = baseline.decision_function(x)
plot_anomaly_scores(x, y, y_pred, figsize=(20, 5)).show()


if __name__ == "__main__":
main()
2 changes: 1 addition & 1 deletion dtaianomaly/pipeline/_Pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ class Pipeline(BaseDetector):
>>> pipeline = Pipeline(StandardScaler(), IsolationForest(16))
>>> pipeline.fit(X).decision_function(X)
array([-0.01080726, -0.01053199, -0.00883758, ..., -0.05298726,
-0.05898066, -0.05713733])
-0.05898066, -0.05713733]...)
"""

preprocessor: Preprocessor
Expand Down
6 changes: 0 additions & 6 deletions dtaianomaly/windowing/_sliding_window.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,9 +59,3 @@ def sliding_window(X: np.ndarray, window_size: int, stride: int) -> np.ndarray:
]
windows.append(X[-window_size:].ravel())
return np.array(windows)


if __name__ == "__main__":
import doctest

doctest.testmod()
5 changes: 2 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,8 @@ dependencies = [
"matplotlib>=3.7",
"statsmodels>=0.6",
"pyod>=2.0.0",
"sktime[clustering]",
"tslearn>=0.6.3",
"sktime",
"toml",
"torch>=1.8.0",
]
Expand Down Expand Up @@ -73,8 +74,6 @@ all = [ # All the optional dependencies
"isort",
'tqdm',
"sphinxcontrib-bibtex",
"autogluon.timeseries>=1.3.1",
"transformers==4.40.1",
'tqdm',
"streamlit",
"plotly"
Expand Down
51 changes: 44 additions & 7 deletions tests/anomaly_detection/test_Chronos.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,72 +8,114 @@
from dtaianomaly.anomaly_detection._Chronos import MODEL_PATHS


def setup():
    """Register dummy ``autogluon`` / ``autogluon.timeseries`` modules.

    Installing both entries in ``sys.modules`` makes
    ``import autogluon.timeseries`` succeed without the real (heavy)
    dependency being installed, so ``Chronos`` can be constructed in tests.
    Call :func:`cleanup` afterwards to remove the fakes again.
    """
    parent = types.ModuleType("autogluon")
    child = types.ModuleType("autogluon.timeseries")
    # Link the submodule onto the parent, mirroring a real package layout.
    parent.timeseries = child

    sys.modules["autogluon"] = parent
    sys.modules["autogluon.timeseries"] = child


def cleanup():
    """Remove the dummy ``autogluon`` modules installed by :func:`setup`.

    Uses ``dict.pop`` with a default instead of ``del`` so the call is
    idempotent: it no longer raises ``KeyError`` when the modules were
    never registered (e.g. ``setup`` failed halfway) or when ``cleanup``
    runs twice for the same test.
    """
    sys.modules.pop("autogluon", None)
    sys.modules.pop("autogluon.timeseries", None)


class TestChronos:

def test_supervision(self):
setup()
detector = Chronos(1)
assert detector.supervision == Supervision.UNSUPERVISED
cleanup()

def test_str(self):
setup()
assert str(Chronos(5)) == "Chronos(window_size=5)"
assert str(Chronos("fft")) == "Chronos(window_size='fft')"
assert str(Chronos(15, "large")) == "Chronos(window_size=15,model_path='large')"
assert str(Chronos(25, batch_size=3)) == "Chronos(window_size=25,batch_size=3)"
cleanup()

@pytest.mark.parametrize("model_path", MODEL_PATHS)
def test_model_path_valid(self, model_path):
setup()
detector = Chronos(window_size="fft", model_path=model_path)
assert detector.model_path == model_path
cleanup()

@pytest.mark.parametrize("model_path", [0, True, None, ["a", "list"]])
def test_model_path_invalid_type(self, model_path):
setup()
with pytest.raises(TypeError):
Chronos(window_size="fft", model_path=model_path)
cleanup()

@pytest.mark.parametrize("model_path", ["invalid"])
def test_model_path_invalid_value(self, model_path):
setup()
with pytest.raises(ValueError):
Chronos(window_size="fft", model_path=model_path)
cleanup()

@pytest.mark.parametrize("batch_size", [8, 16, 32])
def test_batch_size_valid(self, batch_size):
setup()
detector = Chronos(window_size="fft", batch_size=batch_size)
assert detector.batch_size == batch_size
cleanup()

@pytest.mark.parametrize("batch_size", ["8", 8.0])
def test_batch_size_invalid_type(self, batch_size):
setup()
with pytest.raises(TypeError):
Chronos(window_size="fft", batch_size=batch_size)
cleanup()

@pytest.mark.parametrize("batch_size", [0, -8])
def test_batch_size_invalid_value(self, batch_size):
setup()
with pytest.raises(ValueError):
Chronos(window_size="fft", batch_size=batch_size)
cleanup()

@pytest.mark.parametrize("forecast_horizon", [32, 16, 8])
def test_forecast_horizon_valid(self, forecast_horizon):
setup()
detector = Chronos(window_size=16, forecast_horizon=forecast_horizon)
assert detector.forecast_horizon == forecast_horizon
cleanup()

@pytest.mark.parametrize("forecast_horizon", ["32", 16.0, True])
def test_forecast_horizon_invalid_type(self, forecast_horizon):
setup()
with pytest.raises(TypeError):
Chronos(window_size=16, forecast_horizon=forecast_horizon)
cleanup()

@pytest.mark.parametrize("forecast_horizon", [0, -1, -16])
def test_forecast_horizon_invalid_value(self, forecast_horizon):
setup()
with pytest.raises(ValueError):
Chronos(window_size=16, forecast_horizon=forecast_horizon)
cleanup()

@pytest.mark.parametrize("do_fine_tuning", [True, False])
def test_do_fine_tuning_valid(self, do_fine_tuning):
setup()
detector = Chronos(window_size="fft", do_fine_tuning=do_fine_tuning)
assert detector.do_fine_tuning == do_fine_tuning
cleanup()

@pytest.mark.parametrize("do_fine_tuning", [5, 1.0, "invalid"])
def test_do_fine_tuning_invalid_type(self, do_fine_tuning):
setup()
with pytest.raises(TypeError):
Chronos(window_size="fft", do_fine_tuning=do_fine_tuning)
cleanup()

def test_raises_if_autogluon_missing(self, monkeypatch):
# simulate ImportError when trying to import autogluon.timeseries
Expand All @@ -93,11 +135,6 @@ def fake_import(name, *args, **kwargs):
Chronos(15)

def test_no_error_if_autogluon_available(self, monkeypatch):
# simulate a dummy autogluon.timeseries module
sys.modules["autogluon.timeseries"] = types.ModuleType("autogluon.timeseries")

# should NOT raise
setup()
Chronos(15) # just runs, no exception

# cleanup (avoid side effects on other tests)
del sys.modules["autogluon.timeseries"]
cleanup()
Loading