diff --git a/.gitignore b/.gitignore
index af2e54b..f854fae 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,6 +42,24 @@ downloads
 .vscode
 pyrightconfig.json
 
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+coverage.xml
+.tox/
+.nox/
+
+# Claude Code
+.claude/
+
+# Virtual environments
+venv/
+env/
+ENV/
+env.bak/
+venv.bak/
+
 # Custom
 data
 outputs
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..a97ca75
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,136 @@
+[tool.poetry]
+name = "opus"
+version = "1.0.0"
+description = "OPUS: Occupancy Prediction Using a Sparse Set"
+authors = ["Jiabao Wang ", "Zhaojiang Liu "]
+readme = "README.md"
+packages = [{include = "loaders"}, {include = "models"}]
+
+[tool.poetry.dependencies]
+python = "^3.8"
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^7.4.0"
+pytest-cov = "^4.1.0"
+pytest-mock = "^3.11.1"
+pytest-xdist = "^3.3.1"
+black = "^23.0.0"
+isort = "^5.12.0"
+flake8 = "^6.0.0"
+mypy = "^1.5.0"
+torch = ">=1.13.1"
+numpy = "*"
+
+
+[tool.poetry.scripts]
+test = "pytest"
+tests = "pytest"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+addopts = [
+    "--strict-markers",
+    "--strict-config",
+    "--verbose",
+    "--tb=short",
+    "--cov=loaders",
+    "--cov=models",
+    "--cov-report=term-missing",
+    "--cov-report=html:htmlcov",
+    "--cov-report=xml:coverage.xml",
+    "--cov-fail-under=80",
+]
+testpaths = ["tests"]
+python_files = ["test_*.py", "*_test.py"]
+python_classes = ["Test*"]
+python_functions = ["test_*"]
+markers = [
+    "unit: Unit tests",
+    "integration: Integration tests",
+    "slow: Slow running tests",
+    "gpu: Tests requiring GPU",
+]
+filterwarnings = [
+    "error",
+    "ignore::UserWarning",
+    "ignore::DeprecationWarning",
+]
+
+[tool.coverage.run]
+source = ["loaders", "models"]
+omit = [
+    "*/tests/*",
+    "*/test_*.py",
+    "*/*_test.py",
+    "*/conftest.py",
+    "*/setup.py",
+    "*/models/csrc/*",
+    "*/lib/*",
+]
+branch = true
+
+[tool.coverage.report]
+exclude_lines = [
+    "pragma: no cover",
+    "def __repr__",
+    "if self.debug:",
+    "if settings.DEBUG",
+    "raise AssertionError",
+    "raise NotImplementedError",
+    "if 0:",
+    "if __name__ == .__main__.:",
+    "class .*\\bProtocol\\):",
+    "@(abc\\.)?abstractmethod",
+]
+ignore_errors = true
+show_missing = true
+precision = 2
+
+[tool.coverage.html]
+directory = "htmlcov"
+
+[tool.coverage.xml]
+output = "coverage.xml"
+
+[tool.black]
+line-length = 88
+target-version = ["py38"]
+include = '\.pyi?$'
+extend-exclude = '''
+/(
+    \.eggs
+  | \.git
+  | \.hg
+  | \.mypy_cache
+  | \.tox
+  | \.venv
+  | _build
+  | buck-out
+  | build
+  | dist
+  | models/csrc
+  | lib
+)/
+'''
+
+[tool.isort]
+profile = "black"
+multi_line_output = 3
+line_length = 88
+known_first_party = ["loaders", "models"]
+skip_glob = ["models/csrc/*", "lib/*"]
+
+[tool.mypy]
+python_version = "3.8"
+warn_return_any = true
+warn_unused_configs = true
+disallow_untyped_defs = true
+exclude = [
+    "models/csrc/",
+    "lib/",
+    "tests/",
+]
\ No newline at end of file
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..4f994af
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,100 @@
+# OPUS Testing Infrastructure
+
+This directory contains the testing infrastructure for the OPUS project.
+
+## Structure
+
+```
+tests/
+├── README.md                     # This file
+├── __init__.py                   # Test package initialization
+├── conftest.py                   # Shared pytest fixtures
+├── test_basic_infrastructure.py  # Basic tests (no external deps)
+├── test_infrastructure.py        # Full infrastructure tests (requires deps)
+├── unit/                         # Unit tests
+│   └── __init__.py
+└── integration/                  # Integration tests
+    └── __init__.py
+```
+
+## Running Tests
+
+### Option 1: With Poetry (Recommended for full setup)
+```bash
+# Install dependencies
+poetry install
+
+# Run all tests
+poetry run test
+# or
+poetry run tests
+
+# Run with coverage
+poetry run pytest --cov
+
+# Run specific test categories
+poetry run pytest -m unit          # Unit tests only
+poetry run pytest -m integration   # Integration tests only
+poetry run pytest -m "not slow"    # Skip slow tests
+```
+
+### Option 2: With pip (if Poetry unavailable)
+```bash
+# Create virtual environment
+python3 -m venv venv
+source venv/bin/activate  # On Windows: venv\Scripts\activate
+
+# Install testing dependencies
+pip install pytest pytest-cov pytest-mock torch numpy
+
+# Run tests
+pytest
+```
+
+### Option 3: Basic validation (no dependencies)
+```bash
+# Run basic infrastructure tests
+python3 tests/test_basic_infrastructure.py
+```
+
+## Test Categories
+
+Tests are organized with markers:
+- `@pytest.mark.unit` - Fast unit tests
+- `@pytest.mark.integration` - Integration tests
+- `@pytest.mark.slow` - Slow-running tests
+- `@pytest.mark.gpu` - Tests requiring GPU
+
+## Coverage
+
+Coverage is configured to:
+- Measure coverage of `loaders/` and `models/` packages
+- Generate HTML reports in `htmlcov/`
+- Generate XML reports as `coverage.xml`
+- Require minimum 80% coverage
+- Exclude test files and CUDA extensions
+
+## Fixtures
+
+Common fixtures are available in `conftest.py`:
+- `temp_dir` - Temporary directory for test files
+- `mock_config` - Mock configuration dictionary
+- `sample_tensor` - Sample PyTorch tensor
+- `sample_batch_dict` - Sample batch data
+- `mock_model` - Mock OPUS model
+- And many more...
+
+## Configuration
+
+Test configuration is in `pyproject.toml` under:
+- `[tool.pytest.ini_options]` - pytest settings
+- `[tool.coverage.run]` - Coverage measurement
+- `[tool.coverage.report]` - Coverage reporting
+
+## Adding New Tests
+
+1. Create test files with `test_*.py` naming
+2. Use appropriate markers for categorization
+3. Place unit tests in `tests/unit/`
+4. Place integration tests in `tests/integration/`
+5. Use fixtures from `conftest.py` for common test data
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..93a2d4b
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1 @@
+# Test package initialization
\ No newline at end of file
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..9db4ce6
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,255 @@
+"""Shared pytest fixtures for OPUS testing."""
+
+import tempfile
+import shutil
+from pathlib import Path
+from typing import Generator, Dict, Any
+from unittest.mock import Mock, MagicMock
+
+import pytest
+import torch
+import numpy as np
+
+
+@pytest.fixture
+def temp_dir() -> Generator[Path, None, None]:
+    """Create a temporary directory for test files."""
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        yield Path(tmp_dir)
+
+
+@pytest.fixture
+def temp_file(temp_dir: Path) -> Generator[Path, None, None]:
+    """Create a temporary file for testing."""
+    temp_file_path = temp_dir / "test_file.txt"
+    temp_file_path.write_text("test content")
+    yield temp_file_path
+
+
+@pytest.fixture
+def mock_config() -> Dict[str, Any]:
+    """Mock configuration dictionary for testing."""
+    return {
+        "model": {
+            "type": "OPUS",
+            "backbone": {
+                "type": "ResNet",
+                "depth": 50,
+                "num_stages": 4,
+            },
+            "neck": {
+                "type": "FPN",
+                "in_channels": [256, 512, 1024, 2048],
+                "out_channels": 256,
+            },
+        },
+        "dataset": {
+            "type": "NuScenesOccDataset",
+            "data_root": "data/nuscenes",
+            "ann_file": "nuscenes_infos_train.pkl",
+        },
+        "train_cfg": {
+            "max_epochs": 100,
+            "batch_size": 1,
+        },
+        "test_cfg": {
+            "batch_size": 1,
+        },
+    }
+
+
+@pytest.fixture
+def mock_device() -> str:
+    """Mock device configuration."""
+    return "cuda" if torch.cuda.is_available() else "cpu"
+
+
+@pytest.fixture
+def sample_tensor() -> torch.Tensor:
+    """Create a sample tensor for testing."""
+    return torch.randn(1, 3, 256, 704)
+
+
+@pytest.fixture
+def sample_batch_dict() -> Dict[str, Any]:
+    """Create a sample batch dictionary for testing."""
+    return {
+        "img": torch.randn(1, 6, 3, 256, 704),
+        "img_metas": [{
+            "filename": "sample.jpg",
+            "ori_shape": (256, 704),
+            "img_shape": (256, 704),
+            "pad_shape": (256, 704),
+            "scale_factor": 1.0,
+            "flip": False,
+        }],
+        "gt_occ": torch.randint(0, 18, (1, 200, 200, 16)),
+        "gt_semantics": torch.randint(0, 17, (1, 1000)),
+        "gt_coords": torch.randn(1, 1000, 3),
+    }
+
+
+@pytest.fixture
+def sample_points() -> np.ndarray:
+    """Create sample 3D points for testing."""
+    return np.random.randn(1000, 3).astype(np.float32)
+
+
+@pytest.fixture
+def sample_occupancy_grid() -> np.ndarray:
+    """Create sample occupancy grid for testing."""
+    return np.random.randint(0, 18, size=(200, 200, 16), dtype=np.int32)
+
+
+@pytest.fixture
+def mock_nuscenes_dataset():
+    """Mock NuScenes dataset for testing."""
+    dataset = Mock()
+    dataset.__len__ = Mock(return_value=100)
+    dataset.__getitem__ = Mock(return_value={
+        "img": torch.randn(6, 3, 256, 704),
+        "img_metas": {
+            "filename": "sample.jpg",
+            "ori_shape": (256, 704),
+            "img_shape": (256, 704),
+        },
+        "gt_occ": torch.randint(0, 18, (200, 200, 16)),
+    })
+    return dataset
+
+
+@pytest.fixture
+def mock_model():
+    """Mock OPUS model for testing."""
+    model = Mock()
+    model.forward = Mock(return_value={
+        "pred_coords": torch.randn(1, 1000, 3),
+        "pred_semantics": torch.randn(1, 1000, 17),
+        "loss_chamfer": torch.tensor(0.5),
+        "loss_semantic": torch.tensor(0.3),
+    })
+    model.train = Mock()
+    model.eval = Mock()
+    model.to = Mock(return_value=model)
+    model.parameters = Mock(return_value=[torch.randn(10, 10, requires_grad=True)])
+    return model
+
+
+@pytest.fixture
+def mock_optimizer():
+    """Mock optimizer for testing."""
+    optimizer = Mock()
+    optimizer.zero_grad = Mock()
+    optimizer.step = Mock()
+    optimizer.param_groups = [{"lr": 0.001}]
+    return optimizer
+
+
+@pytest.fixture
+def mock_scheduler():
+    """Mock learning rate scheduler for testing."""
+    scheduler = Mock()
+    scheduler.step = Mock()
+    scheduler.get_last_lr = Mock(return_value=[0.001])
+    return scheduler
+
+
+@pytest.fixture
+def sample_camera_params() -> Dict[str, Any]:
+    """Sample camera parameters for testing."""
+    return {
+        "intrinsics": np.array([
+            [1266.417203, 0.0, 816.2670197],
+            [0.0, 1266.417203, 491.50706579],
+            [0.0, 0.0, 1.0]
+        ]),
+        "extrinsics": np.array([
+            [1.0, 0.0, 0.0, 0.0],
+            [0.0, 1.0, 0.0, 0.0],
+            [0.0, 0.0, 1.0, 0.0],
+            [0.0, 0.0, 0.0, 1.0]
+        ]),
+        "distortion": np.array([-0.13761, 0.14072, 0.00025, 0.00025, 0.0]),
+    }
+
+
+@pytest.fixture
+def sample_lidar_data() -> Dict[str, np.ndarray]:
+    """Sample LiDAR data for testing."""
+    return {
+        "points": np.random.randn(10000, 4).astype(np.float32),  # x, y, z, intensity
+        "labels": np.random.randint(0, 17, size=(10000,), dtype=np.int32),
+    }
+
+
+@pytest.fixture
+def mock_checkpoint():
+    """Mock model checkpoint for testing."""
+    return {
+        "model": {
+            "backbone.conv1.weight": torch.randn(64, 3, 7, 7),
+            "backbone.bn1.weight": torch.randn(64),
+            "backbone.bn1.bias": torch.randn(64),
+        },
+        "optimizer": {
+            "state": {},
+            "param_groups": [{"lr": 0.001}],
+        },
+        "epoch": 10,
+        "best_score": 0.85,
+    }
+
+
+@pytest.fixture(autouse=True)
+def set_random_seeds():
+    """Set random seeds for reproducible testing."""
+    np.random.seed(42)
+    torch.manual_seed(42)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed(42)
+        torch.cuda.manual_seed_all(42)
+
+
+@pytest.fixture
+def no_cuda(monkeypatch):
+    """Disable CUDA for CPU-only testing."""
+    monkeypatch.setattr(torch.cuda, "is_available", lambda: False)
+
+
+@pytest.fixture
+def mock_mmcv_config():
+    """Mock mmcv Config object."""
+    config = Mock()
+    config.model = Mock()
+    config.model.type = "OPUS"
+    config.data = Mock()
+    config.data.train = Mock()
+    config.optimizer = Mock()
+    config.optimizer.type = "AdamW"
+    config.optimizer.lr = 0.001
+    return config
+
+
+@pytest.fixture(scope="session")
+def test_data_dir() -> Path:
+    """Path to test data directory."""
+    return Path(__file__).parent / "data"
+
+
+@pytest.fixture
+def cleanup_files():
+    """Cleanup test files after test execution."""
+    files_to_cleanup = []
+
+    def register_cleanup(filepath: Path):
+        files_to_cleanup.append(filepath)
+
+    yield register_cleanup
+
+    # Cleanup
+    for filepath in files_to_cleanup:
+        if filepath.exists():
+            if filepath.is_dir():
+                shutil.rmtree(filepath)
+            else:
+                filepath.unlink()
\ No newline at end of file
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 0000000..e27cd7a
--- /dev/null
+++ b/tests/integration/__init__.py
@@ -0,0 +1 @@
+# Integration tests package
\ No newline at end of file
diff --git a/tests/test_basic_infrastructure.py b/tests/test_basic_infrastructure.py
new file mode 100644
index 0000000..4ab4326
--- /dev/null
+++ b/tests/test_basic_infrastructure.py
@@ -0,0 +1,178 @@
+"""Basic infrastructure validation without external dependencies."""
+
+import sys
+import os
+import tempfile
+from pathlib import Path
+from unittest.mock import Mock
+
+
+def test_basic_python_functionality():
+    """Test basic Python functionality."""
+    assert 2 + 2 == 4
+    assert isinstance("hello", str)
+    assert len([1, 2, 3]) == 3
+
+
+def test_pathlib_functionality():
+    """Test pathlib functionality."""
+    current_dir = Path.cwd()
+    assert current_dir.exists()
+
+    test_path = Path("/tmp/test.txt")
+    assert test_path.name == "test.txt"
+
+
+def test_temporary_files():
+    """Test temporary file creation."""
+    with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
+        tmp_file.write("test content")
+        tmp_path = tmp_file.name
+
+    try:
+        assert os.path.exists(tmp_path)
+        with open(tmp_path, 'r') as f:
+            content = f.read()
+        assert content == "test content"
+    finally:
+        os.unlink(tmp_path)
+
+
+def test_mock_functionality():
+    """Test mock functionality."""
+    mock_obj = Mock()
+    mock_obj.method.return_value = "test_result"
+
+    result = mock_obj.method()
+    assert result == "test_result"
+    mock_obj.method.assert_called_once()
+
+
+def test_project_structure():
+    """Test that project structure is correct."""
+    project_root = Path.cwd()
+
+    # Check main directories
+    assert (project_root / "loaders").exists()
+    assert (project_root / "models").exists()
+    assert (project_root / "tests").exists()
+
+    # Check test directories
+    assert (project_root / "tests" / "unit").exists()
+    assert (project_root / "tests" / "integration").exists()
+
+    # Check configuration files
+    assert (project_root / "pyproject.toml").exists()
+    assert (project_root / ".gitignore").exists()
+
+
+def test_gitignore_entries():
+    """Test that .gitignore contains required entries."""
+    gitignore_path = Path.cwd() / ".gitignore"
+    assert gitignore_path.exists()
+
+    content = gitignore_path.read_text()
+    required_entries = [
+        ".pytest_cache/",
+        ".coverage",
+        "htmlcov/",
+        "coverage.xml",
+        ".claude/"
+    ]
+
+    for entry in required_entries:
+        assert entry in content, f"Missing {entry} in .gitignore"
+
+
+def test_pyproject_toml_structure():
+    """Test that pyproject.toml has correct structure."""
+    pyproject_path = Path.cwd() / "pyproject.toml"
+    assert pyproject_path.exists()
+
+    content = pyproject_path.read_text()
+
+    # Check required sections
+    required_sections = [
+        "[tool.poetry]",
+        "[tool.pytest.ini_options]",
+        "[tool.coverage.run]",
+        "[tool.coverage.report]",
+        "[tool.poetry.scripts]"
+    ]
+
+    for section in required_sections:
+        assert section in content, f"Missing {section} in pyproject.toml"
+
+
+def test_conftest_exists():
+    """Test that conftest.py exists and is readable."""
+    conftest_path = Path.cwd() / "tests" / "conftest.py"
+    assert conftest_path.exists()
+
+    content = conftest_path.read_text()
+    assert "pytest" in content
+    assert "fixture" in content
+
+
+class TestInfrastructureValidation:
+    """Test class for infrastructure validation."""
+
+    def test_class_based_tests_work(self):
+        """Test that class-based tests work."""
+        assert True
+
+    def test_assertions_work(self):
+        """Test various assertion types."""
+        assert 1 == 1
+        assert "test" != "fail"
+        assert len("hello") == 5
+        assert 5 > 3
+
+    def test_exception_handling(self):
+        """Test exception handling."""
+        try:
+            raise ValueError("test error")
+        except ValueError as e:
+            assert str(e) == "test error"
+        else:
+            assert False, "Exception should have been raised"
+
+
+if __name__ == "__main__":
+    # Run tests manually if pytest is not available
+    test_functions = [
+        test_basic_python_functionality,
+        test_pathlib_functionality,
+        test_temporary_files,
+        test_mock_functionality,
+        test_project_structure,
+        test_gitignore_entries,
+        test_pyproject_toml_structure,
+        test_conftest_exists,
+    ]
+
+    print("Running basic infrastructure tests...")
+
+    for test_func in test_functions:
+        try:
+            test_func()
+            print(f"✓ {test_func.__name__}")
+        except Exception as e:
+            print(f"✗ {test_func.__name__}: {e}")
+
+    # Run class-based tests
+    test_class = TestInfrastructureValidation()
+    class_methods = [
+        test_class.test_class_based_tests_work,
+        test_class.test_assertions_work,
+        test_class.test_exception_handling,
+    ]
+
+    for test_method in class_methods:
+        try:
+            test_method()
+            print(f"✓ {test_method.__name__}")
+        except Exception as e:
+            print(f"✗ {test_method.__name__}: {e}")
+
+    print("Basic infrastructure tests completed!")
\ No newline at end of file
diff --git a/tests/test_infrastructure.py b/tests/test_infrastructure.py
new file mode 100644
index 0000000..d19cd0f
--- /dev/null
+++ b/tests/test_infrastructure.py
@@ -0,0 +1,187 @@
+"""Test infrastructure validation (requires pytest and dependencies)."""
+
+try:
+    import pytest
+    import torch
+    import numpy as np
+    DEPENDENCIES_AVAILABLE = True
+except ImportError:
+    DEPENDENCIES_AVAILABLE = False
+
+from pathlib import Path
+from unittest.mock import Mock
+
+
+class TestInfrastructure:
+    """Test the testing infrastructure setup."""
+
+    def test_pytest_working(self):
+        """Test that pytest is working correctly."""
+        assert True
+
+    def test_fixtures_available(self, temp_dir, mock_config, sample_tensor):
+        """Test that fixtures are properly loaded."""
+        assert temp_dir.exists()
+        assert isinstance(mock_config, dict)
+        assert "model" in mock_config
+        assert isinstance(sample_tensor, torch.Tensor)
+        assert sample_tensor.shape == (1, 3, 256, 704)
+
+    def test_mock_functionality(self, mock_model):
+        """Test that mocking works correctly."""
+        result = mock_model.forward(torch.randn(1, 3, 256, 704))
+        assert "pred_coords" in result
+        assert "pred_semantics" in result
+        mock_model.forward.assert_called_once()
+
+    @pytest.mark.unit
+    def test_unit_marker(self):
+        """Test unit test marker."""
+        assert True
+
+    @pytest.mark.integration
+    def test_integration_marker(self):
+        """Test integration test marker."""
+        assert True
+
+    def test_numpy_arrays(self, sample_points, sample_occupancy_grid):
+        """Test numpy array fixtures."""
+        assert isinstance(sample_points, np.ndarray)
+        assert sample_points.shape == (1000, 3)
+        assert sample_points.dtype == np.float32
+
+        assert isinstance(sample_occupancy_grid, np.ndarray)
+        assert sample_occupancy_grid.shape == (200, 200, 16)
+        assert sample_occupancy_grid.dtype == np.int32
+
+    def test_batch_data(self, sample_batch_dict):
+        """Test batch data fixture."""
+        assert "img" in sample_batch_dict
+        assert "img_metas" in sample_batch_dict
+        assert "gt_occ" in sample_batch_dict
+
+        img = sample_batch_dict["img"]
+        assert img.shape == (1, 6, 3, 256, 704)
+
+    def test_camera_params(self, sample_camera_params):
+        """Test camera parameters fixture."""
+        assert "intrinsics" in sample_camera_params
+        assert "extrinsics" in sample_camera_params
+        assert "distortion" in sample_camera_params
+
+        intrinsics = sample_camera_params["intrinsics"]
+        assert intrinsics.shape == (3, 3)
+
+    def test_lidar_data(self, sample_lidar_data):
+        """Test LiDAR data fixture."""
+        assert "points" in sample_lidar_data
+        assert "labels" in sample_lidar_data
+
+        points = sample_lidar_data["points"]
+        labels = sample_lidar_data["labels"]
+        assert points.shape[0] == labels.shape[0]
+        assert points.shape[1] == 4  # x, y, z, intensity
+
+    def test_random_seed_reproducibility(self):
+        """Test that random seeds are set for reproducibility."""
+        # Generate random numbers twice
+        np_rand1 = np.random.random(10)
+        torch_rand1 = torch.rand(10)
+
+        # Reset seeds (this should happen automatically per test)
+        np.random.seed(42)
+        torch.manual_seed(42)
+
+        np_rand2 = np.random.random(10)
+        torch_rand2 = torch.rand(10)
+
+        # Should be identical due to seed setting
+        np.testing.assert_array_equal(np_rand1, np_rand2)
+        torch.testing.assert_close(torch_rand1, torch_rand2)
+
+    def test_temp_directory_cleanup(self, temp_dir, cleanup_files):
+        """Test temporary directory and cleanup functionality."""
+        test_file = temp_dir / "cleanup_test.txt"
+        test_file.write_text("test content")
+
+        cleanup_files(test_file)
+        assert test_file.exists()  # Should exist during test
+
+    def test_device_fixture(self, mock_device):
+        """Test device fixture."""
+        assert mock_device in ["cuda", "cpu"]
+
+    def test_checkpoint_fixture(self, mock_checkpoint):
+        """Test checkpoint fixture."""
+        assert "model" in mock_checkpoint
+        assert "optimizer" in mock_checkpoint
+        assert "epoch" in mock_checkpoint
+        assert isinstance(mock_checkpoint["epoch"], int)
+
+    def test_mmcv_config_fixture(self, mock_mmcv_config):
+        """Test mmcv config fixture."""
+        assert hasattr(mock_mmcv_config, "model")
+        assert hasattr(mock_mmcv_config, "data")
+        assert hasattr(mock_mmcv_config, "optimizer")
+
+
+class TestProjectStructure:
+    """Test that the project structure is properly set up."""
+
+    def test_package_directories_exist(self):
+        """Test that main package directories exist."""
+        assert Path("loaders").exists()
+        assert Path("models").exists()
+
+    def test_test_directories_exist(self):
+        """Test that test directories exist."""
+        assert Path("tests").exists()
+        assert Path("tests/unit").exists()
+        assert Path("tests/integration").exists()
+
+    def test_init_files_exist(self):
+        """Test that __init__.py files exist in test directories."""
+        assert Path("tests/__init__.py").exists()
+        assert Path("tests/unit/__init__.py").exists()
+        assert Path("tests/integration/__init__.py").exists()
+
+    def test_conftest_exists(self):
+        """Test that conftest.py exists."""
+        assert Path("tests/conftest.py").exists()
+
+    def test_pyproject_exists(self):
+        """Test that pyproject.toml exists."""
+        assert Path("pyproject.toml").exists()
+
+
+class TestConfiguration:
+    """Test that pytest configuration is working."""
+
+    def test_markers_defined(self):
+        """Test that custom markers are properly defined."""
+        # This test will fail if markers are not properly configured
+        # and --strict-markers is enabled
+        pass
+
+    def test_coverage_settings(self):
+        """Test that coverage is configured."""
+        # This is more of a documentation test
+        # Coverage settings are tested during actual test runs
+        assert True
+
+    @pytest.mark.slow
+    def test_slow_marker(self):
+        """Test slow marker functionality."""
+        import time
+        time.sleep(0.01)  # Simulate slow test
+        assert True
+
+    @pytest.mark.gpu
+    def test_gpu_marker(self):
+        """Test GPU marker functionality."""
+        # This test would require GPU in actual usage
+        assert True
+
+
+if __name__ == "__main__":
+    pytest.main([__file__, "-v"])
\ No newline at end of file
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 0000000..07c9273
--- /dev/null
+++ b/tests/unit/__init__.py
@@ -0,0 +1 @@
+# Unit tests package
\ No newline at end of file
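
For reference, a new test module added under this layout might look like the sketch below. The file name `tests/unit/test_example_fixtures.py` and the individual test names are illustrative only and are not part of the diff above; the sketch simply exercises the `unit`, `integration`, and `slow` markers and the `sample_tensor` / `mock_config` fixtures defined in `tests/conftest.py`.

```python
# tests/unit/test_example_fixtures.py -- illustrative sketch, not part of the diff above.
"""Example unit tests following the conventions in tests/README.md."""

import pytest
import torch


@pytest.mark.unit
def test_sample_tensor_shape(sample_tensor):
    """The shared `sample_tensor` fixture yields a single 3x256x704 image tensor."""
    assert isinstance(sample_tensor, torch.Tensor)
    assert sample_tensor.shape == (1, 3, 256, 704)


@pytest.mark.unit
def test_mock_config_sections(mock_config):
    """The shared `mock_config` fixture exposes the main configuration sections."""
    for key in ("model", "dataset", "train_cfg", "test_cfg"):
        assert key in mock_config


@pytest.mark.integration
@pytest.mark.slow
def test_markers_can_be_combined():
    """Markers can be stacked; `pytest -m "not slow"` would skip this test."""
    assert True
```

Because `tests/conftest.py` sits above `tests/unit/`, the fixtures are picked up automatically; such a file could be run with `poetry run pytest tests/unit -m unit` under the configuration introduced in this diff.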