21 changes: 21 additions & 0 deletions README.md
@@ -15,6 +15,7 @@ pyRail is a Python library that provides a convenient interface for interacting
- Supports API endpoints: stations, liveboard, vehicle, connections, and disturbances.
- Caching and conditional GET requests using ETags.
- Rate limiting to handle API request limits efficiently.
- Automatic retry with exponential backoff for network resilience.

## Installation

@@ -141,6 +142,26 @@ async with iRail() as api:

Exceeding the request limit will cause the server to return 429 responses. You can monitor rate limiting through debug logs.

### Network Resilience and Retry Logic

pyRail implements automatic retry logic with exponential backoff to handle transient network issues and server errors:

- **Network Errors**: Connection timeouts, DNS failures, and other network-related issues are automatically retried with exponential backoff (1s, then 2s, capped at 10s) for up to three attempts in total.
- **Server Errors**: HTTP 5xx server errors trigger automatic retries, allowing recovery from temporary server issues.
- **Client Errors**: HTTP 4xx errors (except 429 rate limits) are not retried, as they typically indicate permanent issues like invalid requests.
- **Rate Limits**: HTTP 429 responses are handled separately with the existing rate limit logic, respecting the server's `Retry-After` header.

The retry functionality is powered by the [tenacity](https://github.com/jd/tenacity) library and works transparently:

```python
async with iRail() as api:
# Network errors and server errors are automatically retried
# No additional code needed - retry logic is built-in
stations = await api.get_stations()
```
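
If a request still fails after the final attempt, the client logs the error and the call returns `None` instead of raising, so you can check the result explicitly. A minimal sketch:

```python
async with iRail() as api:
    stations = await api.get_stations()
    if stations is None:
        # Retries were exhausted, or the response could not be parsed.
        print("Could not reach the iRail API")
```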

You can monitor retry attempts through the warning logs when network issues occur.
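
For example, a minimal logging setup will surface those warnings (this sketch assumes pyRail uses the standard `logging` module with loggers named after its package, e.g. `pyrail`):

```python
import logging

# Warning-level output is where retry attempts are reported.
logging.basicConfig(level=logging.WARNING)

# Optionally narrow the level to pyRail's loggers only; the "pyrail" logger
# name is an assumption based on the package layout.
logging.getLogger("pyrail").setLevel(logging.WARNING)
```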

## Development

The devcontainer setup includes all necessary dependencies and tools for development.
1 change: 1 addition & 0 deletions pyproject.toml
@@ -20,6 +20,7 @@ packages = [
python = "^3.12"
aiohttp = "^3.11.11"
mashumaro = {extras = ["orjson"], version = "^3.15"}
tenacity = "^9.0.0"

[tool.poetry.group.test.dependencies]
pytest = "^8.3.4"
64 changes: 60 additions & 4 deletions pyrail/irail.py
@@ -9,6 +9,7 @@
from typing import Any, Type

from aiohttp import ClientError, ClientResponse, ClientSession
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential

from pyrail.models import (
CompositionApiResponse,
@@ -325,12 +326,66 @@ async def _handle_response(
logger.error("Request failed with status code: %s, response: %s", response.status, await response.text())
return None

def _should_retry_on_status(self, response: ClientResponse) -> bool:
"""Determine if a response status code should trigger a retry.

Args:
response: The HTTP response to check

Returns:
bool: True if the status should trigger a retry, False otherwise

"""
# Retry on server errors (5xx) but not on client errors (4xx except 429)
# 429 is handled separately in _handle_response
return 500 <= response.status < 600

@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=1, max=10),
retry=retry_if_exception_type(ClientError),
reraise=True
)
Comment on lines +343 to +348
Copilot AI Sep 24, 2025

The retry decorator configuration creates a potentially confusing API design. The reraise=True parameter will cause the original exception to be raised after all retries are exhausted, but the calling code in _do_request catches ClientError and logs 'Request failed after retries'. This makes it unclear whether the retry logic or the calling method is responsible for final error handling.

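One way to make that ownership explicit (an illustrative sketch, not this PR's code: `retry_error_callback` is tenacity's hook for exhausted retries, and `fetch_json`/`_give_up` are hypothetical names) is to drop `reraise=True` and let tenacity itself turn the final failure into `None`, so the caller never needs its own `except ClientError` branch:

```python
import logging

from aiohttp import ClientError, ClientSession
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential

logger = logging.getLogger(__name__)


def _give_up(retry_state) -> None:
    # Runs once after the last attempt; its return value becomes the call's result.
    logger.error("Request failed after %s attempts", retry_state.attempt_number)
    return None


@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=1, max=10),
    retry=retry_if_exception_type(ClientError),
    retry_error_callback=_give_up,  # reraise stays False, so this decides the final outcome
)
async def fetch_json(session: ClientSession, url: str) -> dict | None:
    # Network-level ClientError triggers a retry; after the last attempt,
    # _give_up converts the failure into a plain None result.
    async with session.get(url) as response:
        return await response.json()
```

With that shape, the caller could simply propagate the helper's `None` result instead of catching `ClientError` itself.
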
async def _execute_http_request(
self, url: str, params: dict[str, Any], headers: dict[str, str], method: str, args: dict[str, Any] | None = None
) -> dict[str, Any] | None:
"""Execute the HTTP request with retry logic for network failures and server errors.

Args:
url: The URL to request
params: Query parameters
headers: Request headers
method: API method name for logging
args: Original method arguments for potential retry

Returns:
dict or None: Parsed response or None if failed

"""
if self.session is None:
logger.error("Session not initialized")
return None

try:
async with self.session.get(url, params=params, headers=headers) as response:
# Check if we should retry on this status code
if self._should_retry_on_status(response):
logger.warning("Server error %d for endpoint %s, will retry", response.status, method)
# Create a temporary exception to trigger tenacity retry
raise ClientError(f"Server error {response.status}")
Comment on lines +372 to +375
Copilot AI Sep 24, 2025

Creating an artificial exception to trigger the retry mechanism is an anti-pattern that makes the code harder to understand and maintain. Consider using tenacity's retry_if conditions or restructuring the retry logic to handle status codes more naturally rather than converting them to exceptions.

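A sketch of that suggestion (illustrative only; splitting the raw call into a `_fetch` helper is an assumption, not this PR's structure): let tenacity inspect the *result* of each attempt via `retry_if_result`, so a 5xx status triggers a retry without manufacturing a `ClientError`:

```python
from aiohttp import ClientError, ClientResponse, ClientSession
from tenacity import retry, retry_if_exception_type, retry_if_result, stop_after_attempt, wait_exponential


def _is_server_error(response: ClientResponse) -> bool:
    # Retry condition evaluated on the helper's return value, not on an exception.
    return 500 <= response.status < 600


@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=1, max=10),
    retry=retry_if_exception_type(ClientError) | retry_if_result(_is_server_error),
    reraise=True,
)
async def _fetch(session: ClientSession, url: str, params: dict, headers: dict) -> ClientResponse:
    # A 5xx response is simply returned; the retry_if_result condition above
    # asks tenacity to try again. The caller is responsible for closing the
    # final response (e.g. with `async with response:`).
    return await session.get(url, params=params, headers=headers)
```

The trade-off is that the response is consumed one level up, so `_handle_response` would be called by the caller on the returned response.
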

return await self._handle_response(response, method, args)
except ClientError as e:
logger.warning("Network error for endpoint %s: %s", method, e)
raise # Re-raise to trigger tenacity retry

async def _do_request(self, method: str, args: dict[str, Any] | None = None) -> dict[str, Any] | None:
"""Send an asynchronous request to the specified iRail API endpoint.

This method handles API requests with rate limiting, parameter validation,
and ETag-based caching. It supports conditional requests and manages
- various HTTP response scenarios.
+ various HTTP response scenarios. Network errors and server errors (5xx)
+ are automatically retried with exponential backoff using tenacity.

Args:
method (str): The iRail API endpoint method to request.
@@ -348,6 +403,7 @@ async def _do_request(self, method: str, args: dict[str, Any] | None = None) ->
- Implements rate limiting using a token-based mechanism
- Supports ETag-based conditional requests
- Handles rate limit (429) responses with automatic retry
- Retries network failures and server errors with exponential backoff
- Logs detailed information about request processing

"""
@@ -358,6 +414,7 @@ async def _do_request(self, method: str, args: dict[str, Any] | None = None) ->
if not self._validate_params(method, args or {}):
logger.error("Validation failed for method: %s with args: %s", method, args)
return None

async with self.lock:
await self._handle_rate_limit()

@@ -370,10 +427,9 @@ async def _do_request(self, method: str, args: dict[str, Any] | None = None) ->
request_headers: dict[str, str] = self._add_etag_header(method)

try:
- async with self.session.get(url, params=params, headers=request_headers) as response:
-     return await self._handle_response(response, method, args)
+ return await self._execute_http_request(url, params, request_headers, method, args)
  except ClientError as e:
- logger.error("Request failed due to an exception: %s", e)
+ logger.error("Request failed after retries due to an exception: %s", e)
return None

async def get_stations(self) -> StationsApiResponse | None:
116 changes: 116 additions & 0 deletions tests/test_irail.py
@@ -495,3 +495,119 @@ async def test_boolean_field_deserialization():
assert departure.left is False, "Departure left field should be False when '0'"
assert departure.is_extra is True, "Departure is_extra field should be True when '1'"
assert departure.platform_info.normal is True, "Platform normal field should be True when '1'"


@pytest.mark.asyncio
@patch("pyrail.irail.ClientSession.get")
async def test_retry_on_network_error(mock_get):
"""Test that network errors trigger retry with exponential backoff."""
from aiohttp import ClientError

# Mock the get method to fail first two times, then succeed
mock_response_success = AsyncMock()
mock_response_success.status = 200
mock_response_success.json = AsyncMock(return_value={"data": "success_after_retry"})

mock_get.side_effect = [
ClientError("Connection failed"),
ClientError("Timeout"),
AsyncMock(__aenter__=AsyncMock(return_value=mock_response_success))
]

async with iRail() as api:
response = await api._do_request("stations")

# Should succeed after retries
assert response == {"data": "success_after_retry"}
assert mock_get.call_count == 3


@pytest.mark.asyncio
@patch("pyrail.irail.ClientSession.get")
async def test_retry_on_server_error(mock_get):
"""Test that server errors (5xx) trigger retry."""
# Mock first response as 500 server error, second as success
mock_response_error = AsyncMock()
mock_response_error.status = 500
mock_response_error.text = AsyncMock(return_value="Internal Server Error")

mock_response_success = AsyncMock()
mock_response_success.status = 200
mock_response_success.json = AsyncMock(return_value={"data": "success_after_server_error"})
mock_response_success.headers = {}

mock_get.side_effect = [
AsyncMock(__aenter__=AsyncMock(return_value=mock_response_error)),
AsyncMock(__aenter__=AsyncMock(return_value=mock_response_success))
]

async with iRail() as api:
response = await api._do_request("stations")

# Should succeed after server error retry
assert response == {"data": "success_after_server_error"}
assert mock_get.call_count == 2


@pytest.mark.asyncio
@patch("pyrail.irail.ClientSession.get")
async def test_no_retry_on_client_error(mock_get):
"""Test that client errors (4xx except 429) do not trigger retry."""
mock_response = AsyncMock()
mock_response.status = 404
mock_response.text = AsyncMock(return_value="Not Found")
mock_get.return_value.__aenter__.return_value = mock_response

async with iRail() as api:
response = await api._do_request("stations")

# Should not retry on 404
assert response is None
assert mock_get.call_count == 1


@pytest.mark.asyncio
@patch("pyrail.irail.ClientSession.get")
async def test_retry_exhaustion(mock_get):
"""Test that after exhausting retries, the method returns None."""
from aiohttp import ClientError

# Always fail with network error
mock_get.side_effect = ClientError("Persistent network error")

async with iRail() as api:
response = await api._do_request("stations")

# Should return None after exhausting retries
assert response is None
# Should attempt 3 times (original + 2 retries)
assert mock_get.call_count == 3


@pytest.mark.asyncio
@patch("pyrail.irail.ClientSession.get")
async def test_preserve_rate_limit_behavior(mock_get):
"""Test that existing 429 rate limit handling is preserved."""
# Mock 429 response first, then success
mock_response_429 = AsyncMock()
mock_response_429.status = 429
mock_response_429.headers = {"Retry-After": "1"}

mock_response_success = AsyncMock()
mock_response_success.status = 200
mock_response_success.json = AsyncMock(return_value={"data": "success_after_rate_limit"})
mock_response_success.headers = {}

mock_get.side_effect = [
AsyncMock(__aenter__=AsyncMock(return_value=mock_response_429)),
AsyncMock(__aenter__=AsyncMock(return_value=mock_response_success))
]

# Mock sleep to avoid actual delay in tests
with patch("asyncio.sleep"):
async with iRail() as api:
response = await api._do_request("stations")

# Should succeed after rate limit handling
assert response == {"data": "success_after_rate_limit"}
assert mock_get.call_count == 2