Testing Guide

This guide covers testing practices, patterns, and tools for the ProjectX Python SDK. Learn how to write effective tests for async trading systems, mock external dependencies, and ensure code quality.

Testing Philosophy

The ProjectX SDK follows a comprehensive testing strategy:

  • Unit Tests: Fast, isolated tests for individual components
  • Integration Tests: Test component interactions
  • End-to-End Tests: Full workflow testing
  • Performance Tests: Ensure scalability and performance
  • Real-time Tests: Validate real-time data handling (market hours only)
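
Each category maps to a pytest marker (registered in conftest.py under Test Configuration below), so a test declares its category directly. A minimal illustration:

import pytest

@pytest.mark.unit
def test_tick_rounding():
    """Fast and isolated: no network, no real client."""
    ...

@pytest.mark.integration
@pytest.mark.slow
def test_suite_startup():
    """Exercises several components together; may take >5 seconds."""
    ...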

Test Environment Setup

Prerequisites

# Install development dependencies
uv sync --dev

# Set up test environment variables
echo "PROJECT_X_API_KEY=test_key" > .env.test
echo "PROJECT_X_USERNAME=test_user" >> .env.test
echo "PROJECT_X_ACCOUNT_NAME=test_account" >> .env.test

Running Tests

# Run all tests
uv run pytest

# Run with coverage
uv run pytest --cov=project_x_py --cov-report=html

# Run specific test categories
uv run pytest -m unit                    # Unit tests only
uv run pytest -m integration            # Integration tests
uv run pytest -m "not slow"             # Skip slow tests
uv run pytest -m "not realtime"         # Skip real-time tests

# Run specific test files
uv run pytest tests/test_client.py -v
uv run pytest tests/order_manager/ -v

# Run tests with detailed output
uv run pytest -v -s --tb=short

Test Structure and Organization

Directory Structure

tests/
├── __init__.py
├── conftest.py                 # Shared fixtures
├── unit/                       # Unit tests
│   ├── test_client_auth.py
│   ├── test_order_manager.py
│   └── test_indicators.py
├── integration/                # Integration tests
│   ├── test_trading_suite.py
│   └── test_realtime_flow.py
├── benchmarks/                 # Performance tests
│   └── test_performance.py
└── fixtures/                   # Test data
    ├── market_data.json
    └── order_responses.json

Test Configuration

# conftest.py - Shared test configuration
import pytest
import asyncio
import json
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock
from project_x_py import ProjectX, TradingSuite
from project_x_py.models import InstrumentInfo, AccountInfo

@pytest.fixture
def event_loop():
    """Create event loop for async tests."""
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()

@pytest.fixture
def mock_client():
    """Mock ProjectX client for testing."""
    client = AsyncMock(spec=ProjectX)

    # Set up common mock responses
    client.authenticate.return_value = True
    client.get_account_info.return_value = AccountInfo(
        account_id="TEST123",
        balance=100000.0,
        available_balance=95000.0,
        currency="USD"
    )

    return client

@pytest.fixture
def sample_instrument_info():
    """Sample instrument info for testing."""
    return InstrumentInfo(
        id="CON.F.US.MNQ.U25",
        symbol="MNQ",
        description="Micro E-mini NASDAQ",
        tick_size=0.25,
        multiplier=20,
        exchange="CME"
    )

@pytest.fixture
def sample_market_data():
    """Load sample market data from fixtures."""
    fixtures_path = Path(__file__).parent / "fixtures"
    with open(fixtures_path / "market_data.json") as f:
        return json.load(f)

pytest_plugins = []

# Custom markers for test categorization
def pytest_configure(config):
    """Configure custom pytest markers."""
    config.addinivalue_line("markers", "unit: Unit tests")
    config.addinivalue_line("markers", "integration: Integration tests")
    config.addinivalue_line("markers", "slow: Slow tests (may take >5 seconds)")
    config.addinivalue_line("markers", "realtime: Tests requiring market hours")
    config.addinivalue_line("markers", "performance: Performance benchmark tests")

Unit Testing Patterns

Testing Async Functions

import pytest
import asyncio
from unittest.mock import AsyncMock, patch
from project_x_py.client import ProjectX
from project_x_py.exceptions import AuthenticationError

class TestProjectXAuth:
    """Test authentication functionality."""

    @pytest.mark.asyncio
    async def test_authenticate_success(self, mock_client):
        """Test successful authentication."""
        # Arrange
        mock_client.authenticate.return_value = True

        # Act
        result = await mock_client.authenticate()

        # Assert
        assert result is True
        mock_client.authenticate.assert_called_once()

    @pytest.mark.asyncio
    async def test_authenticate_failure(self):
        """Test authentication failure."""
        # Arrange
        with patch('project_x_py.client.auth.AuthMixin._authenticate') as mock_auth:
            mock_auth.side_effect = AuthenticationError("Invalid credentials")

            async with ProjectX.from_env() as client:
                # Act & Assert
                with pytest.raises(AuthenticationError, match="Invalid credentials"):
                    await client.authenticate()

    @pytest.mark.asyncio
    @pytest.mark.timeout(5)  # Ensure test doesn't hang (requires pytest-timeout)
    async def test_authenticate_timeout(self):
        """Test authentication timeout handling."""
        with patch('aiohttp.ClientSession.post') as mock_post:
            mock_post.side_effect = asyncio.TimeoutError()

            async with ProjectX.from_env() as client:
                with pytest.raises(asyncio.TimeoutError):
                    await client.authenticate()

Testing Real-time Components

import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from project_x_py.realtime import ProjectXRealtimeClient
from project_x_py import EventType

class TestRealtimeClient:
    """Test real-time client functionality."""

    @pytest.mark.asyncio
    async def test_connection_establishment(self):
        """Test WebSocket connection establishment."""
        # Arrange
        mock_connection = AsyncMock()
        mock_hub = MagicMock()

        with patch('signalrcore.hub_connection_builder.HubConnectionBuilder') as mock_builder:
            mock_builder.return_value.build.return_value = mock_connection
            mock_connection.start = AsyncMock()

            # Act
            client = ProjectXRealtimeClient("test_token", "test_url")
            await client.connect()

            # Assert
            assert client.is_connected
            mock_connection.start.assert_called_once()

    @pytest.mark.asyncio
    async def test_event_handling(self):
        """Test event registration and triggering."""
        # Arrange
        client = ProjectXRealtimeClient("test_token", "test_url")
        event_handler = AsyncMock()
        test_data = {"price": 15000.0, "volume": 100}

        # Act
        await client.on(EventType.TICK, event_handler)
        await client._emit_event(EventType.TICK, test_data)

        # Assert
        event_handler.assert_called_once()
        call_args = event_handler.call_args[0][0]
        assert call_args.event_type == EventType.TICK
        assert call_args.data == test_data

    @pytest.mark.asyncio
    async def test_subscription_management(self):
        """Test market data subscription management."""
        # Arrange
        client = ProjectXRealtimeClient("test_token", "test_url")
        client._market_connection = AsyncMock()

        # Act
        await client.subscribe_to_ticks("MNQ")
        await client.unsubscribe_from_ticks("MNQ")

        # Assert
        client._market_connection.invoke.assert_called()
        assert "MNQ" not in client._subscribed_symbols

Testing Data Processing

import pytest
import polars as pl
from project_x_py.indicators import SMA, RSI, MACD
from project_x_py.realtime_data_manager import RealtimeDataManager

class TestIndicators:
    """Test technical indicator calculations."""

    def test_sma_calculation(self):
        """Test Simple Moving Average calculation."""
        # Arrange
        data = pl.DataFrame({
            "close": [100, 101, 102, 103, 104, 105, 106, 107, 108, 109]
        })

        # Act
        sma_5 = data.pipe(SMA, period=5)

        # Assert
        assert len(sma_5) == len(data)
        assert sma_5[4] == pytest.approx(102.0, abs=0.01)  # First valid SMA
        assert sma_5[-1] == pytest.approx(107.0, abs=0.01)  # Last SMA

    def test_rsi_calculation(self):
        """Test RSI calculation with known values."""
        # Arrange - Create data with known RSI pattern
        prices = [44, 44.34, 44.09, 44.15, 43.61, 44.33, 44.83, 45.85, 46.08,
                 45.89, 46.03, 46.83, 47.69, 46.49, 46.26, 47.09]
        data = pl.DataFrame({"close": prices})

        # Act
        rsi = data.pipe(RSI, period=14)

        # Assert
        assert len(rsi) == len(data)
        # RSI should be between 0 and 100
        valid_rsi = [x for x in rsi if x is not None]  # skip warm-up nulls
        assert all(0 <= x <= 100 for x in valid_rsi)

    @pytest.mark.asyncio
    async def test_realtime_data_processing(self):
        """Test real-time data processing and bar construction."""
        # Arrange
        data_manager = RealtimeDataManager(["1min"])

        # Simulate tick data
        ticks = [
            {"timestamp": "2024-01-01T09:30:00Z", "price": 15000.0, "volume": 10},
            {"timestamp": "2024-01-01T09:30:30Z", "price": 15001.0, "volume": 5},
            {"timestamp": "2024-01-01T09:31:00Z", "price": 15002.0, "volume": 8},
        ]

        # Act
        for tick in ticks:
            await data_manager.process_tick(tick)

        # Assert
        bars = await data_manager.get_data("1min")
        assert len(bars) >= 1
        assert bars[0]["open"] == 15000.0
        assert bars[0]["high"] >= 15000.0

Integration Testing

Testing Component Interactions

import pytest
import polars as pl
from unittest.mock import AsyncMock, patch
from project_x_py import TradingSuite
from project_x_py.exceptions import OrderRejectedError

class TestTradingSuiteIntegration:
    """Test TradingSuite component integration."""

    @pytest.mark.asyncio
    @pytest.mark.integration
    async def test_full_trading_workflow(self):
        """Test complete trading workflow integration."""
        # Arrange
        with patch('project_x_py.client.ProjectX.from_env') as mock_client_factory:
            mock_client = AsyncMock()
            mock_client_factory.return_value.__aenter__.return_value = mock_client

            # Mock successful responses
            mock_client.authenticate.return_value = True
            mock_client.get_instruments.return_value = [
                {"id": "CON.F.US.MNQ.U25", "symbol": "MNQ"}
            ]
            mock_client.get_bars.return_value = pl.DataFrame({
                "timestamp": ["2024-01-01T10:00:00Z"] * 100,
                "open": [15000.0] * 100,
                "high": [15010.0] * 100,
                "low": [14990.0] * 100,
                "close": [15005.0] * 100,
                "volume": [1000] * 100
            })

            # Act
            suite = await TradingSuite.create("MNQ", timeframes=["1min"])

            # Verify initialization
            assert suite.is_connected
            assert suite.instrument == "MNQ"
            assert hasattr(suite, 'data')
            assert hasattr(suite, 'orders')
            assert hasattr(suite, 'positions')

            # Test data access
            bars = await suite.data.get_data("1min")
            assert len(bars) > 0

    @pytest.mark.asyncio
    @pytest.mark.integration
    async def test_order_position_integration(self):
        """Test order and position manager integration."""
        # This would test the flow from order placement to position tracking
        pass  # Implementation depends on your specific integration patterns
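
One possible shape for that test, sketched with plain mocks (the method names place_market_order and get_position and the response payloads are assumptions for illustration, not confirmed SDK API):

@pytest.mark.asyncio
@pytest.mark.integration
async def test_order_fill_updates_position_sketch():
    """Hedged sketch: an order fill should flow through to position tracking."""
    orders = AsyncMock()
    positions = AsyncMock()

    # Assumed method names and payloads -- adapt to the real managers
    orders.place_market_order.return_value = {"order_id": "1", "status": "filled"}
    positions.get_position.return_value = {"symbol": "MNQ", "size": 1}

    fill = await orders.place_market_order(symbol="MNQ", size=1)
    assert fill["status"] == "filled"

    position = await positions.get_position("MNQ")
    assert position["size"] == 1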

Mocking External Services

import pytest
import aioresponses
from unittest.mock import AsyncMock, patch
from project_x_py import ProjectX

class TestAPIIntegration:
    """Test API integration with mocked HTTP responses."""

    @pytest.mark.asyncio
    async def test_market_data_api_integration(self):
        """Test market data API integration with mocked responses."""
        with aioresponses.aioresponses() as mocked:
            # Mock API response
            mocked.get(
                'https://api.example.com/v1/bars/MNQ',
                payload={
                    "bars": [
                        {
                            "timestamp": "2024-01-01T10:00:00Z",
                            "open": 15000.0,
                            "high": 15010.0,
                            "low": 14990.0,
                            "close": 15005.0,
                            "volume": 1000
                        }
                    ]
                }
            )

            # Test API call
            async with ProjectX.from_env() as client:
                bars = await client.get_bars("MNQ")
                assert len(bars) == 1
                assert bars[0]["close"] == 15005.0

Performance Testing

Benchmarking Critical Paths

import pytest
import time
import asyncio
from unittest.mock import patch
import polars as pl
from project_x_py.indicators import SMA, RSI
from project_x_py.realtime_data_manager import RealtimeDataManager

class TestPerformance:
    """Performance benchmarks for critical components."""

    @pytest.mark.performance
    def test_indicator_calculation_performance(self, benchmark):  # 'benchmark' fixture from pytest-benchmark
        """Benchmark indicator calculation performance."""
        # Arrange
        large_dataset = pl.DataFrame({
            "close": list(range(10000))  # 10k data points
        })

        # Act & Assert
        result = benchmark(large_dataset.pipe, SMA, period=20)
        assert len(result) == 10000

    @pytest.mark.performance
    @pytest.mark.asyncio
    async def test_concurrent_api_calls_performance(self):
        """Test performance of concurrent API calls."""
        # Arrange
        symbols = ["MNQ", "MES", "MYM", "M2K"]  # Multiple symbols

        start_time = time.perf_counter()

        # Act - Concurrent calls
        with patch('project_x_py.client.ProjectX.get_bars') as mock_get_bars:
            mock_get_bars.return_value = pl.DataFrame({"close": [1, 2, 3]})

            tasks = [mock_get_bars(symbol) for symbol in symbols]
            results = await asyncio.gather(*tasks)

        end_time = time.perf_counter()

        # Assert
        assert len(results) == len(symbols)
        assert end_time - start_time < 1.0  # Should complete in under 1 second

    @pytest.mark.performance
    def test_memory_usage_realtime_data(self):
        """Test memory usage patterns for real-time data."""
        import psutil
        import os

        process = psutil.Process(os.getpid())
        initial_memory = process.memory_info().rss

        # Simulate heavy real-time data processing
        data_manager = RealtimeDataManager(max_bars_per_timeframe=1000)

        # Add lots of data
        for i in range(5000):
            tick = {
                "timestamp": f"2024-01-01T10:{i//60:02d}:{i%60:02d}Z",
                "price": 15000.0 + i * 0.25,
                "volume": 10
            }
            # Simulate processing without await for memory test
            data_manager._process_tick_sync(tick)

        final_memory = process.memory_info().rss
        memory_increase = (final_memory - initial_memory) / 1024 / 1024  # MB

        # Memory increase should be reasonable (< 100MB for this test)
        assert memory_increase < 100, f"Memory increased by {memory_increase:.2f}MB"

Real-time Testing

Testing WebSocket Connections

import asyncio
import time

import pytest
from unittest.mock import AsyncMock, patch
from project_x_py import EventType
from project_x_py.realtime import ProjectXRealtimeClient

class TestRealTimeComponents:
    """Test real-time components (requires market hours for full testing)."""

    @pytest.mark.realtime
    @pytest.mark.asyncio
    @pytest.mark.skip(reason="Requires market hours and live connection")
    async def test_live_data_stream(self):
        """Test live data streaming (only run during market hours)."""
        # This test would connect to actual WebSocket feeds
        client = ProjectXRealtimeClient("real_token", "wss://api.real.com/ws")

        received_data = []

        async def data_handler(event):
            received_data.append(event.data)
            if len(received_data) >= 5:  # Stop after 5 ticks
                await client.disconnect()

        await client.on(EventType.TICK, data_handler)
        await client.connect()
        await client.subscribe_to_ticks("MNQ")

        # Wait for some data (timeout after 30 seconds)
        timeout = 30
        start_time = time.time()

        while len(received_data) < 5 and (time.time() - start_time) < timeout:
            await asyncio.sleep(0.1)

        assert len(received_data) >= 5, "Should receive at least 5 ticks"
        assert all("price" in tick for tick in received_data)

    @pytest.mark.asyncio
    async def test_connection_resilience(self):
        """Test connection resilience and reconnection."""
        client = ProjectXRealtimeClient("test_token", "test_url")

        with patch.object(client, '_connection') as mock_connection:
            # Simulate connection failure
            mock_connection.start.side_effect = [
                ConnectionError("Connection failed"),  # First attempt fails
                None  # Second attempt succeeds
            ]

            # Act
            await client.connect_with_retry(max_retries=2)

            # Assert
            assert mock_connection.start.call_count == 2

Test Data Management

Using Fixtures

# tests/fixtures/market_data.json
{
  "bars_1min": [
    {
      "timestamp": "2024-01-01T10:00:00Z",
      "open": 15000.0,
      "high": 15010.0,
      "low": 14990.0,
      "close": 15005.0,
      "volume": 1000
    }
  ],
  "ticks": [
    {
      "timestamp": "2024-01-01T10:00:00Z",
      "price": 15000.0,
      "size": 10,
      "bid": 14999.5,
      "ask": 15000.5
    }
  ]
}

# Using fixtures in tests
@pytest.fixture
def sample_bars(sample_market_data):
    """Convert fixture data to Polars DataFrame."""
    bars_data = sample_market_data["bars_1min"]
    return pl.DataFrame(bars_data)

def test_with_fixture_data(sample_bars):
    """Test using fixture data."""
    assert len(sample_bars) > 0
    assert "close" in sample_bars.columns

Parameterized Tests

@pytest.mark.parametrize("period,expected_length", [
    (5, 5),
    (10, 10),
    (20, 20),
])
def test_sma_different_periods(period, expected_length):
    """Test SMA with different periods."""
    data = pl.DataFrame({"close": list(range(100))})
    sma = data.pipe(SMA, period=period)

    # First 'period-1' values should be null, rest should be valid
    valid_values = sma[period-1:]
    assert len(valid_values) == len(data) - period + 1

@pytest.mark.parametrize("symbol,expected_multiplier", [
    ("MNQ", 20),
    ("MES", 50),
    ("MYM", 50),
])
@pytest.mark.asyncio
async def test_instrument_multipliers(symbol, expected_multiplier, mock_client):
    """Test instrument-specific multipliers."""
    # Mock instrument data
    mock_client.get_instruments.return_value = [{
        "symbol": symbol,
        "multiplier": expected_multiplier
    }]

    instruments = await mock_client.get_instruments()
    instrument = next(i for i in instruments if i["symbol"] == symbol)
    assert instrument["multiplier"] == expected_multiplier

Testing Best Practices

Async Testing Patterns

# ✅ Good: Proper async test structure
@pytest.mark.asyncio
async def test_async_operation():
    async with ProjectX.from_env() as client:
        result = await client.authenticate()
        assert result is True

# ❌ Bad: Missing async/await
def test_async_operation_bad():
    client = ProjectX.from_env()
    result = client.authenticate()  # This won't work

Mock Best Practices

# ✅ Good: Specific mocking with proper specs
@pytest.mark.asyncio
@patch('project_x_py.client.ProjectX.authenticate')
async def test_with_specific_mock(mock_auth):
    mock_auth.return_value = True
    # Test implementation

# ✅ Good: Mock at the right level
with patch('aiohttp.ClientSession.post') as mock_post:
    mock_post.return_value.__aenter__.return_value.json = AsyncMock(return_value={"success": True})
    # Test HTTP client behavior

# ❌ Bad: Over-mocking (mocking too much)
@patch('project_x_py.client.ProjectX')  # Mocks entire class
async def test_over_mocked(mock_client):
    ...  # Test becomes meaningless

Error Testing

@pytest.mark.asyncio
async def test_error_handling():
    """Test proper error handling and propagation."""
    with patch('project_x_py.client.ProjectX.place_order') as mock_place:
        mock_place.side_effect = OrderRejectedError("Insufficient funds")

        with pytest.raises(OrderRejectedError) as exc_info:
            async with ProjectX.from_env() as client:
                await client.place_order({"symbol": "MNQ"})

        assert "Insufficient funds" in str(exc_info.value)

Test Organization

class TestOrderManager:
    """Group related tests in classes."""

    @pytest.fixture(autouse=True)
    def setup(self):
        """Setup run before each test method."""
        self.mock_client = AsyncMock()

    @pytest.mark.asyncio
    async def test_place_order_success(self):
        """Test successful order placement."""
        pass

    @pytest.mark.asyncio
    async def test_place_order_failure(self):
        """Test order placement failure."""
        pass

    @pytest.mark.asyncio
    async def test_cancel_order(self):
        """Test order cancellation."""
        pass

Continuous Integration

GitHub Actions Configuration

# .github/workflows/test.yml
name: Tests

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.11", "3.12"]

    steps:
    - uses: actions/checkout@v4

    - name: Install uv
      uses: astral-sh/setup-uv@v1

    - name: Set up Python ${{ matrix.python-version }}
      run: uv python install ${{ matrix.python-version }}

    - name: Install dependencies
      run: uv sync --dev

    - name: Run tests
      run: |
        uv run pytest --cov=project_x_py --cov-report=xml -m "not realtime"

    - name: Upload coverage
      uses: codecov/codecov-action@v3
      with:
        file: ./coverage.xml

Testing Checklist

Before submitting code:

  • All new code has corresponding tests
  • Tests cover both success and error cases
  • Async code uses proper async testing patterns
  • Mocks are used appropriately (not over-mocked)
  • Performance-critical code has benchmark tests
  • Integration tests verify component interactions
  • Tests are properly categorized with markers
  • Test coverage is maintained/improved
  • Tests pass locally and in CI

Debugging Tests

Common Issues

  1. Async Test Failures

    # Run with detailed async debugging
    uv run pytest -v -s --tb=long tests/test_async_feature.py
    

  2. Mock Issues

    # Debug mock calls
    print(f"Mock called: {mock_object.called}")
    print(f"Call args: {mock_object.call_args_list}")
    

  3. Performance Test Variations

    # Run performance tests multiple times (requires the pytest-repeat plugin)
    uv run pytest -m performance --count=5
    

By following these testing guidelines, you'll ensure the ProjectX SDK maintains high quality, reliability, and performance standards suitable for production trading environments.