Our Python service had zero tests. Every deploy was scary. Bugs in production were common. We were flying blind.

I introduced pytest and built a test suite. Now we have 85% coverage, catch bugs before production, and deploy with confidence.

Table of Contents

The Problem

No tests meant:

  • Manual testing before every deploy
  • Bugs discovered in production
  • Fear of refactoring
  • Slow development

We needed automated testing.

Installing Pytest

pip install pytest pytest-cov pytest-mock

First Test

test_calculator.py:

def add(a, b):
    """Return the sum of the two operands."""
    total = a + b
    return total

def test_add():
    """add() handles positive, mixed-sign, and all-zero operands."""
    for left, right, total in [(2, 3, 5), (-1, 1, 0), (0, 0, 0)]:
        assert add(left, right) == total

Run:

pytest

Output:

test_calculator.py .                                    [100%]
1 passed in 0.01s

Test Organization

Structure:

project/
├── src/
│   ├── __init__.py
│   ├── calculator.py
│   └── user_service.py
├── tests/
│   ├── __init__.py
│   ├── test_calculator.py
│   └── test_user_service.py
└── pytest.ini

pytest.ini:

[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*

Fixtures

Reusable test data:

import pytest

@pytest.fixture
def sample_user():
    """Provide a canned user record shared by the tests below."""
    return dict(id=1, name='John Doe', email='john@example.com')

def test_user_name(sample_user):
    """The fixture-provided user carries the expected name."""
    # pytest injects `sample_user` by matching the parameter name to the fixture.
    assert sample_user['name'] == 'John Doe'

def test_user_email(sample_user):
    """The fixture-provided user carries the expected email address."""
    assert sample_user['email'] == 'john@example.com'

Fixture Scope

@pytest.fixture(scope='function')  # Default, runs per test
def user():
    """Fresh user for every test (`create_user` is assumed defined elsewhere)."""
    return create_user()

@pytest.fixture(scope='module')  # Runs once per module
def database():
    """One DB connection shared by a module; code after `yield` is teardown."""
    db = Database()
    yield db
    db.close()

@pytest.fixture(scope='session')  # Runs once per session
def config():
    """Configuration loaded a single time for the whole test run."""
    return load_config()

Parametrized Tests

Test multiple inputs:

import pytest

# Each tuple below becomes an independent test case with its own pass/fail.
@pytest.mark.parametrize('a,b,expected', [
    (2, 3, 5),
    (-1, 1, 0),
    (0, 0, 0),
    (100, 200, 300),
])
def test_add(a, b, expected):
    """`add` returns the expected sum for every parametrized input pair."""
    assert add(a, b) == expected

One test, multiple cases!

Testing Exceptions

import pytest

def divide(a, b):
    """Return the quotient a / b.

    Raises:
        ValueError: if the divisor is zero.
    """
    if b != 0:
        return a / b
    raise ValueError("Cannot divide by zero")

def test_divide_by_zero():
    """divide() raises ValueError with the exact message on a zero divisor."""
    # `match` checks the exception message against a regex.
    with pytest.raises(ValueError, match="Cannot divide by zero"):
        divide(10, 0)

def test_divide_success():
    """Normal division returns the quotient."""
    assert divide(10, 2) == 5

Mocking

Mock external dependencies:

from unittest.mock import Mock, patch
import requests

def get_user(user_id):
    """Fetch one user record from the example API and decode the JSON body."""
    url = f'https://api.example.com/users/{user_id}'
    response = requests.get(url)
    return response.json()

def test_get_user():
    """get_user() returns the decoded JSON and hits the expected URL."""
    with patch('requests.get') as fake_get:
        fake_get.return_value.json.return_value = {'id': 1, 'name': 'John'}
        result = get_user(1)

    assert result['name'] == 'John'
    fake_get.assert_called_once_with('https://api.example.com/users/1')

Pytest-Mock

The pytest-mock plugin wraps unittest.mock in a `mocker` fixture, so patches are undone automatically after each test:

def test_get_user(mocker):
    """Same test as above via pytest-mock's `mocker`; patches auto-undo."""
    mock_get = mocker.patch('requests.get')
    mock_get.return_value.json.return_value = {'id': 1, 'name': 'John'}

    user = get_user(1)

    assert user['name'] == 'John'

Testing Database

Use an isolated in-memory test database so tests never touch production data:

import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

@pytest.fixture(scope='module')
def db_session():
    """Yield a SQLAlchemy session bound to a throwaway in-memory SQLite DB.

    NOTE(review): `Base` (the declarative base) is assumed to be imported
    elsewhere in the real project — it is not defined in this snippet.
    """
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()

    yield session

    session.close()

def test_create_user(db_session):
    """Inserting a user assigns a primary key and persists exactly one row."""
    user = User(name='John', email='john@example.com')
    db_session.add(user)
    db_session.commit()

    # Autoincrement PK is populated once the insert is flushed/committed.
    assert user.id is not None
    assert db_session.query(User).count() == 1

Testing API

Test Flask app:

import pytest
from app import create_app

@pytest.fixture
def client():
    """Flask test client bound to an app created in TESTING mode."""
    app = create_app({'TESTING': True})
    # The context manager ensures the client is cleaned up after each test.
    with app.test_client() as client:
        yield client

def test_get_users(client):
    """GET /api/users responds 200 with a non-empty JSON payload."""
    response = client.get('/api/users')
    assert response.status_code == 200
    assert len(response.json) > 0

def test_create_user(client):
    """POST /api/users creates a user and echoes it back with 201."""
    response = client.post('/api/users', json={
        'name': 'John',
        'email': 'john@example.com'
    })
    assert response.status_code == 201
    assert response.json['name'] == 'John'

Coverage

Measure test coverage:

pytest --cov=src --cov-report=html

Output:

---------- coverage: platform darwin, python 3.7.0 -----------
Name                    Stmts   Miss  Cover
-------------------------------------------
src/__init__.py             0      0   100%
src/calculator.py          10      1    90%
src/user_service.py        50      8    84%
-------------------------------------------
TOTAL                      60      9    85%

The detailed HTML report is written to htmlcov/index.html, with per-line highlighting of uncovered code.

Markers

Custom test markers:

import pytest

@pytest.mark.slow
def test_slow_operation():
    """Marked slow so it can be excluded with `pytest -m "not slow"`."""
    # Takes 10 seconds
    pass

@pytest.mark.integration
def test_api_integration():
    """Marked integration; select with `pytest -m integration`."""
    # Calls external API
    pass

@pytest.mark.skip(reason="Not implemented yet")
def test_future_feature():
    """Always skipped; the reason string appears in the test report."""
    pass

Run specific markers:

pytest -m "not slow"  # Skip slow tests
pytest -m integration  # Run only integration tests

Conftest.py

Shared fixtures:

tests/conftest.py:

import pytest

@pytest.fixture
def app():
    """Flask app in TESTING mode, auto-discovered by tests in this directory."""
    from app import create_app
    return create_app({'TESTING': True})

@pytest.fixture
def client(app):
    """Test client built on top of the `app` fixture above."""
    return app.test_client()

@pytest.fixture
def db_session():
    """Placeholder fixture — NOTE(review): `session` is not defined in this
    snippet; the real setup must create it before the yield."""
    # Database setup
    yield session
    # Teardown

Fixtures defined in conftest.py are discovered automatically and available to every test in the directory tree — no import needed.

Real-World Example

Testing user service:

# src/user_service.py
class UserService:
    """Look up users, serving from the cache before hitting the database."""

    def __init__(self, db, cache):
        self.db = db
        self.cache = cache

    def get_user(self, user_id):
        """Return the user with *user_id*, caching database hits for an hour.

        Raises:
            ValueError: if no user with that id exists.
        """
        key = f'user:{user_id}'

        # Fast path: serve a cached record when one is present.
        hit = self.cache.get(key)
        if hit:
            return hit

        # Slow path: fall back to the database.
        record = self.db.query(User).filter_by(id=user_id).first()
        if not record:
            raise ValueError(f'User {user_id} not found')

        # Remember the result for subsequent lookups (1-hour TTL).
        self.cache.set(key, record, ttl=3600)
        return record

# tests/test_user_service.py
import pytest
from unittest.mock import Mock

@pytest.fixture
def mock_db():
    """A bare Mock standing in for the database session."""
    return Mock()

@pytest.fixture
def mock_cache():
    """A Mock cache whose get() misses by default (returns None)."""
    cache = Mock()
    cache.get.return_value = None
    return cache

@pytest.fixture
def user_service(mock_db, mock_cache):
    """UserService wired with the mock collaborators above."""
    return UserService(mock_db, mock_cache)

def test_get_user_from_cache(user_service, mock_cache):
    """A cache hit is returned directly without touching the database."""
    mock_cache.get.return_value = {'id': 1, 'name': 'John'}

    user = user_service.get_user(1)

    assert user['name'] == 'John'
    mock_cache.get.assert_called_once_with('user:1')

def test_get_user_from_db(user_service, mock_db, mock_cache):
    """On a cache miss the DB result is returned and written to the cache."""
    mock_user = Mock(id=1, name='John')
    # Mirror the chained SQLAlchemy-style call: query().filter_by().first()
    mock_db.query.return_value.filter_by.return_value.first.return_value = mock_user

    user = user_service.get_user(1)

    assert user.name == 'John'
    mock_cache.set.assert_called_once()

def test_get_user_not_found(user_service, mock_db):
    """A missing user raises ValueError with the id in the message."""
    mock_db.query.return_value.filter_by.return_value.first.return_value = None

    with pytest.raises(ValueError, match='User 1 not found'):
        user_service.get_user(1)

CI Integration

.gitlab-ci.yml:

test:
  image: python:3.7
  script:
    - pip install -r requirements.txt
    - pytest --cov=src --cov-report=xml
  coverage: '/TOTAL.*\s+(\d+%)$/'
  artifacts:
    reports:
      cobertura: coverage.xml

Results

Before:

  • 0% test coverage
  • Manual testing
  • Bugs in production
  • Fear of refactoring

After:

  • 85% test coverage
  • Automated testing
  • Bugs caught early
  • Confident refactoring

Lessons Learned

  1. Start small - Don’t aim for 100% immediately
  2. Test behavior, not implementation - Focus on what, not how
  3. Use fixtures - DRY principle
  4. Mock external dependencies - Fast, reliable tests
  5. Run tests in CI - Catch issues before merge

Conclusion

Pytest makes Python testing easy and enjoyable. Start testing today.

Key takeaways:

  1. Pytest for simple, powerful testing
  2. Fixtures for reusable test data
  3. Parametrize for multiple test cases
  4. Mock external dependencies
  5. Measure and improve coverage

Tests are an investment. They pay off every single day.