"""
Debugging and Profiling - Exercises
Practice debugging techniques and performance profiling.
"""
import contextlib
import functools
import time
import traceback
import tracemalloc
from collections.abc import Callable
from dataclasses import dataclass
from datetime import datetime
import pytest
# =============================================================================
# EXERCISE 1: Debug Logger
# =============================================================================
class DebugLogger:
"""
Create a debug logger that:
- Supports different log levels (DEBUG, INFO, WARNING, ERROR)
- Can filter messages by level
- Tracks the number of messages at each level
- Can format messages with timestamps
Example:
logger = DebugLogger(level="INFO")
logger.debug("Debug message") # Not logged (below level)
logger.info("Info message") # Logged
logger.warning("Warning!") # Logged
print(logger.count("INFO")) # 1
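    One possible solution sketch appears after this class.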
"""
LEVELS = {"DEBUG": 0, "INFO": 1, "WARNING": 2, "ERROR": 3}
def __init__(self, level: str = "DEBUG"):
self.level = level
self.messages: list[dict] = []
self.counts: dict[str, int] = {level: 0 for level in self.LEVELS}
def _should_log(self, level: str) -> bool:
"""Check if message should be logged based on level."""
# TODO: Implement level checking
pass
def _log(self, level: str, message: str) -> None:
"""Log a message at the specified level."""
# TODO: Implement logging
# - Check if should log
# - Add timestamp
# - Track count
# - Store message
pass
def debug(self, message: str) -> None:
"""Log a debug message."""
self._log("DEBUG", message)
def info(self, message: str) -> None:
"""Log an info message."""
self._log("INFO", message)
def warning(self, message: str) -> None:
"""Log a warning message."""
self._log("WARNING", message)
def error(self, message: str) -> None:
"""Log an error message."""
self._log("ERROR", message)
def count(self, level: str) -> int:
"""Get count of messages at level."""
return self.counts.get(level, 0)
    def get_messages(self, level: str | None = None) -> list[dict]:
"""Get logged messages, optionally filtered by level."""
if level is None:
return self.messages
return [m for m in self.messages if m["level"] == level]
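# --- Possible solution sketch for Exercise 1 (one of many valid designs) ---
# Subclassing the stub keeps the exercise intact; the "Sketch" suffix is an
# editorial label, not part of the exercise. Timestamps here use
# datetime.now().isoformat(), which is one reasonable choice.
class DebugLoggerSketch(DebugLogger):
    def _should_log(self, level: str) -> bool:
        # Log when the message level is at or above the configured level.
        return self.LEVELS[level] >= self.LEVELS[self.level]
    def _log(self, level: str, message: str) -> None:
        if not self._should_log(level):
            return
        self.counts[level] += 1
        self.messages.append({
            "level": level,
            "message": message,
            "timestamp": datetime.now().isoformat(),
        })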
class TestDebugLogger:
"""Tests for DebugLogger."""
def test_log_levels(self):
"""Test that log levels filter correctly."""
logger = DebugLogger(level="INFO")
logger.debug("debug")
logger.info("info")
logger.warning("warning")
logger.error("error")
assert logger.count("DEBUG") == 0 # Filtered
assert logger.count("INFO") == 1
assert logger.count("WARNING") == 1
assert logger.count("ERROR") == 1
def test_message_format(self):
"""Test that messages include timestamps."""
logger = DebugLogger()
logger.info("test message")
messages = logger.get_messages()
assert len(messages) == 1
assert "timestamp" in messages[0]
assert messages[0]["message"] == "test message"
assert messages[0]["level"] == "INFO"
def test_filter_by_level(self):
"""Test filtering messages by level."""
logger = DebugLogger()
logger.debug("d1")
logger.info("i1")
logger.debug("d2")
logger.error("e1")
debug_msgs = logger.get_messages("DEBUG")
assert len(debug_msgs) == 2
# =============================================================================
# EXERCISE 2: Function Profiler
# =============================================================================
class FunctionProfiler:
"""
Create a function profiler that:
- Tracks how many times each function is called
- Records execution time for each call
- Calculates min, max, avg execution times
- Can be used as a decorator
Example:
profiler = FunctionProfiler()
@profiler.profile
def slow_function():
time.sleep(0.1)
return "done"
slow_function()
slow_function()
stats = profiler.get_stats("slow_function")
assert stats.calls == 2
assert stats.avg_time > 0.1
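    One possible solution sketch appears after the ProfileStats dataclass below.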
"""
def __init__(self):
self.stats: dict[str, dict] = {}
def profile(self, func):
"""Decorator to profile a function."""
# TODO: Implement profiling decorator
# - Track call count
# - Measure execution time
# - Update min/max/total time
pass
def get_stats(self, func_name: str):
"""Get statistics for a function."""
# TODO: Return stats as named tuple or dataclass
pass
def report(self) -> str:
"""Generate a profiling report."""
# TODO: Format stats as a readable report
pass
@dataclass
class ProfileStats:
"""Statistics for a profiled function."""
name: str
calls: int
total_time: float
min_time: float
max_time: float
@property
def avg_time(self) -> float:
return self.total_time / self.calls if self.calls > 0 else 0
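# --- Possible solution sketch for Exercise 2 (one of many valid designs) ---
# Uses time.perf_counter() for timing and the ProfileStats dataclass above.
# The internal dict layout ("calls"/"total"/"min"/"max") is this sketch's
# choice; any per-function bookkeeping works.
class FunctionProfilerSketch(FunctionProfiler):
    def profile(self, func):
        @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            try:
                return func(*args, **kwargs)
            finally:
                # Record timing even if the function raises.
                elapsed = time.perf_counter() - start
                entry = self.stats.setdefault(
                    func.__name__,
                    {"calls": 0, "total": 0.0, "min": float("inf"), "max": 0.0},
                )
                entry["calls"] += 1
                entry["total"] += elapsed
                entry["min"] = min(entry["min"], elapsed)
                entry["max"] = max(entry["max"], elapsed)
        return wrapper
    def get_stats(self, func_name: str) -> ProfileStats:
        entry = self.stats[func_name]
        return ProfileStats(
            name=func_name,
            calls=entry["calls"],
            total_time=entry["total"],
            min_time=entry["min"],
            max_time=entry["max"],
        )
    def report(self) -> str:
        lines = [f"{'function':<24}{'calls':>6}{'avg (s)':>10}"]
        for name in self.stats:
            s = self.get_stats(name)
            lines.append(f"{s.name:<24}{s.calls:>6}{s.avg_time:>10.4f}")
        return "\n".join(lines)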
class TestFunctionProfiler:
"""Tests for FunctionProfiler."""
def test_call_counting(self):
"""Test that calls are counted correctly."""
profiler = FunctionProfiler()
@profiler.profile
def test_func():
return 42
test_func()
test_func()
test_func()
stats = profiler.get_stats("test_func")
assert stats.calls == 3
def test_timing_accuracy(self):
"""Test that timing is reasonably accurate."""
profiler = FunctionProfiler()
@profiler.profile
def timed_func():
time.sleep(0.05)
timed_func()
stats = profiler.get_stats("timed_func")
# Allow some tolerance
assert stats.avg_time >= 0.04
assert stats.avg_time <= 0.15
def test_min_max_tracking(self):
"""Test min/max time tracking."""
profiler = FunctionProfiler()
@profiler.profile
def variable_time(seconds: float):
time.sleep(seconds)
variable_time(0.01)
variable_time(0.05)
variable_time(0.02)
stats = profiler.get_stats("variable_time")
assert stats.min_time < stats.max_time
assert stats.min_time >= 0.01
assert stats.max_time >= 0.04
# =============================================================================
# EXERCISE 3: Exception Tracker
# =============================================================================
class ExceptionTracker:
"""
Create an exception tracker that:
- Captures exceptions with full context
- Groups exceptions by type
- Provides statistics on exception frequency
- Can replay exceptions for debugging
Example:
tracker = ExceptionTracker()
try:
x = 1 / 0
except Exception as e:
tracker.capture(e, context={"operation": "division"})
stats = tracker.get_stats()
assert stats["ZeroDivisionError"] == 1
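    One possible solution sketch appears after this class.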
"""
def __init__(self):
self.exceptions: list[dict] = []
    def capture(self, exception: Exception, context: dict | None = None) -> None:
"""Capture an exception with context."""
# TODO: Store exception with:
# - Type name
# - Message
# - Traceback
# - Context dict
# - Timestamp
pass
def get_stats(self) -> dict[str, int]:
"""Get exception counts by type."""
# TODO: Return dict of exception type -> count
pass
def get_by_type(self, exc_type: str) -> list[dict]:
"""Get all exceptions of a specific type."""
# TODO: Filter exceptions by type name
pass
def clear(self) -> None:
"""Clear all captured exceptions."""
self.exceptions = []
def replay(self, index: int) -> None:
"""Re-raise a captured exception for debugging."""
# TODO: Re-raise the exception at index
pass
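# --- Possible solution sketch for Exercise 3 (one of many valid designs) ---
# Keeps the exception object itself so replay() can re-raise it. The
# three-argument traceback.format_exception call is used because it works on
# older Python versions as well as current ones.
class ExceptionTrackerSketch(ExceptionTracker):
    def capture(self, exception: Exception, context: dict | None = None) -> None:
        self.exceptions.append({
            "type": type(exception).__name__,
            "message": str(exception),
            "traceback": "".join(traceback.format_exception(
                type(exception), exception, exception.__traceback__
            )),
            "exception": exception,  # stored so replay() can re-raise it
            "context": context or {},
            "timestamp": datetime.now().isoformat(),
        })
    def get_stats(self) -> dict[str, int]:
        stats: dict[str, int] = {}
        for exc in self.exceptions:
            stats[exc["type"]] = stats.get(exc["type"], 0) + 1
        return stats
    def get_by_type(self, exc_type: str) -> list[dict]:
        return [e for e in self.exceptions if e["type"] == exc_type]
    def replay(self, index: int) -> None:
        raise self.exceptions[index]["exception"]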
class TestExceptionTracker:
"""Tests for ExceptionTracker."""
def test_capture_exception(self):
"""Test capturing an exception."""
tracker = ExceptionTracker()
try:
raise ValueError("test error")
except Exception as e:
tracker.capture(e, context={"test": True})
assert len(tracker.exceptions) == 1
assert tracker.exceptions[0]["type"] == "ValueError"
assert tracker.exceptions[0]["message"] == "test error"
def test_exception_stats(self):
"""Test exception statistics."""
tracker = ExceptionTracker()
for _ in range(3):
try:
raise ValueError("value error")
except Exception as e:
tracker.capture(e)
for _ in range(2):
try:
raise TypeError("type error")
except Exception as e:
tracker.capture(e)
stats = tracker.get_stats()
assert stats["ValueError"] == 3
assert stats["TypeError"] == 2
def test_filter_by_type(self):
"""Test filtering exceptions by type."""
tracker = ExceptionTracker()
try:
raise ValueError("v1")
except Exception as e:
tracker.capture(e)
try:
raise TypeError("t1")
except Exception as e:
tracker.capture(e)
try:
raise ValueError("v2")
except Exception as e:
tracker.capture(e)
value_errors = tracker.get_by_type("ValueError")
assert len(value_errors) == 2
# =============================================================================
# EXERCISE 4: Memory Tracker
# =============================================================================
class MemoryTracker:
"""
Create a memory tracker that:
- Takes snapshots of memory usage
- Tracks memory changes over time
- Identifies memory leaks
- Reports memory growth
Note: This is a simplified version for learning.
Real memory tracking would use tracemalloc.
Example:
tracker = MemoryTracker()
tracker.snapshot("before")
        large_list = list(range(1_000_000))
tracker.snapshot("after")
growth = tracker.get_growth()
print(f"Memory grew by: {growth} bytes")
"""
def __init__(self):
self.snapshots: list[dict] = []
def snapshot(self, label: str = "") -> dict:
"""Take a memory snapshot."""
        # TODO: Capture current memory usage
        # tracemalloc.get_traced_memory() is the portable option;
        # resource.getrusage also works on Unix
pass
    def get_growth(self, start_label: str | None = None, end_label: str | None = None) -> int:
"""Calculate memory growth between snapshots."""
# TODO: Return difference in bytes
pass
def report(self) -> str:
"""Generate memory usage report."""
# TODO: Format snapshots as readable report
pass
class TestMemoryTracker:
"""Tests for MemoryTracker."""
def test_snapshot(self):
"""Test taking a snapshot."""
tracker = MemoryTracker()
snap = tracker.snapshot("test")
assert "label" in snap
assert "timestamp" in snap
assert "memory_bytes" in snap
def test_multiple_snapshots(self):
"""Test tracking multiple snapshots."""
tracker = MemoryTracker()
tracker.snapshot("start")
tracker.snapshot("middle")
tracker.snapshot("end")
assert len(tracker.snapshots) == 3
# =============================================================================
# EXERCISE 5: Breakpoint Manager
# =============================================================================
class BreakpointManager:
"""
Create a breakpoint manager that:
- Allows setting conditional breakpoints
- Can enable/disable breakpoints
- Logs when breakpoints are hit
- Supports breakpoint counts (break after N hits)
Note: This simulates breakpoints for learning.
Real debugging would use pdb or IDE debuggers.
Example:
bp = BreakpointManager()
bp.add("check_value", condition=lambda x: x > 100)
bp.add("loop_iter", count=5) # Break every 5th iteration
for i in range(20):
if bp.check("loop_iter"):
print(f"Breakpoint hit at iteration {i}")
"""
def __init__(self, enabled: bool = True):
self.enabled = enabled
self.breakpoints: dict[str, dict] = {}
self.hits: dict[str, int] = {}
    def add(self, name: str, condition: Callable[..., bool] | None = None, count: int | None = None) -> None:
"""Add a breakpoint."""
# TODO: Store breakpoint with:
# - condition (callable)
# - count (break every N hits)
# - enabled state
pass
def remove(self, name: str) -> None:
"""Remove a breakpoint."""
# TODO: Remove breakpoint by name
pass
def enable(self, name: str = None) -> None:
"""Enable breakpoint(s)."""
# TODO: Enable specific or all breakpoints
pass
def disable(self, name: str = None) -> None:
"""Disable breakpoint(s)."""
# TODO: Disable specific or all breakpoints
pass
def check(self, name: str, *args, **kwargs) -> bool:
"""Check if breakpoint should trigger."""
# TODO: Check condition and count
# Return True if breakpoint should trigger
pass
def get_hits(self, name: str) -> int:
"""Get hit count for a breakpoint."""
return self.hits.get(name, 0)
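# --- Possible solution sketch for Exercise 5 (one of many valid designs) ---
# In this sketch, hits are counted only when the breakpoint is enabled and its
# condition (if any) passes; with count=N the breakpoint fires on every Nth
# hit. These semantics match the tests below, but other conventions exist.
class BreakpointManagerSketch(BreakpointManager):
    def add(self, name: str, condition: Callable[..., bool] | None = None, count: int | None = None) -> None:
        self.breakpoints[name] = {"condition": condition, "count": count, "enabled": True}
        self.hits[name] = 0
    def remove(self, name: str) -> None:
        self.breakpoints.pop(name, None)
        self.hits.pop(name, None)
    def enable(self, name: str | None = None) -> None:
        for n in ([name] if name else self.breakpoints):
            self.breakpoints[n]["enabled"] = True
    def disable(self, name: str | None = None) -> None:
        for n in ([name] if name else self.breakpoints):
            self.breakpoints[n]["enabled"] = False
    def check(self, name: str, *args, **kwargs) -> bool:
        bp = self.breakpoints.get(name)
        if bp is None or not self.enabled or not bp["enabled"]:
            return False
        if bp["condition"] is not None and not bp["condition"](*args, **kwargs):
            return False
        self.hits[name] += 1
        if bp["count"] is not None:
            return self.hits[name] % bp["count"] == 0
        return True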
class TestBreakpointManager:
"""Tests for BreakpointManager."""
def test_simple_breakpoint(self):
"""Test a simple breakpoint."""
bp = BreakpointManager()
bp.add("test")
        assert bp.check("test")
assert bp.get_hits("test") == 1
def test_conditional_breakpoint(self):
"""Test conditional breakpoint."""
bp = BreakpointManager()
bp.add("value_check", condition=lambda x: x > 10)
        assert not bp.check("value_check", 5)
        assert bp.check("value_check", 15)
def test_count_breakpoint(self):
"""Test breakpoint with count."""
bp = BreakpointManager()
bp.add("every_third", count=3)
results = [bp.check("every_third") for _ in range(9)]
# Should trigger on 3rd, 6th, 9th hit
expected = [False, False, True, False, False, True, False, False, True]
assert results == expected
def test_disable_breakpoint(self):
"""Test disabling a breakpoint."""
bp = BreakpointManager()
bp.add("toggle")
        assert bp.check("toggle")
        bp.disable("toggle")
        assert not bp.check("toggle")
        bp.enable("toggle")
        assert bp.check("toggle")
# =============================================================================
# EXERCISE 6: Performance Analyzer
# =============================================================================
class PerformanceAnalyzer:
"""
Create a performance analyzer that:
- Identifies slow operations
- Tracks operation trends over time
- Provides optimization suggestions
- Generates performance reports
Example:
analyzer = PerformanceAnalyzer(threshold_ms=100)
with analyzer.track("database_query"):
result = slow_database_query()
slow_ops = analyzer.get_slow_operations()
if slow_ops:
print("Slow operations detected!")
"""
def __init__(self, threshold_ms: float = 100):
self.threshold_ms = threshold_ms
self.operations: list[dict] = []
def track(self, name: str):
"""Context manager to track an operation."""
# TODO: Return context manager that tracks:
# - Operation name
# - Start time
# - End time
# - Duration
# - Whether it exceeded threshold
pass
def record(self, name: str, duration_ms: float) -> None:
"""Manually record an operation."""
# TODO: Store operation with timestamp and slow flag
pass
def get_slow_operations(self) -> list[dict]:
"""Get all operations that exceeded threshold."""
# TODO: Filter operations by threshold
pass
def get_average(self, name: str) -> float:
"""Get average duration for an operation type."""
# TODO: Calculate average duration
pass
def get_suggestions(self) -> list[str]:
"""Get optimization suggestions based on data."""
# TODO: Analyze patterns and suggest optimizations
pass
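# --- Possible solution sketch for Exercise 6 (one of many valid designs) ---
# track() is a generator-based context manager built with
# contextlib.contextmanager; the "slow" flag and the wording of the
# suggestions are this sketch's choices.
class PerformanceAnalyzerSketch(PerformanceAnalyzer):
    @contextlib.contextmanager
    def track(self, name: str):
        start = time.perf_counter()
        try:
            yield
        finally:
            # Record the duration even if the tracked block raises.
            self.record(name, (time.perf_counter() - start) * 1000)
    def record(self, name: str, duration_ms: float) -> None:
        self.operations.append({
            "name": name,
            "duration_ms": duration_ms,
            "timestamp": datetime.now().isoformat(),
            "slow": duration_ms > self.threshold_ms,
        })
    def get_slow_operations(self) -> list[dict]:
        return [op for op in self.operations if op["slow"]]
    def get_average(self, name: str) -> float:
        durations = [op["duration_ms"] for op in self.operations if op["name"] == name]
        return sum(durations) / len(durations) if durations else 0.0
    def get_suggestions(self) -> list[str]:
        return [
            f"'{name}' averages {self.get_average(name):.1f} ms "
            f"(threshold {self.threshold_ms} ms); consider caching or batching."
            for name in sorted({op["name"] for op in self.operations})
            if self.get_average(name) > self.threshold_ms
        ]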
class TestPerformanceAnalyzer:
"""Tests for PerformanceAnalyzer."""
def test_track_operation(self):
"""Test tracking an operation."""
analyzer = PerformanceAnalyzer(threshold_ms=10)
with analyzer.track("test_op"):
time.sleep(0.001) # 1ms
assert len(analyzer.operations) == 1
assert analyzer.operations[0]["name"] == "test_op"
def test_detect_slow_operation(self):
"""Test detecting slow operations."""
analyzer = PerformanceAnalyzer(threshold_ms=10)
with analyzer.track("fast_op"):
pass # Very fast
with analyzer.track("slow_op"):
time.sleep(0.05) # 50ms
slow = analyzer.get_slow_operations()
assert len(slow) == 1
assert slow[0]["name"] == "slow_op"
def test_average_calculation(self):
"""Test average duration calculation."""
analyzer = PerformanceAnalyzer()
analyzer.record("query", 100)
analyzer.record("query", 150)
analyzer.record("query", 50)
avg = analyzer.get_average("query")
assert avg == 100.0
# =============================================================================
# EXERCISE 7: Debug Decorator Suite
# =============================================================================
def create_debug_decorator(
log_args: bool = True,
log_result: bool = True,
log_time: bool = True,
log_exceptions: bool = True
):
"""
Create a configurable debug decorator that can:
- Log function arguments
- Log return values
- Log execution time
- Log and re-raise exceptions
Example:
@create_debug_decorator(log_args=True, log_time=True)
def my_function(x, y):
return x + y
my_function(1, 2)
# Logs: "Calling my_function(1, 2)"
# Logs: "my_function took 0.001s"
# Logs: "my_function returned 3"
"""
# TODO: Implement configurable debug decorator
pass
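# --- Possible solution sketch for Exercise 7 (one of many valid designs) ---
# Logs via print() for simplicity; a real version would likely use the
# logging module. functools.wraps preserves the wrapped function's metadata,
# which the tests above require.
def create_debug_decorator_sketch(
    log_args: bool = True,
    log_result: bool = True,
    log_time: bool = True,
    log_exceptions: bool = True,
):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if log_args:
                arg_str = ", ".join(
                    [repr(a) for a in args]
                    + [f"{k}={v!r}" for k, v in kwargs.items()]
                )
                print(f"Calling {func.__name__}({arg_str})")
            start = time.perf_counter()
            try:
                result = func(*args, **kwargs)
            except Exception as exc:
                if log_exceptions:
                    print(f"{func.__name__} raised {type(exc).__name__}: {exc}")
                raise  # always re-raise so callers still see the error
            if log_time:
                print(f"{func.__name__} took {time.perf_counter() - start:.3f}s")
            if log_result:
                print(f"{func.__name__} returned {result!r}")
            return result
        return wrapper
    return decorator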
class TestDebugDecorator:
"""Tests for debug decorator."""
def test_basic_functionality(self):
"""Test decorated function still works."""
@create_debug_decorator()
def add(a, b):
return a + b
assert add(1, 2) == 3
def test_preserves_function_metadata(self):
"""Test decorator preserves function metadata."""
@create_debug_decorator()
def documented_function():
"""This function is documented."""
pass
assert documented_function.__doc__ == "This function is documented."
assert documented_function.__name__ == "documented_function"
def test_exception_handling(self):
"""Test exception is logged and re-raised."""
@create_debug_decorator(log_exceptions=True)
def error_function():
raise ValueError("test error")
with pytest.raises(ValueError):
error_function()
# =============================================================================
# MAIN
# =============================================================================
if __name__ == "__main__":
pytest.main([__file__, "-v"])