PyTorch Basics (Python)

pytorch basics

data science ml

Run notebook
Concept Lesson
Advanced
13 min

Learning Objective

Understand PyTorch basics well enough to explain them, recognize them in Python code, and apply them in a small task.

Why It Matters

This concept is part of the foundation that later lessons and projects assume you already understand.

PyTorch · Science · Tensor Creation · Tensor Operations · Autograd (Automatic Differentiation)
Private notes
0/8000

Notes stay private to your browser until account sync is configured.

PyTorch Basics
1 min read · 18 headings

Converted from 03_pytorch_basics.ipynb for web reading.

Code cell 1

# Environment check: import the libraries used throughout the notebook and
# report the installed PyTorch build plus GPU availability.
import numpy as np
import torch
import torch.nn as nn

torch_version = torch.__version__
cuda_available = torch.cuda.is_available()
print(f"PyTorch version: {torch_version}")
print(f"CUDA available: {cuda_available}")

Tensor Creation

Code cell 3

# Build a 1-D tensor from a plain Python list and inspect its metadata.
# Float literals make torch infer the default float32 dtype.
values = [1.0, 2.0, 3.0, 4.0, 5.0]
tensor = torch.tensor(values)
print(f"Tensor: {tensor}")
print(f"Shape: {tensor.shape}")
print(f"Dtype: {tensor.dtype}")
print(f"Device: {tensor.device}")

Code cell 4

# Common tensor constructors, all producing the same (2, 3) shape.
shape = (2, 3)
zeros = torch.zeros(shape)
ones = torch.ones(shape)
randn = torch.randn(shape)  # samples drawn from a standard normal distribution

print(f"Zeros:\n{zeros}")
print(f"\nOnes:\n{ones}")
print(f"\nRandom Normal:\n{randn}")

Code cell 5

# Round-trip between NumPy and PyTorch.
# NOTE: from_numpy() and .numpy() share the underlying buffer — no copy is made.
np_array = np.array([1, 2, 3, 4, 5])
tensor_from_np = torch.from_numpy(np_array)
print(f"From NumPy: {tensor_from_np}")

back_to_np = tensor_from_np.numpy()
print(f"Back to NumPy: {back_to_np}")

Tensor Operations

Code cell 7

# Elementwise arithmetic and the dot product on two float vectors.
a = torch.tensor([1, 2, 3], dtype=torch.float32)
b = torch.tensor([4, 5, 6], dtype=torch.float32)

# Print operands first, then each operation's result.
print(f"a = {a}")
print(f"b = {b}")
print(f"a + b = {a + b}")
print(f"a * b = {a * b}")  # elementwise (Hadamard) product
print(f"a @ b (dot) = {a @ b}")  # 1*4 + 2*5 + 3*6 = 32

Code cell 8

# Matrix multiplication: (2, 3) @ (3, 4) -> (2, 4).
A = torch.randn(2, 3)
B = torch.randn(3, 4)
C = torch.matmul(A, B)  # equivalent to A @ B

for label, mat in (("A", A), ("B", B)):
    print(f"{label} shape: {mat.shape}")
print(f"A @ B shape: {C.shape}")

Autograd (Automatic Differentiation)

Code cell 10

# Leaf tensor with gradient tracking enabled so autograd records
# every operation applied to it.
x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
print(f"x: {x}")
print(f"requires_grad: {x.requires_grad}")

Code cell 11

# Forward pass: square each element of x, then reduce to a scalar so
# backward() can later be called without an explicit gradient argument.
y = x.pow(2)
print(f"y = x^2: {y}")

z = y.sum()
print(f"z = sum(y): {z}")

Code cell 12

# Backward pass: populates .grad on every leaf tensor that requires grad.
z.backward()

# Analytically, d(sum(x^2))/dx = 2x, so x.grad should equal 2 * x.
print(f"x.grad (should be 2x): {x.grad}")
print(f"Expected: {x.detach() * 2}")

Simple Neural Network

Code cell 14

class SimpleNet(nn.Module):
    """A minimal two-layer MLP: Linear -> ReLU -> Linear.

    Attribute names (fc1, relu, fc2) are kept stable because they appear
    in the module's printed repr.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Compose the layers as a single expression: fc1 -> relu -> fc2.
        return self.fc2(self.relu(self.fc1(x)))


model = SimpleNet(input_size=10, hidden_size=5, output_size=2)
print(model)

Code cell 15

# Sanity-check the untrained model with a random mini-batch.
sample_input = torch.randn(3, 10)  # a batch of 3 examples, 10 features each
output = model(sample_input)

for label, value in (("Input shape", sample_input.shape),
                     ("Output shape", output.shape)):
    print(f"{label}: {value}")
print(f"Output:\n{output}")

Training Loop Example

Code cell 17

# Synthetic binary-classification dataset: 100 samples with 10 features each.
torch.manual_seed(42)  # fixed seed for reproducibility
n_samples, n_features = 100, 10
X = torch.randn(n_samples, n_features)
y = torch.randint(0, 2, (n_samples,))  # labels in {0, 1}

print(f"X shape: {X.shape}")
print(f"y shape: {y.shape}")
print(f"y unique values: {y.unique()}")

Code cell 18

# Assemble a fresh model, loss function, and optimizer, then run 100 epochs
# of full-batch gradient descent on the synthetic dataset.
model = SimpleNet(input_size=10, hidden_size=5, output_size=2)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

for epoch in range(100):
    # Forward pass over the entire dataset.
    outputs = model(X)
    loss = criterion(outputs, y)

    # Backward pass: clear stale gradients, backprop, update parameters.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Report loss and training accuracy every 20 epochs.
    if (epoch + 1) % 20 == 0:
        predicted = outputs.argmax(dim=1)
        accuracy = predicted.eq(y).float().mean()
        print(f"Epoch [{epoch+1}/100], Loss: {loss.item():.4f}, Accuracy: {accuracy:.2%}")

Code cell 19

# Inference: switch to eval mode and disable gradient tracking,
# then pick the highest-scoring class per sample.
model.eval()
with torch.no_grad():
    test_input = torch.randn(5, 10)
    predictions = model(test_input)
    predicted_classes = predictions.argmax(dim=1)

print(f"Test input shape: {test_input.shape}")
print(f"Predicted classes: {predicted_classes}")

Skill Check

Test this lesson

Answer 4 quick questions to lock in the lesson and feed your adaptive practice queue.

--
Score
0/4
Answered
Not attempted
Status
1

Which module does this lesson belong to?

2

Which section is covered in this lesson content?

3

Which term is most central to this lesson?

4

What is the best way to use this lesson for real learning?

Your answers save locally first, then sync when account storage is available.
Practice queue