PyTorch Essentials

Essential PyTorch operations and patterns for deep learning.


Installation

# CPU version
pip install torch torchvision torchaudio

# GPU version (CUDA 11.8)
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118

# Check installation
python -c "import torch; print(torch.__version__); print(torch.cuda.is_available())"
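If the GPU build installed correctly, a slightly more detailed sanity check is sketched below (device index 0 is an assumption; adjust for multi-GPU machines).

import torch

# Report visible CUDA devices, or fall back gracefully on CPU-only installs
if torch.cuda.is_available():
    print(f"CUDA devices: {torch.cuda.device_count()}")
    print(f"Device 0: {torch.cuda.get_device_name(0)}")
else:
    print("Running on CPU only")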

Basic Tensor Operations

import torch

# Create tensors
scalar = torch.tensor(42)
vector = torch.tensor([1, 2, 3])
matrix = torch.tensor([[1, 2], [3, 4]])

# Random tensors
x = torch.rand(3, 4)
y = torch.randn(3, 4)  # Normal distribution
z = torch.zeros(3, 4)
ones = torch.ones(3, 4)

# Operations
a = torch.tensor([[1., 2.], [3., 4.]])
b = torch.tensor([[5., 6.], [7., 8.]])

c = a + b
d = a * b  # Element-wise
e = torch.matmul(a, b)  # Matrix multiplication
f = a @ b  # Also matrix multiplication

# GPU operations
if torch.cuda.is_available():
    device = torch.device('cuda')
    a = a.to(device)
    b = b.to(device)
    c = a + b
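A common follow-up, shown here as a small sketch rather than part of the original snippet, is moving a GPU result back to the CPU, which is required before converting to NumPy:

# NumPy conversion only works on CPU tensors, so move back first if needed
result = c.cpu().numpy() if c.is_cuda else c.numpy()
print(result)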

Building Models

import torch.nn as nn
import torch.nn.functional as F

class SimpleNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        x = x.view(-1, 784)  # Flatten
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return F.log_softmax(x, dim=1)

model = SimpleNet()
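A quick way to sanity-check the architecture is a dummy forward pass and a parameter count. This is an illustrative sketch: the batch size of 1 and the MNIST-style 28x28 input are assumptions, not part of the original.

dummy = torch.randn(1, 1, 28, 28)   # fake MNIST-style input; forward() flattens it to 784 features
out = model(dummy)
print(out.shape)                    # torch.Size([1, 10])

num_params = sum(p.numel() for p in model.parameters())
print(f"Trainable parameters: {num_params:,}")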

Training Loop

import torch.optim as optim

# Setup
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = SimpleNet().to(device)
criterion = nn.NLLLoss()  # SimpleNet returns log_softmax, so use NLLLoss; CrossEntropyLoss expects raw logits
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training (num_epochs, train_loader and test_loader are assumed to be defined elsewhere)
model.train()
for epoch in range(num_epochs):
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        # Forward pass
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)

        # Backward pass
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print(f'Epoch: {epoch}, Batch: {batch_idx}, Loss: {loss.item():.4f}')

# Evaluation
model.eval()
with torch.no_grad():
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        # ... calculate metrics
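One way to fill in that metrics step, sketched here rather than taken from the original, is top-1 accuracy over the test set:

correct, total = 0, 0
model.eval()
with torch.no_grad():
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        pred = output.argmax(dim=1)              # class with the highest log-probability
        correct += (pred == target).sum().item()
        total += target.size(0)

print(f'Test accuracy: {correct / total:.4f}')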

DataLoader

from torch.utils.data import Dataset, DataLoader

class CustomDataset(Dataset):
    def __init__(self, data, labels):
        self.data = data
        self.labels = labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.labels[idx]

dataset = CustomDataset(x_train, y_train)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
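The snippet above assumes x_train and y_train already exist. As a self-contained usage sketch, random tensors can stand in for them:

x_train = torch.randn(1000, 784)           # fake features
y_train = torch.randint(0, 10, (1000,))    # fake class labels

dataset = CustomDataset(x_train, y_train)
loader = DataLoader(dataset, batch_size=32, shuffle=True)

for batch_x, batch_y in loader:
    print(batch_x.shape, batch_y.shape)    # torch.Size([32, 784]) torch.Size([32])
    break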

Saving and Loading

# Save model weights (recommended: save the state_dict)
torch.save(model.state_dict(), 'model.pth')

# Load model
model = SimpleNet()
model.load_state_dict(torch.load('model.pth'))
model.eval()

# Save entire model (less portable; the class definition must be importable when loading)
torch.save(model, 'entire_model.pth')
loaded_model = torch.load('entire_model.pth')

# Save checkpoint
torch.save({
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'loss': loss,
}, 'checkpoint.pth')
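Restoring from that checkpoint mirrors the save; the sketch below assumes the same key names as the dict above and a device defined as in the training loop.

checkpoint = torch.load('checkpoint.pth', map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1   # resume from the next epoch
model.train()                           # or model.eval() for inference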

TensorBoard

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('runs/experiment_1')

for epoch in range(num_epochs):
    # ... training ...
    writer.add_scalar('Loss/train', loss.item(), epoch)
    writer.add_scalar('Accuracy/train', accuracy, epoch)

writer.close()

# View: tensorboard --logdir=runs
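Beyond scalars, the same writer can also log the model graph and weight histograms. This is a sketch: it assumes the SimpleNet model from earlier, a dummy 784-feature input, and an epoch counter taken from the training loop.

# Log the computation graph once, using a dummy input
writer.add_graph(model, torch.randn(1, 784))

# Log weight histograms (call once per epoch inside the training loop)
for name, param in model.named_parameters():
    writer.add_histogram(name, param, epoch)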
