- toc
{:toc}
# Pipeline Batch Training Pattern
This is the most fundamental pattern for running machine learning and deep learning training. Training in machine learning and deep learning can be broken down into the following stages:
- Data collection
- Data preprocessing
- Model training
- Model evaluation
- Building the model into the prediction server
- Recording the model, server, and evaluation results
As shown above, machine learning training is split into several processes. The pattern that executes these processes sequentially to carry out training is the pipeline batch training pattern. Because the processes are separated, you can record progress during training, easily reuse or partially modify individual steps, and improve performance through parallel processing. In addition, each process can pin the libraries it uses, and building the pipeline allows training and inference to be automated.
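To make the idea concrete, the sketch below runs the stages above in sequence while logging progress between them. It is a minimal sketch: the stage functions and the `run_pipeline` helper are hypothetical stand-ins, not part of any specific library.

```python
import logging

logging.basicConfig(level=logging.INFO)

# Hypothetical stage functions; each consumes the previous stage's output
def collect_data(_):
    return "raw-data"

def preprocess(raw):
    return f"clean({raw})"

def train(clean):
    return f"model({clean})"

def evaluate(model):
    return f"metrics({model})"

def run_pipeline(stages):
    """Run each stage in order, logging progress so partial reruns are easy to trace."""
    result = None
    for stage in stages:
        logging.info("running stage: %s", stage.__name__)
        result = stage(result)
    return result

run_pipeline([collect_data, preprocess, train, evaluate])
```

Because each stage is an ordinary function, a single stage can be swapped out, rerun from its last recorded output, or dispatched to a separate worker for parallel execution.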
## When to use the pipeline batch training pattern (advantages)
- When the pipeline is split into tasks and a library is chosen for each process
- When the processes are also to be reused for other purposes
- When you want to record data state and progress logs for each process
- When you want to handle the execution of each process individually (see the sketch after this list)
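For example, handling each process individually can be as simple as giving every stage its own command-line entry point. The sketch below assumes a hypothetical stand-alone `preprocess.py` stage script; the file paths and the lowercasing body are placeholders for real preprocessing logic.

```python
# preprocess.py -- a hypothetical stand-alone stage script
import argparse

def preprocess(input_path: str, output_path: str) -> None:
    # Placeholder for the real preprocessing logic
    with open(input_path) as src, open(output_path, "w") as dst:
        dst.write(src.read().lower())

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="data preprocessing stage")
    parser.add_argument("--input", required=True)
    parser.add_argument("--output", required=True)
    args = parser.parse_args()
    preprocess(args.input, args.output)
```

It can then be run and rerun on its own, e.g. `python preprocess.py --input raw.csv --output clean.csv`, independently of the other stages.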
## Disadvantages
- Although individual tasks run independently, the number of conditions that must be checked to preserve that independence grows, so code management becomes more complex.
- There are more considerations around the ordering of tasks.
The overall picture of the service is shown below; within it, the pipeline batch training pattern is used in the training pipeline.
- The overall code structure can be understood through the PyTorch tutorial. The Fashion-MNIST dataset is used as the data.
- Load the Fashion-MNIST dataset to collect the training data.
```python
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt

training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor()
)

test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor()
)
```

- Unlike the Fashion-MNIST case above, when training runs on a dataset defined for each task, a custom `Dataset` is created for use in training.
```python
import os
import pandas as pd
from torchvision.io import read_image

class CustomImageDataset(Dataset):
    def __init__(self, annotations_file, img_dir, transform=None, target_transform=None):
        self.img_labels = pd.read_csv(annotations_file)
        self.img_dir = img_dir
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.img_labels)

    def __getitem__(self, idx):
        img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
        image = read_image(img_path)
        label = self.img_labels.iloc[idx, 1]
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label
```

- In the Fashion-MNIST loading step above, the transform is applied as the data is loaded. In general, however, you load the training data first and then preprocess it, applying data augmentation, normalization, and similar steps to make it suitable for training.
```python
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torchvision import transforms

# Example paths for the custom dataset defined above
annotations_file = 'labels.csv'
img_dir = 'images/'

# Stratified train/validation split of the annotation table by label
labels_df = pd.read_csv(annotations_file)
train_df, val_df = train_test_split(labels_df,
                                    test_size=0.15,
                                    random_state=42,
                                    stratify=labels_df['label'])
train_df.to_csv('train_labels.csv', index=False)
val_df.to_csv('val_labels.csv', index=False)

# read_image already returns a uint8 tensor, so convert the dtype instead of
# applying ToTensor; normalization here uses the ImageNet channel statistics,
# and augmentation transforms (e.g. transforms.RandomHorizontalFlip()) can be
# added for the training set
transform = transforms.Compose([transforms.ConvertImageDtype(torch.float),
                                transforms.Normalize(
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
                                ])

train_ds = CustomImageDataset('train_labels.csv', img_dir, transform=transform)
val_ds = CustomImageDataset('val_labels.csv', img_dir, transform=transform)
dataset = {'train': train_ds, 'val': val_ds}

config = {'batch_size': 64}
train_loader = DataLoader(train_ds, batch_size=config['batch_size'], shuffle=True)
val_loader = DataLoader(val_ds, batch_size=config['batch_size'], shuffle=False)  # no shuffle needed for validation
```

- Build the model to be trained, then run training and evaluation.
```python
import torch.nn as nn

class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits
```
```python
def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    # Set the model to training mode - important for batch normalization and dropout layers
    # Unnecessary in this situation but added for best practices
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if batch % 100 == 0:
            loss, current = loss.item(), (batch + 1) * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")


def test_loop(dataloader, model, loss_fn):
    # Set the model to evaluation mode - important for batch normalization and dropout layers
    # Unnecessary in this situation but added for best practices
    model.eval()
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0

    # Evaluating the model with torch.no_grad() ensures that no gradients are computed during test mode
    # also serves to reduce unnecessary gradient computations and memory usage for tensors with requires_grad=True
    with torch.no_grad():
        for X, y in dataloader:
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
```
```python
# Instantiate the model and set the training hyperparameters
model = NeuralNetwork()
learning_rate = 1e-3
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# DataLoaders for the Fashion-MNIST data loaded earlier
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64)

epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")
```

The prediction step and the recording of the model, server, and evaluation results that follow all depend on the target environment. The pipeline batch training pattern proceeds through the stages above, and its concrete structure differs from service to service.
## References
- PyTorch tutorials: https://pytorch.org/tutorials/