🧠 Deep Learning Foundations
A comprehensive guide to Deep Learning and Neural Networks
for building advanced AI systems in the Industry 4.0 era
Artificial Neural Networks
Image Processing
Time-Series Data
Natural Language Processing
What is Deep Learning?
Deep Learning is a subfield of Machine Learning that uses neural networks with many hidden layers to learn complex patterns in data, loosely modeled on how the human brain processes information. Its ability to learn feature representations automatically, instead of relying on hand-engineered features, makes it well suited to complex problems such as image recognition, natural language processing, and autonomous control.
🤖 Traditional Machine Learning vs. 🧠 Deep Learning
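To make the comparison concrete, the short sketch below contrasts the two workflows on synthetic data: a classical classifier trained on a few handcrafted statistics versus a small network that consumes the raw input and learns its own representation. The data, feature choices, and layer sizes are illustrative assumptions, not part of the full example later on this page.

# Minimal sketch (illustrative only): handcrafted features vs. learned features.
# The synthetic data and feature functions here are placeholders, not a real pipeline.
import numpy as np
import torch
import torch.nn as nn
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
raw = rng.normal(size=(200, 64)).astype(np.float32)   # e.g. flattened sensor/image patches
labels = rng.integers(0, 2, size=200)

# Traditional ML: a human designs the features, then a shallow model classifies them
handcrafted = np.stack([raw.mean(axis=1), raw.std(axis=1), raw.max(axis=1)], axis=1)
clf = LogisticRegression().fit(handcrafted, labels)

# Deep learning: the network consumes raw inputs and learns its own representation
net = nn.Sequential(nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, 2))
logits = net(torch.from_numpy(raw))                    # hidden layers act as learned features
print(clf.score(handcrafted, labels), logits.shape)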
Neural Network Architecture
The basic structure of a neural network
Input Layer → Hidden Layer 1 → Hidden Layer 2 → Output Layer
🔗 Connections
⚡ Activation Function
📊 Learning Process
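The layer structure above translates almost line-for-line into PyTorch. The following minimal sketch (layer sizes and learning rate are arbitrary assumptions) shows the three ideas on the cards: weighted connections between layers, an activation function, and one step of the learning process via backpropagation.

# Minimal sketch of the structure above: two hidden layers, ReLU activations,
# and a single gradient-descent step. Sizes (8-16-16-3) are arbitrary assumptions.
import torch
import torch.nn as nn

mlp = nn.Sequential(
    nn.Linear(8, 16),   # Input Layer  -> Hidden Layer 1 (weighted connections)
    nn.ReLU(),          # Activation function adds non-linearity
    nn.Linear(16, 16),  # Hidden Layer 1 -> Hidden Layer 2
    nn.ReLU(),
    nn.Linear(16, 3),   # Hidden Layer 2 -> Output Layer (3 classes)
)

x = torch.randn(4, 8)                      # a batch of 4 example inputs
y = torch.tensor([0, 2, 1, 0])             # their target classes
loss = nn.CrossEntropyLoss()(mlp(x), y)    # forward pass + loss calculation

optimizer = torch.optim.SGD(mlp.parameters(), lr=0.1)
optimizer.zero_grad()
loss.backward()                            # learning process: backpropagate the error
optimizer.step()                           # update the connection weights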
Deep Learning Architectures
Convolutional Neural Networks (CNN)
Recurrent Neural Networks (RNN)
Transformer Architecture
Generative Adversarial Networks (GAN)
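Each of these families has a stock building block in PyTorch, and the full example further down uses the first of them. The sketch below, with shapes and hyperparameters chosen only for illustration, shows roughly how the other families are instantiated; it is not a complete model for any of them.

# Illustrative only: how each architecture family maps onto a stock PyTorch module.
# Shapes and hyperparameters below are arbitrary assumptions for demonstration.
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 16, kernel_size=3, padding=1)                          # CNN building block (images)
lstm = nn.LSTM(input_size=10, hidden_size=32, batch_first=True)            # RNN variant (sequences)
attn = nn.TransformerEncoderLayer(d_model=64, nhead=4, batch_first=True)   # Transformer block

img = torch.randn(1, 3, 224, 224)        # batch of 1 RGB image
seq = torch.randn(1, 50, 10)             # 50 time steps, 10 features each
tok = torch.randn(1, 20, 64)             # 20 tokens, 64-dim embeddings

print(conv(img).shape)                   # torch.Size([1, 16, 224, 224])
print(lstm(seq)[0].shape)                # torch.Size([1, 50, 32])
print(attn(tok).shape)                   # torch.Size([1, 20, 64])

# A GAN is not a single module: it pairs a generator and a discriminator
# trained against each other, each built from blocks like those above.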
Training a Deep Learning Model
Training Pipeline
• Data preparation: Augmentation, Normalization
• Model design: Layer configuration, Hyperparameters
• Training: Loss calculation, Backpropagation
• Evaluation: Overfitting check, Model selection
• Deployment: Production setup, Monitoring (see the sketch below)
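The full example further down covers the pipeline up to model selection, so only the last two steps are sketched here. This is a minimal, illustrative deployment snippet; it assumes a checkpoint saved as a dictionary with a 'model_state_dict' key, matching the format used by the training code on this page, and the tiny placeholder model exists only so the snippet runs on its own.

# Minimal sketch of the "Production setup" and "Monitoring" steps (assumptions noted above).
import torch
import torch.nn as nn

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 224 * 224, 3))          # placeholder model
torch.save({'model_state_dict': model.state_dict()}, 'best_model.pth')    # stand-in checkpoint

# Production setup: load the best checkpoint and switch to inference mode
checkpoint = torch.load('best_model.pth', map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()

# Monitoring: log prediction confidence so drift or degradation can be spotted
with torch.no_grad():
    batch = torch.randn(1, 3, 224, 224)                  # stand-in for a preprocessed image
    probs = torch.softmax(model(batch), dim=1)
print(f"class={probs.argmax(dim=1).item()}  confidence={probs.max().item():.2f}")

In a real deployment you would load the IndustrialCNN defined below instead of the placeholder and feed it images preprocessed with the same validation transforms used during training.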
⚠️ Common Challenges
✅ Best Practices
Practical Implementation
PyTorch Deep Learning Example
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
import cv2
class IndustrialCNN(nn.Module):
    """CNN for industrial image classification"""

    def __init__(self, num_classes=3, dropout_rate=0.5):
        super(IndustrialCNN, self).__init__()

        # Convolutional layers
        self.features = nn.Sequential(
            # First block
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(0.25),

            # Second block
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(0.25),

            # Third block
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(0.25),
        )

        # Classifier
        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(),
            nn.Linear(128, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout_rate),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout_rate),
            nn.Linear(128, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x)
        return x
class IndustrialDataset(Dataset):
    """Custom dataset for industrial images"""

    def __init__(self, image_paths, labels, transform=None):
        self.image_paths = image_paths
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        # Load image
        image = cv2.imread(self.image_paths[idx])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Apply transforms
        if self.transform:
            image = self.transform(image)

        return image, self.labels[idx]
class DeepLearningTrainer:
    def __init__(self, model, device='cuda' if torch.cuda.is_available() else 'cpu'):
        self.model = model.to(device)
        self.device = device
        self.train_losses = []
        self.val_losses = []
        self.train_accuracies = []
        self.val_accuracies = []

    def train_epoch(self, train_loader, criterion, optimizer):
        """Train for one epoch"""
        self.model.train()
        running_loss = 0.0
        correct = 0
        total = 0

        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(self.device), target.to(self.device)

            # Zero the gradients
            optimizer.zero_grad()

            # Forward pass
            outputs = self.model(data)
            loss = criterion(outputs, target)

            # Backward pass
            loss.backward()
            optimizer.step()

            # Statistics
            running_loss += loss.item()
            _, predicted = outputs.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()

        epoch_loss = running_loss / len(train_loader)
        epoch_acc = 100. * correct / total
        return epoch_loss, epoch_acc

    def validate(self, val_loader, criterion):
        """Validate the model"""
        self.model.eval()
        val_loss = 0.0
        correct = 0
        total = 0

        with torch.no_grad():
            for data, target in val_loader:
                data, target = data.to(self.device), target.to(self.device)
                outputs = self.model(data)
                loss = criterion(outputs, target)

                val_loss += loss.item()
                _, predicted = outputs.max(1)
                total += target.size(0)
                correct += predicted.eq(target).sum().item()

        epoch_loss = val_loss / len(val_loader)
        epoch_acc = 100. * correct / total
        return epoch_loss, epoch_acc

    def train(self, train_loader, val_loader, num_epochs=50, learning_rate=0.001):
        """Complete training process"""
        # Loss and optimizer
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(self.model.parameters(), lr=learning_rate, weight_decay=1e-4)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5, factor=0.5)

        best_val_acc = 0.0
        patience_counter = 0
        patience = 10

        print("Starting training...")
        print(f"Device: {self.device}")

        for epoch in range(num_epochs):
            # Train
            train_loss, train_acc = self.train_epoch(train_loader, criterion, optimizer)

            # Validate
            val_loss, val_acc = self.validate(val_loader, criterion)

            # Learning rate scheduling
            scheduler.step(val_loss)

            # Store metrics
            self.train_losses.append(train_loss)
            self.val_losses.append(val_loss)
            self.train_accuracies.append(train_acc)
            self.val_accuracies.append(val_acc)

            # Print progress
            if epoch % 5 == 0 or epoch == num_epochs - 1:
                current_lr = optimizer.param_groups[0]['lr']
                print(f'Epoch {epoch+1}/{num_epochs}:')
                print(f'  Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%')
                print(f'  Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.2f}%')
                print(f'  Learning Rate: {current_lr:.6f}')
                print('-' * 50)

            # Early stopping
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                patience_counter = 0

                # Save best model
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': self.model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'val_acc': val_acc,
                }, 'best_model.pth')
            else:
                patience_counter += 1
                if patience_counter >= patience:
                    print(f'Early stopping at epoch {epoch+1}')
                    break

        print(f'Training completed! Best validation accuracy: {best_val_acc:.2f}%')

    def plot_training_history(self):
        """Plot training history"""
        fig, axes = plt.subplots(1, 2, figsize=(15, 5))

        # Loss plot
        axes[0].plot(self.train_losses, label='Training Loss', color='blue')
        axes[0].plot(self.val_losses, label='Validation Loss', color='red')
        axes[0].set_title('Training and Validation Loss')
        axes[0].set_xlabel('Epoch')
        axes[0].set_ylabel('Loss')
        axes[0].legend()
        axes[0].grid(True)

        # Accuracy plot
        axes[1].plot(self.train_accuracies, label='Training Accuracy', color='blue')
        axes[1].plot(self.val_accuracies, label='Validation Accuracy', color='red')
        axes[1].set_title('Training and Validation Accuracy')
        axes[1].set_xlabel('Epoch')
        axes[1].set_ylabel('Accuracy (%)')
        axes[1].legend()
        axes[1].grid(True)

        plt.tight_layout()
        plt.show()

    def evaluate_model(self, test_loader, class_names):
        """Evaluate model on test set"""
        self.model.eval()
        all_preds = []
        all_targets = []

        with torch.no_grad():
            for data, target in test_loader:
                data = data.to(self.device)
                outputs = self.model(data)
                _, predicted = outputs.max(1)

                all_preds.extend(predicted.cpu().numpy())
                all_targets.extend(target.numpy())

        # Classification report
        print("Classification Report:")
        print(classification_report(all_targets, all_preds, target_names=class_names))

        # Confusion matrix
        cm = confusion_matrix(all_targets, all_preds)
        print("\nConfusion Matrix:")
        print(cm)

        return all_targets, all_preds
# Data preprocessing
def get_transforms():
    """Get data transforms for training and validation"""
    train_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((224, 224)),
        transforms.RandomRotation(10),
        transforms.RandomHorizontalFlip(0.5),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    val_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    return train_transform, val_transform
def main():
    """Main training function"""
    # Set random seeds for reproducibility
    torch.manual_seed(42)
    np.random.seed(42)

    # Parameters
    batch_size = 32
    num_epochs = 50
    learning_rate = 0.001
    num_classes = 3  # Normal, Defective, Critical

    # Get transforms
    train_transform, val_transform = get_transforms()

    # For demonstration, create dummy data
    # In practice, replace with your actual image paths and labels
    dummy_image_paths = ['image1.jpg'] * 100
    dummy_labels = [0, 1, 2] * 33 + [0]  # Dummy labels

    # Create datasets
    # train_dataset = IndustrialDataset(train_image_paths, train_labels, train_transform)
    # val_dataset = IndustrialDataset(val_image_paths, val_labels, val_transform)

    # Create data loaders
    # train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    # val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    # Initialize model
    model = IndustrialCNN(num_classes=num_classes)

    # Print model info
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Total parameters: {total_params:,}")
    print(f"Trainable parameters: {trainable_params:,}")

    # Initialize trainer
    trainer = DeepLearningTrainer(model)

    # Train model
    # trainer.train(train_loader, val_loader, num_epochs, learning_rate)

    # Plot training history
    # trainer.plot_training_history()

    # Evaluate model
    class_names = ['Normal', 'Defective', 'Critical']
    # targets, predictions = trainer.evaluate_model(test_loader, class_names)

    print("Training setup complete!")
    print("Replace dummy data with actual image paths and labels to start training.")


if __name__ == "__main__":
    main()
Ready to build a Deep Learning solution?
Consult an expert to design an advanced AI system that fits your requirements.