写点什么

深度学习核心:从基础到前沿的全面解析

  • 2025-07-10
    广东
  • 本文字数:15060 字

    阅读完需:约 49 分钟

🚀 探索深度学习的核心技术栈,从神经网络基础到最新的 Transformer 架构



📋 目录



🔬 神经网络基础:从感知机到多层网络 {#神经网络基础}

🧮 感知机:神经网络的起点

感知机是最简单的神经网络模型,由 Frank Rosenblatt 在 1957 年提出。它模拟了生物神经元的基本功能。


import numpy as np
import matplotlib.pyplot as plt
class Perceptron:
    """Single-layer perceptron (Rosenblatt, 1957) with a step activation.

    Learns a linear decision boundary with the classic perceptron update
    rule: w += lr * (y - y_hat) * x, b += lr * (y - y_hat).
    """

    def __init__(self, learning_rate=0.01, n_iterations=1000):
        self.learning_rate = learning_rate
        self.n_iterations = n_iterations

    def fit(self, X, y):
        """Fit on data X (n_samples, n_features) with binary labels y in {0, 1}."""
        # Initialize weights and bias to zero
        self.weights = np.zeros(X.shape[1])
        self.bias = 0
        for _ in range(self.n_iterations):
            for idx, x_i in enumerate(X):
                # Linear combination of inputs
                linear_output = np.dot(x_i, self.weights) + self.bias
                # Step activation
                y_predicted = self.activation_function(linear_output)
                # Perceptron update rule (no-op when prediction is correct)
                update = self.learning_rate * (y[idx] - y_predicted)
                self.weights += update * x_i
                self.bias += update

    def predict(self, X):
        """Return 0/1 predictions for X (n_samples, n_features)."""
        linear_output = np.dot(X, self.weights) + self.bias
        predictions = self.activation_function(linear_output)
        return predictions

    def activation_function(self, x):
        """Heaviside step function: 1 if x >= 0 else 0."""
        return np.where(x >= 0, 1, 0)
# Example: learn the AND gate
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 0, 0, 1])  # AND-gate truth table

perceptron = Perceptron(learning_rate=0.1, n_iterations=10)
perceptron.fit(X, y)

print("AND门预测结果:")
for i in range(len(X)):
    prediction = perceptron.predict(X[i].reshape(1, -1))
    print(f"输入: {X[i]}, 预测: {prediction[0]}, 实际: {y[i]}")
复制代码

🏗️ 多层感知机(MLP)

多层感知机通过增加隐藏层,解决了单层感知机无法处理非线性问题的局限性。


import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
class MLP(nn.Module):
    """Multi-layer perceptron: Linear -> ReLU -> BatchNorm -> Dropout per hidden layer.

    Args:
        input_size: number of input features.
        hidden_sizes: widths of the hidden layers, in order.
        output_size: number of output units (e.g. class logits).
        dropout_rate: dropout probability after each hidden layer.
    """

    def __init__(self, input_size, hidden_sizes, output_size, dropout_rate=0.2):
        super(MLP, self).__init__()
        layers = []
        prev_size = input_size
        # Build the hidden layers
        for hidden_size in hidden_sizes:
            layers.extend([
                nn.Linear(prev_size, hidden_size),
                nn.ReLU(),
                nn.BatchNorm1d(hidden_size),
                nn.Dropout(dropout_rate)
            ])
            prev_size = hidden_size
        # Output layer (raw logits; pair with CrossEntropyLoss)
        layers.append(nn.Linear(prev_size, output_size))
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x)
# Generate a synthetic binary-classification dataset
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize features (fit the scaler on the training split only to avoid leakage)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Convert to PyTorch tensors
X_train_tensor = torch.FloatTensor(X_train_scaled)
y_train_tensor = torch.LongTensor(y_train)
X_test_tensor = torch.FloatTensor(X_test_scaled)
y_test_tensor = torch.LongTensor(y_test)

# Create model, loss and optimizer
model = MLP(input_size=20, hidden_sizes=[64, 32, 16], output_size=2)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Full-batch training loop, evaluating on the test split every 20 epochs
num_epochs = 100
for epoch in range(num_epochs):
    model.train()
    optimizer.zero_grad()
    outputs = model(X_train_tensor)
    loss = criterion(outputs, y_train_tensor)
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 20 == 0:
        model.eval()
        with torch.no_grad():
            test_outputs = model(X_test_tensor)
            _, predicted = torch.max(test_outputs.data, 1)
            accuracy = (predicted == y_test_tensor).sum().item() / len(y_test_tensor)
            print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}, Test Accuracy: {accuracy:.4f}')
复制代码

🎯 激活函数详解

激活函数为神经网络引入非线性,是深度学习的关键组件。


import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
    """Logistic sigmoid; inputs clipped to [-500, 500] to avoid overflow in exp."""
    return 1 / (1 + np.exp(-np.clip(x, -500, 500)))


def tanh(x):
    """Hyperbolic tangent, output in (-1, 1)."""
    return np.tanh(x)


def relu(x):
    """Rectified linear unit: max(0, x)."""
    return np.maximum(0, x)


def leaky_relu(x, alpha=0.01):
    """ReLU variant with a small slope `alpha` for negative inputs."""
    return np.where(x > 0, x, alpha * x)


def swish(x):
    """Swish / SiLU: x * sigmoid(x)."""
    return x * sigmoid(x)


def gelu(x):
    """GELU, tanh approximation (as used in BERT/GPT)."""
    return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3)))
# Plot all activation functions on a shared 2x3 grid
x = np.linspace(-5, 5, 1000)

plt.figure(figsize=(15, 10))

activations = {
    'Sigmoid': sigmoid,
    'Tanh': tanh,
    'ReLU': relu,
    'Leaky ReLU': leaky_relu,
    'Swish': swish,
    'GELU': gelu
}

for i, (name, func) in enumerate(activations.items(), 1):
    plt.subplot(2, 3, i)
    plt.plot(x, func(x), linewidth=2)
    plt.title(f'{name} Activation Function')
    plt.grid(True, alpha=0.3)
    plt.xlabel('Input')
    plt.ylabel('Output')

plt.tight_layout()
plt.show()
# Derivatives of the activation functions (used during backpropagation)
def sigmoid_derivative(x):
    """sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))."""
    s = sigmoid(x)
    return s * (1 - s)


def relu_derivative(x):
    """1 for positive inputs, 0 otherwise (subgradient 0 at x == 0)."""
    return np.where(x > 0, 1, 0)


def leaky_relu_derivative(x, alpha=0.01):
    """1 for positive inputs, `alpha` otherwise."""
    return np.where(x > 0, 1, alpha)
复制代码



🖼️ 卷积神经网络(CNN):图像识别的利器 {#卷积神经网络}

🔍 卷积层原理

卷积神经网络通过卷积操作提取图像的局部特征,具有平移不变性和参数共享的优势。


import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader
class ConvBlock(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU; defaults preserve spatial size (3x3, pad 1)."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class SimpleCNN(nn.Module):
    """Small VGG-style CNN for 10-class image classification (e.g. CIFAR-10).

    Three conv stages (32 -> 64 -> 128 channels), each halving spatial size,
    followed by global average pooling and a 2-layer classifier head.
    """

    def __init__(self, num_classes=10):
        super(SimpleCNN, self).__init__()
        # Feature extractor
        self.features = nn.Sequential(
            ConvBlock(3, 32),
            ConvBlock(32, 32),
            nn.MaxPool2d(2, 2),
            nn.Dropout2d(0.25),

            ConvBlock(32, 64),
            ConvBlock(64, 64),
            nn.MaxPool2d(2, 2),
            nn.Dropout2d(0.25),

            ConvBlock(64, 128),
            ConvBlock(128, 128),
            nn.MaxPool2d(2, 2),
            nn.Dropout2d(0.25)
        )
        # Classifier head
        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(),
            nn.Linear(128, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x)
        return x
# Training-time preprocessing with augmentation
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# Test-time preprocessing (no augmentation, same normalization)
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# Load the CIFAR-10 dataset (downloads on first run)
train_dataset = CIFAR10(root='./data', train=True, download=True, transform=transform_train)
test_dataset = CIFAR10(root='./data', train=False, download=True, transform=transform_test)

train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=100, shuffle=False, num_workers=2)
def train_model(model, train_loader, test_loader, num_epochs=10):
    """Train `model` with Adam + StepLR and report train/test accuracy per epoch.

    Args:
        model: an nn.Module producing class logits.
        train_loader / test_loader: DataLoaders yielding (data, target) batches.
        num_epochs: number of full passes over the training set.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

    for epoch in range(num_epochs):
        # Training phase
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0

        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)

            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()

            if batch_idx % 100 == 0:
                print(f'Epoch: {epoch+1}, Batch: {batch_idx}, Loss: {loss.item():.4f}')

        # Decay the learning rate once per epoch
        scheduler.step()

        # Evaluation phase
        model.eval()
        test_loss = 0
        test_correct = 0
        test_total = 0

        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += criterion(output, target).item()
                _, predicted = output.max(1)
                test_total += target.size(0)
                test_correct += predicted.eq(target).sum().item()

        train_acc = 100. * correct / total
        test_acc = 100. * test_correct / test_total
        print(f'Epoch {epoch+1}: Train Acc: {train_acc:.2f}%, Test Acc: {test_acc:.2f}%')
# Create the model (the training call is left commented out, as in the original)
model = SimpleCNN(num_classes=10)
print("开始训练CNN模型...")
# train_model(model, train_loader, test_loader, num_epochs=5)
复制代码

🏛️ 经典 CNN 架构

LeNet-5:CNN 的先驱

class LeNet5(nn.Module):
    """Classic LeNet-5 for 32x32 single-channel images (LeCun et al., 1998).

    Two conv/avg-pool stages followed by three fully connected layers;
    uses tanh activations as in the original paper.
    """

    def __init__(self, num_classes=10):
        super(LeNet5, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=5),    # 32x32 -> 28x28
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),       # 28x28 -> 14x14
            nn.Conv2d(6, 16, kernel_size=5),   # 14x14 -> 10x10
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2)        # 10x10 -> 5x5
        )

        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.Tanh(),
            nn.Linear(120, 84),
            nn.Tanh(),
            nn.Linear(84, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 400)
        x = self.classifier(x)
        return x
复制代码

ResNet:残差网络

class ResidualBlock(nn.Module):
    """Basic ResNet block: two 3x3 convs with a skip (identity) connection.

    `downsample`, when given, projects the input so that its shape matches
    the block output (needed when stride != 1 or channel counts differ).
    """

    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity  # residual connection
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    """ResNet backbone built from a user-supplied block class.

    Args:
        block: block class with signature (in_channels, out_channels, stride, downsample).
        layers: number of blocks in each of the four stages.
        num_classes: size of the final classification layer.

    Note: the final fc assumes the last stage outputs 512 channels, i.e.
    basic (non-bottleneck) blocks.
    """

    def __init__(self, block, layers, num_classes=1000):
        super(ResNet, self).__init__()
        self.in_channels = 64

        # Stem: 7x7 conv, stride 2, then 3x3 max pool
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Four stages; stages 2-4 halve the spatial size
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, num_classes)

    def _make_layer(self, block, out_channels, blocks, stride=1):
        """Build one stage of `blocks` blocks; only the first may downsample."""
        downsample = None
        if stride != 1 or self.in_channels != out_channels:
            # 1x1 projection so the identity matches the block output shape
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )

        layers = []
        layers.append(block(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for _ in range(1, blocks):
            layers.append(block(out_channels, out_channels))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x
# Factory for ResNet-18: two basic residual blocks per stage
def resnet18(num_classes=1000):
    return ResNet(ResidualBlock, [2, 2, 2, 2], num_classes)
复制代码



🔄 循环神经网络(RNN/LSTM/GRU):序列数据处理 {#循环神经网络}

🔗 基础 RNN

循环神经网络专门处理序列数据,具有记忆能力。


import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
class SimpleRNN(nn.Module):
    """Vanilla RNN for sequence regression/classification.

    Expects batch-first input of shape (batch, seq_len, input_size) and maps
    the last time step's hidden state through a linear layer.
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(SimpleRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Initial hidden state; created on x's device so the model also works on GPU
        # (the original allocated it on CPU unconditionally)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)

        # RNN forward pass
        out, _ = self.rnn(x, h0)

        # Use only the final time step's output
        out = self.fc(out[:, -1, :])
        return out
# Generate sine-wave data for time-series prediction
def generate_sine_wave(seq_length, num_samples):
    """Return (X, y): X holds the first seq_length-1 points of a random sine
    segment, y holds the final point to be predicted.

    Shapes: X is (num_samples, seq_length - 1), y is (num_samples,).
    """
    X, y = [], []
    for _ in range(num_samples):
        start = np.random.uniform(0, 100)
        x = np.linspace(start, start + seq_length, seq_length)
        sine_wave = np.sin(x)
        X.append(sine_wave[:-1])  # input sequence
        y.append(sine_wave[-1])   # prediction target
    return np.array(X), np.array(y)
# Generate training data
seq_length = 20
num_samples = 1000
X_train, y_train = generate_sine_wave(seq_length, num_samples)

# Convert to PyTorch tensors (add a trailing feature dimension)
X_train = torch.FloatTensor(X_train).unsqueeze(-1)
y_train = torch.FloatTensor(y_train)

# Create the model, loss and optimizer
model = SimpleRNN(input_size=1, hidden_size=50, output_size=1)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Training loop
num_epochs = 100
for epoch in range(num_epochs):
    model.train()
    optimizer.zero_grad()
    outputs = model(X_train)
    loss = criterion(outputs.squeeze(), y_train)
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 20 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.6f}')
复制代码

🧠 LSTM:长短期记忆网络

LSTM 通过门控机制解决了传统 RNN 的梯度消失问题。


class LSTMModel(nn.Module):
    """Stacked LSTM for sequence prediction; batch-first input (batch, seq, features).

    The final time step's output is passed through dropout and a linear head.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True, dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Initial hidden and cell states, placed on x's device so CUDA inputs work
        # (the original allocated them on CPU unconditionally)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)

        # LSTM forward pass
        out, (hn, cn) = self.lstm(x, (h0, c0))

        # Apply dropout
        out = self.dropout(out)

        # Use the final time step's output
        out = self.fc(out[:, -1, :])
        return out
# Text-classification example
class TextClassificationLSTM(nn.Module):
    """Embedding -> stacked LSTM -> dropout -> linear classifier.

    Input: LongTensor of token indices, shape (batch, seq_len).
    Output: class logits of shape (batch, output_dim).
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim,
                 n_layers=2, dropout=0.3):
        super(TextClassificationLSTM, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            batch_first=True, dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Token embedding
        embedded = self.embedding(x)

        # LSTM over the sequence (initial states default to zeros)
        lstm_out, (hidden, cell) = self.lstm(embedded)

        # Classify from the last layer's final hidden state
        output = self.dropout(hidden[-1])
        output = self.fc(output)
        return output
# Bidirectional LSTM
class BiLSTM(nn.Module):
    """Bidirectional stacked LSTM; concatenates forward/backward features.

    The linear head takes hidden_size * 2 features because both directions
    are concatenated at each time step.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(BiLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True, bidirectional=True)
        self.fc = nn.Linear(hidden_size * 2, output_size)  # *2: both directions

    def forward(self, x):
        # Initial states need num_layers * 2 slots (one per direction);
        # created on x's device so the model also works on GPU
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)

        out, _ = self.lstm(x, (h0, c0))

        # Last time step: forward and backward features concatenated
        out = self.fc(out[:, -1, :])
        return out
复制代码

⚡ GRU:门控循环单元

GRU 是 LSTM 的简化版本,参数更少但性能相近。


class GRUModel(nn.Module):
    """Stacked GRU for sequence prediction; batch-first input (batch, seq, features)."""

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super(GRUModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.gru = nn.GRU(input_size, hidden_size, num_layers,
                          batch_first=True, dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Initial hidden state on x's device (original allocated on CPU only)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)

        # GRU forward pass
        out, _ = self.gru(x, h0)

        # Dropout + linear head on the last time step
        out = self.dropout(out[:, -1, :])
        out = self.fc(out)
        return out
# Sequence-to-sequence model (Seq2Seq)
class Seq2SeqGRU(nn.Module):
    """GRU encoder-decoder.

    Training (model.train() with `tgt` given) uses teacher forcing; inference
    feeds each step's projected output back in as the next decoder input and
    always generates `max_length` steps.
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(Seq2SeqGRU, self).__init__()

        # Encoder
        self.encoder = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
        # Decoder (consumes vectors of size output_size)
        self.decoder = nn.GRU(output_size, hidden_size, num_layers, batch_first=True)
        # Projection from decoder hidden state to output space
        self.output_projection = nn.Linear(hidden_size, output_size)

    def forward(self, src, tgt=None, max_length=50):
        batch_size = src.size(0)

        # Encode; keep only the final hidden state as decoder context
        _, hidden = self.encoder(src)

        if self.training and tgt is not None:
            # Teacher forcing: decode the whole target sequence at once
            decoder_output, _ = self.decoder(tgt, hidden)
            output = self.output_projection(decoder_output)
        else:
            # Autoregressive generation, one step at a time
            outputs = []
            # Start token = zero vector; on src's device (original was CPU-only)
            decoder_input = torch.zeros(batch_size, 1,
                                        self.output_projection.out_features,
                                        device=src.device)

            for _ in range(max_length):
                decoder_output, hidden = self.decoder(decoder_input, hidden)
                output = self.output_projection(decoder_output)
                outputs.append(output)
                decoder_input = output  # feed prediction back in

            output = torch.cat(outputs, dim=1)

        return output
复制代码



⚡ 注意力机制与 Transformer 架构 {#注意力机制与 transformer}

🎯 注意力机制原理

注意力机制允许模型在处理序列时关注最相关的部分。


import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class ScaledDotProductAttention(nn.Module):
    """Attention(Q, K, V) = softmax(QK^T / sqrt(d)) V.

    `d_model` here is the per-head key dimension used for scaling.
    """

    def __init__(self, d_model, dropout=0.1):
        super(ScaledDotProductAttention, self).__init__()
        self.d_model = d_model
        self.dropout = nn.Dropout(dropout)

    def forward(self, query, key, value, mask=None):
        # Attention scores, scaled to keep softmax gradients healthy
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.d_model)

        # Apply the mask (if provided): masked positions get ~-inf before softmax
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)

        # Attention weights
        attention_weights = F.softmax(scores, dim=-1)
        attention_weights = self.dropout(attention_weights)

        # Weighted sum of values
        output = torch.matmul(attention_weights, value)

        return output, attention_weights


class MultiHeadAttention(nn.Module):
    """Multi-head attention: project to h heads, attend, concatenate, project back."""

    def __init__(self, d_model, num_heads, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        assert d_model % num_heads == 0

        self.d_model = d_model
        self.num_heads = num_heads
        self.d_k = d_model // num_heads  # per-head dimension

        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_o = nn.Linear(d_model, d_model)

        # Scale by sqrt(d_k), the per-head dimension
        self.attention = ScaledDotProductAttention(self.d_k, dropout)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)

        # Linear projections, reshaped to (batch, heads, seq, d_k)
        Q = self.w_q(query).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        K = self.w_k(key).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        V = self.w_v(value).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)

        # Attend per head
        attn_output, attn_weights = self.attention(Q, K, V, mask)

        # Concatenate heads back to (batch, seq, d_model)
        attn_output = attn_output.transpose(1, 2).contiguous().view(
            batch_size, -1, self.d_model)

        # Final output projection
        output = self.w_o(attn_output)

        return output, attn_weights
复制代码

🏗️ Transformer 架构

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding for batch-first input (batch, seq, d_model).

    Fix vs. the original: the buffer was stored as (max_length, 1, d_model) and
    sliced by x.size(0) — the BATCH size — even though every caller in this
    article uses batch-first tensors, so all tokens in a sequence received the
    encoding of their batch index. The buffer is now (1, max_length, d_model)
    and sliced by sequence length, broadcasting over the batch.
    """

    def __init__(self, d_model, max_length=5000):
        super(PositionalEncoding, self).__init__()

        pe = torch.zeros(max_length, d_model)
        position = torch.arange(0, max_length, dtype=torch.float).unsqueeze(1)

        # Geometric frequency schedule: 10000^(-2i/d_model)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                             (-math.log(10000.0) / d_model))

        pe[:, 0::2] = torch.sin(position * div_term)  # even dims
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dims

        # (1, max_length, d_model): broadcasts over the batch dimension
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (batch, seq_len, d_model)
        return x + self.pe[:, :x.size(1), :]
class TransformerBlock(nn.Module):
    """Post-norm Transformer encoder layer: self-attention + position-wise FFN,
    each wrapped with a residual connection and LayerNorm."""

    def __init__(self, d_model, num_heads, d_ff, dropout=0.1):
        super(TransformerBlock, self).__init__()
        self.attention = MultiHeadAttention(d_model, num_heads, dropout)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

        self.feed_forward = nn.Sequential(
            nn.Linear(d_model, d_ff),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(d_ff, d_model)
        )

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Multi-head self-attention + residual connection
        attn_output, _ = self.attention(x, x, x, mask)
        x = self.norm1(x + self.dropout(attn_output))

        # Feed-forward network + residual connection
        ff_output = self.feed_forward(x)
        x = self.norm2(x + self.dropout(ff_output))

        return x
class TransformerEncoder(nn.Module):
    """Token embedding + positional encoding + a stack of Transformer blocks.

    Input: LongTensor of token indices (batch, seq_len).
    Output: contextual embeddings (batch, seq_len, d_model).
    """

    def __init__(self, vocab_size, d_model, num_heads, num_layers, d_ff,
                 max_length=5000, dropout=0.1):
        super(TransformerEncoder, self).__init__()
        self.d_model = d_model
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.pos_encoding = PositionalEncoding(d_model, max_length)

        self.transformer_blocks = nn.ModuleList([
            TransformerBlock(d_model, num_heads, d_ff, dropout)
            for _ in range(num_layers)
        ])

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Embedding scaled by sqrt(d_model), then positional encoding
        x = self.embedding(x) * math.sqrt(self.d_model)
        x = self.pos_encoding(x)
        x = self.dropout(x)

        # Pass through the stack of Transformer blocks
        for transformer in self.transformer_blocks:
            x = transformer(x, mask)

        return x
# Complete Transformer model for classification tasks
class TransformerClassifier(nn.Module):
    """TransformerEncoder followed by mean pooling and a 2-layer classifier head."""

    def __init__(self, vocab_size, d_model, num_heads, num_layers, d_ff,
                 num_classes, max_length=512, dropout=0.1):
        super(TransformerClassifier, self).__init__()
        self.encoder = TransformerEncoder(
            vocab_size, d_model, num_heads, num_layers, d_ff, max_length, dropout
        )
        self.classifier = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(d_model // 2, num_classes)
        )

    def forward(self, x, mask=None):
        # Encode the token sequence
        encoded = self.encoder(x, mask)

        # Global average pooling over the sequence dimension
        pooled = encoded.mean(dim=1)

        # Classification head -> logits
        output = self.classifier(pooled)
        return output
# Example: instantiate the classifier
model = TransformerClassifier(
    vocab_size=10000,
    d_model=512,
    num_heads=8,
    num_layers=6,
    d_ff=2048,
    num_classes=2,
    max_length=512,
    dropout=0.1
)

print(f"模型参数数量: {sum(p.numel() for p in model.parameters()):,}")
复制代码

🎨 Vision Transformer (ViT)

将 Transformer 应用于计算机视觉任务。


class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and embed each one.

    Implemented as a Conv2d whose kernel and stride both equal the patch
    size — equivalent to flattening each patch and applying a linear layer.
    """

    def __init__(self, img_size=224, patch_size=16, in_channels=3, embed_dim=768):
        super(PatchEmbedding, self).__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = (img_size // patch_size) ** 2

        self.projection = nn.Conv2d(in_channels, embed_dim,
                                    kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        # x: (batch_size, channels, height, width)
        x = self.projection(x)  # (batch_size, embed_dim, n_patches_h, n_patches_w)
        x = x.flatten(2)        # (batch_size, embed_dim, num_patches)
        x = x.transpose(1, 2)   # (batch_size, num_patches, embed_dim)
        return x
class VisionTransformer(nn.Module):
    """ViT: patch embedding + learnable [CLS] token + learnable position
    embeddings + Transformer encoder stack; classifies from the [CLS] token."""

    def __init__(self, img_size=224, patch_size=16, in_channels=3, num_classes=1000,
                 embed_dim=768, num_heads=12, num_layers=12, mlp_ratio=4, dropout=0.1):
        super(VisionTransformer, self).__init__()

        self.patch_embedding = PatchEmbedding(img_size, patch_size, in_channels, embed_dim)
        num_patches = self.patch_embedding.num_patches

        # Class token and learnable position embeddings (one per patch + [CLS])
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embedding = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))

        # Transformer encoder stack
        self.transformer_blocks = nn.ModuleList([
            TransformerBlock(embed_dim, num_heads, int(embed_dim * mlp_ratio), dropout)
            for _ in range(num_layers)
        ])

        self.norm = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, num_classes)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        batch_size = x.shape[0]

        # Patchify and embed the image
        x = self.patch_embedding(x)

        # Prepend the class token
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat([cls_tokens, x], dim=1)

        # Add position embeddings
        x = x + self.pos_embedding
        x = self.dropout(x)

        # Transformer encoder stack
        for transformer in self.transformer_blocks:
            x = transformer(x)

        # Normalize and classify from the [CLS] token
        x = self.norm(x)
        cls_token_final = x[:, 0]
        output = self.head(cls_token_final)

        return output
# Create a ViT model
vit_model = VisionTransformer(
    img_size=224,
    patch_size=16,
    in_channels=3,
    num_classes=1000,
    embed_dim=768,
    num_heads=12,
    num_layers=12
)

print(f"ViT模型参数数量: {sum(p.numel() for p in vit_model.parameters()):,}")
复制代码



🎯 总结与展望

📊 深度学习技术对比

🚀 未来发展趋势

1. 模型效率优化

  • 模型压缩:知识蒸馏、剪枝、量化

  • 轻量化架构:MobileNet、EfficientNet、DistilBERT

  • 神经架构搜索:AutoML、NAS

2. 多模态融合

  • 视觉-语言模型:CLIP、DALL-E、GPT-4V

  • 跨模态理解:图像描述、视觉问答

  • 统一架构:通用多模态 Transformer

3. 自监督学习

  • 对比学习:SimCLR、MoCo、SwAV

  • 掩码语言模型:BERT、RoBERTa、DeBERTa

  • 生成式预训练:GPT 系列、T5


🌟 结语

深度学习正在快速发展,从基础的神经网络到复杂的 Transformer 架构,每一项技术都在推动 AI 的边界。掌握这些核心技术不仅需要理解理论原理,更需要大量的实践经验。


深度学习的未来充满无限可能,让我们一起在这个激动人心的领域中不断探索和创新! 🚀✨

用户头像

还未添加个人签名 2020-09-29 加入

还未添加个人简介

评论

发布
暂无评论
深度学习核心:从基础到前沿的全面解析_AI 原生云_野猪🐗 佩琪_InfoQ写作社区