Goal: train a CNN on MNIST, then use gradient ascent to generate an image that the model classifies as an 8.
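In other words, the network's weights stay frozen and the input image itself becomes the variable being optimized: each step nudges the pixels in the direction that raises the class-8 output. A rough sketch of the update rule used later in the post (notation mine, not from the original):

$$x \leftarrow x + \eta \cdot \mathrm{sign}\!\left(\nabla_{x} f_{8}(x)\right)$$

where $f_8(x)$ is the raw logit the network assigns to class 8 and $\eta$ is the step size (0.1 in the code below).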


import numpy as np
import torch 
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

CNN model training

# Download the MNIST dataset
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) # pixel values normalized to [-1, 1]
trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256, shuffle=True, num_workers=2)
testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=256, shuffle=False, num_workers=2)
classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
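(Not in the original post:) a quick way to sanity-check the data pipeline is to pull one batch from trainloader and display a few digits:

# Preview a few training digits to confirm the loader and the [-1, 1] normalization
images, labels = next(iter(trainloader))
fig, axes = plt.subplots(1, 6, figsize=(9, 2))
for ax, img, lab in zip(axes, images, labels):
    ax.imshow(img[0].numpy() * 0.5 + 0.5, cmap='gray')  # undo the normalization for display
    ax.set_title(classes[lab.item()])
    ax.axis('off')
plt.show()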

# Define the network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 4 * 4)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
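# Side note (not in the original): the 16 * 4 * 4 in fc1 comes from the feature-map sizes,
# 28 -> 24 after conv1, -> 12 after the first pooling, -> 8 after conv2, -> 4 after the second pooling.
# A quick shape check with a dummy input confirms the forward pass is wired up correctly:
with torch.no_grad():
    print(Net()(torch.zeros(1, 1, 28, 28)).shape)  # torch.Size([1, 10])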
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Net()
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# Start training
epochs = 10

from tqdm import tqdm

for epoch in range(epochs):
    avg_loss = 0
    for i, data in enumerate(tqdm(trainloader)):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        avg_loss += loss.item()
    avg_loss = avg_loss / (i + 1)
    print('epoch: %d, loss: %.4f' % (epoch + 1, avg_loss))
print('Finished Training')

# Save the model
PATH = './mnist_net.pth'
torch.save(net.state_dict(), PATH)
# Reload the model
PATH = './mnist_net.pth'
net = Net()
net.load_state_dict(torch.load(PATH))
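
The original never touches testloader, so as a hedged addition here is a quick accuracy check on the test set (a LeNet-style CNN like this usually lands somewhere around 98-99% on MNIST, but the exact number depends on the run):

# Not in the original: evaluate the reloaded model on the test set
net.to(device)
net.eval()
correct, total = 0, 0
with torch.no_grad():
    for images, labels in testloader:
        images, labels = images.to(device), labels.to(device)
        preds = net(images).argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print('Test accuracy: %.2f %%' % (100.0 * correct / total))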

Image generation via gradient ascent

net = net.to(device)
# Freeze net's parameters so only the input image is optimized
for param in net.parameters():
    param.requires_grad = False
    
net.eval()
# Gradient ascent: optimize a random image so that the model classifies it as an 8
img_gen = torch.randn(1, 1, 28, 28, device=device, requires_grad=True)

epochs = 200
for epoch in range(epochs):

    output = net(img_gen)
    value_to_max = output[0][8] # maximize the raw logit for class 8

    # Compute the gradient of the class-8 logit with respect to the image
    grad = torch.autograd.grad(value_to_max, img_gen)[0]
    # Step in the direction of the gradient; grad / sqrt(grad * grad) is an elementwise sign(grad),
    # so every pixel moves by +/- 0.1 (entries where grad == 0 become NaN, cleaned up below)
    img_gen = img_gen.data + 0.1 * grad.data / torch.sqrt(grad.data * grad.data) # torch.Size([1, 1, 28, 28])

    # Replace any NaN positions in img_gen (from dividing by a zero gradient) with 0
    img_gen.data[img_gen.data != img_gen.data] = 0

    # Re-attach the image to autograd so the next iteration can differentiate through it again
    img_gen = img_gen.clone().detach().requires_grad_(True).to(device)

    if epoch % 20 == 0:
        print('epoch: {}, loss: {}'.format(epoch, value_to_max.item()))
        plt.imshow(img_gen[0][0].cpu().detach().numpy(), cmap='gray')
        plt.show()
        

epoch: 0, loss: 1.4248332977294922
(generated image at epoch 0)

epoch: 180, loss: 259.0355224609375
(generated image at epoch 180)
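
As an aside (my suggestion, not part of the original post), the grad / sqrt(grad * grad) trick plus the NaN patch can be replaced by torch.sign, which already returns 0 where the gradient is 0, so there is nothing to clean up. An equivalent loop body would look roughly like this:

# Hypothetical alternative update step using torch.sign
output = net(img_gen)
value_to_max = output[0][8]
grad = torch.autograd.grad(value_to_max, img_gen)[0]
with torch.no_grad():
    img_gen = img_gen + 0.1 * torch.sign(grad)  # sign(0) == 0, so no NaNs appear
img_gen.requires_grad_(True)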

# Rescale img_gen to [-1, 1] (the range the training data was normalized to), then run it through the network
# Map the minimum to -1 and the maximum to 1
img_gen = img_gen - torch.min(img_gen)
img_gen = img_gen / torch.max(img_gen)
img_gen = img_gen * 2 - 1
# Show the generated image
plt.imshow(img_gen[0][0].cpu().detach().numpy(), cmap='gray')
# Feed it to the network and look at the prediction
output = net(img_gen)
# Print the raw output (logit) for each class; these are not softmax probabilities
for i in range(10):
    print('{}: {}'.format(classes[i], output[0][i].item()))

0: -5.7123026847839355
1: -0.5687944889068604
2: -1.5327638387680054
3: 0.04780220612883568
4: -2.2129156589508057
5: 2.809201955795288
6: -3.1844711303710938
7: -7.135143280029297
8: 13.538104057312012
9: -0.9435712099075317
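
The numbers above are raw logits, not probabilities. As one last hedged addition, applying softmax turns them into probabilities; given how much larger the class-8 logit is, essentially all of the probability mass ends up on 8:

# Not in the original: convert the logits to softmax probabilities
probs = F.softmax(output, dim=1)
for i in range(10):
    print('{}: {:.4f}'.format(classes[i], probs[0][i].item()))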
