from google.colab import drive
drive.mount('/gdrive', force_remount=True)
import os
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from torch.utils.data import (DataLoader, RandomSampler, TensorDataset)
from keras.datasets import mnist
class MNIST_CNN(nn.Module):
    def __init__(self, config):
        super(MNIST_CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.fnn = nn.Sequential(
            nn.Linear(7*7*64, 10, bias=True)
        )
        # this line raises the AttributeError described below
        nn.init.xavier_uniform_(self.fnn.weight)

    def forward(self, input_features):
        output = self.conv1(input_features)
        output = self.conv2(output)
        output = output.view(output.size(0), -1)  # flatten to (batch, 7*7*64)
        hypothesis = self.fnn(output)
        return hypothesis
def load_dataset():
    (train_X, train_Y), (test_X, test_Y) = mnist.load_data()
    train_X = train_X.reshape(-1, 1, 28, 28)
    test_X = test_X.reshape(-1, 1, 28, 28)
    train_X = torch.tensor(train_X, dtype=torch.float)
    train_Y = torch.tensor(train_Y, dtype=torch.long)
    test_X = torch.tensor(test_X, dtype=torch.float)
    test_Y = torch.tensor(test_Y, dtype=torch.long)
    return (train_X, train_Y), (test_X, test_Y)

def tensor2list(input_tensor):
    return input_tensor.cpu().detach().numpy().tolist()
def do_test(model, test_dataloader):
    model.eval()
    predicts, golds = [], []
    with torch.no_grad():
        for step, batch in enumerate(test_dataloader):
            batch = tuple(t.cuda() for t in batch)
            input_features, labels = batch
            hypothesis = model(input_features)
            print("size of hypothesis", hypothesis.size())
            logits = torch.argmax(hypothesis, -1)
            x = tensor2list(logits)
            y = tensor2list(labels)
            predicts.extend(x)
            golds.extend(y)
    print("PRED=", predicts)
    print("GOLD=", golds)
    print("Accuracy={0:f}\n".format(accuracy_score(golds, predicts)))
def test(config):
    model = MNIST_CNN(config).cuda()
    model.load_state_dict(torch.load(os.path.join(config["output_dir"], config["model_name"])))
    (_, _), (features, labels) = load_dataset()
    test_features = TensorDataset(features, labels)
    test_dataloader = DataLoader(test_features, shuffle=True, batch_size=config["batch_size"])
    do_test(model, test_dataloader)
def train(config):
    model = MNIST_CNN(config).cuda()
    (input_features, labels), (_, _) = load_dataset()
    train_features = TensorDataset(input_features, labels)
    train_dataloader = DataLoader(train_features, shuffle=True, batch_size=config["batch_size"])
    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=config["learn_rate"])
    for epoch in range(config["epochs"]):
        model.train()
        costs = []
        for step, batch in enumerate(train_dataloader):
            batch = tuple(t.cuda() for t in batch)
            input_features, labels = batch
            optimizer.zero_grad()
            hypothesis = model(input_features)
            cost = loss_func(hypothesis, labels)
            cost.backward()
            optimizer.step()
            costs.append(cost.data.item())
        print("Average Loss={0:f}".format(np.mean(costs)))
        # save one checkpoint per epoch; the filename must include the epoch number
        torch.save(model.state_dict(), os.path.join(config["output_dir"], "epoch_{0:d}.pt".format(epoch + 1)))
        do_test(model, train_dataloader)
if __name__ == "__main__":
    root_dir = "/gdrive/My Drive/24-2/MachineLearning"
    output_dir = os.path.join(root_dir, "output")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    config = {"mode": "test",
              "model_name": "epoch_{0:d}.pt".format(10),
              "output_dir": output_dir,
              "learn_rate": 0.001,
              "batch_size": 32,
              "epochs": 10,
              }
    if config["mode"] == "train":
        train(config)
    else:
        test(config)
With this code I get AttributeError: 'Sequential' object has no attribute 'weight'. I tried nn.init.xavier_uniform_(self.fnn[0].weight), but then I get the error below:
RuntimeError: Error(s) in loading state_dict for MNIST_CNN: Missing key(s) in state_dict: "conv1.0.weight", "conv1.0.bias", "conv2.0.weight", "conv2.0.bias", "fnn.0.weight", "fnn.0.bias". Unexpected key(s) in state_dict: "linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias".
How can I fix this? Please help me.
A simple fix is to remove the nn.Sequential() wrapper from self.fnn:
self.fnn = nn.Linear(7*7*64, 10, bias=True)
nn.init.xavier_uniform_(self.fnn.weight)
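Your second attempt, nn.init.xavier_uniform_(self.fnn[0].weight), also fixes the AttributeError; the RuntimeError you got afterwards is a separate problem. The unexpected keys linear1.* and linear2.* mean the epoch_10.pt file you are loading was saved from an older model class whose submodules were named linear1 and linear2, not from this MNIST_CNN. After changing the class, run with "mode": "train" again so a matching checkpoint is written, or first check what the file actually contains. A minimal sketch, reusing the config dict and MNIST_CNN from your script:

# Compare the keys stored in the checkpoint with the keys the current
# model expects; load_state_dict() requires them to match exactly.
ckpt_path = os.path.join(config["output_dir"], config["model_name"])
saved_state = torch.load(ckpt_path, map_location="cpu")
print("keys in checkpoint:", list(saved_state.keys()))                          # linear1.weight, linear2.weight, ...
print("keys the model expects:", list(MNIST_CNN(config).state_dict().keys()))   # conv1.0.weight, ..., fnn weights

Note that after removing the nn.Sequential wrapper the expected keys become fnn.weight / fnn.bias rather than fnn.0.weight / fnn.0.bias, so an old checkpoint will not match either way; retraining and re-saving keeps the class definition and the checkpoint consistent.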