我正在尝试用 Pytorch 制作一个神经网络,但我的代码中出现了这个错误。
细节:我正在研究神经网络模型(卷积神经网络)并在没有参数的优化器中不断出现此错误。
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/var/folders/0d/g137kv8j52v02jnqm9ym8cf00000gn/T/ipykernel_83584/1244336557.py in <module>
38 lr = 0.001
39 model = CNN()
---> 40 history = fit(num_epochs, lr, model, train_dl, val_dl, opt_func)
/var/folders/0d/g137kv8j52v02jnqm9ym8cf00000gn/T/ipykernel_83584/1244336557.py in fit(epochs, lr, model, train_loader, validation_loader, opt_func)
16
17 history = []
---> 18 opt = opt_func(model.parameters(),lr)
19 for epoch in range(epochs):
20
~/opt/anaconda3/lib/python3.9/site-packages/torch/optim/adam.py in __init__(self, params, lr, betas, eps, weight_decay, amsgrad, foreach, maximize, capturable, differentiable, fused)
135 maximize=maximize, foreach=foreach, capturable=capturable,
136 differentiable=differentiable, fused=fused)
--> 137 super(Adam, self).__init__(params, defaults)
138
139 if fused:
~/opt/anaconda3/lib/python3.9/site-packages/torch/optim/optimizer.py in __init__(self, params, defaults)
59 param_groups = list(params)
60 if len(param_groups) == 0:
---> 61 raise ValueError("optimizer got an empty parameter list")
62 if not isinstance(param_groups[0], dict):
63 param_groups = [{'params': param_groups}]
ValueError: optimizer got an empty parameter list
代码:
import torch
import torchvision
from torchvision import transforms
from torchvision.datasets import ImageFolder
import os
import numpy as np
import shutil
import splitfolders
from matplotlib import pyplot as plt
from torchvision import datasets
import glob
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.autograd import Variable
import pathlib
from torch import optim
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.functional as F
#cross-entropy loss function
class CNN(nn.Module):
# def __init__(self,num_classes=6):
# super(CNN,self).__init__()
def training_step(self, batch):
images, labels = batch
out = self.images
loss = F.cross_entropy(out, labels)
return loss
def validation_step(self, batch):
images, labels = batch
out = self.images
loss = F.cross_entropy(out, labels)
acc = accuracy(out, labels)
return {'validation_loss': loss.detach(), 'validation_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['validation_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean()
batch_accs = [x['validation_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean()
return {'validation_loss': epoch_loss.item(), 'validation_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], train_loss: {:.4f}, validation_loss: {:.4f}, validation_acc: {:.4f}".format(
epoch, result['train_loss'], result['validation_loss'], result['validation_acc']))
class Conv(ImageClassificationBase):
#relu activation
def __init__(self):
super().__init__()
self.network = nn.Sequential(
nn.Conv2d(16, 32, kernel_size = 3, padding = 1),
nn.ReLU(),
nn.Conv2d(32,64, kernel_size = 3, stride = 1, padding = 1),
nn.ReLU(),
nn.MaxPool2d(2,2),
nn.Conv2d(64, 128, kernel_size = 3, stride = 1, padding = 1),
nn.ReLU(),
nn.Conv2d(128 ,256, kernel_size = 3, stride = 1, padding = 1),
nn.ReLU(),
nn.MaxPool2d(2,2),
nn.Conv2d(256, 256, kernel_size = 3, stride = 1, padding = 1),
nn.ReLU(),
nn.Conv2d(256,256, kernel_size = 3, stride = 1, padding = 1),
nn.ReLU(),
nn.MaxPool2d(2,2),
nn.Flatten(),
nn.Linear(82944,1024),
nn.ReLU(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512,6)
)
def forward(self, xb):
return self.network(xb)
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
@torch.no_grad()
def evaluate(model, validation_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in validation_loader]
return model.validation_epoch_end(outputs)
#fitting the model on training data and record the result after each epoch
def fit(epochs, lr, model, train_loader, validation_loader, opt_func = torch.optim.SGD):
history = []
opt = opt_func(model.parameters(),lr) #Error occurs Here
for epoch in range(epochs):
model.train()
train_losses = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
opt.step()
opt.zero_grad()
result = evaluate(model, validation_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
model.epoch_end(epoch, result)
history.append(result)
return history
num_epochs = 30
opt_func = optim.Adam
lr = 0.001
model = CNN()
history = fit(num_epochs, lr, model, train_dl, val_dl, opt_func)
我查看了其他答案,但它们似乎没有用。
就在前面,一些代码被剪掉了,这是导入后立即出现的代码:
#labels of data images
labels = ['Bicycle', 'Bus', 'Car', 'Motorcycle', 'NonVehicles', 'Taxi', 'Truck', 'Van'] #total labels
if os.path.exists('./train_test'):
#makes sure that file doesn't exist so it can be added with split data
print("file : output needs to be deleted in order to split data")
else:
#split into ratio of 8:2 with splitfolders
splitfolders.ratio("Vehicles", output="train_test", seed=1337, ratio=(.8,.0, .2), group_prefix=None, move=False)
#remove excessive validation file
shutil.rmtree('./train_test/val')
#training data
train_data_dir = "./train_test/train/"
#testing data
test_data_dir = "./train_test/test"
dataset = ImageFolder(train_data_dir,transform = transforms.Compose([
transforms.Resize((150,150)),transforms.ToTensor()
]))
test_dataset = ImageFolder(test_data_dir,transforms.Compose([
transforms.Resize((150,150)),transforms.ToTensor()
]))
train_data = dataset
validation_data = test_dataset
train_dl = DataLoader(train_data, batch_size, shuffle = True, num_workers = 4, pin_memory = True)
val_dl = DataLoader(validation_data, batch_size*2, num_workers = 4, pin_memory = True)