import torch
import torch.nn as nn
from torch import autograd

class ConvDQN(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(ConvDQN, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.conv = nn.Sequential(
            nn.Conv2d(self.input_dim, 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU()
        )
        self.fc_input_dim = self.feature_size()
        self.fc = nn.Sequential(
            nn.Linear(self.fc_input_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, self.output_dim)
        )

    def forward(self, state):
        features = self.conv(state)
        features = features.view(features.size(0), -1)
        qvals = self.fc(features)
        return qvals

    def feature_size(self):
        # pass a dummy tensor through the conv stack to get the flattened feature count
        return self.conv(autograd.Variable(torch.zeros(1, *self.input_dim))).view(1, -1).size(1)
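As I understand it, nn.Conv2d only accepts 4-dimensional input of shape (batch, channels, height, width). A minimal illustration of that requirement (the 84x84 size is arbitrary, not my data):

conv = nn.Conv2d(4, 32, kernel_size=8, stride=4)   # weight shape [32, 4, 8, 8]
out = conv(torch.zeros(1, 4, 84, 84))              # works: 4-D input -> torch.Size([1, 32, 20, 20])
# conv(torch.zeros(1, 4))                          # raises RuntimeError: 2-D input, the same error as below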
Running this code gives me the following error:
File "dqn.py", line 86, in __init__
self.fc_input_dim = self.feature_size()
File "dqn.py", line 105, in feature_size
return self.conv(autograd.Variable(torch.zeros(32, *self.input_dim))).view(1, -1).size(1)
File "C:\Users\ariji\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\ariji\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\container.py", line 92, in forward
input = module(input)
File "C:\Users\ariji\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\ariji\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\nn\modules\conv.py", line 320, in forward
self.padding, self.dilation, self.groups)
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [32, 4, 8, 8], but got 2-dimensional input of size [1, 4] instead
So I gather that the input I am passing to the convolutional network has the wrong number of dimensions. What I don't understand is how I am supposed to add the required dimensions to the input, or whether I should change something in the convolutional network instead.
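If the states were already image-shaped, I assume the missing batch dimension could simply be added with unsqueeze(0); the shapes below are placeholders, not my actual data:

state = torch.zeros(4, 84, 84)   # hypothetical single observation: (channels, height, width)
batched = state.unsqueeze(0)     # torch.Size([1, 4, 84, 84]), the 4-D shape Conv2d wants

But that only helps if the observation actually has a height and width to begin with.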
I initialize the convolutional DQN with the code shown above. The dummy tensor that feature_size() builds, torch.zeros(1, *self.input_dim), comes out as torch.Size([1, 4]), while the conv stack is initialized as nn.Sequential(nn.Conv2d(self.input_dim, 32, kernel_size=8, stride=4), ...), so its first layer receives a 2-dimensional tensor where it expects a 4-dimensional one.
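My best guess at a fix, sketched but not verified, is to treat input_dim as the full observation shape (channels, height, width) and use only its first entry as in_channels; the (4, 84, 84) shape and the class name ConvDQNFixed are assumptions for illustration, not values from my actual setup:

import torch
import torch.nn as nn

class ConvDQNFixed(nn.Module):
    def __init__(self, input_dim, output_dim):   # input_dim assumed to be (C, H, W), e.g. (4, 84, 84)
        super(ConvDQNFixed, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.conv = nn.Sequential(
            nn.Conv2d(self.input_dim[0], 32, kernel_size=8, stride=4),  # in_channels = C, not the whole shape
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU()
        )
        self.fc_input_dim = self.feature_size()
        self.fc = nn.Sequential(
            nn.Linear(self.fc_input_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, self.output_dim)
        )

    def feature_size(self):
        # torch.zeros(1, *(C, H, W)) is already 4-dimensional, so the conv stack accepts it
        with torch.no_grad():
            return self.conv(torch.zeros(1, *self.input_dim)).view(1, -1).size(1)

    def forward(self, state):
        features = self.conv(state)                      # state: (batch, C, H, W)
        features = features.view(features.size(0), -1)
        return self.fc(features)

Is that the right direction? And if the environment actually returns a flat 4-element state (the [1, 4] shape looks like something such as CartPole), would a convolutional network even apply here, or should I switch to a plain fully connected DQN?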