PyTorch Deep Learning


I am trying to train a model on data of size torch.Size([280652, 87]) with targets of size torch.Size([280652, 64]), splitting it into 80% training data and 20% test data. My code:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split

# split the data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2)
# convert to torch tensors
train = torch.tensor(X_train.values, dtype=torch.float32)
test = torch.tensor(X_test.values, dtype=torch.float32)
train_target = torch.tensor(y_train.values, dtype=torch.float32)
test_target = torch.tensor(y_test.values, dtype=torch.float32)
# model definition: initialization and forward propagation
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.fc1 = nn.Linear(87, 50)  # layer 1
        self.fc2 = nn.Linear(50, 64)  # layer 2
        self.relu = nn.ReLU()         # activation function

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# print tensor shapes
print(train.shape)
print(train_target.shape)
print(test.shape)
print(test_target.shape)
# training dataset and dataloader
train_dataset = TensorDataset(train, train_target)
train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True)
#train_dataloader = DataLoader(train_dataset, batch_size=len(train_dataset), shuffle=False)
test_dataset = TensorDataset(test, test_target)
test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=False)
#test_dataloader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False)
model = MyModel()
# optimizer (adjusts the weights), suited to large amounts of data
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()
print(train.shape)
print(train_target.shape)
print(test.shape)
print(test_target.shape)

#train = F.one_hot(train_target.to(torch.int64))

# Train the model
for epoch in range(10):
    for i, (inputs, labels) in enumerate(train_dataloader):
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels.float())
        loss.backward()
        optimizer.step()

        # Print the loss every 1000 iterations
        if i % 1000 == 0:
            print(train.shape)
            print(train_target.shape)
            print(test.shape)
            print(test_target.shape)
            print(f"Epoch {epoch+1}, Iteration {i+1}, Loss {loss.item():.4f}")
correct = 0
total = 0
with torch.no_grad():
    for inputs, labels in test_dataloader:
        outputs = model(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        predicted = torch.argmax(outputs, dim=1)
        #correct += (torch.argmax(predicted, dim=1) == labels).sum().item()
        #print(len(labels))
        #print(len(predicted))
        #print(predicted)
        #print(labels)
        correct += (predicted == labels).sum().item()

accuracy = 100 * correct / total
print(f"Test Accuracy: {accuracy:.2f}%")

The error occurs at the line correct += (predicted == labels).sum().item() with the message: The size of tensor a (19) must match the size of tensor b (64) at non-singleton dimension 1. I don't know where the 19 comes from; I thought the batch size shouldn't matter. Did I forget something, or is there a major error in my code?
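Tracing the shapes makes the message clearer: torch.argmax(outputs, dim=1) collapses predicted to shape [batch_size], while labels keeps its 64-wide shape [batch_size, 64]. The comparison only happens to broadcast when the batch size equals 64, so the first batch of a different size, here a final partial batch of 19 rows, raises the error. A minimal sketch reproducing the mismatch with made-up stand-in tensors (not the real data):

import torch

# stand-in for a final, partial test batch: 19 rows, 64-wide targets
outputs = torch.randn(19, 64)  # model output for the last batch
labels = torch.zeros(19, 64)   # targets still have shape [19, 64]

predicted = torch.argmax(outputs, dim=1)  # shape [19]
print(predicted.shape, labels.shape)      # torch.Size([19]) torch.Size([19, 64])

# (predicted == labels) would try to broadcast [19] against [19, 64]:
# the trailing dimensions are 19 vs 64, so PyTorch raises
# "The size of tensor a (19) must match the size of tensor b (64)"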

I tried adjusting the batch size and the layer sizes and tried several different approaches, but nothing seems to work.
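For reference, one way to make the comparison consistent, assuming the 64-wide targets are one-hot vectors, is to reduce the labels to class indices with argmax before comparing. A sketch of the adjusted evaluation loop, reusing model and test_dataloader from the code above:

correct = 0
total = 0
with torch.no_grad():
    for inputs, labels in test_dataloader:
        outputs = model(inputs)
        predicted = torch.argmax(outputs, dim=1)   # class indices, shape [batch]
        target_idx = torch.argmax(labels, dim=1)   # assumes one-hot targets, shape [batch]
        total += labels.size(0)
        correct += (predicted == target_idx).sum().item()

print(f"Test Accuracy: {100 * correct / total:.2f}%")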

python deep-learning pytorch