为什么 pytorch 中的权重不更新?

问题描述(投票:0 · 回答:0)

我是 pytorch 的新手,想设计一个简单的 nn。 然而,当运行下面的代码时,nn 似乎没有学到任何东西。

我用tensorflow设计了同样的nn,准确率很快收敛到1。 我做错了什么吗?

import numpy as np
import os
import torch
from torch import nn
from torch.utils.data import Dataset
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
from torch import optim
import torch.nn.functional as F


class HSVData(Dataset):
    """Dataset that loads (features, labels) from an .npz file and one-hot
    encodes the labels.

    Assumes the .npz archive holds exactly two arrays: features first,
    integer class labels second (the order of ``f.files``) -- TODO confirm
    against whatever wrote the file.
    """

    def __init__(self, path_to_data) -> None:
        # Everything is loaded eagerly into memory; fine for ~55k small rows.
        self.x, self.y = self._load_data(path_to_data)

    def _load_data(self, path_to_data):
        """Return (features, one-hot labels) as torch tensors."""
        with np.load(path_to_data) as f:
            fx, fy = f.files
            x, y = f[fx], f[fy]

            x = torch.from_numpy(x)
            y = torch.from_numpy(y)

            # Number of classes is inferred from the largest label present.
            y = self._to_categorical(y, torch.max(y).item() + 1)
            return x, y

    def _to_categorical(self, y, num_classes):
        """One-hot encode the integer label tensor ``y``.

        Fixed to stay in torch: the original indexed ``np.eye`` with a torch
        tensor, silently producing a numpy array, so ``self.x`` was a Tensor
        while ``self.y`` was an ndarray. float64 matches the dtype np.eye
        produced, so collated batches are unchanged.
        """
        # Index tensors must be int64 for torch advanced indexing.
        return torch.eye(num_classes, dtype=torch.float64)[y.long()]

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]


class NNModel(nn.Module):
    """Fully-connected classifier: four hidden ReLU layers of width 64,
    then a Linear projection and a Softmax over the class dimension.

    NOTE(review): a Softmax head paired with BCELoss (see caller) plus raw
    integer features in the thousands saturates easily -- presumably why
    training stalls; normalized inputs + CrossEntropyLoss on logits is the
    usual remedy. Confirm before changing, as callers expect probabilities.
    """

    def __init__(self, input_dim, output_dim) -> None:
        super().__init__()
        # Build the stack from consecutive width pairs; the layer order is
        # identical to writing the Sequential out by hand.
        widths = [input_dim, 64, 64, 64, 64]
        layers = []
        for w_in, w_out in zip(widths, widths[1:]):
            layers.append(nn.Linear(w_in, w_out))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(widths[-1], output_dim))
        layers.append(nn.Softmax(dim=1))
        self.nn_stack = nn.Sequential(*layers)

    def forward(self, x):
        # Inputs arrive as int32 tensors; Linear layers require float.
        return self.nn_stack(x.to(torch.float32))


def train(model, dataloader, loss_fn, optimizer):
    """Run one training epoch over ``dataloader``.

    Returns the mean batch loss (the original returned None, so this is
    backward compatible) to give callers a way to monitor convergence.
    The unused ``enumerate`` index was dropped.
    """
    model.train()
    total_loss = 0.0
    num_batches = 0
    for x, y in dataloader:
        pred = model(x)
        # Targets are float64 one-hot rows; the loss needs float32.
        loss = loss_fn(pred, y.to(torch.float32))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        num_batches += 1

    # Guard against an empty dataloader.
    return total_loss / num_batches if num_batches else 0.0


def val(model, dataloader):
    """Evaluate classification accuracy over ``dataloader``; prints and
    returns the percentage.

    Fixes vs. the original: ``correct = 0, 0`` bound a tuple, so the first
    ``correct += 1`` raised TypeError; ``argmax`` now takes ``dim=1`` so any
    batch size works (the original flattened and only worked at batch size
    1); the denominator is the sample count, not the batch count.
    """
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for x, y in dataloader:
            pred = model(x)
            # y rows are one-hot, so argmax recovers the class index.
            correct += (pred.argmax(dim=1) == y.argmax(dim=1)).sum().item()
            total += y.size(0)

    accuracy = 100 * correct / total if total else 0.0
    print(f"Accuracy: {accuracy}")
    return accuracy


# --- Script entry: load the data, train for a few epochs, report accuracy. ---
dataset = HSVData("data/5k_per_point.npz")

# Derive the 80/20 split from the actual dataset size instead of hard-coding
# [44000, 11000], which raised if the file ever changed size. For the
# expected 55000-row file the resulting split is identical.
n_val = len(dataset) // 5
train_data, val_data = torch.utils.data.random_split(
    dataset, [len(dataset) - n_val, n_val])

# NOTE(review): batch_size=1 makes training extremely slow; a larger batch
# (e.g. 64) is the usual choice.
train_dl = DataLoader(train_data, batch_size=1, shuffle=True)
val_dl = DataLoader(val_data, batch_size=1, shuffle=True)

model = NNModel(8, 11)
# NOTE(review): Softmax output + BCELoss on raw features in the 0..4095
# range is the likely reason the net does not learn -- the softmax saturates
# (see the near-one-hot prediction in the question) and gradients vanish.
# Normalizing inputs and using CrossEntropyLoss on logits (dropping the
# model's final Softmax) is the standard remedy -- confirm before changing.
loss_fn = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

epochs = 5
for e in range(epochs):
    print(f"Epoch {e+1}\n-------------------------------")
    train(model, train_dl, loss_fn, optimizer)
    val(model, val_dl)

提供更多信息:

特征向量:

tensor([0, 3739, 4095, 2243, 3077,  518, 2208,  939], dtype=torch.int32)

真实情况:

tensor([0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=torch.float64)

预测:

tensor([[5.3249e-44, 0.0000e+00, 6.5020e-16, 0.0000e+00, 1.4795e-11, 0.0000e+00,
         8.1166e-10, 1.0000e+00, 1.7702e-36, 0.0000e+00, 0.0000e+00]],
       grad_fn=<SoftmaxBackward0>)

--> 准确度一直停留在大约 8.xx% 不变——如果网络只是在随机猜测,这正是我所预期的结果。

machine-learning deep-learning pytorch
© www.soinside.com 2019 - 2024. All rights reserved.