RuntimeError: expected scalar type Long but found Float (PyTorch)

Question · 0 votes · 3 answers

I have tried many fixes, and I also used the sample code from function.py, but I still get the same error at the 'loss' line. How can I solve this?

My libraries


    import matplotlib.pyplot as plt
    import torch
    import torch.nn as nn
    import numpy as np
    import matplotlib
    import pandas as pd
    from torch.autograd import Variable
    from torch.utils.data import DataLoader,TensorDataset
    from sklearn.model_selection import train_test_split
    import warnings
    import os
    import torchvision
    import torchvision.datasets as dsets
    import torchvision.transforms as transforms

MNIST dataset



    
    train=pd.read_csv("train.csv",dtype=np.float32)
    
    
    targets_numpy = train.label.values
    features_numpy = train.loc[:,train.columns != "label"].values/255 # normalization
    
    
    features_train, features_test, targets_train, targets_test = train_test_split(features_numpy,
                                                                                 targets_numpy,test_size = 0.2,
                                                                                 random_state = 42) 
   
    featuresTrain=torch.from_numpy(features_train)
    targetsTrain=torch.from_numpy(targets_train)
    
    
    featuresTest=torch.from_numpy(features_test)
    targetsTest=torch.from_numpy(targets_test)     
    
    
    batch_size=100
    n_iterations=10000
    num_epochs=n_iterations/(len(features_train)/batch_size)
    num_epochs=int(num_epochs)
    
    
    train=torch.utils.data.TensorDataset(featuresTrain,targetsTrain) 
    test=torch.utils.data.TensorDataset(featuresTest,targetsTest)
    
    print(type(train))
    
    
    
    train_loader=DataLoader(train,batch_size=batch_size,shuffle=False)
    test_loader=DataLoader(test,batch_size=batch_size,shuffle=False)
    print(type(train_loader))
    
    plt.imshow(features_numpy[226].reshape(28,28))
    plt.axis("off")
    plt.title(str(targets_numpy[226]))
    plt.show()


Here is my model

  

    class ANNModel(nn.Module):  
      
      def __init__(self,input_dim,hidden_dim,output_dim):
        super(ANNModel,self).__init__()
    
        
        self.fc1=nn.Linear(input_dim,hidden_dim)
        
        self.relu1=nn.ReLU()
    
        
        self.fc2=nn.Linear(hidden_dim,hidden_dim)
        
        self.tanh2=nn.Tanh()
    
       
    
        
        self.fc4=nn.Linear(hidden_dim,output_dim)
    
      def forward(self,x): # the layers defined above are connected here
        
        out=self.fc1(x)
        
        out=self.relu1(out)
    
        
        out=self.fc2(out)
        
        out=self.tanh2(out)
    
       
        
    
        
        out=self.fc4(out)
        return out  
      
    input_dim=28*28
    hidden_dim=150  
    output_dim=10 
    
    
    
    model=ANNModel(input_dim,hidden_dim,output_dim)
    
    
    error=nn.CrossEntropyLoss()
    
    
    learning_rate=0.02
    optimizer=torch.optim.SGD(model.parameters(),lr=learning_rate)

Here is where the problem occurs

 

   
    count=0
    loss_list=[]
    iteration_list=[]
    accuracy_list = []
    for epoch in range(num_epochs):
      for i,(images,labels) in enumerate(train_loader):
        
        train=Variable(images.view(-1,28*28))
        labels=Variable(labels)
        #print(labels)
        #print(outputs)  
       
        optimizer.zero_grad()
    
        #forward propagation
        outputs=model(train)
    
       
       
        #outputs=torch.randn(784,10,requires_grad=True)
        ##labels=torch.randn(784,10).softmax(dim=1)
        loss=error(outputs,labels)
        
        
        
       
        loss.backward()
    
        
        optimizer.step()
        
        count+=1
         
        if count %50 ==0:
          
          
          correct=0
          total=0
          
         
          for images,labels in test_loader:
            test=Variable(images.view(-1,28*28))
    
            
            outputs=model(test)
    
            
            predicted=torch.max(outputs.data,1)[1] # logic??? (index of the max output = predicted class)
    
           
            total+= len(labels)
    
            
            correct+=(predicted==labels).sum()
    
          accuracy=100  *correct/float(total)
         
          loss_list.append(loss.data)
          iteration_list.append(count)
          accuracy_list.append(accuracy)
          if  count %500 ==0 :
           
           print('Iteration: {}  Loss: {}  Accuracy: {} %'.format(count, loss.data, accuracy))


This gives



    ---------------------------------------------------------------------------
    RuntimeError                              Traceback (most recent call last)
    <ipython-input-9-9e53988ad250> in <module>()
         26     #outputs=torch.randn(784,10,requires_grad=True)
         27     ##labels=torch.randn(784,10).softmax(dim=1)
    ---> 28     loss=error(outputs,labels)
         29 
         30 
    
    2 frames
    /usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
       2844     if size_average is not None or reduce is not None:
       2845         reduction = _Reduction.legacy_get_string(size_average, reduce)
    -> 2846     return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
       2847 
       2848 
    
    RuntimeError: expected scalar type Long but found Float

python machine-learning deep-learning pytorch artificial-intelligence
3 Answers

4 votes

It looks like the dtype of the tensor 'labels' is FloatTensor. However, nn.CrossEntropyLoss expects a target of type LongTensor. This means you should check the type of 'labels'. If that is the case, convert 'labels' from FloatTensor to LongTensor with the following:

loss=error(outputs,labels.long())
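
For context, here is a minimal, self-contained sketch (the shapes and tensor names are made up purely for illustration) showing how to check the target dtype and why the cast fixes the error:

    import torch
    import torch.nn as nn

    criterion = nn.CrossEntropyLoss()
    logits = torch.randn(100, 10)                  # e.g. a batch of 100 samples, 10 classes
    labels = torch.randint(0, 10, (100,)).float()  # float targets reproduce the error

    print(labels.dtype)                       # torch.float32 -> cross_entropy raises the RuntimeError
    loss = criterion(logits, labels.long())   # .long() casts to int64 class indices
    print(loss)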

0 votes
targetsTrain=torch.from_numpy(targets_train)
targetsTest=torch.from_numpy(targets_test)

You need to change these lines to the following:

targetsTrain=torch.from_numpy(targets_train).type(torch.LongTensor)#data type is long
targetsTest=torch.from_numpy(targets_test).type(torch.LongTensor)#data type is long

Then it will work correctly.
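
As a quick check (a sketch with dummy labels standing in for the real targets_train array), you can confirm the dtype before building the TensorDataset:

    import numpy as np
    import torch

    targets_train = np.array([3.0, 7.0, 1.0], dtype=np.float32)  # dummy stand-in for the real labels

    targetsTrain = torch.from_numpy(targets_train).type(torch.LongTensor)
    print(targetsTrain.dtype)  # torch.int64, which is what nn.CrossEntropyLoss expects

Note that .type(torch.LongTensor) and .long() are equivalent ways to cast to int64.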


0 votes

I ran into the same error below:

RuntimeError: expected scalar type Long but found Float

when I used CrossEntropyLoss() with a 1D tensor of size 1 and a 0D float tensor, as shown below:

import torch
from torch import nn

tensor1 = torch.tensor([7.2]) # Here
tensor2 = torch.tensor(1.5) # Here

cel = nn.CrossEntropyLoss()
cel(input=tensor1, target=tensor2) # Error

So I changed 1.5 to 0 for tensor2, and then it works as shown below. *Actually, this behavior was unexpected, so I reported it as a bug.

import torch
from torch import nn

tensor1 = torch.tensor([7.2])
tensor2 = torch.tensor(0) # Here

cel = nn.CrossEntropyLoss()
cel(input=tensor1, target=tensor2)
# tensor(0.)