每个时期后都会打印性能结果

问题描述 投票:0回答:1

我正在尝试计算模型性能。然而,当我想计算模型性能时,即使我是在汇总所有批次之后才计算训练集和测试集的总体结果,它也会在每个时期(epoch)后打印一次。我的理解是,即使数据被分成了多个批次,我也应该只得到一个精度值、一个召回率值等。如何才能为训练数据集和测试数据集各得到一个精度值、召回率值和其他性能指标(每项只是一个数字)?我的代码有什么问题?

def train():
    """Run one training epoch over ``train_loader`` (returns nothing)."""
    # NOTE(review): puts `model` into train mode but forwards through
    # `my_model` — confirm both names refer to the same network, otherwise
    # dropout/batch-norm stay in eval mode during training.
    model.train()

    for data in train_loader: 
         optimizer.zero_grad() 
         model_output = my_model(data) 
         loss = criterion(model_output, data.y)  
         loss.backward()  
         optimizer.step() 
def test(loader):
    """Evaluate the model on ``loader`` and return overall accuracy.

    NOTE(review): this is the asker's original code. Because this function
    also computes/prints precision, recall, the classification report, and
    draws plots — and it is called once per epoch from the loop below —
    those metrics are re-printed after every epoch. That is exactly the
    behaviour the question asks about; the metrics themselves ARE already
    aggregated over all batches before being computed.
    """
    output_predictions = []   # predicted class per sample, across all batches
    target = []               # ground-truth label per sample, across all batches
    model.eval()

    correct = 0

    for data in loader:
        model_output = my_model(data)
        predictions= model_output.argmax(dim=1)
        correct += predictions.eq(data.y).sum().item() 
        output_predictions.extend(predictions.tolist())
        target.extend((data.y).tolist())

    # Dataset-level accuracy (one number, aggregated over all batches).
    Acc = correct / len(loader.dataset)
    B_c = class_likelihood_ratios(target,output_predictions, labels =[0,1])
    C_R = classification_report(target,output_predictions, labels =[0,1])
    P = precision_score(target,output_predictions, labels =[0,1])
    R = recall_score(target,output_predictions, labels =[0,1])

    # Precision-recall curve for this call's predictions.
    fig, ax = plt.subplots()
    PrecisionRecallDisplay.from_predictions(target,output_predictions, ax=ax)
    plt.show()

    # Confusion matrix normalised to percentages, rendered as a heatmap.
    conf_m = confusion_matrix(target,output_predictions)
    sns.heatmap(conf_m/np.sum(conf_m), annot=True, fmt='.2%', xticklabels=[1,0], yticklabels=[1,0])
    plt.show()




    # Print the evaluation metrics — runs on every call, i.e. every epoch.
    print("Binary_classification_positive_and_negative", B_c)
    print(C_R)
    print(P)
    print(R)

    return Acc
# Asker's original driver: `test` is called twice per epoch, so all of the
# metric printing and plotting inside it happens once per epoch per split.
for epoch in range(1, 50):
    train()
    train_results_accuracy = test(train_loader)
    test_results_accuracy = test(test_loader)
    print(f'Epoch: {epoch:03d}, Train Acc: {train_results_accuracy:.4f}, Test Acc: {test_results_accuracy:.4f}')
python machine-learning deep-learning pytorch
1个回答
0
投票

为此,您可以将指标计算移出按时期(epoch)迭代的训练循环,让评估函数只收集并返回预测结果,等训练全部完成后再计算一次指标。

代码:

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from sklearn.metrics import (
    PrecisionRecallDisplay,
    class_likelihood_ratios,
    classification_report,
    confusion_matrix,
    precision_score,
    recall_score,
)

def train():
    """Perform one optimisation pass over every batch in ``train_loader``."""
    model.train()

    for batch in train_loader:
        optimizer.zero_grad()
        batch_output = my_model(batch)
        batch_loss = criterion(batch_output, batch.y)
        batch_loss.backward()
        optimizer.step()

def evaluate(loader):
    """Run the model over ``loader`` and collect per-sample results.

    Parameters:
        loader: a DataLoader whose batches carry ground-truth labels in ``.y``.

    Returns:
        tuple: ``(accuracy, target, output_predictions)`` where ``accuracy``
        is a single float over ``loader.dataset`` and the two lists hold the
        ground-truth and predicted labels aggregated across all batches.
    """
    output_predictions = []
    target = []
    # NOTE(review): sets `model` to eval mode but forwards through `my_model`
    # — confirm both names refer to the same network, otherwise eval mode
    # (dropout/batch-norm) is not actually applied.
    model.eval()

    correct = 0

    # Disable autograd during evaluation: no gradients are needed here, so
    # skipping graph construction saves memory and time without changing
    # any of the returned values.
    with torch.no_grad():
        for data in loader:
            model_output = my_model(data)
            predictions = model_output.argmax(dim=1)
            correct += predictions.eq(data.y).sum().item()
            output_predictions.extend(predictions.tolist())
            target.extend((data.y).tolist())

    Acc = correct / len(loader.dataset)

    return Acc, target, output_predictions

# Optimise for 49 epochs; metrics are deliberately NOT computed inside
# this loop, so nothing is printed per epoch.
for _epoch in range(1, 50):
    train()

# Evaluate exactly once, after training has completely finished.
train_results_accuracy, train_target, train_predictions = evaluate(train_loader)
test_results_accuracy, test_target, test_predictions = evaluate(test_loader)

# Report both accuracies, then the remaining train-set metrics, then plots.
print(f'Train Acc: {train_results_accuracy:.4f}, Test Acc: {test_results_accuracy:.4f}')

likelihood_ratios = class_likelihood_ratios(train_target, train_predictions, labels=[0, 1])
report = classification_report(train_target, train_predictions, labels=[0, 1])
precision = precision_score(train_target, train_predictions, labels=[0, 1])
recall = recall_score(train_target, train_predictions, labels=[0, 1])

print("Binary_classification_positive_and_negative", likelihood_ratios)
print(report)
print(precision)
print(recall)

# Precision-recall curve for the training-set predictions.
fig, ax = plt.subplots()
PrecisionRecallDisplay.from_predictions(train_target, train_predictions, ax=ax)
plt.show()

# Confusion matrix normalised to percentages, rendered as a heatmap.
cm = confusion_matrix(train_target, train_predictions)
sns.heatmap(cm / np.sum(cm), annot=True, fmt='.2%', xticklabels=[1, 0], yticklabels=[1, 0])
plt.show()

这样,性能指标将在完成训练后计算一次,而不是每个时期。

© www.soinside.com 2019 - 2024. All rights reserved.