Problem evaluating SGEMM with cuDNN

Problem description

I am using cuDNN to test SGEMM: C[stride x stride] = A[stride x stride] x B[stride x stride].

Setup

  • GPU: T1000 / SM_75
  • cuda-12.0.1 / driver-535 installed via the multiverse repository on ubuntu-24.04
  • cudnn-9.2.1 installed from the tarball

sgemm_cudnn_test.cu

#include <assert.h>
#include <cuda_runtime.h>
#include <cudnn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ONES(mat, stride) for (int i = 0; i < stride * stride; mat[i++] = 1)
#define ZEROS(mat, stride) for (int i = 0; i < stride * stride; mat[i++] = 0)
#define ASSERT(mat, stride) for (int i = 0; i < stride * stride; assert(mat[i++] == stride))

void checked_finalize(cudnnBackendDescriptor_t desc, const char *dname) {
    cudnnStatus_t status;
    if ((status = cudnnBackendFinalize(desc)) != CUDNN_STATUS_SUCCESS) {
        printf("Finalizing %s error: %s, exit!\n", dname, cudnnGetErrorString(status));
        exit(1);
    }
}

void run(int stride, bool manual, cudnnBackendHeurMode_t heurMode) {
    float *A; cudaMallocManaged(&A, sizeof(float) * stride * stride); ONES(A, stride);
    float *B; cudaMallocManaged(&B, sizeof(float) * stride * stride); ONES(B, stride);
    float *C; cudaMallocManaged(&C, sizeof(float) * stride * stride); ZEROS(C, stride);

    cudnnStatus_t status;
    cudnnHandle_t handle;
    if ((status = cudnnCreate(&handle)) != CUDNN_STATUS_SUCCESS) {
        printf("Creating handle error: %s, exit!\n", cudnnGetErrorString(status));
        exit(1);
    }

    cudnnDataType_t dtype = CUDNN_DATA_FLOAT;
    int64_t dim[] = {1, stride, stride}, strides[] = {stride * stride, stride, 1};
    int64_t alignment = 4;

    cudnnBackendDescriptor_t aDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, &aDesc);
    cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_DATA_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
    cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_DIMENSIONS, CUDNN_TYPE_INT64, 3, dim);
    cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_STRIDES, CUDNN_TYPE_INT64, 3, strides);
    int64_t aId = 'A'; cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_UNIQUE_ID, CUDNN_TYPE_INT64, 1, &aId);
    cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT, CUDNN_TYPE_INT64, 1, &alignment);
    checked_finalize(aDesc, "aDesc");

    cudnnBackendDescriptor_t bDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, &bDesc);
    cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_DATA_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
    cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_DIMENSIONS, CUDNN_TYPE_INT64, 3, dim);
    cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_STRIDES, CUDNN_TYPE_INT64, 3, strides);
    int64_t bId = 'B'; cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_UNIQUE_ID, CUDNN_TYPE_INT64, 1, &bId);
    cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT, CUDNN_TYPE_INT64, 1, &alignment);
    checked_finalize(bDesc, "bDesc");

    cudnnBackendDescriptor_t cDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, &cDesc);
    cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_DATA_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
    cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_DIMENSIONS, CUDNN_TYPE_INT64, 3, dim);
    cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_STRIDES, CUDNN_TYPE_INT64, 3, strides);
    int64_t cId = 'C'; cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_UNIQUE_ID, CUDNN_TYPE_INT64, 1, &cId);
    cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT, CUDNN_TYPE_INT64, 1, &alignment);
    checked_finalize(cDesc, "cDesc");

    cudnnBackendDescriptor_t matmulDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_MATMUL_DESCRIPTOR, &matmulDesc);
    cudnnBackendSetAttribute(matmulDesc, CUDNN_ATTR_MATMUL_COMP_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
    checked_finalize(matmulDesc, "matmulDesc");

    cudnnBackendDescriptor_t matmulOptDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR, &matmulOptDesc);
    cudnnBackendSetAttribute(matmulOptDesc, CUDNN_ATTR_OPERATION_MATMUL_DESC, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &matmulDesc);
    cudnnBackendSetAttribute(matmulOptDesc, CUDNN_ATTR_OPERATION_MATMUL_ADESC, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &aDesc);
    cudnnBackendSetAttribute(matmulOptDesc, CUDNN_ATTR_OPERATION_MATMUL_BDESC, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &bDesc);
    cudnnBackendSetAttribute(matmulOptDesc, CUDNN_ATTR_OPERATION_MATMUL_CDESC, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &cDesc);
    checked_finalize(matmulOptDesc, "matmulOptDesc");

    cudnnBackendDescriptor_t optGraphDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR, &optGraphDesc);
    cudnnBackendSetAttribute(optGraphDesc, CUDNN_ATTR_OPERATIONGRAPH_HANDLE, CUDNN_TYPE_HANDLE, 1, &handle);
    cudnnBackendSetAttribute(optGraphDesc, CUDNN_ATTR_OPERATIONGRAPH_OPS, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &matmulOptDesc);
    checked_finalize(optGraphDesc, "optGraphDesc");

    cudnnBackendDescriptor_t engineDesc, engineHeurDesc, engineCfgDesc;
    int64_t idx = 0, engineCfgDescCount;
    if (manual) {
        cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINE_DESCRIPTOR, &engineDesc);
        cudnnBackendSetAttribute(engineDesc, CUDNN_ATTR_ENGINE_OPERATION_GRAPH, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &optGraphDesc);
        cudnnBackendSetAttribute(engineDesc, CUDNN_ATTR_ENGINE_GLOBAL_INDEX, CUDNN_TYPE_INT64, 1, &idx);
        checked_finalize(engineDesc, "engineDesc");

        cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, &engineCfgDesc);
        cudnnBackendSetAttribute(engineCfgDesc, CUDNN_ATTR_ENGINECFG_ENGINE, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineDesc);
        checked_finalize(engineCfgDesc, "engineCfgDesc");
    } else {
        cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, &engineHeurDesc);
        cudnnBackendSetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &optGraphDesc);
        cudnnBackendSetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_MODE, CUDNN_TYPE_HEUR_MODE, 1, &heurMode);
        checked_finalize(engineHeurDesc, "engineHeurDesc");

        /* Line 93 */ status = cudnnBackendGetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_RESULTS, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineCfgDescCount, &engineCfgDesc);
        if (status != CUDNN_STATUS_SUCCESS) {
            printf("Getting engineCfgDesc error: %s, exit!\n", cudnnGetErrorString(status));
            exit(1);
        }
        if (!engineCfgDescCount) {
            printf("0 engineCfgDesc found, exit!");
            exit(1);
        }
    }

    cudnnBackendDescriptor_t executionPlanDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR, &executionPlanDesc);
    cudnnBackendSetAttribute(executionPlanDesc, CUDNN_ATTR_EXECUTION_PLAN_HANDLE, CUDNN_TYPE_HANDLE, 1, &handle);
    cudnnBackendSetAttribute(executionPlanDesc, CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineCfgDesc);
    checked_finalize(executionPlanDesc, "executionPlanDesc");

    cudnnBackendDescriptor_t varianPackDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR, &varianPackDesc);
    void *dataPtrs[] = {A, B, C}; cudnnBackendSetAttribute(varianPackDesc, CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS, CUDNN_TYPE_VOID_PTR, 3, dataPtrs);
    int64_t ids[] = {'A', 'B', 'C'}; cudnnBackendSetAttribute(varianPackDesc, CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS, CUDNN_TYPE_INT64, 3, ids);
    void *workspace; cudnnBackendSetAttribute(varianPackDesc, CUDNN_ATTR_VARIANT_PACK_WORKSPACE, CUDNN_TYPE_VOID_PTR, 1, &workspace);
    checked_finalize(varianPackDesc, "varianPackDesc");

    /* Line 115 */ if ((status = cudnnBackendExecute(handle, executionPlanDesc, varianPackDesc)) != CUDNN_STATUS_SUCCESS) {
        printf("Executing stride %d error: %s, exit!\n", stride, cudnnGetErrorString(status));
        exit(1);
    }

    cudaDeviceSynchronize();
    /* Line 121 */ ASSERT(C, stride);
    // printf("Executing stride %d OK.\n", stride);

    cudnnDestroy(handle);
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
}

int main(int argc, char **argv) {
    int stride;
    if (argc >= 2 && (stride = atoi(argv[argc - 1]))) {
        char *engine = argv[argc - 2];
        if (!strcmp(engine, "manual")) {
            run(stride, true, CUDNN_HEUR_MODE_FALLBACK);
            return 0;
        }
        if (!strcmp(engine, "heurA")) {
            run(stride, false, CUDNN_HEUR_MODE_A);
            return 0;
        }
        if (!strcmp(engine, "heurB")) {
            run(stride, false, CUDNN_HEUR_MODE_B);
            return 0;
        }
        if (!strcmp(engine, "fallback")) {
            run(stride, false, CUDNN_HEUR_MODE_FALLBACK);
            return 0;
        }
    }
    printf("Usage: ./sgemm_cudnn_test manual|heurA|heurB|fallback stride\n");
}
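One detail in the listing above that is easy to miss (my note, not part of the original post): the variant pack is finalized with a workspace pointer that is never allocated. A finalized execution plan reports the workspace it needs through CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE, so a minimal sketch of querying and allocating it, placed after checked_finalize(executionPlanDesc, ...) and before the variant pack is built, could look like this (variable names follow the listing above):

/* Ask the finalized execution plan how much workspace it needs. */
int64_t workspaceSize = 0, returned = 0;
cudnnBackendGetAttribute(executionPlanDesc, CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE,
                         CUDNN_TYPE_INT64, 1, &returned, &workspaceSize);

/* Allocate the workspace buffer (if any) and pass its pointer to the variant pack. */
void *workspace = NULL;
if (workspaceSize > 0) cudaMalloc(&workspace, workspaceSize);
/* Then set CUDNN_ATTR_VARIANT_PACK_WORKSPACE with &workspace as in the listing,
   and cudaFree(workspace) after cudnnBackendExecute has finished. */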

Tests

$ nvcc sgemm_cudnn_test.cu \
       -o sgemm_cudnn_test \
       -I $CUDNN_ROOT/include \
       -L $CUDNN_ROOT/lib \
       -l cudnn

$ echo "stride time     kernel"; \
  for stride in 1024 2048 4096 8192 16384; do \
      nvprof ./sgemm_cudnn_test manual $stride 2> \
          >(awk -v stride=$stride '/GPU/{printf "%-6s %s %s\n", stride, $7, $9}'); \
  done

stride time     kernel
1024   5.5512ms volta_sgemm_128x64_nn
2048   24.040ms volta_sgemm_128x64_nn
4096   218.24ms volta_sgemm_32x128_nn
8192   1.40703s volta_sgemm_64x64_nn
16384  6.24338s volta_sgemm_128x64_nn
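As a rough sanity check on these timings (my arithmetic, not part of the original post): an N x N SGEMM performs about 2·N^3 floating-point operations, so for stride 16384 that is 2 × 16384^3 ≈ 8.8e12 FLOPs in 6.24 s, i.e. roughly 1.4 TFLOP/s, which is in the right ballpark for a T1000's FP32 peak of about 2.5 TFLOP/s.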

$ ./sgemm_cudnn_test manual 256

Executing stride 256 error: CUDNN_STATUS_EXECUTION_FAILED_CUBLAS, exit!

$ ./sgemm_cudnn_test manual 512

Assertion failed.
Aborted (core dumped)

$ for heur in heurA heurB fallback; do \
      ./sgemm_cudnn_test $heur 1024; \
  done

Getting engineCfgDesc error: CUDNN_STATUS_BAD_PARAM, exit!
Getting engineCfgDesc error: CUDNN_STATUS_BAD_PARAM, exit!
Getting engineCfgDesc error: CUDNN_STATUS_BAD_PARAM, exit!

Questions (about the output above)

  • When the tests are configured manually with an engine descriptor, they work as expected for strides 1024, 2048, 4096, 8192 and 16384, but the cudnnBackendExecute call on line 115 fails for stride 256, and the assertion on line 121 fails for stride 512. I suspect the engine knobs should also be tuned manually, but I have not done so for lack of concrete guidelines (a knob-discovery sketch follows this list). Is that the right direction?

  • When the tests are configured with a heuristics engine descriptor, they fail for every heuristic mode at the cudnnBackendGetAttribute call on line 93. I have not yet figured out how to fix this. Can you help? Thanks!
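On the knob question in the first bullet, one possible way to find out what can be tuned is to query the finalized engine descriptor for its knob infos and then attach a knob choice to the engine config before finalizing it. The sketch below is my reading of the cuDNN backend API (descriptor, attribute and enum names such as CUDNN_ATTR_ENGINE_KNOB_INFOS, CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR and CUDNN_KNOB_TYPE_SPLIT_K come from cudnn_backend.h, not from the post), so treat it as a starting point rather than verified guidance; variable names follow the listing above:

/* Discover which knobs engine 0 exposes (error checking omitted). */
cudnnBackendDescriptor_t knobInfos[CUDNN_KNOB_TYPE_COUNTS];
for (int i = 0; i < CUDNN_KNOB_TYPE_COUNTS; i++)
    cudnnBackendCreateDescriptor(CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR, &knobInfos[i]);
int64_t knobCount = 0;
cudnnBackendGetAttribute(engineDesc, CUDNN_ATTR_ENGINE_KNOB_INFOS, CUDNN_TYPE_BACKEND_DESCRIPTOR,
                         CUDNN_KNOB_TYPE_COUNTS, &knobCount, knobInfos);
for (int64_t i = 0; i < knobCount; i++) {
    cudnnBackendKnobType_t type; int64_t n, minVal, maxVal;
    cudnnBackendGetAttribute(knobInfos[i], CUDNN_ATTR_KNOB_INFO_TYPE, CUDNN_TYPE_KNOB_TYPE, 1, &n, &type);
    cudnnBackendGetAttribute(knobInfos[i], CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE, CUDNN_TYPE_INT64, 1, &n, &minVal);
    cudnnBackendGetAttribute(knobInfos[i], CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE, CUDNN_TYPE_INT64, 1, &n, &maxVal);
    printf("knob type %d: range [%lld, %lld]\n", (int)type, (long long)minVal, (long long)maxVal);
}

/* Attach one knob choice to the engine config, i.e. between setting
   CUDNN_ATTR_ENGINECFG_ENGINE and checked_finalize(engineCfgDesc, ...) in the listing.
   The knob type and value here are hypothetical examples, not recommended settings. */
cudnnBackendDescriptor_t knobChoice;
cudnnBackendCreateDescriptor(CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR, &knobChoice);
cudnnBackendKnobType_t knobType = CUDNN_KNOB_TYPE_SPLIT_K;  /* pick a knob reported above */
int64_t knobValue = 1;                                      /* pick a value inside the reported range */
cudnnBackendSetAttribute(knobChoice, CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE, CUDNN_TYPE_KNOB_TYPE, 1, &knobType);
cudnnBackendSetAttribute(knobChoice, CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE, CUDNN_TYPE_INT64, 1, &knobValue);
checked_finalize(knobChoice, "knobChoice");
cudnnBackendSetAttribute(engineCfgDesc, CUDNN_ATTR_ENGINECFG_KNOB_CHOICES, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &knobChoice);
checked_finalize(engineCfgDesc, "engineCfgDesc");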

References

  • Use case examples
  • The matmul operation
  • cuDNN graph API
1 Answer
Finally, I managed to find and re-test a solution to the second problem. Since cudnnBackendGetAttribute(..., void *arrayOfElements) takes an opaque pointer to the elements being retrieved, the concrete descriptor has to be created first and only then passed by reference to the call; cuDNN fills in an already-created descriptor rather than allocating one for the caller. The relevant part is therefore updated as follows,

cudnnBackendDescriptor_t engineDesc, engineHeurDesc;
/* Line 77: instantiate engineCfgDesc before passing its reference to cudnnBackendGetAttribute */
cudnnBackendDescriptor_t engineCfgDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, &engineCfgDesc);
int64_t idx = 0, engineCfgDescCount;
if (manual) {
    cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINE_DESCRIPTOR, &engineDesc);
    cudnnBackendSetAttribute(engineDesc, CUDNN_ATTR_ENGINE_OPERATION_GRAPH, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &optGraphDesc);
    cudnnBackendSetAttribute(engineDesc, CUDNN_ATTR_ENGINE_GLOBAL_INDEX, CUDNN_TYPE_INT64, 1, &idx);
    checked_finalize(engineDesc, "engineDesc");

    cudnnBackendSetAttribute(engineCfgDesc, CUDNN_ATTR_ENGINECFG_ENGINE, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineDesc);
    checked_finalize(engineCfgDesc, "engineCfgDesc");
} else {
    cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, &engineHeurDesc);
    cudnnBackendSetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &optGraphDesc);
    cudnnBackendSetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_MODE, CUDNN_TYPE_HEUR_MODE, 1, &heurMode);
    checked_finalize(engineHeurDesc, "engineHeurDesc");

    status = cudnnBackendGetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_RESULTS, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineCfgDescCount, &engineCfgDesc);
    if (status != CUDNN_STATUS_SUCCESS) {
        printf("Getting engineCfgDesc error: %s, exit!\n", cudnnGetErrorString(status));
        exit(1);
    }
    if (!engineCfgDescCount) {
        printf("0 engineCfgDesc found, exit!");
        exit(1);
    }
}
However, the first problem still persists under every engine mode (manual|heurA|heurB|fallback): in the test cases described above, cuDNN does not handle the matrix multiplications for strides 256 and 512 correctly.
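As a side note (mine, not the original poster's): the same C = A x B with all-ones inputs can be cross-checked directly against cuBLAS, since with all-ones matrices every element of C must equal stride regardless of row- or column-major interpretation. A minimal, self-contained sketch (compile with -lcublas):

#include <assert.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>

/* Computes C = A * B for all-ones stride x stride matrices and checks every element equals stride. */
void sgemm_cublas_check(int stride) {
    float *A, *B, *C;
    cudaMallocManaged(&A, sizeof(float) * stride * stride);
    cudaMallocManaged(&B, sizeof(float) * stride * stride);
    cudaMallocManaged(&C, sizeof(float) * stride * stride);
    for (int i = 0; i < stride * stride; i++) { A[i] = 1.0f; B[i] = 1.0f; C[i] = 0.0f; }

    cublasHandle_t handle;
    cublasCreate(&handle);
    float alpha = 1.0f, beta = 0.0f;
    /* cuBLAS is column-major, but with all-ones inputs the layout does not change the result. */
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, stride, stride, stride,
                &alpha, A, stride, B, stride, &beta, C, stride);
    cudaDeviceSynchronize();

    for (int i = 0; i < stride * stride; i++) assert(C[i] == (float)stride);

    cublasDestroy(handle);
    cudaFree(A); cudaFree(B); cudaFree(C);
}

If this check passes for strides 256 and 512 on the same machine, the failures above are more likely in how the backend graph/plan is set up than in the underlying GPU or cuBLAS stack.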
