如何在答题卡中找到已填充选项的轮廓?

问题描述 投票:0回答:1

我想找到一个用铅笔填充的选项圈的轮廓,就像图中的那样,但是

findContours()
不能很好地工作。它总是找到错误的轮廓,甚至一个轮廓都找不到。

我要处理的图片:

The picture I want to deal with

我尝试过

findContours()
。我对opencv不熟悉,这是我知道的唯一方法。我也尝试过
HoughCircles
但是效果不太好。

我需要一个能找到这些选项轮廓的解决方案。

这是我的代码:

// Warps the quadrilateral described by InputPoints to an axis-aligned
// rectangle ("four point transform").
//
// InputPoints must hold exactly four corners in any order; they are sorted
// internally into top-left, top-right, bottom-right, bottom-left. The output
// size uses the longer of each pair of opposing edges so nothing is cropped.
//
// Throws std::invalid_argument if InputPoints does not contain 4 points
// (the original silently invoked undefined behavior in that case).
void CorrectPerspective(const cv::Mat& InputArray, cv::Mat& OutputArray, const std::vector<cv::Point2f>& InputPoints) {
    if (InputPoints.size() != 4) {
        throw std::invalid_argument("CorrectPerspective: exactly 4 points are required");
    }
    // Split into the two left-most and two right-most points.
    std::vector<cv::Point2f> xSorted = InputPoints;
    std::sort(
        xSorted.begin(),
        xSorted.end(),
        [](const cv::Point2f& a, const cv::Point2f& b) { return a.x < b.x; }
    );
    std::vector<cv::Point2f> left(xSorted.begin(), xSorted.begin() + 2);
    std::vector<cv::Point2f> right(xSorted.begin() + 2, xSorted.end());
    // The upper of the two left points is the top-left corner.
    std::sort(
        left.begin(),
        left.end(),
        [](const cv::Point2f& a, const cv::Point2f& b) { return a.y < b.y; }
    );
    const cv::Point2f topLeft = left[0];
    const cv::Point2f bottomLeft = left[1];
    // Of the right points, the one nearer to top-left is the top-right
    // corner; the farther one is the bottom-right corner.
    const double dNear = cv::norm(topLeft - right[0]);
    const double dFar = cv::norm(topLeft - right[1]);
    const cv::Point2f topRight = (dNear < dFar) ? right[0] : right[1];
    const cv::Point2f bottomRight = (dNear < dFar) ? right[1] : right[0];
    const std::vector<cv::Point2f> sourceVertex = {
        topLeft,
        topRight,
        bottomRight,
        bottomLeft
    };
    // cv::norm replaces the hand-rolled sqrt(pow(..)+pow(..)) distances.
    const int width = static_cast<int>(std::max(cv::norm(bottomRight - bottomLeft),
                                                cv::norm(topRight - topLeft)));
    const int height = static_cast<int>(std::max(cv::norm(topRight - bottomRight),
                                                 cv::norm(topLeft - bottomLeft)));
    const std::vector<cv::Point2f> targetVertex = {
        cv::Point2f(0.0f, 0.0f),
        cv::Point2f(static_cast<float>(width - 1), 0.0f),
        cv::Point2f(static_cast<float>(width - 1), static_cast<float>(height - 1)),
        cv::Point2f(0.0f, static_cast<float>(height - 1))
    };
    const cv::Mat transformMatrix = cv::getPerspectiveTransform(sourceVertex, targetVertex);
    cv::warpPerspective(InputArray, OutputArray, transformMatrix, cv::Size(width, height));
}

// Derives two images from a BGR input, both based on a 5x5 Gaussian-blurred
// grayscale copy:
//   OutputArray1 — Canny edge map (thresholds 50/200)
//   OutputArray2 — inverted binary image via Otsu's automatic threshold
void PreProcess(const cv::Mat& InputArray, cv::Mat& OutputArray1, cv::Mat& OutputArray2) {
    cv::Mat grayscale;
    cv::cvtColor(InputArray, grayscale, cv::COLOR_BGR2GRAY);

    cv::Mat smoothed;
    cv::GaussianBlur(grayscale, smoothed, cv::Size(5, 5), 0);

    cv::Mat edgeMap;
    cv::Canny(smoothed, edgeMap, 50, 200);
    OutputArray1 = edgeMap.clone();

    cv::threshold(smoothed, OutputArray2, 0, 255, cv::THRESH_BINARY_INV | cv::THRESH_OTSU);
}

// Entry point of the question's attempt:
// 1. Edge-detect the photo and find its external contours.
// 2. For any contour that approximates to 4 vertices, perspective-correct
//    both the original image and its Otsu-threshold image.
// 3. Re-run preprocessing on the warped sheet and keep contours whose
//    bounding boxes are large and roughly square (candidate option bubbles).
// NOTE(review): the posted snippet is truncated — main() is never closed.
int main() {
    cv::Mat img;
    img = cv::imread("I:\\ProjectArtemis\\OpenCVTest1\\testimg.png");


    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::Mat edged;
    cv::Mat thresh;
    PreProcess(img, edged, thresh);
    // RETR_EXTERNAL: only outermost contours — looking for the sheet border.
    cv::findContours(
        edged.clone(),
        contours,
        hierarchy,
        cv::RETR_EXTERNAL,
        cv::CHAIN_APPROX_SIMPLE
    );


    cv::Mat warped;
    cv::Mat warped_thresh;
    for (const auto& c : contours) {
        // Approximate with 1% of the perimeter as tolerance; a 4-vertex
        // result is treated as the sheet's quadrilateral.
        double peri = 0.01 * cv::arcLength(c, true);
        std::vector<cv::Point> approx;
        cv::approxPolyDP(c, approx, peri, true);

        if (approx.size() == 4) {
            std::vector<cv::Point2f> points;
            for (const auto& point : approx) {
                points.push_back(cv::Point2f(point.x, point.y));
            }
            // NOTE(review): every 4-vertex contour overwrites the previous
            // warp — only the last quadrilateral found is kept.
            CorrectPerspective(img, warped, points);
            CorrectPerspective(thresh, warped_thresh, points);
        }
    }


    cv::Mat opt;
    cv::Mat opt_thresh;
    // NOTE(review): warped is empty if no quadrilateral was found above.
    PreProcess(warped, opt, opt_thresh);    // it runs, so leave it alone for now
    std::vector<std::vector<cv::Point>> optionContours;
    std::vector<cv::Vec4i> optionHierarchy;
    cv::findContours(
        opt.clone(),
        optionContours,
        optionHierarchy,
        cv::RETR_EXTERNAL,
        cv::CHAIN_APPROX_SIMPLE
    );


    // Threshold image is made 3-channel so colored labels can be drawn on it.
    cv::cvtColor(warped_thresh, warped_thresh, cv::COLOR_GRAY2BGR);

    std::vector<std::vector<cv::Point>> options;
    std::vector<cv::Rect> boundingRects;
    for (size_t i = 0; i < optionContours.size(); i++) {
        cv::Rect bounding = cv::boundingRect(optionContours[i]);
        int x = bounding.x;
        int y = bounding.y;
        int w = bounding.width;
        int h = bounding.height;

        float ar = w / static_cast<float>(h);

        // Keep boxes at least 25px in both dimensions and close to square
        // (aspect ratio 0.6 .. 1.3) — candidate option bubbles.
        if (w >= 25 && h >= 25 && ar >= 0.6 && ar <= 1.3) {
            options.push_back(optionContours[i]);
            boundingRects.push_back(bounding);
            cv::putText(warped, std::to_string(i), cv::Point(x - 1, y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255), 2);
            cv::putText(warped_thresh, std::to_string(i), cv::Point(x - 1, y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255), 2);
        }
    }
c++ opencv image-processing computer-vision omr
1个回答
0
投票

您的问题似乎与学习练习或项目有关。以下提供另一种思路,帮助您以替代方式解决问题,可以得到如下的结果图像:enter image description here

如果您的输入图像具有这样的质量,我认为不需要透视校正。相反,您可以对找到的矩形进行排序并使用矩形坐标来计算结果。

// Produces two derived images from a BGR input. A grayscale copy is first
// blurred with a 5x5 Gaussian kernel; then:
//   OutputArray1 receives the Canny edge map (thresholds 50 and 200), and
//   OutputArray2 receives the inverted Otsu-thresholded binary image.
void PreProcess(const cv::Mat& InputArray, cv::Mat& OutputArray1, cv::Mat& OutputArray2) {
    cv::Mat gray, blurred, edges;

    cv::cvtColor(InputArray, gray, cv::COLOR_BGR2GRAY);
    cv::GaussianBlur(gray, blurred, cv::Size(5, 5), 0);

    cv::Canny(blurred, edges, 50, 200);
    OutputArray1 = edges.clone();

    cv::threshold(
        blurred,
        OutputArray2,
        0,
        255,
        cv::THRESH_BINARY_INV | cv::THRESH_OTSU
    );
}

int main() {
    cv::Mat img;
    img = cv::imread("c:\\github\\e86ytJOv.jpg",IMREAD_REDUCED_COLOR_2);


    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::Mat edged;
    cv::Mat thresh;
    PreProcess(img, edged, thresh);
    cv::findContours(
        edged.clone(),
        contours,
        hierarchy,
        cv::RETR_EXTERNAL,
        cv::CHAIN_APPROX_SIMPLE
    );

    for (const auto& c : contours) {
        if (c.size() > 40)
        {
            RotatedRect rr = minAreaRect(c);
            rr.angle = 0;
            float aspect_ratio = std::max(rr.size.height, rr.size.width) / std::min(rr.size.height, rr.size.width);
            if (aspect_ratio < 1.1)
            {
            Mat mpoints1;
            boxPoints(rr, mpoints1);
            mpoints1.convertTo(mpoints1, CV_32S);
            Rect r = rr.boundingRect();
            Mat box;
            dilate(img(r), box,Mat());
            Scalar rect_mean = mean(box);

            if ((rect_mean[0]+rect_mean[1]+rect_mean[2])/3 < 140)
                rectangle(img, r, Scalar(0, 0, 255), 2);
            else
                rectangle(img, r, Scalar(0, 255, 0), 1);
            }

        }
    }

    imshow("img", img);
    waitKey();
}
© www.soinside.com 2019 - 2024. All rights reserved.