
How to detect marked black regions inside the largest rectangle contour?

asked 2016-05-21 15:05:57 -0600 by redleon80

updated 2016-05-26 04:48:16 -0600


I can detect the largest contour of the answer sheet (20 questions, each with 4 alternatives).

After drawing the largest contour, what should I do? Divide the rectangle into a 20x4 grid of cells? Or find contours again, but this time inside the rectangle? I don't know what I need; I just want to find out which alternatives are marked.

 public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {

        return findLargestRectangle(inputFrame.rgba());
    }


    private Mat findLargestRectangle(Mat original_image) {
        Mat imgSource = original_image;
        hierarchy = new Mat();

        //convert the image to grayscale
        Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);

        //detect edges with Canny (8-bit edge map)
        Imgproc.Canny(imgSource, imgSource, 50, 50);

        //apply Gaussian blur to smooth the dotted edge lines
        Imgproc.GaussianBlur(imgSource, imgSource, new Size(5, 5), 5);

        //find the contours
        List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
        Imgproc.findContours(imgSource, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

        hierarchy.release();

        if (contours.isEmpty()) return imgSource; //nothing detected

        double maxArea = -1;
        int maxAreaIdx = -1;
        MatOfPoint temp_contour = contours.get(0); //placeholder; contours are in no particular order
        MatOfPoint2f approxCurve = new MatOfPoint2f();
        Mat largest_contour = contours.get(0);
        List<MatOfPoint> largest_contours = new ArrayList<MatOfPoint>();
        for (int idx = 0; idx < contours.size(); idx++) {
            temp_contour = contours.get(idx);
            double contourarea = Imgproc.contourArea(temp_contour);
            //compare this contour to the previous largest contour found
            if (contourarea > maxArea) {
                //check if this contour is a square
                MatOfPoint2f new_mat = new MatOfPoint2f( temp_contour.toArray() );
                int contourSize = (int)temp_contour.total();
                Imgproc.approxPolyDP(new_mat, approxCurve, contourSize*0.05, true);
                if (approxCurve.total() == 4) {
                    maxArea = contourarea;
                    maxAreaIdx = idx;
                    largest_contours.add(temp_contour);
                    largest_contour = temp_contour;
                }
            }
        }
        if (largest_contours.isEmpty()) return imgSource; //no quadrilateral found
        MatOfPoint temp_largest = largest_contours.get(largest_contours.size()-1);
        largest_contours = new ArrayList<MatOfPoint>();
        largest_contours.add(temp_largest);


        Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_GRAY2RGB); //back to 3 channels (GRAY2RGB, not Bayer) so the contour can be drawn in color
        Imgproc.drawContours(imgSource, contours, maxAreaIdx, new Scalar(0, 255, 0), 1);
        Log.d(TAG, "Largest Contour: " + contours.get(maxAreaIdx).toString());


        return imgSource;
    }

UPDATE 1: I want to thank @sturkmen for his answer. I can read and find the black regions now. Here is the Android code:

public View onCreateView(LayoutInflater inflater, ViewGroup container,
                         Bundle savedInstanceState) {
    View _view = inflater.inflate(R.layout.fragment_main, container, false);
    // Inflate the layout for this fragment


    Button btnTest = (Button) _view.findViewById(R.id.btnTest);
    btnTest.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {

            Mat img = Imgcodecs.imread(mediaStorageDir().getPath() + "/" + "test2.jpg");
            if (img.empty()) {
                Log.d("Fragment", "IMG EMPTY");
            }


            Mat gray = new Mat();
            Mat thresh = new Mat();

            //convert the image to grayscale
            Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);

            //binarize (inverted) with Otsu's threshold
            Imgproc.threshold(gray, thresh, 0, 255, Imgproc.THRESH_BINARY_INV + Imgproc.THRESH_OTSU);
            Mat temp = thresh.clone();
            //find the contours
            Mat hierarchy = new Mat();

            Mat corners = new Mat(4,1, CvType.CV_32FC2);
            List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
            Imgproc.findContours(temp, contours,hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
            hierarchy.release();

            for (int idx = 0; idx < contours.size(); idx++)
            {
                MatOfPoint contour = contours.get(idx);
                MatOfPoint2f contour_points = new MatOfPoint2f(contour.toArray());
                RotatedRect minRect = Imgproc.minAreaRect( contour_points );
                Point[] rect_points = new Point[4];
                minRect.points( rect_points );
                if(minRect.size.height > img.width() / 2)
                {
                    List<Point> srcPoints = new ArrayList<Point>(4);
                    srcPoints.add(rect_points[2]);
                    srcPoints.add(rect_points ...

Comments

Don't try to detect circles; look for black regions instead.

  1. Big Gaussian blur with a kernel of size 17 and sigma 8
  2. Threshold the image (thresh = 70)
  3. Bitwise not
  4. Connected components labeling
  5. The number of answers equals the number of connected components with size > 1500 (sketched just below)

Another way is to use a mask with a perfect grid that fits the answer grid and estimate the ink level in each answer cell.

LBerger ( 2016-05-22 09:42:37 -0600 )
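A minimal Java sketch of the counting pipeline described in the comment above, assuming the OpenCV 3.x Java bindings (where Imgproc.connectedComponentsWithStats is available); the file name is a placeholder, and the blur/threshold/area values are simply the ones quoted in the comment, not tuned here:

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class CountMarks {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat gray = Imgcodecs.imread("answer_sheet.jpg", Imgcodecs.IMREAD_GRAYSCALE);

        // 1. big Gaussian blur (kernel 17, sigma 8) so each mark becomes one solid blob
        Imgproc.GaussianBlur(gray, gray, new Size(17, 17), 8);

        // 2. fixed threshold at 70, then 3. invert so the dark marks become white
        Mat bw = new Mat();
        Imgproc.threshold(gray, bw, 70, 255, Imgproc.THRESH_BINARY);
        Core.bitwise_not(bw, bw);

        // 4. connected components; 5. count the blobs larger than ~1500 px
        Mat labels = new Mat(), stats = new Mat(), centroids = new Mat();
        int n = Imgproc.connectedComponentsWithStats(bw, labels, stats, centroids);
        int marks = 0;
        for (int i = 1; i < n; i++) {                       // label 0 is the background
            if (stats.get(i, Imgproc.CC_STAT_AREA)[0] > 1500) marks++;
        }
        System.out.println("marked answers: " + marks);
    }
}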

Thank you @LBerger. Your list describes the method steps, I guess. First I need to figure them out.

redleon80 ( 2016-05-23 14:45:06 -0600 )

3 answers


answered 2016-05-22 14:02:23 -0600 by sturkmen

updated 2016-05-23 22:44:59 -0600

I was intending to develop commercial OMR software. Now I'm shooting myself in the foot and sharing this code :)

I hope it will be helpful. (I will add some explanation about the code later.)

Test image (I edited your image so that it has an empty answer and an invalid double mark):

(test image)

Result image:

(result image)

EDIT

I updated the C++ code (a small bug is fixed).

A variable Size dims was added; by changing it, you can find marks on sheets with different grid dimensions.

(result images)

#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

using namespace cv;
using namespace std;

int main( int argc, const char** argv )
{
    Mat img = imread(argv[1]);
    if(img.empty())
    {
        return -1;
    }

    Size dims(20,5); // this variable should be changed according input
    Mat gray,thresh;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    threshold(gray, thresh, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);

    Mat quad(img.size(), CV_8UC1); // should be improved
    Mat results(img.size(), CV_8UC3);

    vector<Point2f> quad_pts;
    quad_pts.push_back(cv::Point2f(0, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, quad.rows));
    quad_pts.push_back(cv::Point2f(0, quad.rows));

    vector<Point2f> corners;
    vector<vector<Point> > contours;

    findContours(thresh.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    for( size_t i = 0; i< contours.size(); i++ )
    {
        RotatedRect minRect = minAreaRect( Mat(contours[i]) );

        // rotated rectangle
        Point2f rect_points[4];
        minRect.points( rect_points );

        if(Rect(minRect.boundingRect()).width > img.cols / 2) // should be improved
            for( int j = 0; j < 4; j++ )
            {
              Point2f pt = quad_pts[j];
              Point2f nearest_pt = rect_points[0];
              float dist = norm( pt - nearest_pt );
                for( int k = 1; k < 4; k++ )
                {
                 if( norm( pt - rect_points[k] ) < dist )
                 {
                   dist = norm( pt - rect_points[k] );
                   nearest_pt = rect_points[k];
                 }
                }
                corners.push_back( nearest_pt );
            }
    }

    erode(thresh,thresh,Mat(),Point(-1,-1), 10); // should be improved
    dilate(thresh,thresh,Mat(),Point(-1,-1), 5); // should be improved

    Mat transmtx = getPerspectiveTransform(corners, quad_pts);
    warpPerspective( img, results, transmtx, img.size()); // Create a Mat To Show results
    warpPerspective( thresh, quad, transmtx, img.size());

    resize(quad,quad,dims); // shrink so that each answer cell becomes exactly one pixel

    for(int i = 0; i < quad.cols; i++)
    {
        String answer = "";

        // row 0 is skipped; rows 1..4 correspond to options A..D of question i+1
        answer += quad.at<uchar>(1,i) == 0 ? "" : "A";
        answer += quad.at<uchar>(2,i) == 0 ? "" : "B";
        answer += quad.at<uchar>(3,i) == 0 ? "" : "C";
        answer += quad.at<uchar>(4,i) == 0 ? "" : "D";

        if( answer.length()  > 1 ) answer = "X"; // Double mark
        int y = 0;
        if( answer == "A" ) y = results.rows / dims.height;
        if( answer == "B" ) y = results.rows / dims.height *2;
        if( answer == "C" ) y = results.rows / dims.height *3;
        if( answer == "D" ) y = results.rows / dims.height *4;
        if( answer == "" ) answer = "[-]";
        putText( results, answer, Point( 50* i + 15, 30 + y), FONT_HERSHEY_PLAIN, 2, Scalar(0,0,255),2);

    }
    imshow( "results", results );
    waitKey(0);

    return 0;
}

As a challenge to myself, I tried to implement the main part in Java (newcomer copy-paste code).

Here is the result image (20x5):

Mat img = Imgcodecs.imread("test.jpg");
Mat gray = new Mat();
Mat thresh = new Mat();

//convert the image to grayscale
Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);

//binarize (inverted) with Otsu's threshold
Imgproc.threshold(gray, thresh, 0, 255, Imgproc.THRESH_BINARY_INV + Imgproc.THRESH_OTSU);
Mat temp ...

Comments

Thank you very much. I'm going to try it as soon as possible. @sturkmen

redleon80 ( 2016-05-23 14:45:31 -0600 )

I updated the C++ code (a small bug is fixed).

sturkmen ( 2016-05-23 22:45:52 -0600 )

I tried your Java code on Android and I'm getting your result image now. You used the Mat.at<uchar> method in your C++ code. Is there a Mat method in Java like this? How can I get the results as text: A, B, C, D, or empty column?

redleon80 ( 2016-05-26 04:36:10 -0600 )

I will try to update the Java code soon.

sturkmen ( 2016-05-26 06:52:38 -0600 )

@sturkmen I totally understand the algorithm thanks to your code. This is so helpful. It works on Android now. Thank you so much.

redleon80 ( 2016-05-27 16:40:32 -0600 )

You are welcome. If you improve the code, you can post it as an answer.

sturkmen ( 2016-05-27 16:54:21 -0600 )

Take a look at this post to find the Java equivalent of Mat.at<uchar>.

sturkmen ( 2016-05-27 17:25:57 -0600 )
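For reference, the Java bindings have no Mat.at<uchar>; the usual substitute is Mat.get(row, col), which returns a double[] with one value per channel. A tiny sketch, assuming quad is the resized single-channel 20x5 Mat from the answer above:

import org.opencv.core.Mat;

// Java counterpart of C++ "quad.at<uchar>(row, col) != 0"
static boolean isMarked(Mat quad, int row, int col) {
    double[] px = quad.get(row, col);   // e.g. [255.0] for a filled cell, [0.0] for an empty one
    return px[0] != 0;
}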

In this code I don't understand how you are generating the result image, where letters are printed on the filled circles. And after that, how do you decode the letters into a single string?

Rashmi ( 2017-01-02 06:31:25 -0600 )
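Regarding the comment above: in the C++ answer the letters are drawn onto the warped color image with putText (x = 50*i + 15, and a y offset that grows by one fifth of the image height per option), and the per-question letters can be collected into a single string with a loop like this Java sketch, which mirrors the C++ loop (quad is the resized 20x5 Mat, so each answer cell is one pixel):

StringBuilder sheet = new StringBuilder();
for (int i = 0; i < quad.cols(); i++) {
    String answer = "";
    if (quad.get(1, i)[0] != 0) answer += "A";   // rows 1..4 hold options A..D
    if (quad.get(2, i)[0] != 0) answer += "B";
    if (quad.get(3, i)[0] != 0) answer += "C";
    if (quad.get(4, i)[0] != 0) answer += "D";
    if (answer.length() > 1) answer = "X";       // invalid double mark
    if (answer.isEmpty())    answer = "-";       // empty column
    sheet.append(answer);
}
// sheet.toString() gives one character per question, e.g. "ABDX-CA..."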

answered 2016-05-25 14:16:14 -0600 by essamzaky

updated 2016-05-25 14:41:44 -0600

First I would like to thank @sturkmen, and I am waiting for him to explain his idea for OMR. I would also like to see other ideas from other guru members.

Now I would like to share another idea for solving OMR: I will use matchTemplate to solve the problem. The proposed idea works as follows:

  1. convert the input image to grayscale
  2. convert the grayscale image to black and white
  3. detect the skew angle
  4. correct the skew angle
  5. detect the area of the questions and answers in the scanned answer sheet
  6. match all the answers against predefined empty answers; if the confidence level is low, that means the answer is checked (a sketch of this step follows the code below)

Here is the code to do the task

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>

using namespace std;
using namespace cv;


int main( int, char** argv )
{
    //Start OMR application 
    //1-open the image under test as gray scale
    Mat greyMat = imread("C:\\temp\\14639435664447751.jpg",CV_LOAD_IMAGE_GRAYSCALE);

    //2-convert image from gray scale to black and white using adaptive thresholding
    Mat blackAndWhiteMat;
    adaptiveThreshold( greyMat, blackAndWhiteMat , 255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY_INV,13, 1 );
    imwrite("C:\\temp\\AdaptiveBW.jpg",blackAndWhiteMat);

    //3-detect the skew angle using HoughTrnasform
    double dAngle = 0.; 
    Size size = blackAndWhiteMat.size();
    vector<Vec4i> lines;
    HoughLinesP(blackAndWhiteMat, lines, 1, CV_PI/180, 100, size.width / 2.f, 20);  
    double ftan = 0.;
    double angle = 0.;
    unsigned nb_lines = lines.size();
    for (unsigned i = 0; i < nb_lines; ++i)
    {       
        ftan = (double)(lines[i][3] - lines[i][1]) / (double)(lines[i][2] - lines[i][0]) ;      
        angle += atan(ftan);
    }
    if (nb_lines > 0) angle /= nb_lines; // mean angle, in radians (guard against no detected lines)
    // convert angle from radians to degree
    dAngle = angle * 180 / CV_PI ;

    //4-Rotate the image to correct the skew angle  
    Mat DeskewedMat,DeskewedMatOrg;
    bool bKeepOldSize = false;//if true, keep the old image size; if false, calculate the new size
    Size ImgSize = size;
    Point2f pt(blackAndWhiteMat.cols/2., blackAndWhiteMat.rows/2.);    
    Mat r = getRotationMatrix2D(pt, dAngle, 1.0);
    //Calculate the new image size if required
    cv::Rect bbox = cv::RotatedRect(pt,blackAndWhiteMat.size(), dAngle).boundingRect();
    if(bKeepOldSize == false)
    {
        // adjust transformation matrix and destination matrix
        r.at<double>(0,2) += bbox.width/2.0 - pt.x;
        r.at<double>(1,2) += bbox.height/2.0 - pt.y;
        ImgSize = bbox.size();
        DeskewedMat.create(ImgSize,blackAndWhiteMat.type());        
    }
    warpAffine(blackAndWhiteMat, DeskewedMat, r, ImgSize,INTER_LANCZOS4,BORDER_CONSTANT,Scalar(255));
    bitwise_not(DeskewedMat,DeskewedMat);
    //original rotated image
    warpAffine(greyMat, DeskewedMatOrg, r, ImgSize,INTER_LANCZOS4,BORDER_CONSTANT,Scalar(255));
    imwrite("C:\\temp\\DeskewedBW.jpg",DeskewedMat);

    //5-Now find the exam questions header and the answer using template matching   
    Mat resultMat;//final matrix which will be used to show the selected answers
    cvtColor(DeskewedMatOrg,resultMat,CV_GRAY2BGR); 
    Mat TemplMat = imread("C:\\temp\\Questions Header Template.png",CV_LOAD_IMAGE_GRAYSCALE);
    Mat AnswersTemplMat = imread("C:\\temp\\Answers Template.png",CV_LOAD_IMAGE_GRAYSCALE);
    int res_width,res_height,nWidth,nTWidth,nHight,nTHight;
    Rect HeaderRect,AnswerRect;
    double      minval, maxval;
    Point       minloc, maxloc;
    nWidth = DeskewedMat.cols;
    nTWidth = TemplMat.cols;
    nHight = DeskewedMat.rows;
    nTHight = TemplMat.rows;
    res_width  = nWidth - nTWidth + 1;
    res_height = nHight - nTHight + 1;
    Mat res = Mat(res_height,res_width,CV_32FC1);
    matchTemplate(DeskewedMat,TemplMat,res,CV_TM_CCOEFF_NORMED);
    minMaxLoc( res, &minval, &maxval, &minloc ...
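The code above is cut off after the header matchTemplate call. As an illustration of step 6 in the list (not part of the original answer), here is a small sketch in Java, the asker's platform: each answer cell is compared against a template of an empty bubble, and a low correlation score is treated as "checked". The names cell and emptyBubble and the 0.6 cutoff are illustrative placeholders.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgproc.Imgproc;

// step 6, sketched: low similarity to an empty-bubble template => the bubble is filled
static boolean isChecked(Mat cell, Mat emptyBubble) {
    Mat score = new Mat();
    Imgproc.matchTemplate(cell, emptyBubble, score, Imgproc.TM_CCOEFF_NORMED);
    Core.MinMaxLocResult mm = Core.minMaxLoc(score);
    return mm.maxVal < 0.6;   // arbitrary example threshold; tune per sheet
}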

Comments


I will take a look at your code later, but +1 at first sight. Seeing different approaches to a problem is doubly helpful.

sturkmen ( 2016-05-25 14:36:07 -0600 )

Can I get this code in Python with OpenCV? I am not able to understand it.

Rashmi ( 2016-12-21 06:49:25 -0600 )

Hi @Rashmi, I do not know Python, but the C++ code is commented and the steps are explained. If you cannot understand a specific step, I can explain it in more detail.

essamzaky ( 2016-12-24 06:40:00 -0600 )

@essamzaky Does the Hough transform give the best result for deskewing an image?

Rashmi ( 2016-12-26 00:21:38 -0600 )

I am going to make software which can detect an image, check its alignment, and correct it. Do we require a reference image? What happens if the reference image is also not in the correct position? I want to clarify the concept of image alignment.

Rashmi ( 2016-12-26 00:25:03 -0600 )

There are a lot of methods that could be used to detect the skew; selecting the best one depends on your image and what is drawn inside it. HoughLines is good when there are lines drawn in your image. Here are some methods you can use, on their own or combined, to detect the skew (one of them is sketched right after this comment):

  1. You can use the OpenCV method called boundingRect
  2. You can use a histogram: rotate the image many times by many angles and find the angle which produces the histogram with the maximum peak values
  3. You can use PCA to detect the skew
  4. You can use findContours to find connected components, merge the regions which intersect vertically, then find the bounding rect for every merged group; the most repeated angle is the skew
  5. You can use template matching with features to detect object rotation

essamzaky ( 2016-12-26 04:00:38 -0600 )
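As an illustration of option 1 in the comment above (read as minAreaRect over all ink pixels, since an axis-aligned boundingRect alone carries no angle), a small Java sketch; bw is assumed to be a binary image with the ink as white pixels:

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.RotatedRect;
import org.opencv.imgproc.Imgproc;

// estimate the skew angle from the tightest rotated box around all ink pixels
static double estimateSkewDegrees(Mat bw) {
    Mat nonZero = new Mat();
    Core.findNonZero(bw, nonZero);                  // coordinates of all white pixels
    MatOfPoint2f pts = new MatOfPoint2f(new MatOfPoint(nonZero).toArray());
    RotatedRect box = Imgproc.minAreaRect(pts);
    double angle = box.angle;                       // OpenCV 3.x reports an angle in [-90, 0)
    if (angle < -45) angle += 90;                   // normalize to (-45, 45]
    return angle;                                   // feed into getRotationMatrix2D
}

This works best when the printed grid and the marks dominate the page; for sparse pages the histogram (projection-profile) approach in option 2 is usually more robust.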

answered 2016-05-27 17:50:21 -0600 by redleon80

updated 2016-05-27 17:51:58 -0600

I improved @sturkmen's code.

fragment_main.xml

<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context="{your package name}.FragmentMain">

<!-- TODO: Update blank fragment layout -->

<LinearLayout
    android:orientation="vertical"
    android:layout_width="match_parent"
    android:layout_height="match_parent">

    <Button
        android:id="@+id/btnTest"
        android:layout_width="match_parent"
        android:layout_height="80dp"
        android:text="Test" />

    <ImageView
        android:id="@+id/sampleImageView"
        android:layout_width="match_parent"
        android:layout_height="150dp"
        android:layout_centerHorizontal="true"/>
</LinearLayout>

</FrameLayout>

AndroidManifest.xml

Add this line for write permission.

<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />

FragmentMain.java

IMAGE FILE: place test.JPG under Internal Storage / Android / Data / (your package folder) /

public View onCreateView(LayoutInflater inflater, ViewGroup container,
                         Bundle savedInstanceState) {
    View _view = inflater.inflate(R.layout.fragment_main, container, false);

    Button btnTest = (Button) _view.findViewById(R.id.btnTest);
    btnTest.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {


            Mat img = Imgcodecs.imread(mediaStorageDir().getPath() + "/" + "test.JPG");
            if (img.empty()) {
                Log.d("FragmentMain", "Empty Image");
            }


            Size dims = new Size (20,5);
            Mat gray = new Mat();
            Mat thresh = new Mat();

            //convert the image to grayscale
            Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
            storeImage(gray);

            //binarize (inverted) with Otsu's threshold
            Imgproc.threshold(gray, thresh, 0, 255, Imgproc.THRESH_BINARY_INV + Imgproc.THRESH_OTSU);

            storeImage(thresh);

            Mat temp = thresh.clone();
            //find the contours
            Mat hierarchy = new Mat();

            Mat corners = new Mat(4,1, CvType.CV_32FC2);
            List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
            Imgproc.findContours(temp, contours,hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
            hierarchy.release();

            for (int idx = 0; idx < contours.size(); idx++)
            {
                MatOfPoint contour = contours.get(idx);
                MatOfPoint2f contour_points = new MatOfPoint2f(contour.toArray());
                RotatedRect minRect = Imgproc.minAreaRect( contour_points );
                Point[] rect_points = new Point[4];
                minRect.points( rect_points );
                if(minRect.size.height > img.width() / 2)
                {
                    List<Point> srcPoints = new ArrayList<Point>(4);
                    srcPoints.add(rect_points[2]);
                    srcPoints.add(rect_points[3]);
                    srcPoints.add(rect_points[0]);
                    srcPoints.add(rect_points[1]);

                    corners = Converters.vector_Point_to_Mat(
                            srcPoints, CvType.CV_32F);
                }

            }
            Imgproc.erode(thresh, thresh, new Mat(), new Point(-1,-1), 10);

            storeImage(thresh);
            Imgproc.dilate(thresh, thresh, new Mat(), new Point(-1,-1), 5);

            storeImage(thresh);

            Mat results = new Mat(1000,250,CvType.CV_8UC3);
            Mat quad = new Mat(1000,250,CvType.CV_8UC1);

            List<Point> dstPoints = new ArrayList<Point>(4);
            dstPoints.add(new Point(0, 0));
            dstPoints.add(new Point(1000, 0));
            dstPoints.add(new Point(1000, 250));
            dstPoints.add(new Point(0, 250));
            Mat quad_pts = Converters.vector_Point_to_Mat(
                    dstPoints, CvType.CV_32F);

            Mat transmtx = Imgproc.getPerspectiveTransform(corners, quad_pts);
            Imgproc.warpPerspective( img, results, transmtx, new Size(1000,250));
            Imgproc.warpPerspective( thresh, quad, transmtx, new Size(1000,250));

            Imgproc.resize(quad, quad, new Size(20,5));

            Imgcodecs.imwrite("results.png",quad);

            //store image
            storeImage(quad);

            //show image
            showImage(quad);


            System.out.println( quad.dump() );

            for(int i = 0; i < quad.cols(); i++)
            {
                int size = (int) (quad.total() * quad.channels());
                byte[] tmp = new byte[size];

                String answer = "";
                double[] d = new double[0];
                d = quad.get(1, i);
                answer += d[0] == 0 ? "" : "A";
                d = quad.get(2, i);
                answer += d[0] == 0 ? "" : "B";
                d = quad.get(3, i);
                answer ...
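The post is truncated here. Going by the C++ loop in the accepted answer, the rest presumably finishes the per-column decoding roughly like this (a sketch, not the author's original code):

                d = quad.get(4, i);
                answer += d[0] == 0 ? "" : "D";

                if (answer.length() > 1) answer = "X";    // invalid double mark
                if (answer.isEmpty())    answer = "[-]";  // empty column
                Log.d("FragmentMain", "Q" + (i + 1) + ": " + answer);
            }
        }
    });

    return _view;
}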
