Question

I found this code somewhere, I don't remember where. The problem is that it shows the picture and the detected face only after the second call of detectAndDisplay(). The author intentionally made this an infinite loop, and I just can't see why.

Any ideas?

Thanks for any help in advance.

The code :

 #include "opencv2/objdetect/objdetect.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/imgproc/imgproc.hpp"

    #include <iostream>
    #include <stdio.h>

    using namespace std;
    using namespace cv;

    // Function Headers
    void detectAndDisplay(Mat frame);

    // Global variables
    // Copy this file from opencv/data/haarcascades to target folder
    string face_cascade_name = "c:/haarcascade_frontalface_alt.xml";
    CascadeClassifier face_cascade;
    string window_name = "Capture - Face detection";
    int filenumber; // Number of file to be saved
    string filename;

    // Function main
    int main(void)
    {
        // Load the cascade
        if (!face_cascade.load(face_cascade_name))
        {
            printf("--(!)Error loading\n");
            int ch = std::cin.get();
            return (-1);
        };

        // Read the image file
        Mat frame = imread("C:/lena.jpg");

        for (;;) // -----> here: why an infinite loop? And why does it only work on the second pass of this loop?
        {
            // Apply the classifier to the frame
            if (!frame.empty())
            {
                detectAndDisplay(frame);
            }
            else
            {
                printf(" --(!) No captured frame -- Break!");
                int ch = std::cin.get();
                break;
            }

            int c = waitKey(10);

            if (27 == char(c))
            {
                break;
            }
        }

        return 0;
    }

    // Function detectAndDisplay
    void detectAndDisplay(Mat frame)
    {
        std::vector<Rect> faces;
        Mat frame_gray;
        Mat crop;
        Mat res;
        Mat gray;
        string text;
        stringstream sstm;

        cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
        equalizeHist(frame_gray, frame_gray);

    // Detect faces
        face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));

    // Set Region of Interest
        cv::Rect roi_b;
        cv::Rect roi_c;

        size_t ic = 0; // ic is index of current element
        int ac = 0; // ac is area of current element

        size_t ib = 0; // ib is index of biggest element
        int ab = 0; // ab is area of biggest element

        for (ic = 0; ic < faces.size(); ic++) // Iterate through all current elements (detected faces)

        {
            roi_c.x = faces[ic].x;
            roi_c.y = faces[ic].y;
            roi_c.width = (faces[ic].width);
            roi_c.height = (faces[ic].height);

            ac = roi_c.width * roi_c.height; // Get the area of current element (detected face)

            roi_b.x = faces[ib].x;
            roi_b.y = faces[ib].y;
            roi_b.width = (faces[ib].width);
            roi_b.height = (faces[ib].height);

            ab = roi_b.width * roi_b.height; // Get the area of biggest element, at beginning it is same as "current" element

            if (ac > ab)
            {
                ib = ic;
                roi_b.x = faces[ib].x;
                roi_b.y = faces[ib].y;
                roi_b.width = (faces[ib].width);
                roi_b.height = (faces[ib].height);
            }

            crop = frame(roi_b);
            resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR); // This will be needed later while saving images
            cvtColor(crop, gray, COLOR_BGR2GRAY); // Convert cropped image to grayscale

            // Form a filename
            filename = "";
            stringstream ssfn;
            ssfn << filenumber << ".png";
            filename = ssfn.str();
            filenumber++;

            imwrite(filename, gray);

            Point pt1(faces[ic].x, faces[ic].y); // Display detected faces on main window - live stream from camera
            Point pt2((faces[ic].x + faces[ic].width), (faces[ic].y + faces[ic].height));
            rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
        }

    // Show image
        sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
        text = sstm.str();

        putText(frame, text, cvPoint(30, 30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 255), 1, CV_AA);
        imshow("original", frame);

        if (!crop.empty())
        {
            imshow("detected", crop);
        }
        else
            destroyWindow("detected");
    }

Addition:

Thanks to Mhd.Tahawi, I also found this code, which again contains a while(true) loop. I just can't see why.

Moreover, what if I want to detect faces in a sequence of images, or in a video? Would I then be stuck at the first image?

Output After the First Call: (screenshot)

Output After the Second Call (working): (screenshot)

No correct solution

OTHER TIPS

Waiting for user input is not the only thing waitKey does.

Calling waitKey is essential even if you don't care about keyboard input.

From the OpenCV documentation:

This function is the only method in HighGUI that can fetch and handle events, so it needs to be called periodically for normal event processing unless HighGUI is used within an environment that takes care of event processing

In your code:

imshow("original", frame);

imshow() is a HighGUI function, and the code needs to call waitKey regularly so that HighGUI can run its event loop. If you don't call waitKey, HighGUI cannot process window events such as redrawing.

So, basically, by calling waitKey you are allowing HighGUI to process its window events.
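
For illustration, a minimal sketch of that point, using the question's example image path: imshow() only hands the frame to the window, and the actual drawing happens while waitKey() runs the HighGUI event loop.

    #include "opencv2/highgui/highgui.hpp"

    using namespace cv;

    int main()
    {
        Mat img = imread("C:/lena.jpg");
        if (img.empty())
            return -1;

        imshow("original", img); // nothing is painted yet
        waitKey(0);              // the event loop runs here: the window is drawn,
                                 // and the call returns on the first key press
        return 0;
    }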

It is just because the author wants the program to keep running until it receives a key press from the user telling it to end, that's all.

The program is intended to process a live video stream; that is why the loop is there. The loop doesn't have anything to do with the actual face detection.

You can take the code out of the loop and process one single image with it, and it would still work the same way.
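
As a rough sketch of that single-image variant, reusing detectAndDisplay() and the globals from the question (same example paths), the loop can be replaced by a single blocking waitKey(0):

    int main(void)
    {
        if (!face_cascade.load(face_cascade_name))
        {
            printf("--(!)Error loading\n");
            return -1;
        }

        Mat frame = imread("C:/lena.jpg");
        if (frame.empty())
        {
            printf(" --(!) No captured frame\n");
            return -1;
        }

        detectAndDisplay(frame); // draws into the "original" and "detected" windows
        waitKey(0);              // one blocking call pumps the HighGUI event loop
                                 // and waits for any key before the program exits
        return 0;
    }

The only real difference from the question's main() is that waitKey(0) blocks until a key is pressed instead of polling every 10 ms.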

EDIT: I had the same scenario, but I was working with a webcam rather than reading from a file. The hardware was too slow to start, capture a frame, and send it to my program for processing. This is the code I had in my case:

    VideoCapture capture;
    Mat frame;
    capture.open( 0 );

    if( capture.isOpened() )
    {
        for(;;)
        {
            capture >> frame;

            if( !frame.empty() )
            {
                detectAndDisplay( frame );
            }

            int c = waitKey(5);
            if( (char)c == 'c' ) { break; }
        }
    }
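
Regarding the follow-up question: the same loop also works for a video file, because VideoCapture can open a file path instead of camera index 0 (for example capture.open("C:/myvideo.avi")). For a plain sequence of still images you can loop over the file names yourself; a rough sketch, with made-up names:

    // Run the same detection over 0.png, 1.png, ... (assumed file names)
    // until a file is missing or Esc is pressed.
    for (int i = 0; ; i++)
    {
        stringstream ss;
        ss << "C:/" << i << ".png";

        Mat frame = imread(ss.str());
        if (frame.empty())
            break;                  // stop at the first missing file

        detectAndDisplay(frame);

        if ((char)waitKey(0) == 27) // wait for a key between images; Esc stops early
            break;
    }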
Licensed under: CC-BY-SA with attribution
Not affiliated with StackOverflow