Object tracking lag problem

Hi, I am using this code to track 4 colored objects. I get video from 8 PS3 Eye cameras. When I comment out my trackFilteredObject line there is no lag, but when this code runs I get a lot of latency. I can't understand why this happens: normally my CPU usage is ~15% and RAM usage is 6.3 GB / 15 GB (~40%), and while running this code CPU usage is only ~20-23% and RAM usage 6.4 GB, so I don't think it is a CPU/RAM performance problem. What am I doing wrong?

Video: https://www.youtube.com/watch?v=_BKtJpPrkO4 (You can see the lag in the first 10 seconds; after 10 seconds I comment out the tracking code.)
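
To see where the time actually goes, one option would be to time each stage of the capture loop below with OpenCV's tick counter. A minimal sketch (the tic/tocMs helper names are only illustrative, not part of the code above):

    #include <opencv2/core/core.hpp>
    #include <iostream>

    // Illustrative timing helpers: measure one stage of the pipeline in milliseconds.
    static double tic() { return (double)cv::getTickCount(); }
    static double tocMs(double t0)
    {
        return ((double)cv::getTickCount() - t0) / cv::getTickFrequency() * 1000.0;
    }

    // Example use inside the capture loop:
    //   double t = tic();
    //   trackFilteredObject(blue, threshold, HSV, imgOriginal);
    //   std::cout << "tracking: " << tocMs(t) << " ms" << std::endl;

Wrapping the cvtColor/inRange stage and each trackFilteredObject call separately like this should show which stage eats the frame budget.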

Note: kameraSayisi means camera count.

My Track Function:

void trackFilteredObject(Object theObject, Mat threshold, Mat HSV, Mat &cameraFeed){
    const int FRAME_WIDTH = 5120;
    const int FRAME_HEIGHT = 480;
    //max number of objects to be detected in frame
    const int MAX_NUM_OBJECTS = 50;
    //minimum and maximum object area
    const int MIN_OBJECT_AREA = 10*10;
    const int MAX_OBJECT_AREA = FRAME_HEIGHT*FRAME_WIDTH/1.5;
    vector<Object> objects;

    Mat temp;
    threshold.copyTo(temp);
    //these two vectors needed for output of findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    //find contours of filtered image using openCV findContours function
    findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
    //use moments method to find our filtered object
    double refArea = 0;
    bool objectFound = false;
    if (hierarchy.size() > 0) {
        int numObjects = hierarchy.size();
        //if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
        if(numObjects<MAX_NUM_OBJECTS){
            for (int index = 0; index >= 0; index = hierarchy[index][0]) {

                Moments moment = moments((cv::Mat)contours[index]);
                double area = moment.m00;

                //if the area is less than 20px by 20px then it is probably just noise
                //if the area is around 2/3 of the image size, it is probably just a bad filter
                //we only want the object with the largest area, so we save a reference area each
                //iteration and compare it to the area in the next iteration.
                if(area>MIN_OBJECT_AREA){

                    Object object;

                    object.setXPos(moment.m10/area);
                    object.setYPos(moment.m01/area);
                    object.setType(theObject.getType());
                    object.setColor(theObject.getColor());

                    objects.push_back(object);

                    objectFound = true;

                }else objectFound = false;
            }
            //let user know you found an object
            if(objectFound ==true){
                //draw object location on screen
                drawObject(objects,cameraFeed,temp,contours,hierarchy);}

        }else putText(cameraFeed,"TOO MUCH NOISE! ADJUST FILTER",Point(0,50),1,2,Scalar(0,0,255),2);
    }       
}

My Main Code:

    void Run()
    {


        int w, h;

        _fps = 30;
        IplImage *pCapImage[kameraSayisi];
        IplImage *pDisplayImage;
        PBYTE pCapBuffer = NULL;
        // Create camera instance
        for(int i = 0; i < kameraSayisi; i++)
        {
            _cam[i] = CLEyeCreateCamera(_cameraGUID[i], _mode, _resolution, _fps);
            if(_cam[i] == NULL) return;
            // Get camera frame dimensions
            CLEyeCameraGetFrameDimensions(_cam[i], w, h);
            // Create the OpenCV images
            pCapImage[i] = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);

            // Set some camera parameters
            CLEyeSetCameraParameter(_cam[i], CLEYE_GAIN, 0);
            CLEyeSetCameraParameter(_cam[i], CLEYE_EXPOSURE, 511);

            // Start capturing
            CLEyeCameraStart(_cam[i]);


        }
        pDisplayImage = cvCreateImage(cvSize(w*kameraSayisi / 2, h * kameraSayisi/4 ), IPL_DEPTH_8U  ,1);

        if(_cam == NULL)        return;

        int iLastX = -1;
        int iLastY = -1;

        //Capture a temporary image from the camera
        bool trackObjects = true;
        bool useMorphOps = true;

        Mat HSV;
        //Create a black image with the same size as the camera output
        Mat imgLines;
        // imgLines = Mat::zeros( cvarrToMat(image).size(), CV_8UC3 );
        Mat threshold;
        //x and y values for the location of the object
        int x = 0, y = 0;
        bool calibrationMode = false;
        if(calibrationMode){
            //create slider bars for HSV filtering
            createTrackbars();
        }

        // image capturing loop
        while(_running)
        {

            PBYTE pCapBuffer;
            // Capture camera images
            for(int i = 0; i < kameraSayisi; i++)
            {
                cvGetImageRawData(pCapImage[i], &pCapBuffer);
                CLEyeCameraGetFrame(_cam[i], pCapBuffer, (i==0)?2000:0);

            }

            // Tile the camera images into the combined display image (4 per row)
            for(int i = 0; i < kameraSayisi; i++)
            {
                cvSetImageROI(pDisplayImage, cvRect(w * (i%4), i/4 * h, w, h));
                cvCopy(pCapImage[i], pDisplayImage);
            }
            cvResetImageROI(pDisplayImage);

            Mat imgOriginal;
            Mat imgConverted = cvarrToMat(pDisplayImage);
            if(calibrationMode == true)
            {
                //in calibration mode, track objects based on the HSV slider values
                //so the appropriate color range values can be found
                //(calibrationMode must be false for normal tracking)
                //cvtColor(imgOriginal,imgOriginal,CV_BayerRG2RGB);
                cvtColor(imgConverted,imgOriginal,CV_BayerGB2BGR);
                cvtColor(imgOriginal,HSV,CV_BGR2HSV);
                inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
                morphOps(threshold);
                imshow(_windowName + 'T',threshold);

                //the following is for Canny edge detection
                /// Create a matrix of the same type and size as src (for dst)
                dst.create( imgOriginal.size(), src.type() );
                /// Convert the image to grayscale
                cvtColor( imgOriginal, src_gray, CV_BGR2GRAY );
                /// Create a window
                namedWindow( window_name, CV_WINDOW_AUTOSIZE );
                /// Create a Trackbar for user to enter threshold
                //  createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
                /// Show the image
                Object a = Object(H_MIN,S_MIN,V_MIN,H_MAX,S_MAX,V_MAX);
                trackFilteredObject(a,threshold,HSV,imgOriginal);
            }

            else{
                //create the color objects so we can use their member functions/information
                Object blue("blue"), yellow("yellow"), red("red"), orange("orange"), white("white");
                cvtColor(imgConverted,imgOriginal,CV_BayerGB2BGR);
                //first find blue objects
                cvtColor(imgOriginal,HSV,CV_RGB2HSV);
                inRange(HSV,blue.getHSVmin(),blue.getHSVmax(),threshold);
                morphOps(threshold);
                //then yellows
                inRange(HSV,yellow.getHSVmin(),yellow.getHSVmax(),threshold);
                //then reds
                inRange(HSV,red.getHSVmin(),red.getHSVmax(),threshold);
                //then white
                inRange(HSV,white.getHSVmin(),white.getHSVmax(),threshold);
                //then orange
                inRange(HSV,orange.getHSVmin(),orange.getHSVmax(),threshold);

                trackFilteredObject(yellow,threshold,HSV,imgOriginal);
                trackFilteredObject(white,threshold,HSV,imgOriginal);
                trackFilteredObject(red,threshold,HSV,imgOriginal);
                trackFilteredObject(blue,threshold,HSV,imgOriginal);
                trackFilteredObject(orange,threshold,HSV,imgOriginal);
            }
            //wait 30 ms so that the screen can refresh;
            //the image will not appear without this waitKey() call
            if (cvWaitKey(30) == 27) //if 'esc' is pressed within 30 ms, break the loop
            {
                cout << "esc key is pressed by user" << endl;
                break;
            }

        //  cvShowImage(_windowName, image);
            imshow(_windowName,imgOriginal);
        }
        for(int i = 0; i < kameraSayisi; i++)
        {
            // Stop camera capture
            CLEyeCameraStop(_cam[i]);
            // Destroy camera object
            CLEyeDestroyCamera(_cam[i]);
            // Destroy the allocated OpenCV image
            cvReleaseImage(&pCapImage[i]);
            _cam[i] = NULL;
        }

    }