Multithreading a position tracking function in OpenCV C++
I am reading 4 video files with OpenCV in C++, applying red and green color position tracking to each video to get the coordinates of the red and green LEDs, and then running some further, more complex analysis on the extracted coordinates. So far I have been doing this sequentially, as shown in the code below. I would like to speed it up with multi-threading and have read about TBB and std::thread, but I am fairly new to multi-threading. Can someone suggest how to go about this with either TBB or std::thread? A rough sketch of what I have in mind follows the code.
#include <iostream>
#include <fstream>
#include <string>
#include <cstdio>
#include "opencv2/opencv_modules.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/timelapsers.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#include "opencv2/imgproc.hpp"
#include <numeric>
#include <vector>
#include <thread>

#define ENABLE_LOG 1
#define LOG(msg) std::cout << msg
#define LOGLN(msg) std::cout << msg << std::endl

using namespace std;
using namespace cv;
vector<Point2f> getLedCoordinates(Mat frame)
{
    Point2f redLedPos = Point2f(-1, -1);
    Point2f greenLedPos = Point2f(-1, -1);
    vector<Point2f> ledPos;
    Mat thresholdedImage;

    // Threshold the input frame
    threshold(frame, thresholdedImage, 160, 255, THRESH_BINARY);

    // Remove small noise from the red and green color thresholded image
    Mat str_el = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(2, 2));
    morphologyEx(thresholdedImage, thresholdedImage, cv::MORPH_OPEN, str_el);
    morphologyEx(thresholdedImage, thresholdedImage, cv::MORPH_CLOSE, str_el);

    // Convert the thresholded image to HSV
    Mat hsv_image;
    cvtColor(thresholdedImage, hsv_image, cv::COLOR_BGR2HSV);

    // Threshold the HSV image, keep only the red pixels
    Mat lower_red_hue_range, upper_red_hue_range;
    inRange(hsv_image, cv::Scalar(0, 100, 100), cv::Scalar(10, 255, 255), lower_red_hue_range);
    inRange(hsv_image, cv::Scalar(160, 100, 100), cv::Scalar(179, 255, 255), upper_red_hue_range);

    // Combine the two red-hue images
    Mat red_hue_image;
    addWeighted(lower_red_hue_range, 1.0, upper_red_hue_range, 1.0, 0.0, red_hue_image);

    // Blur the image to avoid false positives
    GaussianBlur(red_hue_image, red_hue_image, cv::Size(9, 9), 2, 2);

    // Threshold the HSV image, keep only the green pixels
    Mat green_hue_image;
    inRange(hsv_image, cv::Scalar(50, 50, 120), cv::Scalar(70, 255, 255), green_hue_image);

    // Blur the image to avoid false positives
    GaussianBlur(green_hue_image, green_hue_image, cv::Size(9, 9), 2, 2);

    // Find the centers of the largest red and green contours
    vector<vector<Point> > redContours, greenContours;
    vector<Vec4i> redHierarchy, greenHierarchy;

    // Find contours
    findContours(red_hue_image.clone(), redContours, redHierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    findContours(green_hue_image.clone(), greenContours, greenHierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    // Iterate through the contours and find the centroid of the largest red contour
    double largest_area = 0;
    int largest_contour_index = 0;
    size_t count = redContours.size();
    if (count > 0) {
        for (unsigned int i = 0; i < count; i++) {
            // Area of this contour
            double a = contourArea(redContours[i], false);
            if (a > largest_area) {
                largest_area = a;
                // Store the index of the largest contour
                largest_contour_index = i;
            }
        }
        Moments redMoment = moments(redContours[largest_contour_index], false);
        redLedPos = Point2f(redMoment.m10 / redMoment.m00, redMoment.m01 / redMoment.m00);
    }
    // Iterate through the contours and find the centroid of the largest green contour
    largest_area = 0;
    largest_contour_index = 0;
    count = greenContours.size();
    if (count > 0) {
        for (unsigned int i = 0; i < count; i++) {
            double a = contourArea(greenContours[i], false);
            if (a > largest_area) {
                largest_area = a;
                largest_contour_index = i;
            }
        }
        Moments greenMoment = moments(greenContours[largest_contour_index], false);
        greenLedPos = Point2f(greenMoment.m10 / greenMoment.m00, greenMoment.m01 / greenMoment.m00);
    }

    // Return the red and green LED positions ((-1,-1) when not found)
    ledPos.push_back(redLedPos);
    ledPos.push_back(greenLedPos);
    return ledPos;
}
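This is roughly the direction I am considering for the std::thread version. It is only a sketch and untested: processVideo() is a placeholder name I made up for my existing per-video capture loop, and it calls the getLedCoordinates() function above on every frame. Each thread writes only into its own slot of results, so I believe no locking is needed for the coordinate lists.

// Sketch only (untested): one thread per video file.
// getLedCoordinates() is the function defined above.
#include <thread>
#include <vector>
#include <string>
#include "opencv2/videoio.hpp"

// Placeholder helper: run the capture loop for one file and collect the
// LED coordinates of every frame.
std::vector<std::vector<cv::Point2f>> processVideo(const std::string& path)
{
    std::vector<std::vector<cv::Point2f>> perFrameCoords;
    cv::VideoCapture cap(path);
    cv::Mat frame;
    while (cap.read(frame))
        perFrameCoords.push_back(getLedCoordinates(frame));
    return perFrameCoords;
}

int main()
{
    std::vector<std::string> files = { "video1.avi", "video2.avi", "video3.avi", "video4.avi" };
    std::vector<std::vector<std::vector<cv::Point2f>>> results(files.size());
    std::vector<std::thread> workers;

    // Launch one worker thread per video; each writes only to results[i].
    for (size_t i = 0; i < files.size(); ++i)
        workers.emplace_back([&, i]() { results[i] = processVideo(files[i]); });

    // Wait for all videos to finish before starting the later analysis.
    for (auto& t : workers)
        t.join();

    // results[i] now holds, for every frame of files[i], the red and green
    // LED positions returned by getLedCoordinates().
    return 0;
}

If I went with TBB instead, I think the explicit thread vector could be replaced by a parallel loop over the file indices, something like (again untested):

#include <tbb/parallel_for.h>
// ...
tbb::parallel_for(size_t(0), files.size(), [&](size_t i) {
    results[i] = processVideo(files[i]);
});

Does this look like a reasonable way to structure it?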