Frame Difference based tracker stuck with first frame
I have been working these last few weeks on detecting and tracking motion in a video. My goal is simple: detect a dog once it moves and track it (with a rectangular box surrounding it). I only want to track that one motion (the dog's), ignoring any other motion. After many unsuccessful trials with object- and motion-tracking algorithms and code using OpenCV, I came across something I was able to modify to get closer to my goal. The only issue is that the code seems to keep the information from the first frame for the whole video, which causes it to detect motion and draw a rectangle in an empty area, ignoring the actual motion.
Here's the code I'm using:
import imutils
import time
import cv2
# Module-level tracker state (mutated inside the functions below).
previousFrame = None  # grayscale reference frame used for frame differencing
count = 0  # number of processed frames; the reference frame refreshes every 10th
test = 0  # NOTE(review): appears unused in the visible code — confirm before removing
temp_frame = None  # last frame on which a detection box was drawn
rect = None  # last contour that triggered a detection (reused when motion pauses)
def searchForMovement(cnts, frame, min_area):
    """Draw a tracking box around the first sufficiently large contour.

    Scans ``cnts`` for the first contour whose area exceeds ``min_area``,
    draws a fixed 400x400 green rectangle offset (-100, -100) from the
    contour's bounding-box corner, and remembers that contour in the
    module-level ``rect`` so the box can be re-drawn on frames where no
    motion is found.  Only the first qualifying contour is used, which
    keeps the tracker locked onto a single moving object.

    Parameters:
        cnts: contours returned by ``cv2.findContours``.
        frame: BGR frame to annotate in place.
        min_area: minimum contour area (px^2) to count as motion.

    Returns:
        (frame, text) where ``text`` is "Detected" once anything has ever
        been detected, otherwise "Undetected".
    """
    global rect
    text = "Undetected"
    for c in cnts:
        # Ignore contours too small to be the tracked subject.
        if cv2.contourArea(c) < min_area:
            continue
        # First qualifying contour wins; 'break' replaces the original
        # flag variable and ignores any other motion in the frame.
        (x, y, w, h) = cv2.boundingRect(c)
        # Fixed-size 400x400 box, shifted so the detection sits inside it.
        cv2.rectangle(frame, (x - 100, y - 100), (x + 300, y + 300),
                      (0, 255, 0), 2)
        text = "Detected"
        rect = c
        break
    if text == "Undetected" and rect is not None:
        # No motion this frame: re-draw the box at the last known position.
        # The 'rect is not None' guard fixes a crash (cv2.boundingRect(None))
        # on frames before the very first detection.
        (x, y, w, h) = cv2.boundingRect(rect)
        cv2.rectangle(frame, (x - 100, y - 100), (x + 300, y + 300),
                      (0, 255, 0), 2)
        text = "Detected"
    if text == "Undetected":
        # Reachable only while nothing has ever been detected (rect is None).
        # In the original code this branch was dead because the fallback
        # above always ran and set text to "Detected".
        print(text, temp_frame)
    return frame, text
def trackMotion(ret, frame, gaussian_kernel, sensitivity_value, min_area):
    """Detect motion in ``frame`` by differencing against a reference frame.

    Blurs a grayscale copy of ``frame``, subtracts the module-level
    ``previousFrame``, thresholds and dilates the difference, and passes
    the resulting contours to ``searchForMovement``.  The reference frame
    is refreshed every 10th call so the tracker follows the subject
    instead of staying anchored to the very first frame.

    Parameters:
        ret: capture success flag from ``camera.read()``.
        frame: BGR frame to analyse/annotate.
        gaussian_kernel: odd Gaussian blur kernel size.
        sensitivity_value: binary-threshold level for the diff image.
        min_area: minimum contour area forwarded to ``searchForMovement``.

    Returns:
        (frame, text, thresh, frameDiff) on success; ``None`` when ``ret``
        is falsy (callers check ``ret`` before unpacking).
    """
    global previousFrame, count, test, temp_frame
    if not ret:
        # Original code implicitly returned None here; keep that contract.
        return None
    # Grayscale + blur reduces noise in the frame difference.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (gaussian_kernel, gaussian_kernel), 0)
    if previousFrame is None:
        # First frame: just record it as the reference and bail out.
        previousFrame = gray
        return frame, "Uninitialized", frame, frame
    frameDiff = cv2.absdiff(previousFrame, gray)
    thresh = cv2.threshold(frameDiff, sensitivity_value, 255,
                           cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    # cv2.findContours returns 3 values in OpenCV 3.x but only 2 in
    # 2.x/4.x; indexing [-2] picks the contour list in every version
    # (the original 3-way unpack raises ValueError on OpenCV 4).
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    frame, text = searchForMovement(cnts, frame, min_area)
    if text == "Detected":
        temp_frame = frame
    elif temp_frame is not None:
        # No detection this frame: show the last annotated frame instead.
        # The None guard fixes returning frame=None (and crashing imshow)
        # before the first detection ever happens.
        frame = temp_frame
    if text == "Undetected":
        print(text, temp_frame)
    # Refresh the reference frame every 10th call so the differencing
    # baseline tracks the scene rather than the very first frame.
    if count % 10 == 0:
        previousFrame = gray
    count = count + 1
    return frame, text, thresh, frameDiff
if __name__ == '__main__':
video = "Track.avi"
video0= "Track.mp4"
video1= "Ntest1.avi"
video2= "Ntest2.avi"
camera = cv2.VideoCapture(video2)
time.sleep(0.25)
min_area = 5000 #int(sys.argv[1])
while camera.isOpened():
gaussian_kernel = 27
sensitivity_value = 5
min_area = 2500
ret, frame = camera.read()
#Check if the next camera read is not null
if ret:
frame, text, thresh, frameDiff = trackMotion(ret,frame, gaussian_kernel, sensitivity_value, min_area)
else:
print("Video Finished")
close = False
while not close:
key1 = cv2.waitKey(3) & 0xFF
if key1 == 27 or key1 == ord('q'):
close = True
break
cv2.namedWindow('Thresh',cv2.WINDOW_NORMAL)
cv2.namedWindow('Frame Difference ...