from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
    help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
    help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
    help="path to optional input video file")
ap.add_argument("-o", "--output", type=str,
    help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=25,
    help="# of skip frames between detections")
args = vars(ap.parse_args())
# list of class labels the MobileNet SSD was trained to detect
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]
print("[INFO] loading model...") net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
if not args.get("input", False):
    print("[INFO] starting video stream...")
    # vs = VideoStream(src=0).start()
    vs = VideoStream('rtsp://admin:[email protected]:554/H.264').start()
    time.sleep(2.0)
else:
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(args["input"])
writer = None
# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None
# list of per-object OpenCV trackers, repopulated on each detection pass
trackers = []
trackableObjects = {}
totalFrames = 0
totalDown = 0
totalUp = 0

# start the frames per second throughput estimator
fps = FPS().start()
counter = 0

while True:
    # grab the next frame (VideoCapture returns a (grabbed, frame)
    # tuple, VideoStream returns the frame directly)
    frame = vs.read()
    frame = frame[1] if args.get("input", False) else frame
    confidences = []

    # if we are viewing a video and did not grab a frame, we have
    # reached the end of the video
    if args["input"] is not None and frame is None:
        break

    # resize the frame, then create an RGB copy
    frame = imutils.resize(frame, width=500)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # set the frame dimensions from the first frame
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # draw the two horizontal counting lines
    line_position_up = ((0, H // 4 + 30), (W, H // 4 + 30))
    line_position_down = ((0, 3 * (H // 4)), (W, 3 * (H // 4)))
    cv2.line(frame, line_position_up[0], line_position_up[1], (0, 255, 255), 2)
    cv2.line(frame, line_position_down[0], line_position_down[1], (0, 0, 255), 2)

    # if we are supposed to be writing a video to disk, initialize
    # the writer
    if args["output"] is not None and writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H), True)

    status = "Waiting"
    rects = []
    # run the (expensive) detector every `--skip-frames` frames; in
    # between, rely on the lighter-weight object trackers
    if totalFrames % args["skip_frames"] == 0:
        # set the status and initialize our new set of object trackers
        status = "Detecting"
        trackers = []

        # convert the frame to a blob and obtain the detections
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
            0.007843, (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > args["confidence"]:
                # extract the index of the class label from the
                # detections list
                idx = int(detections[0, 0, i, 1])

                # if the class label is not a person or car, ignore it
                if CLASSES[idx] not in ["person", "car"]:
                    continue

                # compute the (x, y)-coordinates of the bounding box
                # for the object and clamp it to the frame, since the
                # SSD can return coordinates slightly outside the image
                box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                (startX, startY, endX, endY) = box.astype("int")
                (startX, startY) = (max(0, startX), max(0, startY))
                (endX, endY) = (min(W, endX), min(H, endY))

                # OpenCV trackers take the ROI as a single
                # (x, y, width, height) tuple, not four corner values
                tracker = cv2.TrackerBoosting_create()
                tracker.init(frame, (startX, startY,
                    endX - startX, endY - startY))
                trackers.append(tracker)
                confidences.append(float(confidence))
    else:
        # loop over the trackers
        for tracker in trackers:
            status = "Tracking"
            # update the tracker on the same BGR frame it was
            # initialized with and keep the new position only if
            # the update succeeded
            (success, pos) = tracker.update(frame)
            if success:
                rects.append(pos)
    info = [
        ("Up", totalUp),
        ("Down", totalDown),
        ("Status", status),
    ]
    for (i, (k, v)) in enumerate(info):
        text = "{}: {}".format(k, v)
        cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

    if writer is not None:
        writer.write(frame)

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    if key == ord("q"):
        break

    totalFrames += 1
    fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
if writer is not None:
    writer.release()
if not args.get("input", False):
    vs.stop()
else:
    vs.release()
cv2.destroyAllWindows()
Error text:

tracker.init(frame, (centerX, centerY, width, height))
cv2.error: OpenCV(3.4.8) C:\projects\opencv-python\opencv\modules\core\src\matrix.cpp:466: error: (-215:Assertion failed) 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols && 0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows in function 'cv::Mat::Mat'
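The assertion fails inside `Tracker.init` because OpenCV builds a `cv::Mat` ROI from the tuple you pass, and it interprets that tuple strictly as (x, y, width, height) measured from the top-left corner. Passing two corner points such as (startX, startY, endX, endY), or a center point as in the failing call above, produces an ROI whose right or bottom edge lands outside the frame, which violates the `roi.x + roi.width <= m.cols` check. Here is a minimal sketch of the conversion, assuming opencv-contrib-python is installed; the helper name `make_tracker` is invented for illustration:

import cv2

def make_tracker(frame, startX, startY, endX, endY):
    # hypothetical helper: turn SSD corner coordinates into a tracker
    (H, W) = frame.shape[:2]

    # clamp the corners so the ROI cannot leave the image
    (startX, startY) = (max(0, int(startX)), max(0, int(startY)))
    (endX, endY) = (min(W, int(endX)), min(H, int(endY)))

    # OpenCV wants (x, y, width, height) from the top-left corner,
    # not (x1, y1, x2, y2) and not a center point
    tracker = cv2.TrackerBoosting_create()
    tracker.init(frame, (startX, startY, endX - startX, endY - startY))
    return tracker

If you have a center point instead, convert it first (x = centerX - width // 2, y = centerY - height // 2) and clamp the same way before calling `init`.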