blob: 92a5e4a35431e2b68924d6f471f1db0c877c0b96 [file] [log] [blame]
"""
SPDX-FileCopyrightText: 2020-present Open Networking Foundation <info@opennetworking.org>
SPDX-License-Identifier: LicenseRef-ONF-Member-1.01
"""
from __future__ import print_function
from collections import namedtuple
import logging as log
import os
import sys
from argparse import ArgumentParser, SUPPRESS
from imutils import build_montages
import cv2
from openvino.inference_engine import IECore
from base_camera import BaseCamera
import mqtt
# NCHW input-tensor dimensions of the network: (batch, channels, height, width).
Shape = namedtuple('Shape', ['n','c','h','w'])
class Camera(BaseCamera):
    """Video camera that runs an OpenVINO object detector on each frame and
    streams annotated JPEG frames (see frames()); publishes MQTT events when
    people are detected.
    """
    # Class-level defaults; the real values are assigned per-instance in __init__.
    model = None            # path to the IR model .xml (set from args.model)
    prob_threshold = 0.0    # minimum detection confidence to draw a box
    input = None            # 'cam', 'gstreamer', or a video-file path
    device = None           # camera/stream id; also used as UDP port suffix for GStreamer
def __init__(self, device, args):
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
self.model_xml = args.model
self.input = args.input
self.prob_threshold = args.prob_threshold
self.is_async_mode = True
self.device = device
super(Camera, self).__init__(device, args.idle)
    def __del__(self):
        # Best-effort GUI cleanup when the camera object is garbage-collected.
        # NOTE(review): the capture stream itself is never released here —
        # presumably intentional since frames() owns it; confirm.
        # stream.release()
        cv2.destroyAllWindows()
def init_stream(self):
if self.input == 'cam':
input_stream = 0
elif self.input == 'gstreamer':
input_stream = 'udpsrc port=500' + self.device + ' caps = " application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" ! rtph264depay ! avdec_h264 output-corrupt=false ! videoconvert ! appsink'
else:
input_stream = self.input
assert os.path.isfile(self.input), "Specified input file doesn't exist"
if self.input == 'gstreamer':
stream = cv2.VideoCapture(input_stream, cv2.CAP_GSTREAMER)
else:
stream = cv2.VideoCapture(input_stream)
return stream
def init_inference(self):
self.model_bin = os.path.splitext(self.model_xml)[0] + ".bin"
# Read IR
log.info("Reading IR...")
net = IECore().read_network(model=self.model_xml, weights=self.model_bin)
assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
assert len(net.outputs) == 1, "Demo supports only single output topologies"
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
log.info("Loading IR to the plugin...")
exec_net = IECore().load_network(network=net, device_name="CPU", num_requests=2)
# Read and pre-process input image
shape = Shape(*net.inputs[input_blob].shape)
del net
return exec_net, shape, input_blob, out_blob
def frames(self):
exec_net, shape, input_blob, out_blob = self.init_inference()
stream = self.init_stream()
cur_request_id = 0
next_request_id = 1
ret, frame = stream.read()
while True:
if self.is_async_mode:
ret, next_frame = stream.read()
else:
ret, frame = stream.read()
if not ret:
break
initial_w = stream.get(cv2.CAP_PROP_FRAME_WIDTH)
initial_h = stream.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Main sync point:
# in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
# in the regular mode we start the CURRENT request and immediately wait for it's completion
if self.is_async_mode:
in_frame = cv2.resize(next_frame, (shape.w, shape.h))
in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW
in_frame = in_frame.reshape((shape.n, shape.c, shape.h, shape.w))
exec_net.start_async(request_id=next_request_id, inputs={input_blob: in_frame})
else:
in_frame = cv2.resize(frame, (shape.w, shape.h))
in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW
in_frame = in_frame.reshape((shape.n, shape.c, shape.h, shape.w))
exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
if exec_net.requests[cur_request_id].wait(-1) == 0:
# Parse detection results of the current request
res = exec_net.requests[cur_request_id].outputs[out_blob]
initial_w = 640
initial_h = 480
frame = cv2.resize(frame, (initial_w, initial_h))
obj_count = 0
red = (0, 0, 255)
black = (0, 0, 0)
for obj in res[0][0]:
# Draw only objects when probability more than specified threshold
if obj[2] > self.prob_threshold:
xmin = int(obj[3] * initial_w)
ymin = int(obj[4] * initial_h)
xmax = int(obj[5] * initial_w)
ymax = int(obj[6] * initial_h)
# Draw box and prob
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), red, 2)
cv2.putText(frame, str(round(obj[2] * 100, 1)) + ' %', (xmin, ymin - 7),
cv2.FONT_HERSHEY_COMPLEX, 0.6, black, 1)
obj_count += 1
cv2.putText(frame, "persons: {}".format(str(obj_count)), (10, 20),
cv2.FONT_HERSHEY_COMPLEX, 0.6, black, 1)
cv2.putText(frame, "camera: {}".format(self.device), (10, int(initial_h - 20)),
cv2.FONT_HERSHEY_COMPLEX, 0.6, black, 1)
if obj_count > 0:
mqtt.person_detected(self.device, obj_count)
yield cv2.imencode('.jpg', frame)[1].tobytes()
if self.is_async_mode:
cur_request_id, next_request_id = next_request_id, cur_request_id
frame = next_frame