Queue frames

Replace the CameraEvent signalling and idle-timeout bookkeeping in
BaseCamera with a bounded per-device Queue(100): the background capture
thread puts frames on the queue and get_frame() blocks until one is
available. In person_detection.py, keep the model paths as model_xml /
model_bin attributes and drop a few startup log lines and a commented-out
debug print.

Change-Id: Id176d529f224fcb58376219194f26fc93539bc24
diff --git a/person_detection/base_camera.py b/person_detection/base_camera.py
index 90a2474..0998e1a 100644
--- a/person_detection/base_camera.py
+++ b/person_detection/base_camera.py
@@ -1,90 +1,32 @@
-import time
import threading
-try:
- from greenlet import getcurrent as get_ident
-except ImportError:
- try:
- from thread import get_ident
- except ImportError:
- from _thread import get_ident
-
-
-class CameraEvent(object):
- """An Event-like class that signals all active clients when a new frame is
- available.
- """
- def __init__(self):
- self.events = {}
-
- def wait(self):
- """Invoked from each client's thread to wait for the next frame."""
- ident = get_ident()
- if ident not in self.events:
- # this is a new client
- # add an entry for it in the self.events dict
- # each entry has two elements, a threading.Event() and a timestamp
- self.events[ident] = [threading.Event(), time.time()]
- return self.events[ident][0].wait()
-
- def set(self):
- """Invoked by the camera thread when a new frame is available."""
- now = time.time()
- remove = None
- for ident, event in self.events.items():
- if not event[0].isSet():
- # if this client's event is not set, then set it
- # also update the last set timestamp to now
- event[0].set()
- event[1] = now
- else:
- # if the client's event is already set, it means the client
- # did not process a previous frame
- # if the event stays set for more than 5 seconds, then assume
- # the client is gone and remove it
- if now - event[1] > 5:
- remove = ident
- if remove:
- del self.events[remove]
-
- def clear(self):
- """Invoked from each client's thread after a frame was processed."""
- self.events[get_ident()][0].clear()
+from queue import Queue
class BaseCamera(object):
thread = {} # background thread that reads frames from camera
- frame = {} # current frame is stored here by background thread
- last_access = {} # time of last client access to the camera
- event = {}
- idle = False # if True, stops thread if no client connected
+ frame = {} # per-device Queue of frames filled by the background thread
def __init__(self, device=None, idle=False):
"""Start the background camera thread if it isn't running yet."""
self.device = device
- self.idle = idle
- BaseCamera.event[self.device] = CameraEvent()
if self.device not in BaseCamera.thread:
BaseCamera.thread[self.device] = None
if BaseCamera.thread[self.device] is None:
- BaseCamera.last_access[self.device] = time.time()
+
+ BaseCamera.frame[self.device] = Queue(100) # bounded per-device frame queue
# start background frame thread
BaseCamera.thread[self.device] = threading.Thread(target=self._thread, args=(self.device,))
BaseCamera.thread[self.device].start()
# wait until frames are available
- while self.get_frame() is None:
- time.sleep(0)
+ _ = self.get_frame() # block until the first frame is queued (the frame itself is discarded)
def get_frame(self):
"""Return the current camera frame."""
- BaseCamera.last_access[self.device] = time.time()
- # wait for a signal from the camera thread
- BaseCamera.event[self.device].wait()
- BaseCamera.event[self.device].clear()
-
- return BaseCamera.frame[self.device]
+ # block until the background thread queues the next frame
+ return BaseCamera.frame[self.device].get(block=True)
def frames(self):
""""Generator that returns frames from the camera."""
@@ -94,16 +36,6 @@
"""Camera background thread."""
frames_iterator = self.frames()
for frame in frames_iterator:
- BaseCamera.frame[device] = frame
- BaseCamera.event[device].set() # send signal to clients
- time.sleep(0)
-
- if self.idle:
- # if there hasn't been any clients asking for frames in
- # the last 10 seconds then stop the thread
- if time.time() - BaseCamera.last_access[device] > 10:
- frames_iterator.close()
- print('Stopping camera thread due to inactivity.')
- break
+ BaseCamera.frame[device].put(frame, block=True) # back-pressure: blocks while 100 frames are unread
BaseCamera.thread[device] = None
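
The queue turns frame delivery into a plain producer/consumer hand-off. Below is a minimal, self-contained sketch of that flow; FakeCamera and its string "frames" are hypothetical stand-ins for the real frames() generator, which yields camera images.

import threading
from queue import Queue

class FakeCamera:
    """Hypothetical stand-in for the real camera; frames() normally yields images."""
    def frames(self):
        for i in range(5):
            yield f"frame-{i}"

frame_queue = Queue(100)  # bounded, mirroring Queue(100) in the patch

def capture(camera):
    # producer, mirroring BaseCamera._thread: put() blocks once 100 frames are unread
    for frame in camera.frames():
        frame_queue.put(frame, block=True)

thread = threading.Thread(target=capture, args=(FakeCamera(),))
thread.start()

# consumer, mirroring get_frame(): get() blocks until the producer has queued a frame
for _ in range(5):
    print(frame_queue.get(block=True))
thread.join()

One design consequence of this change: the old idle timeout is gone, so if no client ever calls get_frame(), the capture thread simply blocks on a full queue instead of shutting itself down.
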
diff --git a/person_detection/person_detection.py b/person_detection/person_detection.py
index 8eff1bc..9c5f0d9 100644
--- a/person_detection/person_detection.py
+++ b/person_detection/person_detection.py
@@ -29,7 +29,7 @@
def __init__(self, device, args):
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
- self.model = args.model
+ self.model_xml = args.model
self.input = args.input
self.prob_threshold = args.prob_threshold
@@ -61,12 +61,11 @@
def init_inference(self):
- model_xml = self.model
- model_bin = os.path.splitext(model_xml)[0] + ".bin"
+ self.model_bin = os.path.splitext(self.model_xml)[0] + ".bin"
# Read IR
log.info("Reading IR...")
- net = IECore().read_network(model=model_xml, weights=model_bin)
+ net = IECore().read_network(model=self.model_xml, weights=self.model_bin)
assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
assert len(net.outputs) == 1, "Demo supports only single output topologies"
@@ -77,6 +76,7 @@
exec_net = IECore().load_network(network=net, device_name="CPU", num_requests=2)
# Read and pre-process input image
shape = Shape(*net.inputs[input_blob].shape)
+
del net
return exec_net, shape, input_blob, out_blob
@@ -90,10 +90,6 @@
cur_request_id = 0
next_request_id = 1
- log.info("Starting inference in async mode...")
- log.info("To switch between sync and async modes press Tab button")
- log.info("To stop the demo execution press Esc button")
-
# Async doesn't work if True
# Request issues = Runtime Error: [REQUEST BUSY]
# self.is_async_mode = False
@@ -146,8 +142,6 @@
det_label = str(class_id)
cv2.putText(frame, det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %', (xmin, ymin - 7),
cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
- # print('Object detected, class_id:', class_id, 'probability:', obj[2], 'xmin:', xmin, 'ymin:', ymin,
- # 'xmax:', xmax, 'ymax:', ymax)
cv2.putText(frame, self.device, (10, int(initial_h - 20)),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
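
For context on the init_inference() change, this is the IR-loading flow the patch touches, condensed into a standalone sketch. The model path is a placeholder, and reusing a single IECore instance for both read_network and load_network is an assumption of the sketch, not something this patch changes.

import os
from openvino.inference_engine import IECore  # legacy Inference Engine API used by this demo

model_xml = "person-detection.xml"                    # placeholder IR path
model_bin = os.path.splitext(model_xml)[0] + ".bin"   # weights sit next to the .xml

ie = IECore()                                         # one core reused for read + load
net = ie.read_network(model=model_xml, weights=model_bin)
input_blob = next(iter(net.inputs))                   # demo assumes one input / one output
out_blob = next(iter(net.outputs))
exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2)
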