Refactor person detection: default input to gstreamer, hardcode CPU device, drop unused CLI options (-l, -pp, -d, --labels), and split Camera setup into init_stream/init_inference helpers

Change-Id: Ice294178d7af9be94a8bb65c1d0a7c8064f0ce7e
diff --git a/bin/person_detection.sh b/bin/person_detection.sh
index 97fab3b..e8a7587 100755
--- a/bin/person_detection.sh
+++ b/bin/person_detection.sh
@@ -3,5 +3,5 @@
 source .venv/bin/activate
 source /opt/intel/openvino/bin/setupvars.sh
 #python3 person_detection/person_detection.py -d CPU -m models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml $args
-python3 person_detection/app.py -d CPU -m models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml $args
+python3 person_detection/app.py -m models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml $args
 
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 7cfcbfa..12d5c52 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -25,4 +25,4 @@
      pip3 install -r requirements.txt
 
 ENTRYPOINT ["./bin/person_detection.sh"]
-CMD ["-i gstreamer -pt 0.75"]
+CMD ["-pt 0.75"]
diff --git a/person_detection/app.py b/person_detection/app.py
index 5cf454b..3605ea0 100644
--- a/person_detection/app.py
+++ b/person_detection/app.py
@@ -52,19 +52,10 @@
     args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
                       required=True, type=str)
     args.add_argument("-i", "--input",
-                      help="Required. Path to video file or image. 'cam' for capturing video stream from camera",
-                      required=True, type=str)
-    args.add_argument("-l", "--cpu_extension",
-                      help="Optional. Required for CPU custom layers. Absolute path to a shared library with the "
-                           "kernels implementations.", type=str, default=None)
-    args.add_argument("-pp", "--plugin_dir", help="Optional. Path to a plugin folder", type=str, default=None)
-    args.add_argument("-d", "--device",
-                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is "
-                           "acceptable. The demo will look for a suitable plugin for device specified. "
-                           "Default value is CPU", default="CPU", type=str)
-    args.add_argument("--labels", help="Optional. Path to labels mapping file", default=None, type=str)
+                      help="Path to video file or image. 'cam' for capturing video stream from camera",
+                      default="gstreamer", type=str)
     args.add_argument("-pt", "--prob_threshold", help="Optional. Probability threshold for detections filtering",
-                      default=0.5, type=float)
+                      default=0.0, type=float)
     args.add_argument("--idle", action='store_true', help="Idle if no clients connected")
 
     return parser
diff --git a/person_detection/person_detection.py b/person_detection/person_detection.py
index 29aef27..8eff1bc 100644
--- a/person_detection/person_detection.py
+++ b/person_detection/person_detection.py
@@ -5,49 +5,63 @@
 
 from __future__ import print_function
 
-import cv2
+from collections import namedtuple
 import logging as log
 import os
 import sys
 import time
 from argparse import ArgumentParser, SUPPRESS
 from imutils import build_montages
+
+import cv2
 from openvino.inference_engine import IECore
+
 from base_camera import BaseCamera
 
-DEFAULT_PROB_THRESH = 0.5
-
-def build_argparser():
-    parser = ArgumentParser(add_help=False)
-    args = parser.add_argument_group('Options')
-    args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
-    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
-                      required=True, type=str)
-    args.add_argument("-i", "--input",
-                      help="Required. Path to video file or image. 'cam' for capturing video stream from camera",
-                      required=True, type=str)
-    args.add_argument("-l", "--cpu_extension",
-                      help="Optional. Required for CPU custom layers. Absolute path to a shared library with the "
-                           "kernels implementations.", type=str, default=None)
-    args.add_argument("-pp", "--plugin_dir", help="Optional. Path to a plugin folder", type=str, default=None)
-    args.add_argument("-d", "--device",
-                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is "
-                           "acceptable. The demo will look for a suitable plugin for device specified. "
-                           "Default value is CPU", default="CPU", type=str)
-    args.add_argument("--labels", help="Optional. Path to labels mapping file", default=None, type=str)
-    args.add_argument("-pt", "--prob_threshold", help="Optional. Probability threshold for detections filtering",
-                      default=DEFAULT_PROB_THRESH, type=float)
-    args.add_argument("-ns", help='No show output', action='store_true')
-
-    return parser
-
+Shape = namedtuple('Shape', ['n', 'c', 'h', 'w'])
 
 class Camera(BaseCamera):
-    prob_threshold = DEFAULT_PROB_THRESH
+    model = None
+    prob_threshold = 0.0
+    input = None
+    device = None
 
     def __init__(self, device, args):
         log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
-        model_xml = args.model
+
+        self.model = args.model
+        self.input = args.input
+        self.prob_threshold = args.prob_threshold
+
+        self.is_async_mode = True
+
+        self.device = device
+
+        super(Camera, self).__init__(device, args.idle)
+
+    def __del__(self):
+        # stream.release()
+        cv2.destroyAllWindows()
+
+    def init_stream(self):
+        if self.input == 'cam':
+            input_stream = 0
+        elif self.input == 'gstreamer':
+            input_stream = 'udpsrc port=500' + self.device + ' caps = " application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" ! rtph264depay ! avdec_h264 ! videoconvert ! appsink'
+        else:
+            input_stream = self.input
+            assert os.path.isfile(self.input), "Specified input file doesn't exist"
+
+        if self.input == 'gstreamer':
+            stream = cv2.VideoCapture(input_stream, cv2.CAP_GSTREAMER)
+        else:
+            stream = cv2.VideoCapture(input_stream)
+
+        return stream
+
+
+    def init_inference(self):
+        model_xml = self.model
         model_bin = os.path.splitext(model_xml)[0] + ".bin"
 
         # Read IR
@@ -56,47 +70,22 @@
 
         assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
         assert len(net.outputs) == 1, "Demo supports only single output topologies"
-        self.input_blob = next(iter(net.inputs))
-        self.out_blob = next(iter(net.outputs))
+        input_blob = next(iter(net.inputs))
+        out_blob = next(iter(net.outputs))
 
         log.info("Loading IR to the plugin...")
-        self.exec_net = IECore().load_network(network=net, device_name=args.device, num_requests=2)
+        exec_net = IECore().load_network(network=net, device_name="CPU", num_requests=2)
         # Read and pre-process input image
-        self.n, self.c, self.h, self.w = net.inputs[self.input_blob].shape
+        shape = Shape(*net.inputs[input_blob].shape)
         del net
-        if args.input == 'cam':
-            self.input_stream = 0
-        elif args.input == 'gstreamer':
-            # M-JPEG
-            # self.input_stream = 'udpsrc port=500' + device + ' caps = " application/x-rtp, encoding-name=JPEG,payload=26" ! rtpjpegdepay ! decodebin ! videoconvert ! appsink'
-            # H.264
-            self.input_stream = 'udpsrc port=500' + device + ' caps = " application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" ! rtph264depay ! avdec_h264 ! videoconvert ! appsink'
-            print("input_stream:", self.input_stream)
-        else:
-            self.input_stream = args.input
-            assert os.path.isfile(args.input), "Specified input file doesn't exist"
 
-        if args.labels:
-            with open(args.labels, 'r') as f:
-                self.labels_map = [x.strip() for x in f]
-        else:
-            self.labels_map = None
+        return exec_net, shape, input_blob, out_blob
 
-        self.args = args
-        self.prob_threshold = args.prob_threshold
-
-        super(Camera, self).__init__(device, args.idle)
-
-    def __del__(self):
-        self.cap.release()
-        cv2.destroyAllWindows()
 
     def frames(self):
 
-        if self.input_stream == 'gstreamer':
-            self.cap = cv2.VideoCapture(self.input_stream, cv2.CAP_GSTREAMER)
-        else:
-            self.cap = cv2.VideoCapture(self.input_stream)
+        exec_net, shape, input_blob, out_blob  = self.init_inference()
+        stream = self.init_stream()
 
         cur_request_id = 0
         next_request_id = 1
@@ -108,43 +97,40 @@
         # Async doesn't work if True
         # Request issues = Runtime Error: [REQUEST BUSY]
         # self.is_async_mode = False
-        self.is_async_mode = True
         render_time = 0
-        ret, frame = self.cap.read()
-
-        print("To close the application, press 'CTRL+C' or any key with focus on the output window")
+        ret, frame = stream.read()
 
         while True:
             if self.is_async_mode:
-                ret, next_frame = self.cap.read()
+                ret, next_frame = stream.read()
             else:
-                ret, frame = self.cap.read()
+                ret, frame = stream.read()
             if not ret:
                 break
-            initial_w = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
-            initial_h = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+            initial_w = stream.get(cv2.CAP_PROP_FRAME_WIDTH)
+            initial_h = stream.get(cv2.CAP_PROP_FRAME_HEIGHT)
 
             # Main sync point:
             # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
             # in the regular mode we start the CURRENT request and immediately wait for it's completion
             inf_start = time.time()
             if self.is_async_mode:
-                in_frame = cv2.resize(next_frame, (self.w, self.h))
+                in_frame = cv2.resize(next_frame, (shape.w, shape.h))
                 in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
-                in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
-                self.exec_net.start_async(request_id=next_request_id, inputs={self.input_blob: in_frame})
+                in_frame = in_frame.reshape((shape.n, shape.c, shape.h, shape.w))
+                exec_net.start_async(request_id=next_request_id, inputs={input_blob: in_frame})
             else:
-                in_frame = cv2.resize(frame, (self.w, self.h))
+                in_frame = cv2.resize(frame, (shape.w, shape.h))
                 in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
-                in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
-                self.exec_net.start_async(request_id=cur_request_id, inputs={self.input_blob: in_frame})
+                in_frame = in_frame.reshape((shape.n, shape.c, shape.h, shape.w))
+                exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
 
-            if self.exec_net.requests[cur_request_id].wait(-1) == 0:
+            if exec_net.requests[cur_request_id].wait(-1) == 0:
                 inf_end = time.time()
                 det_time = inf_end - inf_start
 
                 # Parse detection results of the current request
-                res = self.exec_net.requests[cur_request_id].outputs[self.out_blob]
+                res = exec_net.requests[cur_request_id].outputs[out_blob]
 
                 for obj in res[0][0]:
                     # Draw only objects when probability more than specified threshold
@@ -157,7 +143,7 @@
                         # Draw box and label\class_id
                         color = (0, 0, 255)
                         cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
-                        det_label = self.labels_map[class_id] if self.labels_map else str(class_id)
+                        det_label = str(class_id)
                         cv2.putText(frame, det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %', (xmin, ymin - 7),
                                     cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
                         # print('Object detected, class_id:', class_id, 'probability:', obj[2], 'xmin:', xmin, 'ymin:', ymin,
@@ -176,10 +162,3 @@
             if self.is_async_mode:
                 cur_request_id, next_request_id = next_request_id, cur_request_id
                 frame = next_frame
-
-
-if __name__ == '__main__':
-    args = build_argparser().parse_args()
-    camera = Camera(args)
-    camera.frames()
-    del camera