AETHER-2319 remove no-show option

Change-Id: I582a6e26e276a41066690c341a7318e9a989cd36
diff --git a/Makefile b/Makefile
index 3c5b584..1ca4c1f 100644
--- a/Makefile
+++ b/Makefile
@@ -45,21 +45,15 @@
 run:
 	docker run -itu root:root --privileged --network host --name $(IMAGE) --rm $(IMAGE)
 
-run-native-test: $(VENV)
+run-native-file: $(VENV)
 	. ./bin/person_detection.sh -i ./resources/run.mp4
 
-run-native: $(VENV) 
+run-native-gstream: $(VENV)
 	. ./bin/person_detection.sh -i gstreamer
 
 run-native-cam: $(VENV)
 	. ./bin/person_detection.sh -i cam
 
-run-native-test-no-show: $(VENV)
-	. ./bin/person_detection.sh -i ./resources/run.mp4 -ns
-
-run-native-no-show: $(VENV)
-	. ./bin/person_detection.sh -i gstreamer -ns
-
 test:
 
 
diff --git a/person_detection/app.py b/person_detection/app.py
index dec9c21..13e7abc 100644
--- a/person_detection/app.py
+++ b/person_detection/app.py
@@ -58,7 +58,6 @@
     args.add_argument("--labels", help="Optional. Path to labels mapping file", default=None, type=str)
     args.add_argument("-pt", "--prob_threshold", help="Optional. Probability threshold for detections filtering",
                       default=0.5, type=float)
-    args.add_argument("-ns", help='No show output', action='store_true')
 
     return parser
 
diff --git a/person_detection/base_camera.py b/person_detection/base_camera.py
index 96c148a..74b8f08 100644
--- a/person_detection/base_camera.py
+++ b/person_detection/base_camera.py
@@ -80,9 +80,9 @@
 
         return BaseCamera.frame
 
-    def frames():
+    def frames(self):
         """"Generator that returns frames from the camera."""
-        raise RuntimeError('Must be implemented by subclasses.')
+        raise NotImplementedError('Must be implemented by subclasses.')
 
     def _thread(self):
         """Camera background thread."""
diff --git a/person_detection/person_detection.py b/person_detection/person_detection.py
index 4b1611d..f5963fb 100644
--- a/person_detection/person_detection.py
+++ b/person_detection/person_detection.py
@@ -67,9 +67,9 @@
         elif args.input == 'gstreamer':
             # gst rtp sink
             self.input_stream = 'udpsrc port=5000 caps = " application/x-rtp, encoding-name=JPEG,payload=26" ! rtpjpegdepay ! decodebin ! videoconvert ! appsink'
-            #self.input_stream = 'udpsrc port=5000 caps = "application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" ! rtph264depay ! decodebin ! videoconvert ! appsink'
+            #input_stream = 'udpsrc port=5000 caps = "application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" ! rtph264depay ! decodebin ! videoconvert ! appsink'
         else:
-            input_stream = args.input
+            self.input_stream = args.input
             assert os.path.isfile(args.input), "Specified input file doesn't exist"
 
         if args.labels:
@@ -107,8 +107,6 @@
         render_time = 0
         ret, frame = self.cap.read()
 
-        frameList = []
-
         print("To close the application, press 'CTRL+C' or any key with focus on the output window")
 
         while True:
@@ -126,17 +124,15 @@
             # in the regular mode we start the CURRENT request and immediately wait for it's completion
             inf_start = time.time()
             if self.is_async_mode:
-                if ret:
-                    in_frame = cv2.resize(next_frame, (self.w, self.h))
-                    in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
-                    in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
-                    self.exec_net.start_async(request_id=next_request_id, inputs={self.input_blob: in_frame})
+                in_frame = cv2.resize(next_frame, (self.w, self.h))
+                in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
+                in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
+                self.exec_net.start_async(request_id=next_request_id, inputs={self.input_blob: in_frame})
             else:
-                if ret:
-                    in_frame = cv2.resize(frame, (self.w, self.h))
-                    in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
-                    in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
-                    self.exec_net.start_async(request_id=cur_request_id, inputs={self.input_blob: in_frame})
+                in_frame = cv2.resize(frame, (self.w, self.h))
+                in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
+                in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))
+                self.exec_net.start_async(request_id=cur_request_id, inputs={self.input_blob: in_frame})
 
             if self.exec_net.requests[cur_request_id].wait(-1) == 0:
                 inf_end = time.time()
@@ -159,8 +155,8 @@
                         det_label = self.labels_map[class_id] if self.labels_map else str(class_id)
                         cv2.putText(frame, det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %', (xmin, ymin - 7),
                                     cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
-                        print('Object detected, class_id:', class_id, 'probability:', obj[2], 'xmin:', xmin, 'ymin:', ymin,
-                              'xmax:', xmax, 'ymax:', ymax)
+                        # print('Object detected, class_id:', class_id, 'probability:', obj[2], 'xmin:', xmin, 'ymin:', ymin,
+                        #      'xmax:', xmax, 'ymax:', ymax)
 
                 # Draw performance stats
                 inf_time_message = "Inference time: Not applicable for async mode" if self.is_async_mode else \
@@ -180,22 +176,12 @@
 
             yield cv2.imencode('.jpg', frame)[1].tobytes()
 
-            if not self.args.ns:
-                if ret:
-                    cv2.imshow("Detection results", frame)
-                render_end = time.time()
-                render_time = render_end - render_start
+            render_end = time.time()
+            render_time = render_end - render_start
 
             if self.is_async_mode:
                 cur_request_id, next_request_id = next_request_id, cur_request_id
-
                 frame = next_frame
-            key = cv2.waitKey(1)
-            if key == 27:
-                break
-            if 9 == key:
-                self.is_async_mode = not self.is_async_mode
-                log.info("Switched to {} mode".format("async" if self.is_async_mode else "sync"))
 
 
 if __name__ == '__main__':