-
Notifications
You must be signed in to change notification settings - Fork 17
/
Copy pathdetection_stream.py
111 lines (87 loc) · 3.84 KB
/
detection_stream.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import argparse
from os import path
import time
import logging
import sys
import numpy as np
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
from object_detector_detection_api import ObjectDetectorDetectionAPI
from yolo_darfklow import YOLODarkflowDetector
from object_detector_detection_api_lite import ObjectDetectorLite
from utils.utils import Models
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt=' %I:%M:%S ',
level="INFO"
)
logger = logging.getLogger('detector')
basepath = path.dirname(__file__)
if __name__ == '__main__':
    # ---- command-line interface -------------------------------------------
    # prog must match this file's actual name so --help/usage is accurate.
    parser = argparse.ArgumentParser(prog='detection_stream.py')
    parser.add_argument("--model_name", "-mn", type=Models.from_string,
                        required=True, choices=list(Models),
                        help="name of detection model: {}".format(list(Models)))
    parser.add_argument("--graph_path", "-gp", type=str, required=False,
                        default=path.join(basepath, "frozen_inference_graph.pb"),
                        help="path to ssdlight model frozen graph *.pb file")
    parser.add_argument("--cfg_path", "-cfg", type=str, required=False,
                        default=path.join(basepath, "tiny-yolo-voc.cfg"),
                        help="path to yolo *.cfg file")
    parser.add_argument("--weights_path", "-w", type=str, required=False,
                        default=path.join(basepath, "tiny-yolo-voc.weights"),
                        help="path to yolo weights *.weights file")
    args = parser.parse_args()
    for k, v in vars(args).items():
        logger.info('Arguments. {}: {}'.format(k, v))

    # ---- detector selection -----------------------------------------------
    logger.info('Model loading...')
    if args.model_name == Models.ssd_lite:
        predictor = ObjectDetectorDetectionAPI(args.graph_path)
    elif args.model_name == Models.tiny_yolo:
        predictor = YOLODarkflowDetector(args.cfg_path, args.weights_path)
    elif args.model_name == Models.tf_lite:
        predictor = ObjectDetectorLite()
    else:
        # Guard against a Models member not handled above; without this,
        # `predictor` would be undefined below and raise a confusing NameError.
        raise ValueError('Unsupported model: {}'.format(args.model_name))

    # ---- camera setup -----------------------------------------------------
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))
    time.sleep(0.1)  # give the camera sensor time to warm up

    frame_rate_calc = 1
    freq = cv2.getTickFrequency()

    try:
        # ---- capture / detect / display loop ------------------------------
        for frame in camera.capture_continuous(rawCapture, format="bgr",
                                               use_video_port=True):
            t1 = cv2.getTickCount()
            # Raw BGR frame as a NumPy array.
            image = frame.array

            logger.info("FPS: {0:.2f}".format(frame_rate_calc))
            cv2.putText(image, "FPS: {0:.2f}".format(frame_rate_calc), (20, 20),
                        cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0), 2, cv2.LINE_AA)

            # Per the logging/drawing below, each detection tuple is
            # (top_left_xy, bottom_right_xy, confidence, class_label).
            result = predictor.detect(image)
            for obj in result:
                logger.info('coordinates: {} {}. class: "{}". confidence: {:.2f}'.
                            format(obj[0], obj[1], obj[3], obj[2]))
                cv2.rectangle(image, obj[0], obj[1], (0, 255, 0), 2)
                cv2.putText(image, '{}: {:.2f}'.format(obj[3], obj[2]),
                            (obj[0][0], obj[0][1] - 5),
                            cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)

            cv2.imshow("Stream", image)
            key = cv2.waitKey(1) & 0xFF

            # FPS for this iteration: ticks-per-second / ticks elapsed.
            t2 = cv2.getTickCount()
            frame_rate_calc = freq / (t2 - t1)

            # Clear the stream buffer so the next frame can be captured.
            rawCapture.truncate(0)

            # Quit on `q`.
            if key == ord("q"):
                break
    finally:
        # Release the preview window and the camera device even when the
        # loop exits via an exception (e.g. KeyboardInterrupt).
        cv2.destroyAllWindows()
        camera.close()