from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import time

# Haar cascade classifiers for face and eye detection.
# The XML files are expected in the current working directory.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

# Capture / display resolution (pixels).
widths = 440
heights = 320

# Initialize the camera and grab a reference to the raw camera capture.
camera = PiCamera()
camera.resolution = (widths, heights)
camera.framerate = 32
camera.hflip = True  # mirror the image horizontally
rawCapture = PiRGBArray(camera, size=(widths, heights))

# Allow the camera sensor to warm up before capturing.
time.sleep(0.1)

# When True, show each processed frame in a preview window.
showVideo = True
cv2.namedWindow('VideoOutput')

try:
    # capture_continuous yields frames indefinitely, so no outer loop is
    # needed; pressing 'q' breaks out of this loop to exit the program.
    # (The previous `while True:` wrapper made 'q' restart capture instead
    # of quitting, because `break` only left the inner for-loop.)
    for image in camera.capture_continuous(rawCapture, format="bgr",
                                           use_video_port=True):
        frame = image.array
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect faces (scaleFactor=2, minNeighbors=5); an empty result
        # simply skips the loop below, so no found/not-found flag is needed.
        faces = face_cascade.detectMultiScale(gray, 2, 5)
        for (x, y, w, h) in faces:
            # Blue rectangle around each face.
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # Search for eyes only inside the face region.
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray, 2, 5)
            for (ex, ey, ew, eh) in eyes:
                # Red rectangle around each eye (drawn into the face ROI,
                # which is a view into `frame`).
                cv2.rectangle(roi_color, (ex, ey),
                              (ex + ew, ey + eh), (0, 0, 255), 2)

        # Output the annotated video frame.
        if showVideo:
            cv2.imshow('VideoOutput', frame)

        # Clear the stream in preparation for the next frame.
        rawCapture.truncate(0)

        # Check for keypresses; 'q' quits.
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            print("Q Pressed")
            break
finally:
    # Always release the window and camera resources, even on error.
    cv2.destroyAllWindows()
    camera.close()