import cv2
import numpy as np
def anonymize_face_pixelate(image, blocks=3):
    # divide the input image into an NxN grid of blocks
    (h, w) = image.shape[:2]
    xSteps = np.linspace(0, w, blocks + 1, dtype="int")
    ySteps = np.linspace(0, h, blocks + 1, dtype="int")
    # loop over the blocks in both the x and y direction
    for i in range(1, len(ySteps)):
        for j in range(1, len(xSteps)):
            # compute the starting and ending (x, y)-coordinates
            # for the current block
            startX = xSteps[j - 1]
            startY = ySteps[i - 1]
            endX = xSteps[j]
            endY = ySteps[i]
            # extract the ROI using NumPy array slicing, compute the
            # mean of the ROI, and then draw a filled rectangle with the
            # mean BGR values over the ROI in the original image
            roi = image[startY:endY, startX:endX]
            (B, G, R) = [int(x) for x in cv2.mean(roi)[:3]]
            cv2.rectangle(image, (startX, startY), (endX, endY),
                          (B, G, R), -1)
    # return the pixelated image
    return image
cap = cv2.VideoCapture("assets/fashion.mp4")
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
while True:
    ret, frame = cap.read()
    # stop when the video ends or the frame cannot be read
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detectMultiScale(gray, scaleFactor, minNeighbors)
    # a good scaleFactor is between 1 and 1.5; smaller values are more accurate but slower
    # a good minNeighbors is between 3 and 6
    # detect faces
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        #cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # pixelate each detected face region and write it back into the frame
        face = frame[y:y + h, x:x + w]
        frame[y:y + h, x:x + w] = anonymize_face_pixelate(face, 5)
    cv2.imshow('frame', frame)
    # read the key once per frame: 'q' quits, 'p' pauses
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
    if key == ord('p'):
        cv2.waitKey(-1)  # wait until any key is pressed
cap.release()
cv2.destroyAllWindows()
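
The same anonymize_face_pixelate function works on a single still image as well, which is a quick way to sanity-check the block count before running it on video. The sketch below reuses the function and the Haar cascade from above; the image path person.jpg is just a placeholder, so swap in your own file and adjust the number of blocks to taste.

image = cv2.imread("assets/person.jpg")  # hypothetical path, replace with your own image
if image is None:
    raise SystemExit("could not read the test image")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
    # more blocks = finer pixelation, fewer blocks = stronger anonymization
    image[y:y + h, x:x + w] = anonymize_face_pixelate(image[y:y + h, x:x + w], 8)
cv2.imshow('pixelated', image)
cv2.waitKey(0)
cv2.destroyAllWindows()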