12-4FaceAutn.py

import cv2
import os
import numpy as np

def __main():
    global infoFlag
    global faces
    global labels
    global authCount
    message = ""

    # Train the LBPH recognizer on the face images loaded at startup
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(faces, np.array(labels))

    cap = cv2.VideoCapture(0, cv2.CAP_V4L)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

    if not cap.isOpened():
        print('Could not open the video camera')
        exit()
    while True:
        ret, img = cap.read()
        # Per-frame processing
        dst = setResultArea(img)      # crop the area used for authentication
        camFace = getCameraFace(dst)  # get the face image for authentication
        if len(camFace) != 0:         # size is 0 when no face image was captured
            infoFlag = True
            camFace = resizeAuthImg(camFace, faces[0])  # match the camera face to the training image size
            camFace = cv2.cvtColor(camFace, cv2.COLOR_BGR2GRAY)
            # Compare against the training data; returns the closest label number
            # and a confidence value (lower means a closer match)
            labelNum, confidence = recognizer.predict(camFace)
            if confidence > 50:
                authCount = 0
            else:
                message = 'Label={0} level={1}'.format(labelNum, int(confidence))
                infoFlag = False
                print(message)

        if infoFlag:
            message = 'During verification'
            setInfo(img, message)  # message shown while authentication is in progress

        cv2.imshow('Face Auth', img)
        if cv2.waitKey(10) > -1:
            break

    cap.release()
    cv2.destroyAllWindows()

def init(src):
    h = src.shape[0]
    w = src.shape[1]
    x = w / 2
    y = h / 2
    rectLength = w * 0.3
    x1 = int(x - (rectLength / 2))
    y1 = int(y - (rectLength / 2))
    x2 = int(x + (rectLength / 2))
    y2 = int(y + (rectLength / 2))
    return x1, y1, x2, y2

def setResultArea(src):
    x1, y1, x2, y2 = init(src)
    dst = src[y1:y2, x1:x2]
    cv2.rectangle(src, (x1, y1), (x2, y2), (255, 255, 255), 2)
    return dst

def resizeAuthImg(src, baseImg):
    height = baseImg.shape[0]
    width = baseImg.shape[1]
    dst = cv2.resize(src, (width, height), interpolation=cv2.INTER_LINEAR)
    return dst

def getCameraFace(img):
    global cascade
    global authCount
    face = []
    gray = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)
    bodyRect = cascade.detectMultiScale(image=gray, scaleFactor=1.05, minNeighbors=10, minSize=(100, 100))
    for x, y, w, h in bodyRect:
        authCount += 1
        if authCount == 10:  # frames right after capture starts often fail, so adopt the 10th detection for authentication
            face = img[y:y+h, x:x+w]
        cv2.rectangle(img=img, pt1=(x, y), pt2=(x + w, y + h), color=(0, 255, 255), thickness=3)
    return face

def loadFace():
    faceImages = list()
    fileCount = 0
    baseImg = []
    path = ('1', '2', '3')  # folder names where the training photos are stored
    for i, folder in enumerate(path):
        for fileName in os.listdir(folder):
            print(fileName)
            face = cv2.imread(folder + '/' + fileName)
            if fileCount == 0:
                baseImg = face  # the first image defines the common training size
            face = resizeAuthImg(face, baseImg)
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            faceImages.append(face)
            labels.append(int(folder))
            fileCount += 1
    return faceImages

def setInfo(img, msg):
    cv2.rectangle(img, (50, 50), (600, 120), (0, 0, 0), -1)
    cv2.putText(img=img, text="{0}".format(msg), org=(100, 100), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1.5, color=(255, 255, 255), lineType=cv2.LINE_AA)

if __name__ == '__main__':
    cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    authCount = 0
    infoFlag = False
    labels = []
    faces = loadFace()
    __main()
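
The listing expects the training photos to sit in folders named 1, 2, and 3 next to the script and the file haarcascade_frontalface_default.xml to be in the working directory; cv2.face.LBPHFaceRecognizer_create() is only available when the opencv-contrib-python package is installed. The following pre-flight check is a minimal sketch, not part of the original listing: the folder names and the cascade file name are taken from the code above, while the script name check_setup.py and the function check_setup() are assumptions added for illustration.

# check_setup.py -- minimal pre-flight check for 12-4FaceAutn.py (illustrative sketch)
import os
import cv2

def check_setup(folders=('1', '2', '3'), cascadeFile='haarcascade_frontalface_default.xml'):
    ok = True
    # cv2.face (the LBPH recognizer) is provided by opencv-contrib-python
    if not hasattr(cv2, 'face'):
        print('cv2.face not available: install opencv-contrib-python')
        ok = False
    # The Haar cascade file must exist and load correctly
    if cv2.CascadeClassifier(cascadeFile).empty():
        print('Cascade file missing or unreadable: ' + cascadeFile)
        ok = False
    # Every training folder should contain at least one readable image
    for folder in folders:
        if not os.path.isdir(folder):
            print('Training folder not found: ' + folder)
            ok = False
            continue
        images = [f for f in os.listdir(folder) if cv2.imread(os.path.join(folder, f)) is not None]
        print('{0}: {1} readable image(s)'.format(folder, len(images)))
        if len(images) == 0:
            ok = False
    return ok

if __name__ == '__main__':
    print('Setup OK' if check_setup() else 'Setup incomplete')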