Face_Recognition: Face Recognition Functions Explained

Loading a face image file

load_image_file(file, mode='RGB'):

Loads an image file via PIL.Image.open.

mode has two options: 'RGB' (3 channels) and 'L' (single channel, grayscale).

Returns a numpy.array.
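
A minimal usage sketch (face.jpg is a placeholder file name):

import face_recognition

# Load the file as a 3-channel RGB numpy array; mode='L' would return grayscale
image = face_recognition.load_image_file('face.jpg', mode='RGB')
print(image.shape)  # (height, width, 3)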

Locating faces (face detection and segmentation)

face_locations(img, number_of_times_to_upsample=1, model="hog"):

Parameters:

img: a numpy.array containing the image in which to locate faces.

number_of_times_to_upsample: how many times to upsample the image while searching; higher values find smaller faces at the cost of speed.

model: the detection model to use. 'hog' is less accurate but fast on a CPU; 'cnn' is a more accurate deep-learning detector, but it is slower and needs GPU/CUDA acceleration to be practical.

Returns a list of face locations, each as a tuple (top, right, bottom, left).
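
A short detection sketch, again using the placeholder file face.jpg:

import face_recognition

image = face_recognition.load_image_file('face.jpg')
# Upsample once and use the fast HOG detector; pass model='cnn' if a GPU is available
locations = face_recognition.face_locations(image, number_of_times_to_upsample=1, model='hog')
for (top, right, bottom, left) in locations:
    print('Face found at', top, right, bottom, left)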

Encoding faces

face_encodings(face_image, known_face_locations=None, num_jitters=1, model="small"):

Parameters:

face_image: the image containing the faces to encode, as a numpy.array.

known_face_locations: the face locations. If None, _raw_face_locations is called internally (HOG model) to find the faces first.

num_jitters: how many times to re-sample the face when computing the encoding; defaults to 1.

model: which landmark predictor to use; 'large' predicts 68 facial landmarks, 'small' predicts 5.

Returns a list of 128-dimensional feature vectors, one per face.
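
A minimal encoding sketch for the placeholder file face.jpg:

import face_recognition

image = face_recognition.load_image_file('face.jpg')
locations = face_recognition.face_locations(image)
# Passing the known locations skips a second detection pass;
# model='large' would use the 68-point landmark predictor instead of the 5-point one
encodings = face_recognition.face_encodings(image, known_face_locations=locations, num_jitters=1, model='small')
print(len(encodings), 'face(s) encoded, each as a 128-dimensional vector')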

Comparing faces

compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6):

Parameters:

known_face_encodings: a list of already-computed known face encodings.

face_encoding_to_check: the single face encoding to check against the list.

tolerance: the maximum distance that still counts as a match (default 0.6). The distance is computed as np.linalg.norm(face_encodings - face_to_compare, axis=1).

Returns a list of boolean comparison results.
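
A comparison sketch, with known.jpg and unknown.jpg as placeholder file names:

import face_recognition

known_image = face_recognition.load_image_file('known.jpg')
unknown_image = face_recognition.load_image_file('unknown.jpg')
known_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

# True where the Euclidean distance is below the tolerance (smaller = stricter)
results = face_recognition.compare_faces([known_encoding], unknown_encoding, tolerance=0.6)
print(results)  # e.g. [True]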

 

Attached below is a test class I wrote myself:

import face_recognition
import cv2
import numpy as np
import time



class Wqs_Face_Recognition:

    def __init__(self, tolerance=0.6, num_of_times_to_upSample=1, model='cnn', imageType=cv2.IMREAD_COLOR, isDebug=False):
        # Keep the known encodings and names as instance attributes so that
        # separate instances do not share the same mutable lists
        self.know_Encodings = []
        self.know_names = []
        self.tolerance = tolerance
        # face_recognition expects the lowercase model names 'hog' or 'cnn'
        self.model = model
        self.num_of_times_to_upSample = num_of_times_to_upSample
        self.imageType = imageType
        self.isDebug = isDebug

    # Set the recognition tolerance; smaller values mean stricter matching
    def SetTolerance(self, tolerance):
        self.tolerance = tolerance

    # Get the recognition tolerance
    def GetTolerance(self):
        return self.tolerance

    def Set_Num_of_times_to_upSample(self, num_of_times_to_upSample):
        self.num_of_times_to_upSample = num_of_times_to_upSample

    def Get_Num_of_times_to_upSample(self):
        return self.num_of_times_to_upSample

    def SetModel(self, model):
        self.model = model

    def GetModel(self):
        return self.model

    # Detect every face in an image and save each cropped face to its own file
    def FaceSegmentation(self, faceFile: str = None, faceMat: np.ndarray = None, faceName: str = None):
        if faceFile is not None:
            # Note: cv2.imread returns BGR while face_recognition expects RGB,
            # so detection still works but accuracy can suffer slightly
            faceMat = cv2.imread(faceFile)
        try:
            shp = faceMat.shape
            if self.isDebug:
                print('FaceSegmentation faceMat shape', shp)
        except AttributeError:
            print('FaceSegmentation faceMat is None')
            return False
        locations = face_recognition.face_locations(faceMat, self.num_of_times_to_upSample, model=self.model)
        i = 0
        for location in locations:
            top, right, bottom, left = location
            # Derive the output name from the input file name if none was given
            if faceName is None and faceFile is not None:
                index = faceFile.find('.')
                faceName = faceFile[:index]

            outName = faceName + '_name_' + str(i) + '.jpg'
            i = i + 1
            faceCrop = faceMat[top:bottom, left:right]

            cv2.imwrite(outName, faceCrop)

    # Compute a face encoding for an image and add it to the known lists
    def Know_Encoding(self, faceFile: str = None, faceMat: np.ndarray = None, faceName: str = None, index=-1):
        if faceFile is not None:
            faceMat = cv2.imread(faceFile)
        try:
            shp = faceMat.shape
            if self.isDebug:
                print('Know_Encoding faceMat shape', shp)
        except AttributeError:
            print('Know_Encoding faceMat is None')
            return False
        if faceName is None and faceFile is not None:
            loc = faceFile.find('.')
            faceName = faceFile[:loc]
        if faceName is None:
            faceName = 'NoName'
        tm = time.time()
        encodings = face_recognition.face_encodings(faceMat, model='small')
        if not encodings:
            print('Know_Encoding found no face in the image')
            return False
        face_encoding = encodings[0]
        tm = time.time() - tm
        print('encoding time', tm)
        if index == -1:
            self.know_Encodings.append(face_encoding)
            self.know_names.append(faceName)
        else:
            self.know_Encodings.insert(index, face_encoding)
            self.know_names.insert(index, faceName)

    # Add an already-computed face encoding and its name to the known lists
    def LoadKnow_Encoding(self, face_encoding: np.ndarray, name: str, index=-1):
        if index == -1:
            self.know_Encodings.append(face_encoding)
            self.know_names.append(name)
        else:
            self.know_Encodings.insert(index, face_encoding)
            self.know_names.insert(index, name)

    # Detect every face in an image and look each one up in the known encodings
    def AllRecognition(self, faceFile: str = None, faceMat: np.ndarray = None):
        if faceFile is not None:
            faceMat = cv2.imread(faceFile)
        try:
            shp = faceMat.shape
            if self.isDebug:
                print('AllRecognition faceMat shape', shp)
        except AttributeError:
            print('AllRecognition faceMat is None')
            return False
        face_locations = face_recognition.face_locations(faceMat, self.num_of_times_to_upSample, model=self.model)
        faceEncodings = face_recognition.face_encodings(faceMat, face_locations)

        nameArray = []
        indexArray = []
        locationArray = []

        locationIndex = -1

        for face_location, face_encode in zip(face_locations, faceEncodings):
            index = 0
            locationIndex = locationIndex + 1

            (top, right, bottom, left) = face_location
            face = faceMat[top:bottom, left:right]
            # Save the current crop so it can be inspected while debugging
            cv2.imwrite('testFace.jpg', face)

            matches = face_recognition.compare_faces(self.know_Encodings, face_encode, self.tolerance)

            # Record every known encoding that matched this face
            for match in matches:
                if match:
                    nameArray.append(self.know_names[index])
                    indexArray.append(index)
                    locationArray.append(face_locations[locationIndex])
                index += 1

        if self.isDebug:
            if indexArray:
                for x in range(len(indexArray)):
                    print('Name', nameArray[x])
                    print('index', indexArray[x])
                    print('face_locations', locationArray[x])
            else:
                print('Unknown person')
        return nameArray, indexArray, locationArray

    # Check whether one specific known face appears in an image
    def Recognition(self, know_Encoding: np.ndarray, know_name: str, faceFile: str = None, faceMat: np.ndarray = None):
        if faceFile is not None:
            faceMat = cv2.imread(faceFile)
        try:
            shp = faceMat.shape
            if self.isDebug:
                print('Recognition faceMat shape', shp)
        except AttributeError:
            print('Recognition faceMat is None')
            return False
        face_locations = face_recognition.face_locations(faceMat)
        faceEncodings = face_recognition.face_encodings(faceMat, face_locations)

        for face_location, face_encode in zip(face_locations, faceEncodings):

            (top, right, bottom, left) = face_location
            face = faceMat[top:bottom, left:right]
            if self.isDebug:
                cv2.imwrite('testFace.jpg', face)

            # Only one known encoding is passed in, so matches has a single entry
            matches = face_recognition.compare_faces([know_Encoding], face_encode, self.tolerance)
            if matches[0]:
                if self.isDebug:
                    print('Known =', know_name)
                return True, know_name

        # No face in the image matched the known encoding
        if self.isDebug:
            print('Unknown person')
        return False, None

# Draw a rectangle around every (top, right, bottom, left) location and show the image
def drawRect(file: str, locations: list):
    img = cv2.imread(file)
    print('drawRect locations', locations)
    for (top, right, bottom, left) in locations:
        cv2.rectangle(img, (left, top), (right, bottom), color=(255, 0, 0))
    cv2.imshow(file, img)
    cv2.waitKey(0)

def main():
    wqsRecognition = Wqs_Face_Recognition(0.4)

    # Crop the faces out of the known-person images
    wqsRecognition.FaceSegmentation('sunli1.jpg')
    wqsRecognition.FaceSegmentation('yangmi.jpg')

    # Encode the cropped faces and register them as known people
    wqsRecognition.Know_Encoding('sunli1_name_0.jpg', faceName='sunli1')
    mat = cv2.imread('yangmi_name_0.jpg')
    wqsRecognition.Know_Encoding(faceMat=mat, faceName='yangmi')

    # Find and identify every known face in the test image
    mat = cv2.imread('ymTest.jpg')
    nameArray, indexArray, locations = wqsRecognition.AllRecognition(faceMat=mat)

    drawRect('ymTest.jpg', locations)

    # Relax the tolerance and check for one specific person
    wqsRecognition.SetTolerance(0.6)
    wqsRecognition.Recognition(wqsRecognition.know_Encodings[1], wqsRecognition.know_names[1], 'ymTest.jpg')

if __name__ == '__main__':
    main()

 
