Live Face Recognition in under 90 Seconds

0.4 Update

In version 0.4, fr.draw_face_box() was renamed to fr.draw_box(); update any existing code accordingly.
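For example, a call that previously looked like the first line below now becomes the second (frame and bounding_boxes are the same variables used in the main loop later in this guide):

fr.draw_face_box(frame, bounding_boxes[i])   # 0.3 and earlier
fr.draw_box(frame, bounding_boxes[i])        # 0.4 and later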

Steps

Download Models

Download and unzip fd_model.zip and model-lite.zip from the release page.
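If you prefer to script this step, here is a minimal Python 3 sketch using only the standard library. The release URL is a placeholder, not the real location, so substitute the actual links from the release page before running it.

import urllib.request
import zipfile

RELEASE_BASE = "https://example.com/trueface/releases"  # placeholder -- use the real release page URL

for name in ("fd_model.zip", "model-lite.zip"):
    archive_path, _ = urllib.request.urlretrieve(RELEASE_BASE + "/" + name, name)
    with zipfile.ZipFile(archive_path) as archive:
        archive.extractall(".")  # expected to unpack to ./fd_model and ./model-lite (see paths below)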

Import SDK & Video Stream Classes

from trueface.recognition import FaceRecognizer
from trueface.video import VideoStream, QVideoStream
import cv2

Create a Collection of Images

Your image collection file structure:

Each subfolder is named after the person whose images it contains.

collection
├── Manuel
│   ├── profile2015.png
│   └── profile.png
└── Nezare
    ├── 10.jpg
    └── 11.jpg
Once the FaceRecognizer below has been initialized, build the collection file from that folder (the full code at the end of this guide shows this ordering):

fr.create_collection('collection', 'collection.npz', return_features=False)
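To sanity-check the layout before building collection.npz, here is a quick standard-library sketch with no SDK calls involved; it simply lists each label folder and the images it contains.

import os

# Each immediate subfolder of ./collection is one person's label;
# list the labels and the images found under each.
for person in sorted(os.listdir('collection')):
    person_dir = os.path.join('collection', person)
    if os.path.isdir(person_dir):
        print(person, '->', sorted(os.listdir(person_dir)))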

Init FaceRecognizer Class


fr = FaceRecognizer(ctx='cpu',
                    fd_model_path='./fd_model',
                    fr_model_path='./model-lite/model.trueface', 
                    params_path='./model-lite/model.params',
                    license='<your sdk token from creds.json>')
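Rather than pasting the token inline, you can read it from creds.json. The key name 'token' below is an assumption for illustration only; check your creds.json for the actual field name.

import json

with open('creds.json') as f:
    token = json.load(f)['token']  # hypothetical key name -- verify in your creds.json

fr = FaceRecognizer(ctx='cpu',
                    fd_model_path='./fd_model',
                    fr_model_path='./model-lite/model.trueface',
                    params_path='./model-lite/model.params',
                    license=token)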

Start Video Capture Session

vcap = VideoStream(src=0).start()
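src=0 selects the default webcam. If you want to run against a recorded clip instead, the one-liner below assumes VideoStream accepts the same src values as cv2.VideoCapture (a file path or stream URL); verify this against the SDK documentation before relying on it.

vcap = VideoStream(src='./sample_clip.mp4').start()  # hypothetical local file; assumes file paths are accepted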

Create Main Loop

while True:
    # read a frame and resize for detection/display
    frame = vcap.read()
    frame = cv2.resize(frame, (640, 480))
    # perform face detection
    bounding_boxes, points, chips = fr.find_faces(frame, return_chips=True, return_binary=True)
    # skip this frame if no faces were found
    if bounding_boxes is None:
        continue
    # loop over the extracted face chips
    for i, chip in enumerate(chips):
        # run identification against the collection
        identity = fr.identify(chip, threshold=0.3, collection='./collection.npz')
        # print the raw result
        print(identity)
        # if the chip was identified, draw its label
        if identity['predicted_label']:
            fr.draw_label(
                frame,
                (int(bounding_boxes[i][0]),
                 int(bounding_boxes[i][1])),
                identity['predicted_label'])
        # draw the bounding box
        fr.draw_box(frame, bounding_boxes[i])

Show Stream

    cv2.imshow('Trueface.ai', frame)
    if cv2.waitKey(33) == ord('q'):
        break
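Once the loop exits, release the camera and close the preview window. cv2.destroyAllWindows() is standard OpenCV; whether VideoStream exposes a stop() method is an assumption here, so confirm it against the SDK before relying on it.

vcap.stop()              # assumed VideoStream method; otherwise release the underlying capture
cv2.destroyAllWindows()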

Full Code

from trueface.recognition import FaceRecognizer
from trueface.video import VideoStream, QVideoStream
import cv2


fr = FaceRecognizer(ctx='cpu',
                    fd_model_path='./fd_model',
                    fr_model_path='./model-lite/model.trueface',
                    params_path='./model-lite/model.params',
                    license='<your sdk token from creds.json>')

fr.create_collection('collection', 'collection.npz', return_features=False)

vcap = VideoStream(src=0).start()

while True:
    frame = vcap.read()
    frame = cv2.resize(frame, (640, 480))
    bounding_boxes, points, chips = fr.find_faces(frame, return_chips=True, return_binary=True)
    if bounding_boxes is None:
        continue
    for i, chip in enumerate(chips):
        identity = fr.identify(chip, threshold=0.3, collection='./collection.npz')
        print(identity)
        if identity['predicted_label']:
            fr.draw_label(frame,
                          (int(bounding_boxes[i][0]),
                           int(bounding_boxes[i][1])),
                          identity['predicted_label'])
        fr.draw_box(frame, bounding_boxes[i])
    cv2.imshow('Trueface.ai', frame)
    if cv2.waitKey(33) == ord('q'):
        break
