Suggest Edits

Trueface Visionbox API

 

A full reference with code examples for these endpoints can be found here

These endpoints cover the following image:

trueface/visionbox-cpu:latest
trueface/visionbox-gpu:latest

Authentication

Set an "x-auth" header equal to the token provided.

headers = {
  "x-auth":"your_token",
  "content-type":"application/json"
}

Enroll returns encoded, json-safe features representing a human face

data = {
    "source":base64.b64encode(open('<path to your image>', 'rb').read()).decode('utf-8'),
    "namespace": "client123",
    "collection_id": "home",
    "label": "Nezare Chafni"
}

r = requests.post(BASEURL+"/enroll", json=data)

Sample response

# data contains the features extracted from the enrolled image

{
    'execution_time': 0.4584970474243164, 
    'message': 'enroll successful', 
    'data': 'Nj75u9/jvDwEzzq9I+Z0PXwgd73LB+Y8+FD7PNqgR702mUi9NOBbvXfeBr2Fo8K9gfkEvchT/DwUYTS9XlxAPS9xlbzbeOS8MQFwPaB9DLz7/zW9LESBPaPYmjy/8Jq5HpZgPBbyL73qFoA7x0QwvfCVg7wwvEg9KAMSvf17lb0GR9G9r0N9PK4mo71B/4M93qxMPcOH37sXdwo+y/2zPEXEy72sjI290MC0u5/PuDy7Qrq9NjkXPQMsTb2KKYU80j0vPS+f4jzMKbK8ghZbvdBcTrxuSEK6O9QWPHdSCb2YX6A96+zFPBqUBjvtom69NXVBvXQaFb0od7g8sO/cPBaQID15H6K7PqPOvMWiCL3NSAa9bwQvvHTSzT3vAxa9cJDjPXCIsDxVolY87fP3vZvUEDsnpaK9jm5Yvf7hfLxpZ5G8u1CEu+B71byhGgS8l/NFPXE6pTzK6og91TTpPEkFnj2s06a98VmjOyrf8LyI+i49wjeavf527bvPlFE9B8xLvZTai73ZhZ48MNnJPBqzDTwIyMI7vRscPHQKgjzYn/A8iBIzvc+tOD1Wggg9PPXUvL3Erjw8E029+YwOPHJHXz28thm9cMZSvWXGLr2Wjn+80j6mPFj8Ir2IQv+8K4Kpvf3gDb3DJ/G8ANnEPJrWu7xUB5U85jFSPSu3qDxoQKe969CEvXxjtjyixra8MQkJvSzF6DvLZGQ7SMYBPYUBPb3GIx89P6GbPImOIrrfKnK9CMT7PLwqm7297ai9fWWTPG8qOTwNE6O8JrC7vP/CCj0iNJM9VcbXvXiqWzzS44E9MZaFvW0iBrsWpQk7g2ZbvaoTxrvqniA9vv1SPYVXjz10wcg8yTMPPODXkr2zeTk90VwcvTr4OTwxjms9Ejl2PJ6vA736h/E8q7uJPPwP77xddYS8rPgivfu3xzzlXk08ujPcPG1w5ryzNwG9m8s+vLXTlDyCRtk8gGGXvBXdFb1WL9S855tTvI7iYr1YWri8zaE3vMEqjryGeAE9qinuPGjQgjyGDwo9Gs1JPbBwXD2oLZi9rlW3vTtA5DylAZK8wbMOPfAR6bz1dRw7dsBfvf3Ad70pJ308ICiDPGGG/byw/vI8N5sjvQ4bBL4ejSY7DPf5vPXsAD1qF6w5vbtcvLaCxjq1Evy9TmXVu5h57zoc7848eSkSvfKoozwmL168+1ZIur7ha72KMSY8o+YEPd/olDyzjhe9ftAEPSdl8DzKj189ssd3vLf7V72t6ak9HIiNPNCbNDy1dL69tj5BvUoAab1fHYq8omOgPa9Okr3BeFQ9Et9PPWdjlz273PW6iPIbPV2wprwbi4C9GMNBvV+goz02CMI7MP+ZvMNSl7yrPTu9YxSmvbr8nbtVZAa9KqU6u9Hu6DzbNoy9KZryumLfBD07zRI9vqJ8PS+Lc70eL3y8dTkNOkrXKDpBGQU9cOpNPYi3Y7wE7Mi9UdB7Pcpa+7v2lXa8u3kAvZMOFjtGtTu8XZgRvLCdgD2OcRw8ucwPPYcN8jso+J29P1QvPbik9Tsmsu68ZFWVvQ5rf73iwT69CWpuvJSLVT15KB29NAJgPb64Br0NgSk9e4fxO4IShjx7Q9M9VQ65vETTojzaWSa9gZYivWvYqbwuaku9W0QNPUoAIj0b7tQ8Yg+IPHF1Sz2D2fq8YVSnvWHpdzxhgMI8JMV+vRZySz1N2OI7Vx7hvDD7yzsQA107f0KnPCpSar1B8Vi9XXDDu9fNFj2CPXA8aMCcPYCZjj3qoyA6HvIQvFUzp7zBW6O9RX21O4hvRzqvXYQ8ayUFvf7sGj0F2+48n32PPHEqsT1SfQQ9JFG5POhPYrwCipC8IzOSPVZ4Mb3PUi69jpvzvHfK8bxIhW88nTC/PBs5ET0UqBS92qfxOr0tvr2Xxma9JniRvOLjlb0DgCS9XBCEvc84Gj3+8Ei84Gg3uKGEyT25lb28q1CLuo63gT3pqLg7FAR
SvADfu72ZDJO8gu4YPevBZztzNGO9kdYMvTMUej1ftoW954txvVCpJr0er1g9llO3vOzpW7tpZsS8zVbOvLHbRj0/qMw88OJLPd6BEz0CEYm97rp5vGgunDte2jQ93KlQOyhgJ71J4cO9rtOmPOPB/zyJCWC8wTqNPJJB5rvu5AI9DHgQvYbPEDwlZKo8V0wFvXstnTw/czO9vXP0vbBe3zsaPRq906NCvQ3UzbzgZxI9ZuGMPA1WNr1nlz89yl3evBeCWjwBQRi96mXTPZMCyLxmTRw90GIHPY/a5zuGGWe9EcSSPaNOMj33md07nQLBPWxVKjzqwxE8/dMPvWLMOL2lha+9ymgevLcTrDqfswU8cs+9PIRtIL3jDay8nK2auhgdSL1ZF2O9bc6FvGFHZDwTqxY93+daPLxWnz2SKtI8VnyNPCgAqzwETD88Oy4yvNjwGT1YwgQ8VYl2PLv9XTwSJ189mxvvPN0bhT3qQSU9YRCVO/D2CD1KLUG90mjsvKytuT2Bly87pxAnPaRoeb1i8ja9ky7zPKen5jwljuE8t7UGPaMwmbujNKe80tvBvCqBCD03Apa7Qml4PHNYarzCe9W8hH26PdlITL2nGK08ROEkPfb9gTuN9mC93yvOvAJnezzG+ng9Hc+lvLCdxL0+RyO9ifnBPAZ/KD0pfuA9E29nO6XaNz01IaM9Wi1pPGZzIz1mJ928R1G2vOpcFj3ifls8NrWpPVBWUb0=', 
    'success': True
}

This endpoint performs 1:1 matching. It accepts images and features.

The api produces a distance measurement for the biggest face in the source and target, the default threshold is 1.5 (anything below is a positive match, anything above is a negative match). The threshold can be adjusted by simply passing a threshold float in the request.

Compare two URLs

import requests

url = BASEURL + "/match"

querystring = {
 "source_url":"https://upload.wikimedia.org/wikipedia/commons/thumb/5/51/Brad_Pitt_Fury_2014.jpg/220px-Brad_Pitt_Fury_2014.jpg",
  "target_url":"https://m.media-amazon.com/images/M/MV5BMjA1MjE2MTQ2MV5BMl5BanBnXkFtZTcwMjE5MDY0Nw@@._V1_UX214_CR0,0,214,317_AL_.jpg"}


response = requests.request("GET", url, headers=headers, params=querystring)

Match two images

url = BASEURL + "/match"
data = {
    "image":base64.b64encode(open(image, 'rb').read()).decode('utf-8'),
    "target":base64.b64encode(open(image, 'rb').read()).decode('utf-8'),
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)

print(request.text)

Match two features

url = BASEURL + "/match"
data = {
    "source_feature":{test_features},
    "target_feature":{test_features},
    "use_similarity":False
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)
print(request.text)

Sample response

{
  "execution_time": 0.664146900177002, 
  "message": "Operation performed successfully", 
  "data": {
    "distance": 0.0, 
    "match": true
  },
  "success": true}

When use_similarity = False we calculate the L2 distance between two features.

When use_similarity = True we calculate the similarity between two features.

If you want more information on the difference between similarity and distance, here is a good explanation.

To use similarity instead of distance when matching, simply pass "use_similarity": True in the request.

url = BASEURL + "/match"
data = {
    "image":{base64.b64encode(open(image).read())},
    "target":{base64.b64encode(open(image).read())},
    "use_similarity":True
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)
print(request.text)

Sample response

{
  "execution_time": 3.775343894958496, 
  "message": "Operation performed successfully", 
  "data": {
    "similarity": 0.9999999403953552
    }, 
  "success": true}

Match against multiple features

url = BASEURL + "/match"


data = {
    "source_feature":{test_features},
		"target_features":{
				"label1":test_feature,
				"label2":test_feature,
				"label3":test_feature
			}
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)
print(request.text)

Sample response

{
  'execution_time': 0.24106812477111816,
  'message': u'match successful',
  'data': [{
  	'label1': {
  		'message': 'match successful',
  		'data': [{
  			'score': 0.19585075974464417,
  			'match': False}]
			}
		}, {
      'label2': {
        	'message': 'match successful',
        	'data': [{
            'score': 0.19585075974464417,
            'match': False
          }]
      }
    }, {
      'label3': {
        'message': 'match successful', 
        'data': [{
          'score': 0.19585075974464417, 
          'match': False}]
      }
    }
	], 
	'success': True
}
Suggest Edits

Collection

 

Create or Update a Collection

This call creates a collection if it doesn't exist.

This call also updates a collection with an enrollment if the collection exists.

Collections are cached on the container for use with /identify.

url = BASEURL + "/collection"
data = {
    "label":"Nezare Chafni",
    "features":"features you got from /enroll",
    "collection_id":"home",
    "namespace":"client123"
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)

print(request.text)

Retrieve a collection

url = BASEURL + "/collection"

data = {
    "collection_id":"home",
    "namespace":"client123"
}

request = requests.get(url, data=json.dumps(data), headers=HEADERS)

if not os.path.exists(data['namespace']):
    os.makedirs(data['namespace'])

with open('%s/%s.trueface' % (data['namespace'], data['collection_id']), 'wb') as collection:
    collection.write(request.content)

Put a collection on the container

url = BASEURL + "/collection"

data = {
    "collection_id":"home",
    "namespace":"client123"
}
files = {
    "collection":open('%s/%s.trueface' % (data['namespace'], data['collection_id'])),
}

request = requests.put(url, data=data, files=files, headers=HEADERS)

print(request.text)

Delete a label from a collection

url = BASEURL + "/delete-from-collection"
data = {
  "collection_id": "home",
  "namespace": "client123",
  "label": "test1",
}
request = requests.post(url, json=data)

Sample response:

{
    'execution_time': '0.00479793548584 seconds', 
    'message': 'removed label test1 from collection home successfully', 
    'success': True
}
 
url = BASEURL + "/identify"

data = {
		"source":base64.b64encode(open('./nezare.jpeg', 'rb').read()).decode('utf-8'),
		"collection_id":"home",
		"namespace":"client123"
	}

request = requests.post(url, data=json.dumps(data))

Sample response:

{
	'execution_time': 0.42548680305480957,
	'message': 'Identify successful', 
	'data': [{'bounding_box': [137.2210047543049, 
				   81.69231644272804, 
				   309.59957841038704, 
				   330.9033211097121, 
				   0.9989269375801086],
		  'predicted_label': 'Nezare Chafni', 
	  	  'probability': 0.08898697565231924, 
		  'similarity': 0.07858646661043167}
		], 
	'success': True
 }
Suggest Edits

Spoof Detection

 
url = BASEURL + '/spdetect'

data = {
    'image':base64.b64encode(open(path, 'rb').read()).decode('utf-8')
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)
        
print(request.text)
#spoof attempt
{"message": "Probability this is real: 0.0003", "data": {"score": 0.0003331679035909474}, "success": true}

#real face
{"message": "Probability this is real: 0.9998", "data": {"score": 0.9997538924217224}, "success": true}
Suggest Edits

Trueface Agebox

 

Download & Run

sudo docker run -e "TOKEN=$YOUR_TOKEN" -p 8080:8080 -it trueface/agebox:latest

Test Agebox

curl http://localhost:8080/predict?url=https://i.pinimg.com/originals/10/ac/e6/10ace651ae2f286b3dfa4bb8770ff377.jpg

/Predict

URL PARAMS

field
type
description

url

image url

a url to the image you'd like to process

JSON Payload

field
type
description

url

string

a url to the image you'd like to process

image

string

base64 encoded image

FORM DATA

field
type

url

string

a url to the image you'd like to process

image

binary

image file

Sample responses

For a call with multiple faces present in the image

{
    "message": "Operation successful.",
    "data": [
        {
            "estimated_age": 28.315752029418945,
            "face_location": [
                809.9471360743046,
                385.23763477802277,
                1166.1402197182178,
                838.8555288016796
            ]
        },
        {
            "estimated_age": 33.08721160888672,
            "face_location": [
                461.4064075946808,
                177.87696634232998,
                874.9451689720154,
                768.3043241016567
            ]
        }
    ],
    "success": true
}

For a call with no faces present

{
    "message": "no faces found.",
    "data": {},
    "success": false
}
Suggest Edits

TF Dashboard

 
trueface/onprem-dashboard

The Trueface Dashboard image provides a UI for viewing and managing Trueface servers you are running. You can also view performance stats like CPU, memory consumption and real-time logs.

Here, you can also start new servers and stop or restart existing ones.

Get started with the below command:

sudo docker run -dit -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock --name trueface_dashboard trueface/onprem-dashboard

Then navigate to http://localhost:5000 or to the IP port you're running on to view the server information.

In order for your servers to appear in the dashboard, make sure the container name starts with 'trueface'.

The python SDK start-server command automatically names the container as 'trueface_[id]'. However, if you started a container manually you can simply rename it with:

sudo docker rename [container_id] trueface_server12
Suggest Edits

Trueface ALPR (License Plate Recognition)

 

This docker image provides endpoints to detect cars and license plates.

sudo docker run -e "TOKEN=<your token>" -p 8080:8080 -it trueface/alprbox:latest

A postman collection that hits the endpoint with different parameters can be found here

Suggest Edits

predict with URL

 

title: "predict with URL"

excerpt: ""

predict car model, make and color

import requests

data = {
    "url":"https://i.stack.imgur.com/xPxvv.jpg",
    "make_model":True,   # detect the make and model of the car
    "car_detect":True,   # return the location of the car in the image
    "color_detect":True, # return the colors of the car
    "color_limit":2,     # limit maximum number of colors returned
}

r = requests.get("http://localhost:8080/predict", params=data)
print(r.json())

Sample response

{
    "message": "Operation Successful.",
    "data": [
        {
            "estimated_color": [
                [
                    0.22086242394267377,
                    [
                        3642,
                        "darkslategrey",
                        [
                            47,
                            79,
                            79
                        ]
                    ]
                ],
                [
                    0.2161695862711494,
                    [
                        666,
                        "dimgray",
                        [
                            105,
                            105,
                            105
                        ]
                    ]
                ]
            ],
            "object_location": [
                44,
                97,
                427,
                324
            ],
            "type": "truck",
            "license_plate": [
                "M666Y0B"
            ],
            "make_model": {
                "make": "Chrysler",
                "model": "PT-Cruiser",
                "probability": 99.01735782623291
            }
        }
    ],
    "success": true
}
Suggest Edits

predict with base64 image

 

predict car model, make and color

url = BASEURL + "/predict"

data = {
  "image":base64.b64encode("<path to your image>"
  "url":"https://i.stack.imgur.com/xPxvv.jpg",
  "make_model":False,   # detect the make and model of the car
  "car_detect":False,   # return the location of the car in the image
  "color_detect":False, # return the colors of the car
  "color_limit":2,      # limit maximum number of colors returned
                         
}

r = requests.get("http://localhost:8080/predict", json=data)
print r.json()
        
return True
     

Sample response

{
    "message": "Operation Successful.",
    "data": [
        {
            "estimated_color": null,
            "object_location": [
                44,
                97,
                427,
                324
            ],
            "type": "truck",
            "license_plate": [
                "M666Y0B"
            ],
            "make_model": {
                "make": null,
                "model": null,
                "probability": 0
            }
        }
    ],
    "success": true
}
Suggest Edits

Spoof Detection

 

Docker image: trueface/tf-spoof

url = BASEURL + '/spdetect'

data = {
    'image':base64.b64encode(open(path, 'rb').read()).decode('utf-8')
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)
        
print(request.text)
#spoof attempt
{"message": "Probability this is real: 0.0003", "data": {"score": 0.0003331679035909474}, "success": true}

#real face
{"message": "Probability this is real: 0.9998", "data": {"score": 0.9997538924217224}, "success": true}