The Trueface Developer Hub

Welcome to the Trueface developer hub. You'll find comprehensive guides and documentation to help you start working with Trueface as quickly as possible, as well as support if you get stuck. Let's jump right in!

Suggest Edits

Trueface Visionbox API

 

A full reference with code examples for these endpoints can be found here

These endpoints cover the following image:

trueface/visionbox-cpu:latest
trueface/visionbox-gpu:latest

Authentication

Set an "x-auth" header equal to the token provided.

headers = {
  "x-auth":"your_token",
  "content-type":"application/json"
}
Suggest Edits

Collection

 

Create or Update a Collection

This call creates a collection if it doesn't exist.

This call also updates a collection with an enrollment if the collection exists.

Collections are cached on the container for use with /identify.

url = BASEURL + "/collection"
data = {
    "label":"Nezare Chafni",
    "features":"features you got from /enroll",
    "collection_id":"home",
    "namespace":"client123"
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)

print(request.text)

Retrieve a collection

url = BASEURL + "/collection"

data = {
    "collection_id":"home",
    "namespace":"client123"
}

request = requests.get(url, data=json.dumps(data), headers=HEADERS)

if not os.path.exists(data['namespace']):
    os.makedirs(data['namespace'])

# request.content is bytes, so the file must be opened in binary mode
with open('%s/%s.trueface' % (data['namespace'], data['collection_id']), 'wb') as collection:
    collection.write(request.content)

Put a collection on the container

url = BASEURL + "/collection"

data = {
    "collection_id":"home",
    "namespace":"client123"
}
files = {
    "collection":open('%s/%s.trueface' % (data['namespace'], data['collection_id']), 'rb'),
}

request = requests.put(url, data=data, files=files, headers=HEADERS)

print(request.text)

Delete a label from a collection

url = BASEURL + "/delete-from-collection"
data = {
  "collection_id": "home",
  "namespace": "client123",
  "label": "test1",
}
request = requests.post(url, json=data)

Sample response:

{
    'execution_time': '0.00479793548584 seconds', 
    'message': 'removed label test1 from collection home successfully', 
    'success': True
}

Enroll returns encoded, json-safe features representing a human face. If you don't pass namespace, collection_id and label, enroll will simply return the extracted feature of the biggest face and not store anything in a collection.

data = {
    "source":base64.b64encode(open('<path to your image>', 'rb').read()).decode('utf-8'),
    "namespace": "client123",
    "collection_id": "home",
    "label": "Nezare Chafni"
}

r = requests.post(BASEURL+"/enroll", json=data)

Sample response

# data contains the features extracted from the enrolled image

{
    'execution_time': 0.4584970474243164, 
    'message': 'enroll successful', 
    'data': 'Nj75u9/jvDwEzzq9I+Z0PXwgd73LB+Y8+FD7PNqgR702mUi9NOBbvXfeBr2Fo8K9gfkEvchT/DwUYTS9XlxAPS9xlbzbeOS8MQFwPaB9DLz7/zW9LESBPaPYmjy/8Jq5HpZgPBbyL73qFoA7x0QwvfCVg7wwvEg9KAMSvf17lb0GR9G9r0N9PK4mo71B/4M93qxMPcOH37sXdwo+y/2zPEXEy72sjI290MC0u5/PuDy7Qrq9NjkXPQMsTb2KKYU80j0vPS+f4jzMKbK8ghZbvdBcTrxuSEK6O9QWPHdSCb2YX6A96+zFPBqUBjvtom69NXVBvXQaFb0od7g8sO/cPBaQID15H6K7PqPOvMWiCL3NSAa9bwQvvHTSzT3vAxa9cJDjPXCIsDxVolY87fP3vZvUEDsnpaK9jm5Yvf7hfLxpZ5G8u1CEu+B71byhGgS8l/NFPXE6pTzK6og91TTpPEkFnj2s06a98VmjOyrf8LyI+i49wjeavf527bvPlFE9B8xLvZTai73ZhZ48MNnJPBqzDTwIyMI7vRscPHQKgjzYn/A8iBIzvc+tOD1Wggg9PPXUvL3Erjw8E029+YwOPHJHXz28thm9cMZSvWXGLr2Wjn+80j6mPFj8Ir2IQv+8K4Kpvf3gDb3DJ/G8ANnEPJrWu7xUB5U85jFSPSu3qDxoQKe969CEvXxjtjyixra8MQkJvSzF6DvLZGQ7SMYBPYUBPb3GIx89P6GbPImOIrrfKnK9CMT7PLwqm7297ai9fWWTPG8qOTwNE6O8JrC7vP/CCj0iNJM9VcbXvXiqWzzS44E9MZaFvW0iBrsWpQk7g2ZbvaoTxrvqniA9vv1SPYVXjz10wcg8yTMPPODXkr2zeTk90VwcvTr4OTwxjms9Ejl2PJ6vA736h/E8q7uJPPwP77xddYS8rPgivfu3xzzlXk08ujPcPG1w5ryzNwG9m8s+vLXTlDyCRtk8gGGXvBXdFb1WL9S855tTvI7iYr1YWri8zaE3vMEqjryGeAE9qinuPGjQgjyGDwo9Gs1JPbBwXD2oLZi9rlW3vTtA5DylAZK8wbMOPfAR6bz1dRw7dsBfvf3Ad70pJ308ICiDPGGG/byw/vI8N5sjvQ4bBL4ejSY7DPf5vPXsAD1qF6w5vbtcvLaCxjq1Evy9TmXVu5h57zoc7848eSkSvfKoozwmL168+1ZIur7ha72KMSY8o+YEPd/olDyzjhe9ftAEPSdl8DzKj189ssd3vLf7V72t6ak9HIiNPNCbNDy1dL69tj5BvUoAab1fHYq8omOgPa9Okr3BeFQ9Et9PPWdjlz273PW6iPIbPV2wprwbi4C9GMNBvV+goz02CMI7MP+ZvMNSl7yrPTu9YxSmvbr8nbtVZAa9KqU6u9Hu6DzbNoy9KZryumLfBD07zRI9vqJ8PS+Lc70eL3y8dTkNOkrXKDpBGQU9cOpNPYi3Y7wE7Mi9UdB7Pcpa+7v2lXa8u3kAvZMOFjtGtTu8XZgRvLCdgD2OcRw8ucwPPYcN8jso+J29P1QvPbik9Tsmsu68ZFWVvQ5rf73iwT69CWpuvJSLVT15KB29NAJgPb64Br0NgSk9e4fxO4IShjx7Q9M9VQ65vETTojzaWSa9gZYivWvYqbwuaku9W0QNPUoAIj0b7tQ8Yg+IPHF1Sz2D2fq8YVSnvWHpdzxhgMI8JMV+vRZySz1N2OI7Vx7hvDD7yzsQA107f0KnPCpSar1B8Vi9XXDDu9fNFj2CPXA8aMCcPYCZjj3qoyA6HvIQvFUzp7zBW6O9RX21O4hvRzqvXYQ8ayUFvf7sGj0F2+48n32PPHEqsT1SfQQ9JFG5POhPYrwCipC8IzOSPVZ4Mb3PUi69jpvzvHfK8bxIhW88nTC/PBs5ET0UqBS92qfxOr0tvr2Xxma9JniRvOLjlb0DgCS9XBCEvc84Gj3+8Ei84Gg3uKGEyT25lb28q1CLuo63gT3pqLg7FAR
SvADfu72ZDJO8gu4YPevBZztzNGO9kdYMvTMUej1ftoW954txvVCpJr0er1g9llO3vOzpW7tpZsS8zVbOvLHbRj0/qMw88OJLPd6BEz0CEYm97rp5vGgunDte2jQ93KlQOyhgJ71J4cO9rtOmPOPB/zyJCWC8wTqNPJJB5rvu5AI9DHgQvYbPEDwlZKo8V0wFvXstnTw/czO9vXP0vbBe3zsaPRq906NCvQ3UzbzgZxI9ZuGMPA1WNr1nlz89yl3evBeCWjwBQRi96mXTPZMCyLxmTRw90GIHPY/a5zuGGWe9EcSSPaNOMj33md07nQLBPWxVKjzqwxE8/dMPvWLMOL2lha+9ymgevLcTrDqfswU8cs+9PIRtIL3jDay8nK2auhgdSL1ZF2O9bc6FvGFHZDwTqxY93+daPLxWnz2SKtI8VnyNPCgAqzwETD88Oy4yvNjwGT1YwgQ8VYl2PLv9XTwSJ189mxvvPN0bhT3qQSU9YRCVO/D2CD1KLUG90mjsvKytuT2Bly87pxAnPaRoeb1i8ja9ky7zPKen5jwljuE8t7UGPaMwmbujNKe80tvBvCqBCD03Apa7Qml4PHNYarzCe9W8hH26PdlITL2nGK08ROEkPfb9gTuN9mC93yvOvAJnezzG+ng9Hc+lvLCdxL0+RyO9ifnBPAZ/KD0pfuA9E29nO6XaNz01IaM9Wi1pPGZzIz1mJ928R1G2vOpcFj3ifls8NrWpPVBWUb0=', 
    'success': True
}
Suggest Edits

face-detect

 

Detects all faces in an image. Face-detect does not extract features for efficiency. If you want to extract features for a specific face you call /enroll with that face chip.

url = BASEURL + "/face-detect"
data = {
        "source":base64.b64encode(open('./12.jpeg', 'rb').read()).decode('utf-8'),
       }
request = requests.post(url, json=data)
print(request.json())

Sample response:

{
  'execution_time': 0.12682199478149414, 
  'message': 'face detect successful', 
  'data': 
  [
    {
      'bounding_box': [
        413.65186087042093, 
        51.43216875195503, 
        653.0961793661118, 
        371.76333314180374, 
        0.9999322891235352], 
      'landmarks': [
        474.88751220703125, 
        585.4620971679688, 
        521.65283203125, 
        464.38153076171875, 
        584.4368896484375, 
        181.99205017089844, 
        189.38502502441406, 
        252.26614379882812, 
        285.7392272949219, 
        90.578857421875], 
      'chip': '2+by1+bxzuDtvNnooc7khcDidrfdcq3UcqTIbpKsZnuOaHWHVGR6QFNnKzhHLjtJN0RSQ09dFSEtERwpDRchHScxFB8nGyQuND9LIjBCITNMJDlVLD5dPk9wTF1+UmGCXGuNZHKUb3mbdX+gfYangoyrhI+thpGtfYmn
                                                                                       <base 64 encoded chip data cut out for brevity>                                 JjcDKjcHLjsLMisHLh8HLhL7IhLvEg7nEgbjDfrbCe7TAerTAerPAgLfEjcHO'
        }
      ], 
  'success': True
}

Elements of the response:

bounding_box:

Contains x,y coordinates of the top left corner followed by the bottom right corner of the face bounding box. The fifth element is the probability that the part of the image contained in the bounding box is indeed a face.

landmarks:

Contains x,y coordinates of the following landmark points of the face:
left eye
right eye
nose
left mouth corner
right mouth corner

 
url = BASEURL + "/identify"

data = {
		"source":base64.b64encode(open('./nezare.jpeg', 'rb').read()).decode('utf-8'),
		"collection_id":"home",
		"namespace":"client123"
	}

request = requests.post(url, data=json.dumps(data))

Sample response:

{
	'execution_time': 0.42548680305480957,
	'message': 'Identify successful', 
	'data': [{'bounding_box': [137.2210047543049, 
				   81.69231644272804, 
				   309.59957841038704, 
				   330.9033211097121, 
				   0.9989269375801086],
		  'predicted_label': 'Nezare Chafni', 
	  	  'probability': 0.08898697565231924, 
		  'similarity': 0.07858646661043167}
		], 
	'success': True
 }

This endpoint performs 1:1 matching. It accepts images and features.

The api produces a similarity measurement for the biggest face in the source and target, the default threshold is 0.3 (anything above is a positive match, anything below is a negative match). The threshold can be adjusted by simply passing a threshold float in the request.

Compare two URLs

import requests

url = BASEURL + "/match"

querystring = {
 "source_url":"https://upload.wikimedia.org/wikipedia/commons/thumb/5/51/Brad_Pitt_Fury_2014.jpg/220px-Brad_Pitt_Fury_2014.jpg",
  "target_url":"https://m.media-amazon.com/images/M/MV5BMjA1MjE2MTQ2MV5BMl5BanBnXkFtZTcwMjE5MDY0Nw@@._V1_UX214_CR0,0,214,317_AL_.jpg"}


response = requests.request("GET", url, headers=headers, params=querystring)

Match two images

url = BASEURL + "/match"

data = {
    "source":base64.b64encode(open('./12.jpeg', 'rb').read()).decode('utf-8'),
    "target":base64.b64encode(open('./12.jpeg', 'rb').read()).decode('utf-8'),
}
request = requests.post(url, json=data, headers=HEADERS)

print(request.text)

Match two features

    url = BASEURL + "/match"

    data_512 = {
            "source":base64.b64encode(open('./nezare.jpeg', 'rb').read()).decode('utf-8'),
            "target_features":{
            "label1":test_feature_512,
            "label2":test_feature_512,
            }
    }

    start = datetime.datetime.now()
    request = requests.post(url, json=data_512)
    end = datetime.datetime.now()


url = BASEURL + "/match"
data = {
    "source_feature":{test_features},
    "target_feature":{test_features},
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)
print(request.text)

Sample response

 {
    "execution_time": 0.0003688335418701172,
    "message": "match successful",
    "data": [
      {
        "probability": 0.9999992283344232, 
        "match": true, 
        "similarity": 0.9493268728256226
      }],
    "success": true
} 

Match against multiple features, using a source image

# run an enroll call without giving a collection name or namespace to
# just get the extracted feature back
data = {
    "source": base64.b64encode(open('./nezare.jpeg', 'rb').read()).decode(
        'utf-8'),
}
r = requests.post(BASEURL + "/enroll", json=data)
nez_feature = r.json()['data']

# repeat for another persons image
data = {
    "source": base64.b64encode(open('./manuel.jpeg', 'rb').read()).decode(
        'utf-8'),
}
r = requests.post(BASEURL + "/enroll", json=data)
manuel_feature = r.json()['data']

# now compare an image to those two extracted features
data = {
    "source": base64.b64encode(open('./nezare.jpeg', 'rb').read()).decode(
        'utf-8'),
    "target_features": {
        "Manuel": manuel_feature,
        "Nezare": nez_feature,
    }
}
response = requests.post(BASEURL + "/match", json=data)

print(response.json())

Sample response

{'execution_time': 0.7452030181884766,
 'message': 'match successful',
 'data': [{'Manuel': 
           [{'score': 0.00022656144574284554,
             'probability': 0.021838278849839843,
             'match': False}]},
          {'Nezare': 
           [{'score': 0.9999997615814209,
             'probability': 0.9999997028891058,
             'match': True}]}],
   'success': True
}

Match against multiple features, using already extracted source features

    # run an enroll call without giving a collection name or namespace to
    # just get the extracted feature back
    data = {
        "source": base64.b64encode(open('./nezare.jpeg', 'rb').read()).decode(
            'utf-8'),
    }
    r = requests.post(BASEURL + "/enroll", json=data)
    nez_feature = r.json()['data']

    # repeat for another persons image
    data = {
        "source": base64.b64encode(open('./manuel.jpeg', 'rb').read()).decode(
            'utf-8'),
    }
    r = requests.post(BASEURL + "/enroll", json=data)
    manuel_feature = r.json()['data']

    # now compare the source feature to those two extracted features
    data = {
            "source_feature":nez_feature,
            "target_features":{
            "Manuel":manuel_feature,
            "Nezare":nez_feature,
            }
    }
    response = requests.post(BASEURL + "/match", json=data)
    
    assert(response.json()['data'][0]['Manuel'][0]['match'] is False)
    assert(response.json()['data'][1]['Nezare'][0]['match'] is True)
Suggest Edits

Spoof Detection

 
url = BASEURL + '/spdetect'

data = {
    'image':base64.b64encode(open(path, 'rb').read()).decode('utf-8')
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)

print(request.text)
#spoof attempt
{"message": "Probability this is real: 0.0003", "data": {"score": 0.0003331679035909474}, "success": true}

#real face
{"message": "Probability this is real: 0.9998", "data": {"score": 0.9997538924217224}, "success": true}
Suggest Edits

Trueface Agebox

 

Download & Run

sudo docker run -e "TOKEN=$YOUR_TOKEN" -p 8080:8080 -it trueface/agebox:latest

Test Agebox

curl http://localhost:8080/predict?url=https://i.pinimg.com/originals/10/ac/e6/10ace651ae2f286b3dfa4bb8770ff377.jpg

/Predict

URL PARAMS

field
type
description

url

image url

a url to the image you'd like to process

JSON Payload

field
type
description

url

string

a url to the image you'd like to process

image

string

base64 encoded image

FORM DATA

field
type

url

string

a url to the image you'd like to process

image

binary

image file

Sample responses

For a call with multiple faces present in the image

{
    "message": "Operation successful.",
    "data": [
        {
            "estimated_age": 28.315752029418945,
            "face_location": [
                809.9471360743046,
                385.23763477802277,
                1166.1402197182178,
                838.8555288016796
            ]
        },
        {
            "estimated_age": 33.08721160888672,
            "face_location": [
                461.4064075946808,
                177.87696634232998,
                874.9451689720154,
                768.3043241016567
            ]
        }
    ],
    "success": true
}

For a call with no faces present

{
    "message": "no faces found.",
    "data": {},
    "success": false
}
Suggest Edits

TF Dashboard

 
trueface/onprem-dashboard

The Trueface Dashboard image provides a UI for viewing and managing Trueface servers you are running. You can also view performance stats like CPU, memory consumption and real-time logs.

Here, you can also start new servers and stop or restart existing ones.

Get started with the below command:

sudo docker run -dit -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock --name trueface_dashboard trueface/onprem-dashboard

Then navigate to http://localhost:5000 or to the IP port you're running on to view the server information.

In order for your servers to appear in the dashboard, make sure the container name starts with 'trueface'.

The python SDK start-server command automatically names the container as 'trueface_[id]'. However, if you started a container manually you can simply rename it with:

sudo docker rename [container_id] trueface_server12
Suggest Edits

Trueface ALPR (License Plate Recognition)

 

This docker image provides endpoints to detect cars and license plates.

sudo docker run -e "TOKEN=<your token>" -p 8080:8080 -it trueface/alprbox:latest

A postman collection that hits the endpoint with different parameters can be found here

Suggest Edits

predict with URL

 

title: "predict with URL"

excerpt: ""

predict car model, make and color

import requests

data = {
    "url":"https://i.stack.imgur.com/xPxvv.jpg",
    "make_model":True,   # detect the make and model of the car
    "car_detect":True,   # return the location of the car in the image
    "color_detect":True, # return the colors of the car
    "color_limit":2,     # limit maximum number of colors returned
}

r = requests.get("http://localhost:8080/predict", params=data)
print(r.json())

Sample response

{
    "message": "Operation Successful.",
    "data": [
        {
            "estimated_color": [
                [
                    0.22086242394267377,
                    [
                        3642,
                        "darkslategrey",
                        [
                            47,
                            79,
                            79
                        ]
                    ]
                ],
                [
                    0.2161695862711494,
                    [
                        666,
                        "dimgray",
                        [
                            105,
                            105,
                            105
                        ]
                    ]
                ]
            ],
            "object_location": [
                44,
                97,
                427,
                324
            ],
            "type": "truck",
            "license_plate": [
                "M666Y0B"
            ],
            "make_model": {
                "make": "Chrysler",
                "model": "PT-Cruiser",
                "probability": 99.01735782623291
            }
        }
    ],
    "success": true
}
Suggest Edits

predict with base64 image

 

predict car model, make and color

url = BASEURL + "/predict"

data = {
  "image":base64.b64encode(open('<path to your image>', 'rb').read()).decode('utf-8'),
  "url":"https://i.stack.imgur.com/xPxvv.jpg",
  "make_model":False,   # detect the make and model of the car
  "car_detect":False,   # return the location of the car in the image
  "color_detect":False, # return the colors of the car
  "color_limit":2,      # limit maximum number of colors returned
}

r = requests.post("http://localhost:8080/predict", json=data)
print(r.json())

Sample response

{
    "message": "Operation Successful.",
    "data": [
        {
            "estimated_color": null,
            "object_location": [
                44,
                97,
                427,
                324
            ],
            "type": "truck",
            "license_plate": [
                "M666Y0B"
            ],
            "make_model": {
                "make": null,
                "model": null,
                "probability": 0
            }
        }
    ],
    "success": true
}
Suggest Edits

Spoof Detection

 

Docker image: trueface/tf-spoof

url = BASEURL + '/spdetect'

data = {
    'image':base64.b64encode(open(path, 'rb').read()).decode('utf-8')
}

request = requests.post(url, data=json.dumps(data), headers=HEADERS)

print(request.text)
#spoof attempt
{"message": "Probability this is real: 0.0003", "data": {"score": 0.0003331679035909474}, "success": true}

#real face
{"message": "Probability this is real: 0.9998", "data": {"score": 0.9997538924217224}, "success": true}
 
Suggest Edits

Get a collection

Returns:
the list of collections stored on the server

 
get/collections
curl --request GET \
  --url http://example.com//collections
var request = require("request");

var options = { method: 'GET', url: 'http://example.com//collections' };

request(options, function (error, response, body) {
  if (error) throw new Error(error);

  console.log(body);
});
require 'uri'
require 'net/http'

url = URI("http://example.com//collections")

http = Net::HTTP.new(url.host, url.port)

request = Net::HTTP::Get.new(url)

response = http.request(request)
puts response.read_body
var data = JSON.stringify(false);

var xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("GET", "http://example.com//collections");

xhr.send(data);
import requests

url = "http://example.com//collections"

response = requests.request("GET", url)

print(response.text)
A binary file was returned

You couldn't be authenticated

Try the API to see results

Headers

X-Fields
mask

An optional fields mask

Response

Success

dbboolean

whether this collection is stored in the DB backend or in an npz file

imagesarray

list of base64 encoded images

labelsarray

list of corresponding label strings

namestring

name of the collection

Suggest Edits

/collections

 
postexample.com//collections
curl --request POST \
  --url http://example.com//collections
var request = require("request");

var options = { method: 'POST', url: 'http://example.com//collections' };

request(options, function (error, response, body) {
  if (error) throw new Error(error);

  console.log(body);
});
require 'uri'
require 'net/http'

url = URI("http://example.com//collections")

http = Net::HTTP.new(url.host, url.port)

request = Net::HTTP::Post.new(url)

response = http.request(request)
puts response.read_body
var data = JSON.stringify(false);

var xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "http://example.com//collections");

xhr.send(data);
import requests

url = "http://example.com//collections"

response = requests.request("POST", url)

print(response.text)
A binary file was returned

You couldn't be authenticated

Try the API to see results

Body Params

db
boolean
required

whether this collection is stored in the DB backend or in an npz file

images
array of strings

list of base64 encoded images

labels
array of strings

list of corresponding label strings

name
string
required

name of the collection

Headers

X-Fields
mask

An optional fields mask

Response

Success

dbboolean

whether this collection is stored in the DB backend or in an npz file

imagesarray

list of base64 encoded images

labelsarray

list of corresponding label strings

namestring

name of the collection

getexample.com//faces
curl --request GET \
  --url http://example.com//faces
var request = require("request");

var options = { method: 'GET', url: 'http://example.com//faces' };

request(options, function (error, response, body) {
  if (error) throw new Error(error);

  console.log(body);
});
require 'uri'
require 'net/http'

url = URI("http://example.com//faces")

http = Net::HTTP.new(url.host, url.port)

request = Net::HTTP::Get.new(url)

response = http.request(request)
puts response.read_body
var data = JSON.stringify(false);

var xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("GET", "http://example.com//faces");

xhr.send(data);
import requests

url = "http://example.com//faces"

response = requests.request("GET", url)

print(response.text)
A binary file was returned

You couldn't be authenticated

Try the API to see results

Headers

X-Fields
mask

An optional fields mask

Response

Success

labelstring

label string for this feature

featurestring

base64 encoded feature string

postexample.com//faces
curl --request POST \
  --url http://example.com//faces
var request = require("request");

var options = { method: 'POST', url: 'http://example.com//faces' };

request(options, function (error, response, body) {
  if (error) throw new Error(error);

  console.log(body);
});
require 'uri'
require 'net/http'

url = URI("http://example.com//faces")

http = Net::HTTP.new(url.host, url.port)

request = Net::HTTP::Post.new(url)

response = http.request(request)
puts response.read_body
var data = JSON.stringify(false);

var xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "http://example.com//faces");

xhr.send(data);
import requests

url = "http://example.com//faces"

response = requests.request("POST", url)

print(response.text)
A binary file was returned

You couldn't be authenticated

Try the API to see results

Body Params

label
string
required

label string for this feature

feature
string
required

base64 encoded feature string

Headers

X-Fields
mask

An optional fields mask

Response

Success

labelstring

label string for this feature

featurestring

base64 encoded feature string