Created
Jul 3, 2020 6:12 PM
Tags
VisionboxGlykon
👇 Assuming you have a dataset with the following structure
test_dataset
├── fake
│   └── video1.mp4
└── real
    └── video1.mp4
👨‍💻 The following example code will help you generate a
- Confusion matrix
- Accuracy and classification report
- ROC curve with thresholds
import argparse

import cv2
import numpy as np
import plotly
import plotly.graph_objects as go
import requests
from imutils import paths
from sklearn.metrics import classification_report, confusion_matrix, roc_curve
# Function to call the Glykon API.
def call_glykon(file):
    """Send a video file to the local Glykon service and return its JSON verdict.

    Parameters
    ----------
    file : str
        Path to the video file to analyse.

    Returns
    -------
    dict
        Parsed JSON response. Based on how the caller uses it, expected keys
        include "prediction", "frame_analysis_real_score" and, on rejection,
        "guidance".
    """
    base = "http://localhost:8000"
    url = "%s/run" % base
    # BUGFIX: the original passed an open file handle into the request and
    # never closed it — use a context manager so the handle is released
    # even if the POST raises.
    with open(file, "rb") as fh:
        r = requests.post(url, files={"video": fh})
    return r.json()
# Load data in test dataset (paths under ground_truths/<label>/video.mp4).
data = paths.list_files("ground_truths")

# Loop over the dataset, scoring each video with Glykon and collecting results.
y_true = []  # ground-truth labels ("fake"/"real"), taken from the parent directory name
scores = []  # per-video real-score, used for the ROC curve
y_pred = []  # Glykon's predicted labels ("fake"/"real")
for file in data:
    # The parent directory of each video encodes its ground-truth label.
    l_true = file.split("/")[-2]
    print(l_true)
    score = call_glykon(file)
    print(score)
    if score["prediction"] is None:  # identity check for None (PEP 8), not ==
        print("Video Rejected!")
        print(score["guidance"])
        continue
    scores.append(score["frame_analysis_real_score"])
    y_pred.append(score["prediction"])
    y_true.append(l_true)

# Encode labels: fake -> 0, real -> 1.
y_true = [0 if x == "fake" else 1 for x in y_true]
y_pred = [0 if x == "fake" else 1 for x in y_pred]

# Generate confusion matrix (rows/cols ordered by label: 0=fake, 1=real).
cm = confusion_matrix(y_true, y_pred)

# Measure accuracy; guard against an empty set (every video rejected).
l = len(y_true)
acc = sum(p == t for p, t in zip(y_pred, y_true)) / l if l else float("nan")

# BUGFIX: labels are encoded fake=0 / real=1, so target_names must list
# "fake" first — the original ['real', 'fake'] swapped the report's rows.
target_names = ['fake', 'real']
# Generate classification report.
cr = classification_report(y_true, y_pred, target_names=target_names)

# ROC curve: "real" (label 1) is the positive class.
fpr, tpr, thresholds = roc_curve(y_true, scores, pos_label=1)
print("fpr: %s" % fpr)
print("tpr: %s" % tpr)
# Stringify thresholds so plotly renders them as per-point hover text.
thresholds = [str(t) for t in thresholds]
fig = go.Figure(data=go.Scatter(x=fpr, y=tpr, hovertext=thresholds))
plot_name = "roc.html"
fig.write_html(plot_name, auto_open=True)
print("Accuracy: %s\nConfusion Matrix:\n%s\nClassification Report:\n%s" % (acc, cm, cr))
print("ROC Curve plot saved to %s" % plot_name)
⬇️ Sample terminal output
Accuracy: 0.8095238095238095
Confusion Matrix:
[[8 0]
[4 9]]
Classification Report:
precision recall f1-score support
real 0.67 1.00 0.80 8
fake 1.00 0.69 0.82 13
accuracy 0.81 21
macro avg 0.83 0.85 0.81 21
weighted avg 0.87 0.81 0.81 21
ROC Curve plot saved to roc.html
📈 Sample ROC curve output (check below for an embedded interactive curve with thresholds)