Posts by @Jasper_Holton (359)

How to Upload a WebM Video From a Webcam to a Django Site

Uploading WebM video from the browser has many uses, including live chat, live video, and security monitoring. Software like this runs all over the internet. I hope you find this code useful and deploy it yourself, expanding on my ideas to create your own products. I'll also explain how to screen uploaded content, which can be done quickly and at very little cost.

# The models
# app/models.py
import os
import uuid

from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone

def get_file_path(instance, filename):
    # Store each upload under media/video/ with a random UUID filename
    ext = filename.split('.')[-1]
    filename = "%s.%s" % (uuid.uuid4(), ext)
    return os.path.join('video/', filename)

class Camera(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True, related_name='camera')
    frame = models.FileField(upload_to=get_file_path, null=True, blank=True)
    last_frame = models.DateTimeField(default=timezone.now)
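For the FileField to land anywhere useful, the project needs media storage configured. A minimal sketch of the usual settings, assuming the default filesystem storage (the values are illustrative, not from the original post):

# project/settings.py (sketch; values are illustrative)
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR / 'media'  # get_file_path() puts uploads under media/video/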
# The views
# app/views.py
import traceback

from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt

from .forms import CameraForm
from .models import Camera

@login_required
@csrf_exempt
def video(request):
    # Each user gets one Camera record; create it on first visit
    camera, _ = Camera.objects.get_or_create(user=request.user)
    if request.method == 'POST':
        try:
            form = CameraForm(request.POST, request.FILES, instance=camera)
            camera = form.save()
            camera.review()  # Review the uploaded segment with SightEngine
        except Exception:
            print(traceback.format_exc())
        return HttpResponse(status=200)
    return render(request, 'app/video.html', {'title': 'Video', 'form': CameraForm()})
# The forms
# app/forms.py
from django import forms
from app.models import Camera

class CameraForm(forms.ModelForm):
    class Meta:
        model = Camera
        fields = ('frame',)
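The post doesn't show the routing, so here is a minimal sketch of a URL configuration for the view, assuming the app is wired into the project's urls.py (the route name is illustrative):

# app/urls.py (sketch; route name is illustrative)
from django.urls import path

from . import views

urlpatterns = [
    path('video/', views.video, name='video'),
]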
<!-- The template -->
<!-- templates/app/video.html -->
{% extends 'base.html' %}
{% block content %}
<div id="container">
<video autoplay muted id="video-element" width="100%"></video>
<form method="POST" enctype="multipart/form-data" id="live-form" style="position: absolute; display: none; visibility: hidden;">
{{ form }}
</form>
</div>
<!-- Inline the recorder script below; assumes jQuery is loaded in base.html -->
<script>{% include 'video.js' %}</script>
{% endblock %}
// The javascript
// templates/video.js (included inline by the template; assumes jQuery for $.ajax)
var form = document.getElementById('live-form');
var scale = 0.2;
var width = 1920 * scale;
var height = 1080 * scale;
var video = document.getElementById('video-element');
var mediaRecorder;
var mediaChunks = [];
const VIDEO_INTERVAL = 5000; // The length of each segment to send, ideally at least 5000 ms (5 seconds)

function capture() {
    mediaRecorder.stop(); // Stopping fires a final dataavailable event with the recorded data
}

function startup() {
    navigator.mediaDevices.getUserMedia({
            video: {
                width: {
                    ideal: width
                },
                height: {
                    ideal: height
                }
            },
            audio: true
        })
        .then(function(stream) {
            video.srcObject = stream;
            video.play();
            mediaRecorder = new MediaRecorder(stream);
            mediaRecorder.addEventListener("dataavailable", event => {
                mediaChunks.push(event.data);
                // Build a standalone WebM file from the recorded chunks, then reset and restart
                var file = new Blob(mediaChunks, {
                    'type': 'video/webm'
                });
                mediaChunks = [];
                mediaRecorder.start();
                var formdata = new FormData(form);
                formdata.append('frame', new File([file], 'frame.webm'));
                $.ajax({
                    url: window.location.href,
                    type: "POST",
                    data: formdata,
                    processData: false,
                    contentType: false,
                }).done(function(response) {
                    console.log(response);
                    console.log("Sent frame");
                });
            });
            setTimeout(function() {
                setInterval(capture, VIDEO_INTERVAL); // Stop (and restart) the recorder every interval
            }, 5000);
            mediaRecorder.start();
        }).catch(function(err) {
            console.log("An error occurred: " + err);
        });
}
startup();
This is all it takes to upload WebM video from your webcam. A Django site handles this well: FileField stores the uploaded segments on disk (or any storage backend), and the database indexes them for easy retrieval. Please be cautious with this, however, and use a moderation API to make sure uploaded content is safe. I use an API from SightEngine.com with a workflow that rejects video I don't want on my site. This is what it looks like:
# The API call
# app/apis.py
import requests

params = {
  'workflow': 'wfl_00000000000000000US',
  'api_user': '000000000',
  'api_secret': '000000000000000000'
}

def is_safe(video_path):
    # Submit the video to a SightEngine moderation workflow and read the verdict
    with open(video_path, 'rb') as media:
        r = requests.post('https://api.sightengine.com/1.0/video/check-workflow-sync.json', files={'media': media}, data=params)
    output = r.json()
    if output['status'] == 'failure' or output['summary']['action'] == 'reject':
        return False
    return True
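Before wiring this into the model, you can sanity-check the helper from the Django shell (the path here is illustrative):

# python manage.py shell
from app.apis import is_safe
is_safe('/path/to/media/video/frame.webm')  # True if the workflow accepts the clip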
The next part is the review method on the Camera model in models.py, which deletes a frame that fails moderation.
# And the models.py review call
# app/models.py
import os
from .apis import is_safe
...
    def review(self):
        # Delete the uploaded segment if the moderation workflow rejects it
        if self.frame and not is_safe(self.frame.path):
            os.remove(self.frame.path)
            self.frame = None
            self.save()
Creating a workflow on SightEngine allows you to filter out offensive content, celebrities, children, and even alcohol or drugs. This keeps a site safer when it hosts uploaded video. I also recommend using facial recognition to verify which users are uploading what content; this matters when you keep access records for verification.

How much does it cost? Running a server that can cache video can be expensive if you have a lot of video to store, but experimenting is quite inexpensive, less than $10 a month for the server. SightEngine is free for 500 API calls per day and 2,000 per month, which with 5-second video segments comes to only about 42 minutes of video per day. It is still worthwhile to keep your site safe: at $29 per month you get 10,000 API calls, or roughly 833 minutes (about 14 hours) of video per month (the quick sketch below shows the math). I hope this code is useful to you. I appreciate your feedback if you are willing to comment or like, and you can log in with your face!
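The segment arithmetic behind those estimates, as a quick sketch:

# Each API call moderates one 5-second segment
SEGMENT_SECONDS = 5
free_minutes_per_day = 500 * SEGMENT_SECONDS / 60       # ~41.7 minutes/day on the free tier
paid_minutes_per_month = 10000 * SEGMENT_SECONDS / 60   # ~833 minutes (~13.9 hours)/month at $29
print(free_minutes_per_day, paid_minutes_per_month)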


I appreciate your interest in my site and I'm looking forward to having you here. I've done more work to keep the site secure, and this should also make it easier to use. I'm hoping people will want to log in with their faces and create new accounts with their faces. Using proximity detection, I can make sure the user's face is close to the camera (a sketch of one way to do this is below), and I have added a spinner to the login form.
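The post doesn't include the proximity code, so here is a minimal sketch of one way to approximate it with the face_recognition library used elsewhere on this page: treat the face bounding box's share of the frame as a stand-in for distance. The helper name and threshold are illustrative assumptions, not Uglek's actual code.

# face/proximity.py (sketch; threshold is an illustrative assumption)
import face_recognition

MIN_FACE_AREA_RATIO = 0.05  # face must fill at least ~5% of the frame to count as "close"

def face_is_close(image_path):
    image = face_recognition.load_image_file(image_path)
    locations = face_recognition.face_locations(image)
    if len(locations) != 1:
        return False  # require exactly one face
    top, right, bottom, left = locations[0]
    face_area = (bottom - top) * (right - left)
    frame_area = image.shape[0] * image.shape[1]  # height * width of the image array
    return face_area / frame_area >= MIN_FACE_AREA_RATIO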

Log in to Uglek or create an account with your face! It even works in the dark. Here's my face to go with the new feature. Visit Uglek.com/face/login to log in or create a new account with your face. Try it out now!

How to Identify and Recognize Faces Using Python With No APIs

I use the code below to implement the log-in-with-your-face function on Uglek. It works by assigning a user a face ID when they upload a face to their profile or go to log in, and then retrieving their account by image using the face ID. Here is the code:

# face/face.py
import uuid

from django.contrib.auth.models import User
import face_recognition

from .models import Face

NUM_FACES = 9  # how many recent login faces to compare against, per user

def get_face_id(image_path):
    unknown_image = face_recognition.load_image_file(image_path)
    face_locations = face_recognition.face_locations(unknown_image)
    if len(face_locations) != 1:
        return False  # require exactly one face in the image
    unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

    for user in User.objects.filter(profile__enable_facial_recognition=True):
        known_image = face_recognition.load_image_file(user.profile.face.path)
        user_encodings = [face_recognition.face_encodings(known_image)[0]]
        user_faces = Face.objects.filter(user=user).order_by('-timestamp')
        for face in user_faces:
            # Reject a byte-for-byte copy of a stored face (simple replay protection)
            with open(face.image.path, "rb") as stored, open(image_path, "rb") as uploaded:
                if stored.read() == uploaded.read():
                    return False
        for face in user_faces[:NUM_FACES]:
            face_image = face_recognition.load_image_file(face.image.path)
            user_encodings.append(face_recognition.face_encodings(face_image)[0])
        results = face_recognition.compare_faces(user_encodings, unknown_encoding)
        if any(results):  # match against the profile face or any recent login face
            return user.profile.uuid
    # No match anywhere: mint a fresh face ID
    return str(uuid.uuid4())
In testing, the call looks like get_face_id(User.objects.get(id=1).profile.face.path), which returns my face ID from the face uploaded to my profile. To get a face ID for a user logging in, I save a face form with a face object and then call get_face_id(face.image.path) to look up the user instance and redirect to their authentication URL. This works well; a sketch of that flow follows. I hope this is useful to you. For more information, see the face_recognition project on GitHub: github.com/ageitgey/face_recognition
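A hedged sketch of the login flow described above. The FaceForm, the Face model's image field, and the redirect target are assumptions for illustration, not Uglek's actual code:

# face/views.py (sketch; FaceForm and the 'face-auth' route are illustrative)
from django.contrib.auth.models import User
from django.shortcuts import redirect, render

from .face import get_face_id
from .forms import FaceForm  # hypothetical ModelForm over the Face model

def face_login(request):
    if request.method == 'POST':
        form = FaceForm(request.POST, request.FILES)
        if form.is_valid():
            face = form.save()
            face_id = get_face_id(face.image.path)
            user = User.objects.filter(profile__uuid=face_id).first()
            if user:
                # Hand off to the user's authentication URL
                return redirect('face-auth', uuid=user.profile.uuid)
    return render(request, 'face/login.html', {'form': FaceForm()})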


A view from my porch in north Seattle, Washington. I've lived in Seattle for over a year now, and I've been in the area since a few weeks after my son was born in Hawaii in 2018. Seattle has a nice vibe, busy and laid back at once.

Pianino For a Cloudy Day

This is a beautiful pianino coupled with drums, synthesizer, and strings. It's a pleasant listen, with upbeat tones and minor chords. It gets even better at the end, too.



An Epic Orchestral Synthesizer Track

This is an awesome song I wrote earlier this month. I wrote it to share with @AussieinthePNW on my birthday, but I didn't get a chance to post it then. I finally remembered to do so as I am posting some of my other songs. This is an awesome listen!



This is a song I wrote; I'm not sure how far back. I found it on my Mac, and I thought it sounded pretty good, in my usual simple style. Enjoy it!


How to Identify Unique Faces with the Microsoft Azure Face API

Using the Microsoft Azure Face API, you can assign each unique face a UUID and identify it for login, verification, or any other purpose. The following code accepts an image of a single face and returns a UUID representing that face. This has huge application potential in internet security: it could make sites and businesses more secure by uniquely attributing faces to profiles within apps or security solutions. The Face API on Microsoft Azure is free for basic use and isn't expensive beyond that. To install the Python modules for this code, run:

$ pip install --upgrade azure-cognitiveservices-vision-face
$ pip install --upgrade Pillow

The code is as follows.

# face/face.py
import time
import uuid

from azure.cognitiveservices.vision.face import FaceClient
from azure.cognitiveservices.vision.face.models import TrainingStatusType
from msrest.authentication import CognitiveServicesCredentials

# Your Face API key and endpoint from the Azure portal
KEY = "000000000000000000000000000000"
ENDPOINT = "https://endpoint.api.cognitive.microsoft.com/"

PERSON_GROUP_ID = "group"  # any ID you like; reused across calls

def get_face_id(single_face_image_url):
    # Create an authenticated FaceClient.
    face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

    # Detect faces in the image; detection model 3 gives better performance.
    faces = face_client.face.detect_with_url(single_face_image_url, detection_model='detection_03')
    if len(faces) != 1:  # Require exactly one face
        return False
    face_ids = [face.face_id for face in faces]

    # Create the person group on first use; ignore the error if it already exists.
    try:
        face_client.person_group.create(person_group_id=PERSON_GROUP_ID, name=PERSON_GROUP_ID)
    except Exception:
        pass

    results = None
    try:
        results = face_client.face.identify(face_ids, PERSON_GROUP_ID)  # Identify the face
    except Exception:
        results = None
    if not results or not results[0].candidates:
        # Unknown face: enroll it as a new person named with a fresh UUID
        p = face_client.person_group_person.create(PERSON_GROUP_ID, str(uuid.uuid4()))
        face_client.person_group_person.add_face_from_url(PERSON_GROUP_ID, p.person_id, single_face_image_url)
        face_client.person_group.train(PERSON_GROUP_ID)  # Training
        while True:
            training_status = face_client.person_group.get_training_status(PERSON_GROUP_ID)
            print("Training status: {}.".format(training_status.status))
            if training_status.status == TrainingStatusType.succeeded:
                break
            elif training_status.status == TrainingStatusType.failed:
                return False  # Training failed; no ID to return
            time.sleep(5)
        results = face_client.face.identify(face_ids, PERSON_GROUP_ID)
    if results and results[0].candidates:  # Load their UUID
        res = str(results[0].candidates[0].person_id)
        print(res)
        return res  # Return their UUID
    return False  # Or return False to indicate that no face was recognized.

f = 'https://uglek.com/media/face/1b195bf5-8150-4f84-931d-ef0f2a464d06.png'
print(get_face_id(f))  # Identify a face from this image
Using this code, you can call get_face_id(face_url) to get an ID for any face. The ID is unique to each person, so you can cache it on a profile and use it to retrieve that profile later, as in the sketch below. This is how the "Log in with your face" option works on Uglek. I hope you enjoy this code and that it is useful to you. Feel free to use it as you will, but be sure to use your own API key and endpoint from the Azure portal. Thank you!
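As a hedged sketch of that caching pattern (the Profile.face_uuid field and helper name are hypothetical, for illustration only):

# Hypothetical lookup helper; Profile.face_uuid is an illustrative field name
from django.contrib.auth.models import User

def user_for_face(face_url):
    face_id = get_face_id(face_url)  # the Azure helper above
    if not face_id:
        return None
    # Retrieve the account whose cached face ID matches
    return User.objects.filter(profile__face_uuid=face_id).first()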

