Hello, friends of #nerdytud!
Today I want to share a simple example of face detection in the browser, using TensorFlow libraries (tfjs.js and blazeface.js).
Creating the interface
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>face2face</title>
  </head>
  <body>
    <!-- The video element stays hidden; each frame is drawn onto the canvas instead -->
    <video id="video" autoplay style="display: none"></video>
    <canvas id="canvas" width="600" height="400"></canvas>
    <!-- Load TensorFlow.js, the BlazeFace model, and our main script -->
    <script src="scripts/tfjs.js"></script>
    <script src="scripts/blazeface.js"></script>
    <script src="scripts/main.js"></script>
  </body>
</html>
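The two library files are referenced here as local copies under scripts/. If you prefer not to download them, a common alternative (a sketch, assuming the jsDelivr builds of the @tensorflow/tfjs and @tensorflow-models/blazeface npm packages) is to load them from a CDN:

<!-- Alternative: load the libraries from a CDN instead of local copies -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/blazeface"></script>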
Creating the main script
// Grab the hidden video element and the canvas we draw onto
let video = document.getElementById("video");
let canvas = document.getElementById("canvas");
let context = canvas.getContext("2d");
let model;

// Request camera access and pipe the stream into the video element
const setupCamera = () => {
  navigator.mediaDevices
    .getUserMedia({
      video: { width: 600, height: 400 },
      audio: false,
    })
    .then((stream) => {
      video.srcObject = stream;
    })
    .catch((err) => console.error("Could not access the camera:", err));
};

// Draw the current frame, then overlay a box and landmarks for each detected face
const detectFaces = async () => {
  // returnTensors = false makes estimateFaces return plain JS arrays
  const predictions = await model.estimateFaces(video, false);
  console.log(predictions); // inspect the raw predictions in the console
  context.drawImage(video, 0, 0, 600, 400);
  predictions.forEach((pred) => {
    // Bounding box between the top-left and bottom-right corners
    context.beginPath();
    context.lineWidth = 1;
    context.strokeStyle = "green";
    context.rect(
      pred.topLeft[0],
      pred.topLeft[1],
      pred.bottomRight[0] - pred.topLeft[0],
      pred.bottomRight[1] - pred.topLeft[1]
    );
    context.stroke();
    // Facial landmarks (eyes, ears, nose, mouth) as small squares
    context.fillStyle = "tomato";
    pred.landmarks.forEach((land) => {
      context.fillRect(land[0], land[1], 4, 4);
    });
  });
};

setupCamera();

// Once the video has frames, load the BlazeFace model and run detection every 100 ms
video.addEventListener("loadeddata", async () => {
  model = await blazeface.load();
  setInterval(detectFaces, 100);
});
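For reference, with returnTensors set to false, each element of the array returned by estimateFaces is a plain object. A representative shape (the coordinate values below are illustrative, not real output):

// Representative shape of one prediction (values are illustrative):
// {
//   topLeft: [232.2, 145.2],          // bounding box, upper-left corner
//   bottomRight: [449.7, 308.3],      // bounding box, lower-right corner
//   landmarks: [[295.1, 177.6], ...], // six keypoints: eyes, ears, nose, mouth
//   probability: [0.998]              // detection confidence
// }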
All that's left is to open index.html in the browser. If the camera doesn't start, serve the page from a local web server, since getUserMedia generally requires a secure context (https or localhost).
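One design note: setInterval fires every 100 ms even if a detection is still running. A minimal alternative sketch, reusing the same detectFaces function, is to chain detections with requestAnimationFrame so each one starts only after the previous frame has been processed:

// Run detections back to back, synced to the display refresh
const renderLoop = async () => {
  await detectFaces();
  requestAnimationFrame(renderLoop);
};
// In the loadeddata handler, call renderLoop() instead of setInterval(detectFaces, 100)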