I'm trying to build something like this https://github.com/xenomuta/caraweb, doing face detection with OpenCV and Node.js. I'm on Ubuntu and I can already stream pictures from the webcam.
Here is my webcam .js:
var socket = io.connect();
var fps = 30;

socket.on('connect', function () {
  $('.serverStatus').text('Connected');
});
socket.on('connecting', function () {
  $('.serverStatus').text('Connecting');
});
socket.on('disconnect', function () {
  $('.serverStatus').text('Disconnected');
});

var declinedCam = function(e) {
  alert('You have to enable the webcam');
};

window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = (navigator.getUserMedia ||
                          navigator.webkitGetUserMedia ||
                          navigator.mozGetUserMedia ||
                          navigator.msGetUserMedia);

var video = document.querySelector('video');
if (navigator.getUserMedia) {
  navigator.getUserMedia({video: true}, function(stream) {
    video.src = window.URL.createObjectURL(stream);
  }, declinedCam);
} else {
  alert('Your browser does not support the webcam control');
}

video.addEventListener('play', function(e) {
  // Scale the preview canvas so its longer side is 300px
  var canvas = document.getElementById('frontendCanvas');
  var goByScale = (video.videoHeight >= video.videoWidth) ? 'Height' : 'Width';
  var scale = 300 / video['video' + goByScale];
  canvas.width = video.videoWidth * scale;
  canvas.style.width = canvas.width + 'px';
  canvas.height = video.videoHeight * scale;
  canvas.style.height = canvas.height + 'px';

  var ctx = canvas.getContext('2d');
  setInterval(function() {
    if (video.paused !== true) {
      // Draw the current video frame and push it to the server as a data URL
      ctx.drawImage(video, 0, 0, video.videoWidth * scale, video.videoHeight * scale);
      socket.emit('videoStream', canvas.toDataURL('image/webp'));
    }
  }, 1000 / fps);
}, false);
And here is app.js:
var express = require('express.io');
var app = express();
var path = require('path');
var favicon = require('serve-favicon');
var logger = require('morgan');
var cookieParser = require('cookie-parser');
var bodyParser = require('body-parser');
var routes = require('./routes/index');
var backend = require('./routes/backend');
var cv = require('opencv');
var fs = require('fs');

// view engine setup
app.set('views', path.join(__dirname, 'views'));
app.set('view engine', 'ejs');

// connection setup
app.http().io();
app.io.sockets.on('connection', function(socket) {
  socket.on('videoStream', function(data) {
    // Relay the frame to the other connected clients
    socket.broadcast.emit('sendVideo', data);
    // This buffer is not used yet; this is where the OpenCV processing should go
    var buffer = Buffer.from(data, 'base64');
  });
});
When I try to detect faces in a picture, I use something like this:
cv.readImage("/picture.png", function(err, im){});
But how can I use the data from the webcam stream to detect the person in front of the camera? Thanks a lot.
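For reference, a minimal sketch of one way the existing videoStream handler in app.js could run face detection on each frame. It assumes the node-opencv bindings used above, which expose cv.readImage on a Buffer, Matrix.detectObject and the bundled cv.FACE_CASCADE Haar cascade; the facesDetected event name is made up here. The data:image/...;base64, prefix has to be stripped before decoding, and depending on your OpenCV build you may need to send image/jpeg instead of image/webp from the client:
app.io.sockets.on('connection', function(socket) {
  socket.on('videoStream', function(data) {
    // The data URL looks like "data:image/webp;base64,...."; keep only the base64 part
    var base64Data = data.split(',')[1];
    var buffer = Buffer.from(base64Data, 'base64');

    cv.readImage(buffer, function(err, im) {
      if (err || im.width() < 1 || im.height() < 1) return;

      // cv.FACE_CASCADE is the frontal-face Haar cascade that ships with node-opencv
      im.detectObject(cv.FACE_CASCADE, {}, function(err, faces) {
        if (err) return;
        // faces is an array of {x, y, width, height} rectangles
        socket.emit('facesDetected', faces);
      });
    });
  });
});
On the client you could then listen for facesDetected and draw the returned rectangles on top of frontendCanvas.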
I know this question is a bit old, but this might help someone.
You don't have to encode the canvas image as a dataURL; you can send the blob to the server directly. In your HTML (client side):
function processCanvas() {
  // You call processCanvas() only once; after that, the socket's `canvasModified`
  // event calls it every time the canvas gets refreshed (see below)
  var cnv = document.getElementById("canvasOutput");
  cnv.toBlob(function(blb) {
    socket.emit('canvasUpdated', blb);
  }, 'image/jpeg', 0.8);
}
Then, on the Node.js server:
let modifyCanvas = (canvasBlob) => {
  return new Promise((resolve, reject) => {
    cv.readImage(canvasBlob, function(err, im) {
      // Here you can process and do whatever with your image (im)
      // (see the sketch after this server block)
      ...
      // Once everything is done, you can pass it back to JS by doing
      let arraybuffer = im.toBuffer();
      resolve(arraybuffer);
    });
  });
};
socket.on('connection', function(client) {
  client.on('canvasUpdated', function(b) {
    let buffer = Buffer.from(b);
    modifyCanvas(buffer).then((blb) => {
      try {
        // Send the processed frame back to the client that sent it
        client.emit('canvasModified', blb);
      } catch (e) {
        console.log(e);
      }
    });
  });
});
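To fill in the ... placeholder in modifyCanvas above, here is a hedged sketch of the face-detection step, again assuming node-opencv's detectObject with the bundled cv.FACE_CASCADE and its Matrix.rectangle drawing helper:
let modifyCanvas = (canvasBlob) => {
  return new Promise((resolve, reject) => {
    cv.readImage(canvasBlob, function(err, im) {
      if (err) return reject(err);

      im.detectObject(cv.FACE_CASCADE, {}, function(err, faces) {
        if (err) return reject(err);

        // Draw a box around every detected face
        faces.forEach(function(face) {
          im.rectangle([face.x, face.y], [face.width, face.height], [0, 255, 0], 2);
        });

        // Encode the modified image back into a buffer for the client
        resolve(im.toBuffer());
      });
    });
  });
};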
Finally, back on the client, when the event is emitted you draw it onto an image element (which is easier):
socket.on('canvasModified', function(blb) {
  var b = new Blob([blb]);
  document.getElementById('imgModified').src = URL.createObjectURL(b);
  processCanvas();
});
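One small tweak worth making (not in the original answer): a new object URL is created for every frame, so revoking the previous one avoids leaking memory over a long session:
socket.on('canvasModified', function(blb) {
  var img = document.getElementById('imgModified');
  // Free the previous frame's blob URL before creating a new one
  if (img.src.startsWith('blob:')) URL.revokeObjectURL(img.src);
  img.src = URL.createObjectURL(new Blob([blb]));
  processCanvas();
});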
If you want to draw it onto a canvas instead, you can grab that image element and draw it onto the canvas like this:
var imageElement = document.getElementById("imgModified");
document.getElementById("canvasOutput").getContext('2d').drawImage(imageElement, 0, 0);
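One caveat: drawImage only has pixels to copy once the image has actually loaded, so it is safer to do the draw inside the image's load handler, for example:
var imageElement = document.getElementById("imgModified");
imageElement.addEventListener('load', function() {
  document.getElementById("canvasOutput").getContext('2d').drawImage(imageElement, 0, 0);
});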
Hope it helps!