OpenCV.js Image Processing: Learning Notes

OpenCV.js official site

4.5.0 Documentation

The following content is compiled from the official OpenCV.js documentation.

Introduction

OpenCV was created by Gary Bradski at Intel in 1999, with the first release in 2000. It supports multiple programming languages, including C++, Python, and Java, and runs on Windows, Linux, OS X, Android, iOS, and other platforms. High-performance GPU interfaces based on CUDA and OpenCL are also under active development. OpenCV.js brings OpenCV to the open web platform and makes it available to JavaScript programmers.

Image Processing

Reading an Image

readImage.html

<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello OpenCV.js</title>
</head>
<body>
<h2>读取图片</h2>
<p id="status">OpenCV.js is loading...</p>
<div>
 <div class="inputoutput">
 <img id="imageSrc" alt="No Image" />
 <div class="caption">imageSrc <input type="file" id="fileInput" name="file" /></div>
 </div>
 <div class="inputoutput">
 <canvas id="canvasOutput" ></canvas>
 <div class="caption">canvasOutput</div>
 </div>
</div>
<script type="text/javascript">
let imgElement = document.getElementById('imageSrc');
let inputElement = document.getElementById('fileInput');
inputElement.addEventListener('change', (e) => {
 imgElement.src = URL.createObjectURL(e.target.files[0]);
}, false);
imgElement.onload = function() {
 let mat = cv.imread(imgElement);
 cv.imshow('canvasOutput', mat);
 mat.delete();
};
var Module = {
 // https://emscripten.org/docs/api_reference/module.html#Module.onRuntimeInitialized
 onRuntimeInitialized() {
 document.getElementById('status').innerHTML = 'OpenCV.js is ready.';
 }
};
</script>
<!-- <script async src="https://docs.opencv.org/4.5.0/opencv.js" type="text/javascript"></script> -->
<script async src="opencvjs/opencv.js" type="text/javascript"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
</body>
</html>

Open the file in a browser to run it.
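cv.imread accepts an img/canvas element or its id and returns a 4-channel RGBA cv.Mat; cv.imshow draws a Mat onto a canvas identified by id. A quick way to inspect what was read (a small sketch of my own, not part of the official sample) is to extend the onload handler:

let mat = cv.imread(imgElement);   // RGBA image as a cv.Mat (CV_8UC4)
console.log('rows:', mat.rows, 'cols:', mat.cols, 'channels:', mat.channels());
cv.imshow('canvasOutput', mat);    // render the Mat into <canvas id="canvasOutput">
mat.delete();                      // Mats live in WebAssembly memory and must be freed manually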

Grayscale

blackAWhite.html


<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Adaptive Threshold Example</title>
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>IMAGE</h2>
<p></p>
<div>
<div class="control"><button id="tryIt" disabled>Start</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// To distinguish the input from the output, convert the image to grayscale.
// You can try different conversions.
cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');

utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');

let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});

utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

Open the file in a browser to run it.

 

Thresholding

inRangeImage.html

<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello OpenCV.js</title>
</head>
<body>
<h2></h2>
<p id="status">OpenCV.js is loading...</p>
<div>
 <div class="inputoutput">
 <img id="imageSrc" alt="No Image" />
 <div class="caption">imageSrc <input type="file" id="fileInput" name="file" /></div>
 </div>
 <div class="inputoutput">
 <canvas id="canvasOutput" ></canvas>
 <div class="caption">canvasOutput</div>
 </div>
</div>
<script type="text/javascript">
let imgElement = document.getElementById('imageSrc');
let inputElement = document.getElementById('fileInput');
inputElement.addEventListener('change', (e) => {
 imgElement.src = URL.createObjectURL(e.target.files[0]);
}, false);
imgElement.onload = function() {
 let src = cv.imread('imageSrc');
 let dst = new cv.Mat();
 let low = new cv.Mat(src.rows, src.cols, src.type(), [0, 0, 0, 0]);
 let high = new cv.Mat(src.rows, src.cols, src.type(), [150, 150, 150, 255]);
 // You can try different parameters.
 cv.inRange(src, low, high, dst);
 cv.imshow('canvasOutput', dst);
 src.delete(); dst.delete(); low.delete(); high.delete();
};
var Module = {
 // https://emscripten.org/docs/api_reference/module.html#Module.onRuntimeInitialized
 onRuntimeInitialized() {
 document.getElementById('status').innerHTML = 'OpenCV.js is ready.';
 }
};
</script>
<script async src="opencvjs/opencv.js" type="text/javascript"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
</body>
</html>
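The snippet above uses cv.inRange, which keeps the pixels whose values fall inside a per-channel range. The classical fixed-threshold operation from the same tutorial chapter is cv.threshold; a minimal grayscale variant (a sketch reusing the same element ids, with an arbitrary threshold of 120) would be:

let src = cv.imread('imageSrc');
let gray = new cv.Mat();
let dst = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY);
// Pixels brighter than 120 become 255 (white), everything else becomes 0 (black).
cv.threshold(gray, dst, 120, 255, cv.THRESH_BINARY);
cv.imshow('canvasOutput', dst);
src.delete(); gray.delete(); dst.delete();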

 

Other Operations

imageTemplate.html

Modify the code in the text box to apply different operations to the image; this template works with most of the image-processing snippets in the official documentation.
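For example, pasting the following into the code box (a sketch that reuses the template's canvasInput/canvasOutput ids) swaps the grayscale conversion for Canny edge detection:

let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// Canny expects a single-channel image, so convert from RGBA first.
cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
// 50 and 100 are the lower/upper hysteresis thresholds; tune them per image.
cv.Canny(dst, dst, 50, 100, 3, false);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();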

Video Processing

Mean Shift (CamShift)


<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>CamShift Example</title>
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false"></textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted loop></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);

// take first frame of the video
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame);

// hardcode the initial location of window
let trackWindow = new cv.Rect(150, 60, 63, 125);

// set up the ROI for tracking
let roi = frame.roi(trackWindow);
let hsvRoi = new cv.Mat();
cv.cvtColor(roi, hsvRoi, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsvRoi, hsvRoi, cv.COLOR_RGB2HSV);
let mask = new cv.Mat();
let lowScalar = new cv.Scalar(30, 30, 0);
let highScalar = new cv.Scalar(180, 180, 180);
let low = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), lowScalar);
let high = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), highScalar);
cv.inRange(hsvRoi, low, high, mask);
let roiHist = new cv.Mat();
let hsvRoiVec = new cv.MatVector();
hsvRoiVec.push_back(hsvRoi);
cv.calcHist(hsvRoiVec, [0], mask, roiHist, [180], [0, 180]);
cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);

// delete useless mats.
roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();

// Set up the termination criteria: either 10 iterations or a move of at least 1 pt
let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);

let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv);
let dst = new cv.Mat();
let trackBox = null;

const FPS = 30;
function processVideo() {
    try {
        if (!streaming) {
            // clean and stop.
            frame.delete(); dst.delete(); hsvVec.delete(); roiHist.delete(); hsv.delete();
            return;
        }
        let begin = Date.now();

        // start processing.
        cap.read(frame);
        cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
        cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
        cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);

        // apply camshift to get the new location
        [trackBox, trackWindow] = cv.CamShift(dst, trackWindow, termCrit);

        // Draw it on image
        let pts = cv.rotatedRectPoints(trackBox);
        cv.line(frame, pts[0], pts[1], [255, 0, 0, 255], 3);
        cv.line(frame, pts[1], pts[2], [255, 0, 0, 255], 3);
        cv.line(frame, pts[2], pts[3], [255, 0, 0, 255], 3);
        cv.line(frame, pts[3], pts[0], [255, 0, 0, 255], 3);
        cv.imshow('canvasOutput', frame);

        // schedule the next one.
        let delay = 1000/FPS - (Date.now() - begin);
        setTimeout(processVideo, delay);
    } catch (err) {
        utils.printError(err);
    }
};

// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');

utils.loadCode('codeSnippet', 'codeEditor');

let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');

startAndStop.addEventListener('click', () => {
    if (!streaming) {
        utils.clearError();
        videoInput.play().then(() => {
            onVideoStarted();
        });
    } else {
        videoInput.pause();
        videoInput.currentTime = 0;
        onVideoStopped();
    }
});

function onVideoStarted() {
    streaming = true;
    startAndStop.innerText = 'Stop';
    videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
    utils.executeCode('codeEditor');
}

function onVideoStopped() {
    streaming = false;
    canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
    startAndStop.innerText = 'Start';
}

utils.loadOpenCv(() => {
    videoInput.addEventListener('canplay', () => {
        startAndStop.removeAttribute('disabled');
    });
    videoInput.src = 'video/cup.mp4';
});
</script>
</body>
</html>
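The snippet above calls cv.CamShift, the adaptive variant of mean shift that also updates the window size and orientation. The plain mean-shift version from the same tutorial differs only in the tracking and drawing lines; roughly (a sketch reusing the snippet's dst, trackWindow, termCrit and frame variables):

// meanShift keeps the search window size fixed and returns [iterations, newWindow].
[, trackWindow] = cv.meanShift(dst, trackWindow, termCrit);
// Draw the axis-aligned window instead of the rotated CamShift box.
let [x, y, w, h] = [trackWindow.x, trackWindow.y, trackWindow.width, trackWindow.height];
cv.rectangle(frame, new cv.Point(x, y), new cv.Point(x + w, y + h), [255, 0, 0, 255], 2);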

Background Subtraction

backgroundSubtraction.html


<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false"></textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);

let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let fgmask = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let fgbg = new cv.BackgroundSubtractorMOG2(500, 16, true);

const FPS = 30;
function processVideo() {
    try {
        if (!streaming) {
            // clean and stop.
            frame.delete(); fgmask.delete(); fgbg.delete();
            return;
        }
        let begin = Date.now();
        // start processing.
        cap.read(frame);
        fgbg.apply(frame, fgmask);
        cv.imshow('canvasOutput', fgmask);
        // schedule the next one.
        let delay = 1000/FPS - (Date.now() - begin);
        setTimeout(processVideo, delay);
    } catch (err) {
        utils.printError(err);
    }
};

// schedule the first one.
setTimeout(processVideo, 0);

</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');

utils.loadCode('codeSnippet', 'codeEditor');

let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');

startAndStop.addEventListener('click', () => {
    if (!streaming) {
        utils.clearError();
        videoInput.play().then(() => {
            onVideoStarted();
        });
    } else {
        videoInput.pause();
        videoInput.currentTime = 0;
        onVideoStopped();
    }
});

function onVideoStarted() {
    streaming = true;
    startAndStop.innerText = 'Stop';
    videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
    utils.executeCode('codeEditor');
}

function onVideoStopped() {
    streaming = false;
    startAndStop.innerText = 'Start';
}

videoInput.addEventListener('ended', () => {
    onVideoStopped();
});

utils.loadOpenCv(() => {
    videoInput.addEventListener('canplay', () => {
        startAndStop.removeAttribute('disabled');
    });
    videoInput.src = 'video/box.mp4';
});
</script>
</body>
</html>
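The three arguments passed to the cv.BackgroundSubtractorMOG2 constructor above control the background model; annotated with the same values as the snippet:

// history: how many recent frames the Gaussian-mixture background model is estimated from.
// varThreshold: squared Mahalanobis distance used to decide whether a pixel is foreground.
// detectShadows: if true, shadows are marked as gray (value 127) in the mask instead of white.
let fgbg = new cv.BackgroundSubtractorMOG2(500, 16, true);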

Lucas–Kanade Method (Optical Flow)

LucasKanade.html


<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false"></textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);

// take first frame of the video
let frame1 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame1);

let prvs = new cv.Mat();
cv.cvtColor(frame1, prvs, cv.COLOR_RGBA2GRAY);
frame1.delete();
let hsv = new cv.Mat();
let hsv0 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let hsv1 = new cv.Mat(video.height, video.width, cv.CV_8UC1, new cv.Scalar(255));
let hsv2 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv0); hsvVec.push_back(hsv1); hsvVec.push_back(hsv2);

let frame2 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let next = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let flow = new cv.Mat(video.height, video.width, cv.CV_32FC2);
let flowVec = new cv.MatVector();
let mag = new cv.Mat(video.height, video.width, cv.CV_32FC1);
let ang = new cv.Mat(video.height, video.width, cv.CV_32FC1);
let rgb = new cv.Mat(video.height, video.width, cv.CV_8UC3);

const FPS = 30;
function processVideo() {
    try {
        if (!streaming) {
            // clean and stop.
            prvs.delete(); hsv.delete(); hsv0.delete(); hsv1.delete(); hsv2.delete();
            hsvVec.delete(); frame2.delete(); flow.delete(); flowVec.delete(); next.delete();
            mag.delete(); ang.delete(); rgb.delete();
            return;
        }
        let begin = Date.now();

        // start processing.
        cap.read(frame2);
        cv.cvtColor(frame2, next, cv.COLOR_RGBA2GRAY);
        cv.calcOpticalFlowFarneback(prvs, next, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
        cv.split(flow, flowVec);
        let u = flowVec.get(0);
        let v = flowVec.get(1);
        cv.cartToPolar(u, v, mag, ang);
        u.delete(); v.delete();
        ang.convertTo(hsv0, cv.CV_8UC1, 180/Math.PI/2);
        cv.normalize(mag, hsv2, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1);
        cv.merge(hsvVec, hsv);
        cv.cvtColor(hsv, rgb, cv.COLOR_HSV2RGB);
        cv.imshow('canvasOutput', rgb);
        next.copyTo(prvs);

        // schedule the next one.
        let delay = 1000/FPS - (Date.now() - begin);
        setTimeout(processVideo, delay);
    } catch (err) {
        utils.printError(err);
    }
};

// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');

utils.loadCode('codeSnippet', 'codeEditor');

let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');

startAndStop.addEventListener('click', () => {
    if (!streaming) {
        utils.clearError();
        videoInput.play().then(() => {
            onVideoStarted();
        });
    } else {
        videoInput.pause();
        videoInput.currentTime = 0;
        onVideoStopped();
    }
});

function onVideoStarted() {
    streaming = true;
    startAndStop.innerText = 'Stop';
    videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
    utils.executeCode('codeEditor');
}

function onVideoStopped() {
    streaming = false;
    startAndStop.innerText = 'Start';
}

videoInput.addEventListener('ended', () => {
    onVideoStopped();
});

utils.loadOpenCv(() => {
    videoInput.addEventListener('canplay', () => {
        startAndStop.removeAttribute('disabled');
    });
    videoInput.src = 'video/box.mp4';
});
</script>
</body>
</html>
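Despite the file name, the snippet above computes dense optical flow with cv.calcOpticalFlowFarneback and visualizes it in HSV. The sparse Lucas–Kanade method from the same tutorial chapter tracks individual corner points instead; its core calls look roughly like this (a sketch reusing the snippet's prvs/next gray frames, with illustrative parameter values):

// Pick good corners to track in the previous gray frame.
let p0 = new cv.Mat();
let none = new cv.Mat();
cv.goodFeaturesToTrack(prvs, p0, 30, 0.3, 7, none, 7);
// Track those corners into the next gray frame.
let p1 = new cv.Mat();
let st = new cv.Mat();   // 1 where a point was found, 0 where it was lost
let err = new cv.Mat();
let winSize = new cv.Size(15, 15);
let maxLevel = 2;
let criteria = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03);
cv.calcOpticalFlowPyrLK(prvs, next, p0, p1, st, err, winSize, maxLevel, criteria);
p0.delete(); p1.delete(); st.delete(); err.delete(); none.delete();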

Face Detection

Face Detection: Image


<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Example</title>
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div>
<div class="control"><button id="tryIt" disabled>Start</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let gray = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
let eyes = new cv.RectVector();
let faceCascade = new cv.CascadeClassifier();
let eyeCascade = new cv.CascadeClassifier();
// load pre-trained classifiers
faceCascade.load('haarcascade_frontalface_default.xml');
eyeCascade.load('haarcascade_eye.xml');
// detect faces
let msize = new cv.Size(0, 0);
faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, msize, msize);
for (let i = 0; i < faces.size(); ++i) {
    let roiGray = gray.roi(faces.get(i));
    let roiSrc = src.roi(faces.get(i));
    let point1 = new cv.Point(faces.get(i).x, faces.get(i).y);
    let point2 = new cv.Point(faces.get(i).x + faces.get(i).width,
                              faces.get(i).y + faces.get(i).height);
    cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
    // detect eyes in face ROI
    eyeCascade.detectMultiScale(roiGray, eyes);
    for (let j = 0; j < eyes.size(); ++j) {
        let point1 = new cv.Point(eyes.get(j).x, eyes.get(j).y);
        let point2 = new cv.Point(eyes.get(j).x + eyes.get(j).width,
                                  eyes.get(j).y + eyes.get(j).height);
        cv.rectangle(roiSrc, point1, point2, [0, 0, 255, 255]);
    }
    roiGray.delete(); roiSrc.delete();
}
cv.imshow('canvasOutput', src);
src.delete(); gray.delete(); faceCascade.delete();
eyeCascade.delete(); faces.delete(); eyes.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');

utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');

let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});

utils.loadOpenCv(() => {
    let eyeCascadeFile = 'haarcascade_eye.xml';
    utils.createFileFromUrl(eyeCascadeFile, eyeCascadeFile, () => {
        let faceCascadeFile = 'haarcascade_frontalface_default.xml';
        utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
            tryIt.removeAttribute('disabled');
        });
    });
});
</script>
</body>
</html>

 

The test image above was taken from Baidu.

As you can see, some regions that are not eyes still get marked as eyes. This is because the official cascade files are trained on fairly small data sets and are not very accurate; still, the fact that the eyes are detected at all already serves our learning purpose, and swapping in a more accurate model later would improve the results considerably.
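If the false detections matter, the detectMultiScale call in the snippet can also be made stricter before switching to a better cascade; the values below are illustrative, not tuned:

// scaleFactor 1.1 -> 1.2: scan fewer image scales (faster, slightly less sensitive).
// minNeighbors 3 -> 5: require more overlapping hits before accepting a face.
// minSize: ignore candidate faces smaller than 30x30 pixels.
let minSize = new cv.Size(30, 30);
let maxSize = new cv.Size(0, 0);
faceCascade.detectMultiScale(gray, faces, 1.2, 5, 0, minSize, maxSize);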

Face Detection: Camera


<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Camera Example</title>
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- <h2>Face Detection Camera Example</h2> -->
<div>
<div class="control"><button id="startAndStop">Start</button></div>
<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
	
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
    <table cellpadding="0" cellspacing="0" width="0" border="0">
    <tr>
        <td>
            <video id="videoInput" width=320 height=240></video>
        </td>
        <td>
            <canvas id="canvasOutput" width=320 height=240></canvas>
        </td>
        <td></td>
        <td></td>
    </tr>
    <tr>
        <td>
            <div class="caption">videoInput</div>
        </td>
        <td>
            <div class="caption">canvasOutput</div>
        </td>
        <td></td>
        <td></td>
    </tr>
    </table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<!-- <script src="opencvjs/adapter-5.0.4.js" type="text/javascript"></script> -->
<!-- <script src="opencvjs/known.js" type="text/javascript"></script> -->
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let dst = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let gray = new cv.Mat();
let cap = new cv.VideoCapture(video);
let faces = new cv.RectVector();
let classifier = new cv.CascadeClassifier();

// load pre-trained classifiers
classifier.load('haarcascade_frontalface_default.xml');

const FPS = 30;
function processVideo() {
    try {
        if (!streaming) {
            // clean and stop.
            src.delete();
            dst.delete();
            gray.delete();
            faces.delete();
            classifier.delete();
            return;
        }
        let begin = Date.now();
        // start processing.
        cap.read(src);
        src.copyTo(dst);
        cv.cvtColor(dst, gray, cv.COLOR_RGBA2GRAY, 0);
        // detect faces.
        classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
        // draw faces.
        for (let i = 0; i < faces.size(); ++i) {
            let face = faces.get(i);
            let point1 = new cv.Point(face.x, face.y);
            let point2 = new cv.Point(face.x + face.width, face.y + face.height);
            cv.rectangle(dst, point1, point2, [255, 0, 0, 255]);
        }
        cv.imshow('canvasOutput', dst);
        // schedule the next one.
        let delay = 1000/FPS - (Date.now() - begin);
        setTimeout(processVideo, delay);
    } catch (err) {
        utils.printError(err);
    }
};

// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
	
	let utils = new Utils('errorMessage');
	
	utils.loadCode('codeSnippet', 'codeEditor');
	
	let streaming = false;
	let videoInput = document.getElementById('videoInput');
	let startAndStop = document.getElementById('startAndStop');
	let canvasOutput = document.getElementById('canvasOutput');
	let canvasContext = canvasOutput.getContext('2d');
	
	startAndStop.addEventListener('click', () => {
	    if (!streaming) {
	        utils.clearError();
	        utils.startCamera('qvga', onVideoStarted, 'videoInput');
	    } else {
	        utils.stopCamera();
	        onVideoStopped();
	    }
	});
	
	function onVideoStarted() {
	    streaming = true;
	    startAndStop.innerText = 'Stop';
	    videoInput.width = videoInput.videoWidth;
	    videoInput.height = videoInput.videoHeight;
	    utils.executeCode('codeEditor');
	}
	
	function onVideoStopped() {
	    streaming = false;
	    canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
	    startAndStop.innerText = 'Start';
	}
	
	utils.loadOpenCv(() => {
	    let faceCascadeFile = 'haarcascade_frontalface_default.xml';
	    utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
	        // startAndStop.removeAttribute('disabled');
	    });
	});


</script>
</body>
</html>

4.8.0 Documentation

Compared with 4.5.0, version 4.8.0 adds the deep-learning (DNN) module.

Deep Neural Networks (DNN)

 

Choose the corresponding model, download its model files from the linked URLs, and check that they match the configuration options in the first code block.

Upload the corresponding files and click Run.

At first I assumed it just needed more time. I left it running overnight, but by morning there was still no output. Opening the browser's developer tools showed many errors, and since I could not get the expected result no matter what I tried, I gave up on this part.
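For reference, the DNN flow in the official tutorial boils down to the calls below. This is only a sketch: the file names are placeholders for whichever model is selected, the blob size/scale/mean must match that model's configuration, and the model files must first be written into the Emscripten virtual file system (for example with utils.createFileFromUrl, just like the Haar cascade XML files earlier), assuming the 4.8.0 build exposes these dnn bindings:

// Placeholder file names; use the prototxt/caffemodel (or ONNX, TensorFlow...) of the chosen model.
let net = cv.readNetFromCaffe('model.prototxt', 'model.caffemodel');
let src = cv.imread('canvasInput');
let rgb = new cv.Mat();
cv.cvtColor(src, rgb, cv.COLOR_RGBA2RGB);
// Pack the image into a 4-D input blob; size, scale factor, mean and swapRB come from the model config.
let blob = cv.blobFromImage(rgb, 1.0, new cv.Size(224, 224), new cv.Scalar(104, 117, 123), false);
net.setInput(blob);
let result = net.forward();   // raw network output (e.g. class scores) to post-process
src.delete(); rgb.delete(); blob.delete(); result.delete(); net.delete();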

Additional Notes

Complete Files

Available on Gitee

Removing the Code Text Area

In real development the textarea is not needed, so it should be removed. I tried modifying the HTML and JS files to drop it but did not succeed, so I simply hid the textarea area with display: none:

<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false" style="display: none;"></textarea>
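An alternative (my own sketch, not taken from the referenced article) is to drop the editor path entirely and run the processing code directly once OpenCV.js has loaded, so neither the textarea nor utils.executeCode is needed:

utils.loadOpenCv(() => {
    // The same snippet that used to live in the textarea, executed directly.
    let src = cv.imread('canvasInput');
    let dst = new cv.Mat();
    cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
    cv.imshow('canvasOutput', dst);
    src.delete(); dst.delete();
});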

References

代码哈士,opencv.js人脸识别简单

The opencv.js file on the official site is not an ordinary JavaScript file you can simply grab; the official documentation expects you to set up its build environment. The article above describes how to obtain the official resources by scraping them instead.

OpenCV.js official site
