subrepo:
  subdir:   "lib/tracky-mouse"
  merged:   "e98eac3"
upstream:
  origin:   "https://github.com/1j01/tracky-mouse.git"
  branch:   "main"
  commit:   "e98eac3"
git-subrepo:
  version:  "0.4.3"
  origin:   "https://github.com/ingydotnet/git-subrepo"
  commit:   "2f68596"
main
Isaiah Odhner 2021-05-20 18:58:39 -04:00
parent 25057eac35
commit d900ef1129
30 changed files with 150399 additions and 0 deletions

1
lib/tracky-mouse/.gitattributes vendored Normal file

@ -0,0 +1 @@
* text=auto

2
lib/tracky-mouse/.gitignore vendored Normal file

@ -0,0 +1,2 @@
node_modules/
private/

12
lib/tracky-mouse/.gitrepo Normal file

@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
remote = https://github.com/1j01/tracky-mouse.git
branch = main
commit = e98eac3faf5dc3df970e6c548308ea844d57894a
parent = b7385e95e06810359715d3831aaba99aca1b394b
method = merge
cmdver = 0.4.3

21
lib/tracky-mouse/LICENSE.txt Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 Isaiah Odhner
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

158
lib/tracky-mouse/README.md Normal file

@ -0,0 +1,158 @@
# ![](./images/tracky-mouse-logo-32.png) Tracky Mouse
> Control your computer by moving your head.
Tracky Mouse is intended to be a complete UI for head tracking, similar to [eViacam](https://github.com/cmauri/eviacam), but embeddable in web applications (such as [JS Paint, with its Eye Gaze Mode](https://jspaint.app/#eye-gaze-mode), which I might rename Hands-Free Mode or Facial Mouse Mode), as well as downloadable as an application for controlling your entire computer.
I'm also thinking about making a browser extension, which would:
1. bridge between the desktop application and web applications, making it so you don't need to disable dwell clicking in the desktop app to use a web app that provides dwell clicking,
2. provide the equivalent of the desktop application for Chrome OS, and
3. automatically enhance webpages to be friendlier toward facial mouse input, by preventing menus from closing based on hover, enlarging elements, etc., probably using site-specific enhancements.
So this would be a three-in-one project: desktop app, JavaScript library, and browser extension.
Sharing code between these different facets of the project means a lot of improvements can be made to three different products at once, and the library means applications can ship a fully functional facial mouse UI, which can get people interested in head tracking because they can try it out right away.
Options could be exported/imported or even synced between the products.
## Why did I make this?
- eViacam isn't working on my computer
- There's not that much facial mouse software out there, especially cross-platform, and I think it's good to have options.
- I want people to be able to try JS Paint's Eye Gaze Mode out easily, and an embeddable facial mouse GUI would be great for that. (Backstory: Someone emailed me asking how they might build an eye gaze mode into jspaint, and so I built it for them. I want to build it into something a lot of people can use.)
- Sometimes my joints hurt a lot and I'd like to relieve strain by switching to an alternative input method, such as head movement. Then again, I also have serious neck problems, so I don't know what I was thinking. Working on this project I have to use it very sparingly, using a demo video instead of camera input whenever possible for testing.
## Libraries Used
- [jsfeat](https://github.com/inspirit/jsfeat) for point tracking
- [MIT License](https://github.com/inspirit/jsfeat/blob/master/LICENSE)
- [clmtrackr.js](https://github.com/auduno/clmtrackr) for fast and lightweight but inaccurate face tracking
- [MIT License](https://github.com/auduno/clmtrackr/blob/dev/LICENSE.txt)
- [facemesh](https://github.com/tensorflow/tfjs-models/tree/master/facemesh#mediapipe-facemesh) and [TensorFlow.js](https://www.tensorflow.org/) for accurate face tracking (once this loads, it stops using clmtrackr.js)
- [Apache License 2.0](https://github.com/tensorflow/tfjs-models/blob/master/LICENSE)
- [Apache License 2.0](https://github.com/tensorflow/tensorflow/blob/master/LICENSE)
## License
MIT-licensed, see [LICENSE.txt](./LICENSE.txt)
## Development Setup
- [Clone the repo.](https://help.github.com/articles/cloning-a-repository/)
- Install [Node.js](https://nodejs.org/) if you don't have it
- Open up a command prompt / terminal in the project directory.
- Run `npm install`
- For the electron app:
- First install [RobotJS build dependencies](https://robotjs.io/docs/building) (on Ubuntu, `sudo apt-get install libxtst-dev libpng++-dev build-essential python2.7`)
- Then `cd tracky-mouse-electron && npm install`
## Install Desktop App
The app is not yet distributed as precompiled binaries.
If you want to try out the desktop app in the meantime:
- See Development Setup
- In folder `tracky-mouse-electron`, run `npm start`
## Add to your project
Tracky Mouse is available on npm:
`npm i tracky-mouse`
```html
<script src="node_modules/tracky-mouse/tracky-mouse.js"></script>
<script>
	TrackyMouse.dependenciesRoot = "node_modules/tracky-mouse";
	TrackyMouse.loadDependencies().then(function() {
		TrackyMouse.init();
		// This sort of logic will be built into tracky-mouse in the future.
		let last_el_over;
		const getEventOptions = ({ x, y }) => {
			return {
				view: window, // needed for offsetX/Y calculation
				clientX: x,
				clientY: y,
				pointerId: 1234567890,
				pointerType: "mouse",
				isPrimary: true,
			};
		};
		TrackyMouse.onPointerMove = (x, y) => {
			const target = document.elementFromPoint(x, y) || document.body;
			if (target !== last_el_over) {
				if (last_el_over) {
					const event = new PointerEvent("pointerleave", Object.assign(getEventOptions({ x, y }), {
						button: 0,
						buttons: 1,
						bubbles: false,
						cancelable: false,
					}));
					last_el_over.dispatchEvent(event);
				}
				const event = new PointerEvent("pointerenter", Object.assign(getEventOptions({ x, y }), {
					button: 0,
					buttons: 1,
					bubbles: false,
					cancelable: false,
				}));
				target.dispatchEvent(event);
				last_el_over = target;
			}
			const event = new PointerEvent("pointermove", Object.assign(getEventOptions({ x, y }), {
				button: 0,
				buttons: 1,
				bubbles: true,
				cancelable: true,
			}));
			target.dispatchEvent(event);
		};
	});
</script>
```
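
`TrackyMouse.init()` also accepts a container element, if you want to control where the UI is placed; with no argument, it appends its UI to `document.body`. A minimal sketch (the element ID here is hypothetical):

```js
// Optional: pass your own container element to TrackyMouse.init().
// "tracky-mouse-container" is a hypothetical ID for this example.
const container = document.getElementById("tracky-mouse-container");
TrackyMouse.dependenciesRoot = "node_modules/tracky-mouse";
TrackyMouse.loadDependencies().then(() => {
	TrackyMouse.init(container);
});
```
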
## TODO
- Improve acceleration option (can reference eviacam source code, and play around with different equations)
- Should be able to make smooth circular movements, right now it comes out kinda squarish
- Minimum distance to start moving pointer (option)
- Might want a margin outside of the bounds of the screen, in order to reliably point to the edges of the screen (see the sketch after this list).
The mouse would always be clamped to the screen, but the internal notion of the mouse's position would have some legroom.
It shouldn't be too much, because going to the edge of the screen is also useful for simple on-the-fly "calibration"
- Robust error handling, for camera access etc.
- Test differing video aspect ratios
- Coach user on:
- Granting camera access
- Troubleshooting camera access
- Another application may be using it
- Try unplugging and plugging it back in
- Make sure you can use your camera with another application (but close this application before trying to get it to work in here again)
- Installing (and maybe running?) `guvcview` can magically fix a webcam not showing up (worked for my Logitech C920 when it wouldn't show up in applications even after restart, but was listed in `lsusb`) ([source](https://forums.linuxmint.com/viewtopic.php?t=131011))
- Correct camera
- [`enumerateDevices`](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/enumerateDevices)
- Disabling camera autofocus maybe
- Positioning the camera and yourself
- Above or below the screen is fine but you should be centered so the pointer doesn't move left/right too much when you want it to go up or down
- In particular, you should be in line with the camera, such that your face appears head-on when looking comfortably at the center of the screen
- A guide could show your head rotation
- Calibration for an off-center camera should be possible (or explicitly using your head rotation instead of a projected position)
- If the camera is above, leaning forward generally moves the pointer down
- If the camera is below, leaning forward generally moves the pointer up
- Tilting your head or moving your head both move the pointer
- Lighting
- Detect bad lighting conditions and report to the user
- "Callibration" via simply moving your head to the edges of the screen (it's not like a gesture, it's just implicit in the fact that there are boundaries)
- Choosing settings (sensitivity etc.)
- If you move yourself or your camera, you may want to adjust the sensitivity.
- If you're further away from the camera, you'll want a higher sensitivity.
- Would it make sense to scale this to your head size in the camera? Maybe not with the inaccurate face tracker, but with the accurate one... though you probably wouldn't want it to switch setting-scaling schemes suddenly
- It could detect when your head size changes significantly (and stays stable for a period of time) compared to what it had been (also stable for a period of time), and alert you, suggesting you change the setting, maybe even suggesting a value
- Integrate with dwell clicking functionality in jspaint...
- Dwell click time / area, beep on click options, etc.
- Sparkly effect of some kind instead of just green dots on your face?
- Pose invariance (smiling etc.)
- Simplest might be to just use the bridge of your nose
- Points can disappear due to pruning; we could use other points as a fallback, but just use a nose point as long as it exists?
- Handle occluders explicitly by looking for differing optical flow? (most often a hand, e.g. brushing hair out of eyes)
- Latency compensation for Worker results: I made a "time travel" system, recording camera frames since the frame sent to the worker for processing, and playing them back when receiving the results from the worker to bring them up to speed, but it was too slow to actually do the replaying (tracking the points is actually kind of expensive)
- Dedupe grayscale() computation...
- WebAssembly for tracking points?
- Time travel for adding AND removing points
- Eye tracker
- Hybrid eye tracking + head tracking control, where eye tracking is used for quick movements to any place on the screen, and head tracking is used for fine adjustment. Like [Precision Gaze Mouse](https://precisiongazemouse.org/)
- Try moving away from electron, to a lighter-weight platform like <https://github.com/webview/webview>
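
The screen-margin idea above could look something like this (a sketch with hypothetical names, not part of the library; the clamping mirrors what `tracky-mouse.js` already does):

```js
// Sketch of the screen-edge margin idea (hypothetical helper).
// The internal position may overshoot the screen by up to `margin` pixels,
// so the pointer can be reliably pinned to an edge; the reported position
// is always clamped to the screen.
const margin = 50; // shouldn't be too large, per the note above
let internalX = 0, internalY = 0;
function applyMovement(deltaX, deltaY, screenWidth, screenHeight) {
	internalX = Math.min(Math.max(-margin, internalX + deltaX), screenWidth + margin);
	internalY = Math.min(Math.max(-margin, internalY + deltaY), screenHeight + margin);
	return [
		Math.min(Math.max(0, internalX), screenWidth),
		Math.min(Math.max(0, internalY), screenHeight),
	];
}
```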

35
lib/tracky-mouse/facemesh.worker.js Normal file

@ -0,0 +1,35 @@
importScripts('lib/tf.js');
importScripts('lib/facemesh/facemesh.js');

// Don't use CPU backend for facemesh.
// It's too slow to be useful, without advanced time travel technology. (I have dabbled in time travel, but not cracked it.)
// If the facemesh worker fails to get a WebGL context, it's better that we keep using clmTracker.
// tf.setBackend('cpu');
tf.setBackend('webgl').then((success) => {
	if (!success) {
		console.log("tf.setBackend('webgl') failed");
		close();
	}
}, (error) => {
	console.log("tf.setBackend('webgl') error", error);
	close();
});

var facemeshTensorFlowModel;
onmessage = (e) => {
	// console.log('Message received from main script', e.data);
	if (e.data.type === "LOAD") {
		facemesh.load(e.data.options).then((model) => {
			facemeshTensorFlowModel = model;
			postMessage({ type: "LOADED" });
		});
	} else if (e.data.type === "ESTIMATE_FACES") {
		facemeshTensorFlowModel.estimateFaces(e.data.imageData).then((predictions) => {
			postMessage({ type: "ESTIMATED_FACES", predictions });
		}, (error) => {
			console.log(error);
		});
	}
};
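
For reference, the main-thread side of this message protocol is a plain `postMessage` round trip. A minimal sketch (the full version, with fallback and recovery handling, is `initFacemeshWorker` in `tracky-mouse.js` below):

```js
// Minimal sketch of talking to facemesh.worker.js from the main thread.
const worker = new Worker("facemesh.worker.js");
worker.postMessage({ type: "LOAD", options: { maxFaces: 1 } });
worker.addEventListener("message", (e) => {
	if (e.data.type === "LOADED") {
		// Send a frame for estimation; grab an ImageData from a canvas showing the camera.
		const canvas = document.querySelector("canvas");
		const imageData = canvas.getContext("2d").getImageData(0, 0, canvas.width, canvas.height);
		worker.postMessage({ type: "ESTIMATE_FACES", imageData });
	} else if (e.data.type === "ESTIMATED_FACES") {
		console.log("predictions:", e.data.predictions);
	}
});
```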

lib/tracky-mouse/images/tracky-mouse-logo-16.png Normal file
Binary file not shown. Size: 330 B

lib/tracky-mouse/images/tracky-mouse-logo-32.png Normal file
Binary file not shown. Size: 830 B

lib/tracky-mouse/images/tracky-mouse-logo-512.png Normal file
Binary file not shown. Size: 16 KiB

91
lib/tracky-mouse/images/tracky-mouse-logo.svg Normal file

@ -0,0 +1,91 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="32.0px"
height="32.0px"
viewBox="0 0 32.0 32.0"
version="1.1"
id="SVGRoot"
inkscape:version="1.0.2 (394de47547, 2021-03-26)">
<defs
id="defs10" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.9505793"
inkscape:cx="-140.32709"
inkscape:cy="-40.987874"
inkscape:document-units="px"
inkscape:current-layer="layer1"
inkscape:document-rotation="0"
showgrid="false"
inkscape:window-width="1920"
inkscape:window-height="1016"
inkscape:window-x="0"
inkscape:window-y="0"
inkscape:window-maximized="1"
inkscape:object-paths="true"
inkscape:snap-object-midpoints="true">
<inkscape:grid
type="xygrid"
id="grid19"
empspacing="4" />
</sodipodi:namedview>
<metadata
id="metadata13">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<path
style="opacity:1;fill:#ffff00;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stop-color:#000000"
d="m 5,1 v 29 l 6,-6 5,7 6,-3 -4,-6 h 10 z"
id="path26"
sodipodi:nodetypes="cccccccc" />
<path
style="opacity:1;fill:none;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stop-color:#000000"
d="M 5,18.977829 C 13.15721,22.499704 17,16 17,16"
id="path851"
sodipodi:nodetypes="cc" />
<g
id="path853"
style="opacity:1">
<path
style="color:#000000;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:medium;line-height:normal;font-family:sans-serif;font-variant-ligatures:normal;font-variant-position:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-alternates:normal;font-variant-east-asian:normal;font-feature-settings:normal;font-variation-settings:normal;text-indent:0;text-align:start;text-decoration:none;text-decoration-line:none;text-decoration-style:solid;text-decoration-color:#000000;letter-spacing:normal;word-spacing:normal;text-transform:none;writing-mode:lr-tb;direction:ltr;text-orientation:mixed;dominant-baseline:auto;baseline-shift:baseline;text-anchor:start;white-space:normal;shape-padding:0;shape-margin:0;inline-size:0;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;vector-effect:none;fill:#deaa87;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1.0;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate;stop-color:#000000;stop-opacity:1;"
d="m 8,12 v 1"
id="path955" />
</g>
<circle
style="opacity:1;fill:#000000;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stop-color:#000000"
id="path977"
cx="8.3563318"
cy="13.886208"
r="1.7257235" />
<circle
style="fill:#000000;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stop-color:#000000"
id="path977-3"
cx="12.984397"
cy="13.836293"
r="1.8717158" />
</g>
</svg>


50
lib/tracky-mouse/index.html Normal file

@ -0,0 +1,50 @@
<!DOCTYPE html>
<html>
<head>
	<meta charset="UTF-8">
	<meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline' 'unsafe-eval' blob:">
	<title>Tracky Mouse</title>
	<script src="lib/stats.js"></script>
	<script src="lib/clmtrackr.js"></script>
	<!-- <script src="lib/jsfeat-min.js"></script> exported from patched clmtrackr.js -->
	<style>
		body {
			background-color: white;
			color: black;
		}
		@media (prefers-color-scheme: dark) {
			body {
				background-color: black;
				color: white;
			}
			a:link {
				color: aquamarine;
			}
			a:visited {
				color: rgb(197, 127, 255);
			}
		}
	</style>
	<link rel="stylesheet" type="text/css" href="tracky-mouse.css">
	<link rel="icon" type="image/png" sizes="16x16" href="images/tracky-mouse-logo-16.png">
	<link rel="icon" type="image/png" sizes="512x512" href="images/tracky-mouse-logo-512.png">
</head>
<body>
	<h2>Tracky Mouse</h2>
	<p>
		This is a head tracking system similar to <a href="https://eviacam.crea-si.com/">eViacam</a>,
		using <a href="https://en.wikipedia.org/wiki/Lucas%E2%80%93Kanade_method">Lucas-Kanade optical flow</a> to track points for high accuracy,
		and face detection to understand where to place tracking points.
		It uses two different face detectors, a fast-to-load one and a slower-to-load but much more accurate one, which it switches to automatically when available.
	</p>
	<p><a href="https://github.com/1j01/tracky-mouse">Open source on GitHub.</a> MIT-licensed.</p>
	<script src="tracky-mouse.js"></script>
	<script>
		TrackyMouse.dependenciesRoot = ".";
		TrackyMouse.init();
	</script>
</body>
</html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

1
lib/tracky-mouse/lib/jsfeat-min.js vendored Normal file

File diff suppressed because one or more lines are too long

179
lib/tracky-mouse/lib/stats.js Normal file

@ -0,0 +1,179 @@
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
(global.Stats = factory());
}(this, (function () { 'use strict';
/**
* @author mrdoob / http://mrdoob.com/
*/
var Stats = function () {
var mode = 0;
var container = document.createElement( 'div' );
container.style.cssText = 'position:fixed;top:0;left:0;cursor:pointer;opacity:0.9;z-index:10000';
container.addEventListener( 'click', function ( event ) {
event.preventDefault();
showPanel( ++ mode % container.children.length );
}, false );
//
function addPanel( panel ) {
container.appendChild( panel.dom );
return panel;
}
function showPanel( id ) {
for ( var i = 0; i < container.children.length; i ++ ) {
container.children[ i ].style.display = i === id ? 'block' : 'none';
}
mode = id;
}
//
var beginTime = ( performance || Date ).now(), prevTime = beginTime, frames = 0;
var fpsPanel = addPanel( new Stats.Panel( 'FPS', '#0ff', '#002' ) );
var msPanel = addPanel( new Stats.Panel( 'MS', '#0f0', '#020' ) );
if ( self.performance && self.performance.memory ) {
var memPanel = addPanel( new Stats.Panel( 'MB', '#f08', '#201' ) );
}
showPanel( 0 );
return {
REVISION: 16,
dom: container,
addPanel: addPanel,
showPanel: showPanel,
begin: function () {
beginTime = ( performance || Date ).now();
},
end: function () {
frames ++;
var time = ( performance || Date ).now();
msPanel.update( time - beginTime, 200 );
if ( time >= prevTime + 1000 ) {
fpsPanel.update( ( frames * 1000 ) / ( time - prevTime ), 100 );
prevTime = time;
frames = 0;
if ( memPanel ) {
var memory = performance.memory;
memPanel.update( memory.usedJSHeapSize / 1048576, memory.jsHeapSizeLimit / 1048576 );
}
}
return time;
},
update: function () {
beginTime = this.end();
},
// Backwards Compatibility
domElement: container,
setMode: showPanel
};
};
Stats.Panel = function ( name, fg, bg ) {
var min = Infinity, max = 0, round = Math.round;
var PR = round( window.devicePixelRatio || 1 );
var WIDTH = 80 * PR, HEIGHT = 48 * PR,
TEXT_X = 3 * PR, TEXT_Y = 2 * PR,
GRAPH_X = 3 * PR, GRAPH_Y = 15 * PR,
GRAPH_WIDTH = 74 * PR, GRAPH_HEIGHT = 30 * PR;
var canvas = document.createElement( 'canvas' );
canvas.width = WIDTH;
canvas.height = HEIGHT;
canvas.style.cssText = 'width:80px;height:48px';
var context = canvas.getContext( '2d' );
context.font = 'bold ' + ( 9 * PR ) + 'px Helvetica,Arial,sans-serif';
context.textBaseline = 'top';
context.fillStyle = bg;
context.fillRect( 0, 0, WIDTH, HEIGHT );
context.fillStyle = fg;
context.fillText( name, TEXT_X, TEXT_Y );
context.fillRect( GRAPH_X, GRAPH_Y, GRAPH_WIDTH, GRAPH_HEIGHT );
context.fillStyle = bg;
context.globalAlpha = 0.9;
context.fillRect( GRAPH_X, GRAPH_Y, GRAPH_WIDTH, GRAPH_HEIGHT );
return {
dom: canvas,
update: function ( value, maxValue ) {
min = Math.min( min, value );
max = Math.max( max, value );
context.fillStyle = bg;
context.globalAlpha = 1;
context.fillRect( 0, 0, WIDTH, GRAPH_Y );
context.fillStyle = fg;
context.fillText( round( value ) + ' ' + name + ' (' + round( min ) + '-' + round( max ) + ')', TEXT_X, TEXT_Y );
context.drawImage( canvas, GRAPH_X + PR, GRAPH_Y, GRAPH_WIDTH - PR, GRAPH_HEIGHT, GRAPH_X, GRAPH_Y, GRAPH_WIDTH - PR, GRAPH_HEIGHT );
context.fillRect( GRAPH_X + GRAPH_WIDTH - PR, GRAPH_Y, PR, GRAPH_HEIGHT );
context.fillStyle = bg;
context.globalAlpha = 0.9;
context.fillRect( GRAPH_X + GRAPH_WIDTH - PR, GRAPH_Y, PR, round( ( 1 - ( value / maxValue ) ) * GRAPH_HEIGHT ) );
}
};
};
return Stats;
})));
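
Typical usage of the Stats panel above, as a sketch; `tracky-mouse.js` below instead appends `stats.domElement` and calls `stats.update()` once per frame:

```js
// Minimal usage sketch for stats.js.
var stats = new Stats();
document.body.appendChild(stats.dom); // stats.domElement is the legacy alias
function loop() {
	stats.begin();
	// ...per-frame work to measure goes here...
	stats.end(); // updates the MS panel now, and the FPS panel once per second
	requestAnimationFrame(loop);
}
requestAnimationFrame(loop);
```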

121678
lib/tracky-mouse/lib/tf.js Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

47
lib/tracky-mouse/package.json Normal file

@ -0,0 +1,47 @@
{
"name": "tracky-mouse",
"version": "1.0.0",
"description": "Add facial mouse accessibility to JavaScript applications",
"license": "MIT",
"author": {
"name": "Isaiah Odhner",
"email": "isaiahodhner@gmail.com",
"url": "https://isaiahodhner.io"
},
"keywords": [
"camera-mouse",
"mouse",
"camera",
"webcam",
"head-tracker",
"head-tracking",
"face-tracker",
"face-tracking",
"headmouse",
"facial-mouse",
"accessibility",
"cursor",
"pointer",
"pointing",
"input-method",
"hands-free",
"handsfree",
"desktop-automation"
],
"repository": {
"type": "git",
"url": "git+https://github.com/1j01/tracky-mouse.git"
},
"bugs": {
"url": "https://github.com/1j01/tracky-mouse/issues"
},
"homepage": "https://github.com/1j01/tracky-mouse#readme",
"main": "tracky-mouse.js",
"browser": "tracky-mouse.js",
"files": [
"tracky-mouse.js",
"tracky-mouse.css",
"lib/"
],
"scripts": {}
}

91
lib/tracky-mouse/tracky-mouse-electron/.gitignore vendored Normal file

@ -0,0 +1,91 @@
copied/
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
.DS_Store
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# TypeScript v1 declaration files
typings/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
# parcel-bundler cache (https://parceljs.org/)
.cache
# next.js build output
.next
# nuxt.js build output
.nuxt
# vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# Webpack
.webpack/
# Electron-Forge
out/

74
lib/tracky-mouse/tracky-mouse-electron/forge.config.js Normal file

@ -0,0 +1,74 @@
const fs = require("fs");
const path = require("path");
const glob = require("glob");
const logFile = fs.createWriteStream(path.join(__dirname, "forge-hook.log"));
logFile.write("Hello World\n\n");
module.exports = {
"packagerConfig": {},
"makers": [
{
"name": "@electron-forge/maker-squirrel",
"config": {
"name": "tracky_mouse_electron"
}
},
{
"name": "@electron-forge/maker-zip",
"platforms": [
"darwin"
]
},
{
"name": "@electron-forge/maker-deb",
"config": {}
},
{
"name": "@electron-forge/maker-rpm",
"config": {}
}
],
hooks: {
prePackage: (forgeConfig) => {
logFile.write("prePackage hook\n\n");
return new Promise((resolve, reject) => {
const fromFolder = path.resolve(`${__dirname}/../`);
const toFolder = `${__dirname}/copied/`;
const appGlob = `${fromFolder}/**`;
logFile.write(`appGlob: ${appGlob} \n\n`);
glob(appGlob, {
ignore: [
".*/**",
"**/tracky-mouse-electron/**",
"**/node_modules/**",
"**/private/**",
]
}, async (error, files) => {
logFile.write(`glob callback, files:\n${JSON.stringify(files)}\n\n`);
logFile.write(`Deleting ${toFolder}\n\n`);
await fs.promises.rmdir(toFolder, { recursive: true });
if (error) {
logFile.write(`Failed to copy app files:\n${error}`);
reject(error);
return;
}
const copyPromises = [];
for (const file of files) {
const newFile = path.join(toFolder, path.relative(fromFolder, file));
if (!fs.statSync(file).isDirectory()) {
await fs.promises.mkdir(path.dirname(newFile), { recursive: true });
logFile.write(`Copy: ${file}\n`);
logFile.write(`To: ${newFile}\n`);
copyPromises.push(fs.promises.copyFile(file, newFile));
}
}
await Promise.all(copyPromises);
resolve();
});
});
}
}
};

File diff suppressed because it is too large Load Diff

64
lib/tracky-mouse/tracky-mouse-electron/package.json Normal file

@ -0,0 +1,64 @@
{
"name": "tracky-mouse-electron",
"productName": "Tracky Mouse",
"version": "1.0.0",
"description": "Control your computer with your head movements",
"license": "MIT",
"author": {
"name": "Isaiah Odhner",
"email": "isaiahodhner@gmail.com",
"url": "https://isaiahodhner.io"
},
"keywords": [
"camera-mouse",
"mouse",
"camera",
"webcam",
"head-tracker",
"head-tracking",
"face-tracker",
"face-tracking",
"headmouse",
"facial-mouse",
"accessibility",
"cursor",
"pointer",
"pointing",
"input-method",
"hands-free",
"handsfree",
"desktop-automation"
],
"repository": {
"type": "git",
"url": "git+https://github.com/1j01/tracky-mouse.git"
},
"bugs": {
"url": "https://github.com/1j01/tracky-mouse/issues"
},
"homepage": "https://github.com/1j01/tracky-mouse#readme",
"main": "src/electron-main.js",
"scripts": {
"start": "electron-forge start",
"package": "electron-forge package",
"make": "electron-forge make",
"publish": "electron-forge publish",
"lint": "echo \"No linting configured\""
},
"config": {
"forge": "forge.config.js"
},
"dependencies": {
"electron-squirrel-startup": "^1.0.0",
"robotjs": "^0.6.0"
},
"devDependencies": {
"@electron-forge/cli": "^6.0.0-beta.54",
"@electron-forge/maker-deb": "^6.0.0-beta.54",
"@electron-forge/maker-rpm": "^6.0.0-beta.54",
"@electron-forge/maker-squirrel": "^6.0.0-beta.54",
"@electron-forge/maker-zip": "^6.0.0-beta.54",
"electron": "12.0.7",
"glob": "^7.1.7"
}
}

107
lib/tracky-mouse/tracky-mouse-electron/src/electron-main.js Normal file

@ -0,0 +1,107 @@
const { app, globalShortcut, BrowserWindow } = require('electron');
const path = require('path');
// Handle creating/removing shortcuts on Windows when installing/uninstalling.
if (require('electron-squirrel-startup')) { // eslint-disable-line global-require
app.quit();
}
// Needed for RobotJS native module in renderer process (could be moved to main with IPC)
app.allowRendererProcessReuse = false;
// Allow recovering from WebGL crash unlimited times.
// (To test the recovery, I've been using Ctrl+Alt+F1 and Ctrl+Alt+F2 in Ubuntu.
// Note, if Ctrl + Alt + F2 doesn't get you back, try Ctrl+Alt+F7.)
app.commandLine.appendSwitch("--disable-gpu-process-crash-limit");
const trackyMouseFolder = app.isPackaged ? `${app.getAppPath()}/copied/` : `${__dirname}/../../`;
let mainWindow;
const createWindow = () => {
// Create the browser window.
mainWindow = new BrowserWindow({
width: 800,
height: 600,
webPreferences: {
preload: path.join(app.getAppPath(), 'src/preload.js'),
},
// icon: `${trackyMouseFolder}/images/tracky-mouse-logo-16.png`,
icon: `${trackyMouseFolder}/images/tracky-mouse-logo-512.png`,
});
// and load the index.html of the app.
mainWindow.loadFile(`${trackyMouseFolder}/index.html`);
// Toggle the DevTools with F12
mainWindow.webContents.on("before-input-event", (e, input) => {
if (input.type === "keyDown" && input.key === "F12") {
mainWindow.webContents.toggleDevTools();
mainWindow.webContents.once('devtools-opened', () => { // once, so listeners don't pile up on repeated F12 presses
// Can't use mainWindow.webContents.devToolsWebContents.on("before-input-event") - it just doesn't intercept any events.
mainWindow.webContents.devToolsWebContents.executeJavaScript(`
new Promise((resolve)=> {
addEventListener("keydown", (event) => {
if (event.key === "F12") {
resolve();
}
}, { once: true });
})
`)
.then(() => {
mainWindow.webContents.toggleDevTools();
});
});
}
});
};
// This method will be called when Electron has finished
// initialization and is ready to create browser windows.
// Some APIs can only be used after this event occurs.
app.on('ready', () => {
createWindow();
const success = globalShortcut.register('F9', () => {
// console.log('Toggle tracking');
mainWindow.webContents.send("shortcut", "toggle-tracking");
});
});
// Prevent multiple instances of the app
if (!app.requestSingleInstanceLock()) {
app.quit();
}
app.on('second-instance', () => {
if (mainWindow) {
if (mainWindow.isMinimized()) {
mainWindow.restore();
}
mainWindow.show();
}
});
// Quit when all windows are closed, except on macOS. There, it's common
// for applications and their menu bar to stay active until the user quits
// explicitly with Cmd + Q.
app.on('window-all-closed', () => {
if (process.platform !== 'darwin') {
app.quit();
}
});
app.on('activate', () => {
// On OS X it's common to re-create a window in the app when the
// dock icon is clicked and there are no other windows open.
if (BrowserWindow.getAllWindows().length === 0) {
createWindow();
}
});
// In this file you can include the rest of your app's specific main process
// code. You can also put them in separate files and import them here.

11
lib/tracky-mouse/tracky-mouse-electron/src/preload.js Normal file

@ -0,0 +1,11 @@
const { moveMouse } = require('robotjs');
const { contextBridge, ipcRenderer } = require('electron');

contextBridge.exposeInMainWorld("moveMouse", (...args) => moveMouse(...args));
contextBridge.exposeInMainWorld("onShortcut", (callback) => {
	ipcRenderer.on("shortcut", (event, data) => {
		// console.log("shortcut", data);
		callback(data);
	});
});
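
In the renderer, the two globals exposed above are consumed like so (a sketch; `tracky-mouse.js` below feature-detects both before using them):

```js
// Sketch of consuming the preload bridge from renderer code.
if (window.moveMouse) {
	window.moveMouse(100, 200); // moves the real OS cursor via RobotJS
}
if (typeof onShortcut !== "undefined") {
	onShortcut((shortcutType) => {
		if (shortcutType === "toggle-tracking") {
			// pause or resume tracking here
		}
	});
}
```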

28
lib/tracky-mouse/tracky-mouse.css Normal file

@ -0,0 +1,28 @@
.tracky-mouse-pointer {
	z-index: 900000;
	pointer-events: none;
	border-radius: 50%;
	background-color: red;
	width: 20px;
	height: 20px;
	position: fixed;
	transform: translate(-50%, -50%);
}

.tracky-mouse-ui {
	background-color: rgb(195, 173, 230);
	color: black;
	padding: 10px;
	border-radius: 5px;
	max-width: 600px;
}

.tracky-mouse-canvas {
	width: 100%;
	background-color: rgba(15, 0, 20, 0.5);
}

.tracky-mouse-ui label {
	display: block;
}

.tracky-mouse-ui .label-text {
	display: inline-block;
	min-width: 150px;
}

970
lib/tracky-mouse/tracky-mouse.js Normal file

@ -0,0 +1,970 @@
const TrackyMouse = {
dependenciesRoot: "./tracky-mouse",
};
TrackyMouse.loadDependencies = function () {
TrackyMouse.dependenciesRoot = TrackyMouse.dependenciesRoot.replace(/\/+$/, "");
const loadScript = src => {
return new Promise((resolve, reject) => {
// This wouldn't wait for them to load
// for (const script of document.scripts) {
// if (script.src.includes(src)) {
// resolve();
// return;
// }
// }
const script = document.createElement('script');
script.type = 'text/javascript';
script.onload = resolve;
script.onerror = reject;
script.src = src;
document.head.append(script);
})
};
const scriptFiles = [
`${TrackyMouse.dependenciesRoot}/lib/clmtrackr.js`,
`${TrackyMouse.dependenciesRoot}/lib/facemesh/facemesh.js`,
`${TrackyMouse.dependenciesRoot}/lib/stats.js`,
`${TrackyMouse.dependenciesRoot}/lib/tf.js`,
];
return Promise.all(scriptFiles.map(loadScript));
};
TrackyMouse.init = function (div) {
var uiContainer = div || document.createElement("div");
uiContainer.classList.add("tracky-mouse-ui");
uiContainer.innerHTML = `
<div class="tracky-mouse-controls">
<button id="use-camera">Use my camera</button>
<button id="use-demo">Use demo footage</button>
<br>
<br>
<label><span class="label-text">Horizontal Sensitivity</span> <input type="range" min="0" max="100" value="25" id="sensitivity-x"></label>
<label><span class="label-text">Vertical Sensitivity</span> <input type="range" min="0" max="100" value="50" id="sensitivity-y"></label>
<!-- <label><span class="label-text">Smoothing</span> <input type="range" min="0" max="100" value="50" id="smoothing"></label> -->
<label><span class="label-text">Acceleration</span> <input type="range" min="0" max="100" value="50" id="acceleration"></label>
<!-- <label><span class="label-text">Easy Stop (min distance to move)</span> <input type="range" min="0" max="100" value="50" id="min-distance"></label> -->
<br>
<label><span class="label-text"><input type="checkbox" checked id="mirror"> Mirror</label>
<br>
</div>
<canvas class="tracky-mouse-canvas" id="tracky-mouse-canvas"></canvas>
`;
if (!div) {
document.body.appendChild(uiContainer);
}
var mirrorCheckbox = uiContainer.querySelector("#mirror");
var sensitivityXSlider = uiContainer.querySelector("#sensitivity-x");
var sensitivityYSlider = uiContainer.querySelector("#sensitivity-y");
var accelerationSlider = uiContainer.querySelector("#acceleration");
var useCameraButton = uiContainer.querySelector("#use-camera");
var useDemoFootageButton = uiContainer.querySelector("#use-demo");
var canvas = uiContainer.querySelector("#tracky-mouse-canvas");
var ctx = canvas.getContext('2d');
var pointerEl = document.createElement('div');
pointerEl.className = "tracky-mouse-pointer";
document.body.appendChild(pointerEl);
var cameraVideo = document.createElement('video');
// required to work in iOS 11 & up:
cameraVideo.setAttribute('playsinline', '');
var stats = new Stats();
stats.domElement.style.position = 'absolute';
stats.domElement.style.top = '0px';
stats.domElement.style.right = '0px';
stats.domElement.style.left = '';
document.body.appendChild(stats.domElement);
var defaultWidth = 640;
var defaultHeight = 480;
var maxPoints = 1000;
var mouseX = 0;
var mouseY = 0;
var prevMovementX = 0;
var prevMovementY = 0;
var enableTimeTravel = false;
// var movementXSinceFacemeshUpdate = 0;
// var movementYSinceFacemeshUpdate = 0;
var cameraFramesSinceFacemeshUpdate = [];
var sensitivityX;
var sensitivityY;
var acceleration;
var face;
var faceScore = 0;
var faceScoreThreshold = 0.5;
var faceConvergence = 0;
var faceConvergenceThreshold = 50;
var pointsBasedOnFaceScore = 0;
var paused = false;
var mouseNeedsInitPos = true;
const SLOWMO = false;
var debugTimeTravel = false;
var debugAcceleration = false;
var showDebugText = false;
var mirror;
var useClmTracking = true;
var showClmTracking = useClmTracking;
var useFacemesh = true;
var facemeshOptions = {
maxContinuousChecks: 5,
detectionConfidence: 0.9,
maxFaces: 1,
iouThreshold: 0.3,
scoreThreshold: 0.75
};
var fallbackTimeoutID;
var facemeshLoaded = false;
var facemeshFirstEstimation = true;
var facemeshEstimating = false;
var facemeshRejectNext = 0;
var facemeshPrediction;
var facemeshEstimateFaces;
var faceInViewConfidenceThreshold = 0.7;
var pointsBasedOnFaceInViewConfidence = 0;
// scale of size of frames that are passed to worker and then computed several at once when backtracking for latency compensation
// reducing this makes it much more likely to drop points and thus not work
// THIS IS DISABLED and using a performance optimization of currentCameraImageData instead of getCameraImageData;
// (the currentCameraImageData is also scaled differently, to the fixed canvas size instead of using the native camera image size)
// const frameScaleForWorker = 1;
var mainOops;
var workerSyncedOops;
// const frameCanvas = document.createElement("canvas");
// const frameCtx = frameCanvas.getContext("2d");
// const getCameraImageData = () => {
// if (cameraVideo.videoWidth * frameScaleForWorker * cameraVideo.videoHeight * frameScaleForWorker < 1) {
// return;
// }
// frameCanvas.width = cameraVideo.videoWidth * frameScaleForWorker;
// frameCanvas.height = cameraVideo.videoHeight * frameScaleForWorker;
// frameCtx.drawImage(cameraVideo, 0, 0, frameCanvas.width, frameCanvas.height);
// return frameCtx.getImageData(0, 0, frameCanvas.width, frameCanvas.height);
// };
let currentCameraImageData;
let facemeshWorker;
const initFacemeshWorker = () => {
if (facemeshWorker) {
facemeshWorker.terminate();
}
facemeshEstimating = false;
facemeshFirstEstimation = true;
facemeshLoaded = false;
facemeshWorker = new Worker(`${TrackyMouse.dependenciesRoot}/facemesh.worker.js`);
facemeshWorker.addEventListener("message", (e) => {
// console.log('Message received from worker', e.data);
if (e.data.type === "LOADED") {
facemeshLoaded = true;
facemeshEstimateFaces = () => {
const imageData = currentCameraImageData;//getCameraImageData();
if (!imageData) {
return;
}
facemeshWorker.postMessage({ type: "ESTIMATE_FACES", imageData });
return new Promise((resolve, reject) => {
facemeshWorker.addEventListener("message", (e) => {
if (e.data.type === "ESTIMATED_FACES") {
resolve(e.data.predictions);
}
}, { once: true });
});
};
}
}, { once: true });
facemeshWorker.postMessage({ type: "LOAD", options: facemeshOptions });
};
if (useFacemesh) {
initFacemeshWorker();
};
sensitivityXSlider.onchange = () => {
sensitivityX = sensitivityXSlider.value / 1000;
};
sensitivityYSlider.onchange = () => {
sensitivityY = sensitivityYSlider.value / 1000;
};
accelerationSlider.onchange = () => {
acceleration = accelerationSlider.value / 100;
};
mirrorCheckbox.onchange = () => {
mirror = mirrorCheckbox.checked;
};
mirrorCheckbox.onchange();
sensitivityXSlider.onchange();
sensitivityYSlider.onchange();
accelerationSlider.onchange();
// Don't use WebGL because clmTracker is our fallback! It's also not much slower than with WebGL.
var clmTracker = new clm.tracker({ useWebGL: false });
clmTracker.init();
var clmTrackingStarted = false;
const reset = () => {
clmTrackingStarted = false;
cameraFramesSinceFacemeshUpdate.length = 0;
if (facemeshPrediction) {
// facemesh has a setting maxContinuousChecks that determines "How many frames to go without running
// the bounding box detector. Only relevant if maxFaces > 1. Defaults to 5."
facemeshRejectNext = facemeshOptions.maxContinuousChecks;
}
facemeshPrediction = null;
useClmTracking = true;
showClmTracking = true;
pointsBasedOnFaceScore = 0;
faceScore = 0;
faceConvergence = 0;
};
useCameraButton.onclick = TrackyMouse.useCamera = () => {
navigator.mediaDevices.getUserMedia({
audio: false,
video: {
width: defaultWidth,
height: defaultHeight,
facingMode: "user",
}
}).then((stream) => {
reset();
try {
if ('srcObject' in cameraVideo) {
cameraVideo.srcObject = stream;
} else {
cameraVideo.src = window.URL.createObjectURL(stream);
}
} catch (err) {
cameraVideo.src = stream;
}
}, (error) => {
console.log(error);
});
};
useDemoFootageButton.onclick = TrackyMouse.useDemoFootage = () => {
reset();
cameraVideo.srcObject = null;
cameraVideo.src = `${TrackyMouse.dependenciesRoot}/private/demo-input-footage.webm`;
cameraVideo.loop = true;
};
if (!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)) {
console.log('getUserMedia not supported in this browser');
}
cameraVideo.addEventListener('loadedmetadata', () => {
cameraVideo.play();
cameraVideo.width = cameraVideo.videoWidth;
cameraVideo.height = cameraVideo.videoHeight;
canvas.width = cameraVideo.videoWidth;
canvas.height = cameraVideo.videoHeight;
debugFramesCanvas.width = cameraVideo.videoWidth;
debugFramesCanvas.height = cameraVideo.videoHeight;
debugPointsCanvas.width = cameraVideo.videoWidth;
debugPointsCanvas.height = cameraVideo.videoHeight;
mainOops = new OOPS();
if (useFacemesh) {
workerSyncedOops = new OOPS();
}
});
cameraVideo.addEventListener('play', () => {
clmTracker.reset();
clmTracker.initFaceDetector(cameraVideo);
clmTrackingStarted = true;
});
canvas.width = defaultWidth;
canvas.height = defaultHeight;
cameraVideo.width = defaultWidth;
cameraVideo.height = defaultHeight;
const debugFramesCanvas = document.createElement("canvas");
debugFramesCanvas.width = canvas.width;
debugFramesCanvas.height = canvas.height;
const debugFramesCtx = debugFramesCanvas.getContext("2d");
const debugPointsCanvas = document.createElement("canvas");
debugPointsCanvas.width = canvas.width;
debugPointsCanvas.height = canvas.height;
const debugPointsCtx = debugPointsCanvas.getContext("2d");
// function getPyramidData(pyramid) {
// const array = new Float32Array(pyramid.data.reduce((sum, matrix)=> sum + matrix.buffer.f32.length, 0));
// let offset = 0;
// for (const matrix of pyramid.data) {
// copy matrix.buffer.f32 into array starting at offset;
// offset += matrix.buffer.f32.length;
// }
// return array;
// }
// function setPyramidData(pyramid, array) {
// let offset = 0;
// for (const matrix of pyramid.data) {
// copy portion of array starting at offset into matrix.buffer.f32
// offset += matrix.buffer.f32.length;
// }
// }
// maybe should be based on size of head in view?
const pruningGridSize = 5;
const minDistanceToAddPoint = pruningGridSize * 1.5;
// Object Oriented Programming Sucks
// or Optical flOw Points System
class OOPS {
constructor() {
this.curPyramid = new jsfeat.pyramid_t(3);
this.prevPyramid = new jsfeat.pyramid_t(3);
this.curPyramid.allocate(cameraVideo.videoWidth, cameraVideo.videoHeight, jsfeat.U8C1_t);
this.prevPyramid.allocate(cameraVideo.videoWidth, cameraVideo.videoHeight, jsfeat.U8C1_t);
this.pointCount = 0;
this.pointStatus = new Uint8Array(maxPoints);
this.prevXY = new Float32Array(maxPoints * 2);
this.curXY = new Float32Array(maxPoints * 2);
}
addPoint(x, y) {
if (this.pointCount < maxPoints) {
var pointIndex = this.pointCount * 2;
this.curXY[pointIndex] = x;
this.curXY[pointIndex + 1] = y;
this.prevXY[pointIndex] = x;
this.prevXY[pointIndex + 1] = y;
this.pointCount++;
}
}
filterPoints(condition) {
var outputPointIndex = 0;
for (var inputPointIndex = 0; inputPointIndex < this.pointCount; inputPointIndex++) {
if (condition(inputPointIndex)) {
if (outputPointIndex < inputPointIndex) {
var inputOffset = inputPointIndex * 2;
var outputOffset = outputPointIndex * 2;
this.curXY[outputOffset] = this.curXY[inputOffset];
this.curXY[outputOffset + 1] = this.curXY[inputOffset + 1];
this.prevXY[outputOffset] = this.prevXY[inputOffset];
this.prevXY[outputOffset + 1] = this.prevXY[inputOffset + 1];
}
outputPointIndex++;
} else {
debugPointsCtx.fillStyle = "red";
var inputOffset = inputPointIndex * 2;
circle(debugPointsCtx, this.curXY[inputOffset], this.curXY[inputOffset + 1], 5);
debugPointsCtx.fillText(condition.toString(), 5 + this.curXY[inputOffset], this.curXY[inputOffset + 1]);
// console.log(this.curXY[inputOffset], this.curXY[inputOffset + 1]);
ctx.strokeStyle = ctx.fillStyle;
ctx.beginPath();
ctx.moveTo(this.prevXY[inputOffset], this.prevXY[inputOffset + 1]);
ctx.lineTo(this.curXY[inputOffset], this.curXY[inputOffset + 1]);
ctx.stroke();
}
}
this.pointCount = outputPointIndex;
}
prunePoints() {
// pointStatus is only valid (indices line up) before filtering occurs, so must come first (could be combined though)
this.filterPoints((pointIndex) => this.pointStatus[pointIndex] == 1);
// De-duplicate points that are too close together
// - Points that have collapsed together are completely useless.
// - Points that are too close together are not necessarily helpful,
// and may adversely affect the tracking due to uneven weighting across your face.
// - Reducing the number of points improves FPS.
const grid = {};
for (let pointIndex = 0; pointIndex < this.pointCount; pointIndex++) {
const pointOffset = pointIndex * 2;
grid[`${~~(this.curXY[pointOffset] / pruningGridSize)},${~~(this.curXY[pointOffset + 1] / pruningGridSize)}`] = pointIndex;
}
const indexesToKeep = Object.values(grid);
this.filterPoints((pointIndex) => indexesToKeep.includes(pointIndex));
}
update(imageData) {
[this.prevXY, this.curXY] = [this.curXY, this.prevXY];
[this.prevPyramid, this.curPyramid] = [this.curPyramid, this.prevPyramid];
// these are options worth breaking out and exploring
var winSize = 20;
var maxIterations = 30;
var epsilon = 0.01;
var minEigen = 0.001;
jsfeat.imgproc.grayscale(imageData.data, imageData.width, imageData.height, this.curPyramid.data[0]);
this.curPyramid.build(this.curPyramid.data[0], true);
jsfeat.optical_flow_lk.track(
this.prevPyramid, this.curPyramid,
this.prevXY, this.curXY,
this.pointCount,
winSize, maxIterations,
this.pointStatus,
epsilon, minEigen);
this.prunePoints();
}
draw(ctx) {
for (var i = 0; i < this.pointCount; i++) {
var pointOffset = i * 2;
// var distMoved = Math.hypot(
// this.prevXY[pointOffset] - this.curXY[pointOffset],
// this.prevXY[pointOffset + 1] - this.curXY[pointOffset + 1]
// );
// if (distMoved >= 1) {
// ctx.fillStyle = "lime";
// } else {
// ctx.fillStyle = "gray";
// }
circle(ctx, this.curXY[pointOffset], this.curXY[pointOffset + 1], 3);
ctx.strokeStyle = ctx.fillStyle;
ctx.beginPath();
ctx.moveTo(this.prevXY[pointOffset], this.prevXY[pointOffset + 1]);
ctx.lineTo(this.curXY[pointOffset], this.curXY[pointOffset + 1]);
ctx.stroke();
}
}
getMovement() {
var movementX = 0;
var movementY = 0;
var numMovements = 0;
for (var i = 0; i < this.pointCount; i++) {
var pointOffset = i * 2;
movementX += this.curXY[pointOffset] - this.prevXY[pointOffset];
movementY += this.curXY[pointOffset + 1] - this.prevXY[pointOffset + 1];
numMovements += 1;
}
if (numMovements > 0) {
movementX /= numMovements;
movementY /= numMovements;
}
return [movementX, movementY];
}
}
canvas.addEventListener('click', (event) => {
if (!mainOops) {
return;
}
const rect = canvas.getBoundingClientRect();
if (mirror) {
mainOops.addPoint(
(rect.right - event.clientX) / rect.width * canvas.width,
(event.clientY - rect.top) / rect.height * canvas.height,
);
} else {
mainOops.addPoint(
(event.clientX - rect.left) / rect.width * canvas.width,
(event.clientY - rect.top) / rect.height * canvas.height,
);
}
});
function maybeAddPoint(oops, x, y) {
// In order to prefer points that already exist, since they're already tracking,
// in order to keep a smooth overall tracking calculation,
// don't add points if they're close to an existing point.
// Otherwise, it would not just be redundant, but often remove the older points, in the pruning.
for (var pointIndex = 0; pointIndex < oops.pointCount; pointIndex++) {
var pointOffset = pointIndex * 2;
// var distance = Math.hypot(
// x - oops.curXY[pointOffset],
// y - oops.curXY[pointOffset + 1]
// );
// if (distance < 8) {
// return;
// }
// It might be good to base this on the size of the face...
// Also, since we're pruning points based on a grid,
// there's not much point in using Euclidean distance here,
// we can just look at x and y distances.
if (
Math.abs(x - oops.curXY[pointOffset]) <= minDistanceToAddPoint &&
Math.abs(y - oops.curXY[pointOffset + 1]) <= minDistanceToAddPoint
) {
return;
}
}
oops.addPoint(x, y);
}
function animate() {
requestAnimationFrame(animate);
draw(!SLOWMO && (!paused || document.visibilityState === "visible"));
}
function draw(update = true) {
ctx.resetTransform(); // in case there is an error, don't flip constantly back and forth due to mirroring
ctx.clearRect(0, 0, canvas.width, canvas.height); // in case there's no footage
ctx.save();
ctx.drawImage(cameraVideo, 0, 0, canvas.width, canvas.height);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
currentCameraImageData = imageData;
if (mirror) {
ctx.translate(canvas.width, 0);
ctx.scale(-1, 1);
ctx.drawImage(cameraVideo, 0, 0, canvas.width, canvas.height);
}
if (!mainOops) {
return;
}
if (update) {
if (clmTrackingStarted) {
if (useClmTracking || showClmTracking) {
try {
clmTracker.track(cameraVideo);
} catch (error) {
console.warn("Error in clmTracker.track()", error);
if (clmTracker.getCurrentParameters().includes(NaN)) {
console.warn("NaNs creeped in.");
}
}
face = clmTracker.getCurrentPosition();
faceScore = clmTracker.getScore();
faceConvergence = Math.pow(clmTracker.getConvergence(), 0.5);
}
if (facemeshLoaded && !facemeshEstimating) {
facemeshEstimating = true;
// movementXSinceFacemeshUpdate = 0;
// movementYSinceFacemeshUpdate = 0;
cameraFramesSinceFacemeshUpdate = [];
// If I switch virtual console desktop sessions in Ubuntu with Ctrl+Alt+F1 (and back with Ctrl+Alt+F2),
// WebGL context is lost, which breaks facemesh (and clmTracker if useWebGL is not false)
// Error: Size(8192) must match the product of shape 0, 0, 0
// at inferFromImplicitShape (tf.js:14142)
// at Object.reshape$3 [as kernelFunc] (tf.js:110368)
// at kernelFunc (tf.js:17241)
// at tf.js:17334
// at Engine.scopedRun (tf.js:17094)
// at Engine.runKernelFunc (tf.js:17328)
// at Engine.runKernel (tf.js:17171)
// at reshape_ (tf.js:25875)
// at reshape__op (tf.js:18348)
// at executeOp (tf.js:85396)
// WebGL: CONTEXT_LOST_WEBGL: loseContext: context lost
// Note that the first estimation from facemesh often takes a while,
// and we don't want to continuously terminate the worker as it's working on those first results.
// And also, for the first estimate it hasn't actually disabled clmtracker yet, so it's fine if it's a long timeout.
clearTimeout(fallbackTimeoutID);
fallbackTimeoutID = setTimeout(() => {
if (!useClmTracking) {
reset();
clmTracker.init();
clmTracker.reset();
clmTracker.initFaceDetector(cameraVideo);
clmTrackingStarted = true;
console.warn("Falling back to clmtracker");
}
// If you've switched desktop sessions, it will presumably fail to get a new webgl context until you've switched back
// Is this setInterval useful, vs just starting the worker?
// It probably has a faster cycle, with the code as it is now, but maybe not inherently.
// TODO: do the extra getContext() calls add to a GPU process crash limit
// that makes it only able to recover a couple times (outside the electron app)?
// For electron, I set chromium flag --disable-gpu-process-crash-limit so it can recover unlimited times.
// TODO: there's still the case of WebGL backend failing to initialize NOT due to the process crash limit,
// where it'd be good to have it try again (maybe with exponential falloff?)
// (I think I can move my fallbackTimeout code into/around `initFacemeshWorker` and `facemeshEstimateFaces`)
// Note: clearTimeout/clearInterval work interchangeably
fallbackTimeoutID = setInterval(() => {
try {
// Once we can create a webgl2 canvas...
document.createElement("canvas").getContext("webgl2");
clearInterval(fallbackTimeoutID);
// It's worth trying to re-initialize...
setTimeout(() => {
console.warn("Re-initializing facemesh worker");
initFacemeshWorker();
facemeshRejectNext = 1; // or more?
}, 1000);
} catch (e) { }
}, 500);
}, facemeshFirstEstimation ? 20000 : 2000);
facemeshEstimateFaces().then((predictions) => {
facemeshEstimating = false;
facemeshFirstEstimation = false;
facemeshRejectNext -= 1;
if (facemeshRejectNext > 0) {
return;
}
facemeshPrediction = predictions[0]; // undefined if no faces found
useClmTracking = false;
showClmTracking = false;
clearTimeout(fallbackTimeoutID);
if (!facemeshPrediction) {
return;
}
// this applies to facemeshPrediction.annotations as well, which references the same points
// facemeshPrediction.scaledMesh.forEach((point) => {
// point[0] /= frameScaleForWorker;
// point[1] /= frameScaleForWorker;
// });
// time travel latency compensation
// keep a history of camera frames since the prediction was requested,
// and analyze optical flow of new points over that history
// mainOops.filterPoints(() => false); // for DEBUG, empty points (could probably also just set pointCount = 0)
workerSyncedOops.filterPoints(() => false); // empty points (could probably also just set pointCount = 0)
const { annotations } = facemeshPrediction;
// nostrils
workerSyncedOops.addPoint(annotations.noseLeftCorner[0][0], annotations.noseLeftCorner[0][1]);
workerSyncedOops.addPoint(annotations.noseRightCorner[0][0], annotations.noseRightCorner[0][1]);
// midway between eyes
workerSyncedOops.addPoint(annotations.midwayBetweenEyes[0][0], annotations.midwayBetweenEyes[0][1]);
// inner eye corners
// workerSyncedOops.addPoint(annotations.leftEyeLower0[8][0], annotations.leftEyeLower0[8][1]);
// workerSyncedOops.addPoint(annotations.rightEyeLower0[8][0], annotations.rightEyeLower0[8][1]);
// console.log(workerSyncedOops.pointCount, cameraFramesSinceFacemeshUpdate.length, workerSyncedOops.curXY);
if (enableTimeTravel) {
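// NOTE: the debug drawing below uses frameCanvas/frameCtx, which are commented out above; they'd need to be re-enabled for debugTimeTravel to work.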
debugFramesCtx.clearRect(0, 0, debugFramesCanvas.width, debugFramesCanvas.height);
setTimeout(() => {
debugPointsCtx.clearRect(0, 0, debugPointsCanvas.width, debugPointsCanvas.height);
}, 900)
cameraFramesSinceFacemeshUpdate.forEach((imageData, index) => {
if (debugTimeTravel) {
debugFramesCtx.save();
debugFramesCtx.globalAlpha = 0.1;
// debugFramesCtx.globalCompositeOperation = index % 2 === 0 ? "xor" : "xor";
frameCtx.putImageData(imageData, 0, 0);
// debugFramesCtx.putImageData(imageData, 0, 0);
debugFramesCtx.drawImage(frameCanvas, 0, 0, canvas.width, canvas.height);
debugFramesCtx.restore();
debugPointsCtx.fillStyle = "aqua";
workerSyncedOops.draw(debugPointsCtx);
}
workerSyncedOops.update(imageData);
});
}
// Bring points from workerSyncedOops to realtime mainOops
for (var pointIndex = 0; pointIndex < workerSyncedOops.pointCount; pointIndex++) {
const pointOffset = pointIndex * 2;
maybeAddPoint(mainOops, workerSyncedOops.curXY[pointOffset], workerSyncedOops.curXY[pointOffset + 1]);
}
// Don't do this! It's not how this is supposed to work.
// mainOops.pointCount = workerSyncedOops.pointCount;
// for (var pointIndex = 0; pointIndex < workerSyncedOops.pointCount; pointIndex++) {
// const pointOffset = pointIndex * 2;
// mainOops.curXY[pointOffset] = workerSyncedOops.curXY[pointOffset];
// mainOops.curXY[pointOffset+1] = workerSyncedOops.curXY[pointOffset+1];
// mainOops.prevXY[pointOffset] = workerSyncedOops.prevXY[pointOffset];
// mainOops.prevXY[pointOffset+1] = workerSyncedOops.prevXY[pointOffset+1];
// }
// naive latency compensation
// Note: this applies to facemeshPrediction.annotations as well which references the same point objects
// Note: This latency compensation only really works if it's already tracking well
// if (prevFaceInViewConfidence > 0.99) {
// facemeshPrediction.scaledMesh.forEach((point) => {
// point[0] += movementXSinceFacemeshUpdate;
// point[1] += movementYSinceFacemeshUpdate;
// });
// }
pointsBasedOnFaceInViewConfidence = facemeshPrediction.faceInViewConfidence;
// TODO: separate confidence threshold for removing vs adding points?
// cull points to those within useful facial region
// TODO: use time travel for this too, probably! with a history of the points
// a complexity would be that points can be removed over time and we need to keep them identified
mainOops.filterPoints((pointIndex) => {
var pointOffset = pointIndex * 2;
// distance from tip of nose (stretched so make an ellipse taller than wide)
var distance = Math.hypot(
(annotations.noseTip[0][0] - mainOops.curXY[pointOffset]) * 1.4,
annotations.noseTip[0][1] - mainOops.curXY[pointOffset + 1]
);
var headSize = Math.hypot(
annotations.leftCheek[0][0] - annotations.rightCheek[0][0],
annotations.leftCheek[0][1] - annotations.rightCheek[0][1]
);
if (distance > headSize) {
return false;
}
// Avoid blinking eyes affecting pointer position.
// distance to outer corners of eyes
distance = Math.min(
Math.hypot(
annotations.leftEyeLower0[0][0] - mainOops.curXY[pointOffset],
annotations.leftEyeLower0[0][1] - mainOops.curXY[pointOffset + 1]
),
Math.hypot(
annotations.rightEyeLower0[0][0] - mainOops.curXY[pointOffset],
annotations.rightEyeLower0[0][1] - mainOops.curXY[pointOffset + 1]
),
);
if (distance < headSize * 0.42) {
return false;
}
return true;
});
}, () => {
facemeshEstimating = false;
facemeshFirstEstimation = false;
});
}
}
mainOops.update(imageData);
}
if (facemeshPrediction) {
ctx.fillStyle = "red";
const bad = facemeshPrediction.faceInViewConfidence < faceInViewConfidenceThreshold;
ctx.fillStyle = bad ? 'rgb(255,255,0)' : 'rgb(130,255,50)';
if (!bad || mainOops.pointCount < 3 || facemeshPrediction.faceInViewConfidence > pointsBasedOnFaceInViewConfidence + 0.05) {
if (bad) {
ctx.fillStyle = 'rgb(255,0,255)';
}
if (update && useFacemesh) {
// this should just be visual, since we only add/remove points based on the facemesh data when receiving it
facemeshPrediction.scaledMesh.forEach((point) => {
point[0] += prevMovementX;
point[1] += prevMovementY;
});
}
facemeshPrediction.scaledMesh.forEach(([x, y, z]) => {
ctx.fillRect(x, y, 1, 1);
});
} else {
if (update && useFacemesh) {
pointsBasedOnFaceInViewConfidence -= 0.001;
}
}
}
if (face) {
const bad = faceScore < faceScoreThreshold;
ctx.strokeStyle = bad ? 'rgb(255,255,0)' : 'rgb(130,255,50)';
if (!bad || mainOops.pointCount < 2 || faceScore > pointsBasedOnFaceScore + 0.05) {
if (bad) {
ctx.strokeStyle = 'rgb(255,0,255)';
}
if (update && useClmTracking) {
pointsBasedOnFaceScore = faceScore;
// nostrils
maybeAddPoint(mainOops, face[42][0], face[42][1]);
maybeAddPoint(mainOops, face[43][0], face[43][1]);
// inner eye corners
// maybeAddPoint(mainOops, face[25][0], face[25][1]);
// maybeAddPoint(mainOops, face[30][0], face[30][1]);
// TODO: separate confidence threshold for removing vs adding points?
// cull points to those within useful facial region
mainOops.filterPoints((pointIndex) => {
var pointOffset = pointIndex * 2;
// distance from tip of nose (stretched so make an ellipse taller than wide)
var distance = Math.hypot(
(face[62][0] - mainOops.curXY[pointOffset]) * 1.4,
face[62][1] - mainOops.curXY[pointOffset + 1]
);
// distance based on outer eye corners
var headSize = Math.hypot(
face[23][0] - face[28][0],
face[23][1] - face[28][1]
);
if (distance > headSize) {
return false;
}
return true;
});
}
} else {
if (update && useClmTracking) {
pointsBasedOnFaceScore -= 0.001;
}
}
if (showClmTracking) {
clmTracker.draw(canvas, undefined, undefined, true);
}
}
if (debugTimeTravel) {
ctx.save();
ctx.globalAlpha = 0.8;
ctx.drawImage(debugFramesCanvas, 0, 0);
ctx.restore();
ctx.drawImage(debugPointsCanvas, 0, 0);
}
ctx.fillStyle = "lime";
mainOops.draw(ctx);
debugPointsCtx.fillStyle = "green";
mainOops.draw(debugPointsCtx);
if (update) {
var [movementX, movementY] = mainOops.getMovement();
// Acceleration curves add a lot of stability,
// letting you focus on a specific point without jitter, but still move quickly.
// var accelerate = (delta, distance) => (delta / 10) * (distance ** 0.8);
// var accelerate = (delta, distance) => (delta / 1) * (Math.abs(delta) ** 0.8);
var accelerate = (delta, distance) => (delta / 1) * (Math.abs(delta * 5) ** acceleration);
var distance = Math.hypot(movementX, movementY);
var deltaX = accelerate(movementX * sensitivityX, distance);
var deltaY = accelerate(movementY * sensitivityY, distance);
if (debugAcceleration) {
const graphWidth = 200;
const graphHeight = 150;
const graphMaxInput = 0.2;
const graphMaxOutput = 0.4;
const hilightInputRange = 0.01;
ctx.save();
ctx.fillStyle = "black";
ctx.fillRect(0, 0, graphWidth, graphHeight);
const hilightInput = movementX * sensitivityX;
for (let x = 0; x < graphWidth; x++) {
const input = x / graphWidth * graphMaxInput;
const output = accelerate(input, input);
const y = output / graphMaxOutput * graphHeight;
// ctx.fillStyle = Math.abs(y - deltaX) < 1 ? "yellow" : "lime";
const hilight = Math.abs(Math.abs(input) - Math.abs(hilightInput)) < hilightInputRange;
if (hilight) {
ctx.fillStyle = "rgba(255, 255, 0, 0.3)";
ctx.fillRect(x, 0, 1, graphHeight);
}
ctx.fillStyle = hilight ? "yellow" : "lime";
ctx.fillRect(x, graphHeight - y, 1, y);
}
ctx.restore();
}
// This should never happen
if (!isFinite(deltaX) || !isFinite(deltaY)) {
return;
}
if (!paused) {
const screenWidth = window.moveMouse ? screen.width : innerWidth;
const screenHeight = window.moveMouse ? screen.height : innerHeight;
mouseX -= deltaX * screenWidth;
mouseY += deltaY * screenHeight;
mouseX = Math.min(Math.max(0, mouseX), screenWidth);
mouseY = Math.min(Math.max(0, mouseY), screenHeight);
if (mouseNeedsInitPos) {
// TODO: option to get preexisting mouse position instead of set it to center of screen
mouseX = screenWidth / 2;
mouseY = screenHeight / 2;
mouseNeedsInitPos = false;
}
if (window.moveMouse) {
window.moveMouse(~~mouseX, ~~mouseY);
pointerEl.style.display = "none";
} else {
pointerEl.style.display = "";
pointerEl.style.left = `${mouseX}px`;
pointerEl.style.top = `${mouseY}px`;
}
if (TrackyMouse.onPointerMove) {
TrackyMouse.onPointerMove(mouseX, mouseY);
}
}
prevMovementX = movementX;
prevMovementY = movementY;
// movementXSinceFacemeshUpdate += movementX;
// movementYSinceFacemeshUpdate += movementY;
if (enableTimeTravel) {
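// NOTE: getCameraImageData is commented out above; it would need to be re-enabled for this time-travel buffering to work.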
if (facemeshEstimating) {
const imageData = getCameraImageData();
if (imageData) {
cameraFramesSinceFacemeshUpdate.push(imageData);
}
// limit this buffer size in case something goes wrong
if (cameraFramesSinceFacemeshUpdate.length > 500) {
// maybe just clear it entirely, because a partial buffer might not be useful
cameraFramesSinceFacemeshUpdate.length = 0;
}
}
}
}
ctx.restore();
if (showDebugText) {
ctx.save();
ctx.fillStyle = "#fff";
ctx.strokeStyle = "#000";
ctx.lineWidth = 3;
ctx.font = "20px sans-serif";
ctx.beginPath();
const text3 = "Face convergence score: " + ((useFacemesh && facemeshPrediction) ? "N/A" : faceConvergence.toFixed(4));
const text1 = "Face tracking score: " + ((useFacemesh && facemeshPrediction) ? facemeshPrediction.faceInViewConfidence : faceScore).toFixed(4);
const text2 = "Points based on score: " + ((useFacemesh && facemeshPrediction) ? pointsBasedOnFaceInViewConfidence : pointsBasedOnFaceScore).toFixed(4);
ctx.strokeText(text1, 50, 50);
ctx.fillText(text1, 50, 50);
ctx.strokeText(text2, 50, 70);
ctx.fillText(text2, 50, 70);
ctx.strokeText(text3, 50, 170);
ctx.fillText(text3, 50, 170);
ctx.fillStyle = "lime";
ctx.fillRect(0, 150, faceConvergence, 5);
ctx.fillRect(0, 0, faceScore * canvas.width, 5);
ctx.restore();
}
stats.update();
}
function circle(ctx, x, y, r) {
ctx.beginPath();
ctx.arc(x, y, r, 0, Math.PI * 2);
ctx.fill();
}
animate();
if (SLOWMO) {
setInterval(draw, 200);
}
let autoDemo = false;
try {
autoDemo = localStorage.trackyMouseAutoDemo === "true";
} catch (error) {
}
if (autoDemo) {
TrackyMouse.useDemoFootage();
} else if (window.moveMouse) {
TrackyMouse.useCamera();
}
const handleShortcut = (shortcutType) => {
if (shortcutType === "toggle-tracking") {
paused = !paused;
mouseNeedsInitPos = true;
if (paused) {
pointerEl.style.display = "none";
}
}
};
if (typeof onShortcut !== "undefined") {
onShortcut(handleShortcut);
} else {
addEventListener("keydown", (event) => {
// Same shortcut as the global shortcut in the electron app (is that gonna be a problem?)
if (!event.ctrlKey && !event.metaKey && !event.altKey && !event.shiftKey && event.key === "F9") {
handleShortcut("toggle-tracking");
}
});
}
}