Reading:
https://github.com/ellennickles/ml5js-model-and-data-provenance-project
Some thoughts I have after the reading:
Last class we discussed labeling images and how that data was collected. This is a similar case in terms of biases and ethical standards. Knowing the sources and potential biases allows creators to make informed choices about which models to use based on their intended purpose. Also, being aware of ethical issues regarding data usage gives creators a sense of responsibility and transparency, along with other researchers and organizations (hopefully).
Link to the GitHub project (I still need to upload my changes — I got it working, just not with bodyPose): https://cp3636.github.io/
Although VS Code says I need Homebrew installed??
Original Code Using poseNet:
// Legacy ml5 poseNet setup (kept for reference): the model wraps the video
// element and streams detections through a "pose" event.
poseNet = ml5.poseNet(video, modelReady);
// This sets up an event that fills the global variable "poses"
// with an array every time new poses are detected
poseNet.on("pose", function (results) {
poses = results;
});
bodyPose Assignment
Goal: Update live web final project from using poseNet to bodyPose
New code:
bodyPose = ml5.bodyPose(video, modelReady);
bodyPose.on('pose', function (results) {
poses = results;
});
button = createButton("");
button = createImg("images/camera.png");
button.position(600, 790);
button.size(50, 50);
button.mousePressed(takePhoto);
buttonclown = createImg(
"<https://cdn.glitch.global/0aaba933-6b39-4595-bd6a-263b91e4b137/clown_nose.png?v=1646677023601>"
);
buttonclown.position(440, 780);
buttonclown.size(80, 80);
buttonclown.mousePressed(function () {
clownnosestate = !clownnosestate;
});
buttonsunglasses = createImg(
"<https://cdn.glitch.global/0aaba933-6b39-4595-bd6a-263b91e4b137/sunglasses.png?v=1646677057956>"
);
buttonsunglasses.position(750, 790);
buttonsunglasses.size(80, 50);
buttonsunglasses.mousePressed(function () {
sunglassesstate = !sunglassesstate;
});
buttonFXPorklife = createImg("images/questionmark.png");
buttonFXPorklife.position(900, 795);
buttonFXPorklife.size(42, 40);
buttonFXPorklife.mousePressed(myFilter);
}
function windowResized() {
  // Re-anchor the four control buttons when the window changes size;
  // horizontal positions scale with windowWidth, vertical offsets are fixed.
  buttonclown.position(windowWidth / 3, 775);
  buttonsunglasses.position(6.2 * windowWidth / 15, 790);
  button.position(8 * windowWidth / 15, 795);
  buttonFXPorklife.position(9 * windowWidth / 15, 800);
}
function myFilter() {
  // Pick a random p5 filter for draw() to apply via the global `sfilter`.
  // Fixed: "GRAY" was a raw string while the others were p5 constants; since
  // the p5 constant GRAY is the lowercase string 'gray', filter("GRAY")
  // would never match. Use the constant like the rest of the list.
  let filters = [GRAY, INVERT, OPAQUE, BLUR];
  sfilter = random(filters);
  console.log(sfilter);
}
// Callback passed to ml5.bodyPose(); fires once the model has loaded.
function modelReady() {
console.log("BodyPose Model Loaded");
}
// Main p5 render loop: video backdrop, optional filter, pose overlays,
// and the toggleable clown-nose / sunglasses / blur effects.
function draw() {
// Draw the live video as the full-canvas backdrop each frame.
image(video, 0, 0, width, height);
if (sfilter === "RAT") {
// NOTE(review): myFilter() never produces "RAT", so this branch looks
// unreachable as written — confirm whether it is a leftover experiment.
// Also note translate/scale here run AFTER the image call, so they would
// mirror the overlays below, not this half-width image — verify intent.
image(video, 0, 0, width / 2, height);
translate(width, 0);
scale(-1, 1);
} else if (sfilter) {
// Apply whichever p5 filter myFilter() last picked.
filter(sfilter);
}
noStroke();
drawKeypoints();
drawSkeleton();
if (clownnosestate === true) {
drawclownnose();
}
if (sunglassesstate === true) {
drawsunglasses();
}
// FXPorklife() toggles this global between 1 and -1; -1 means blur on.
if (FXPorklifeState === -1) {
filter(BLUR, 5);
}
}
// Draw a red dot over every confidently-detected body keypoint.
// Updated for the ml5 v1 bodyPose result shape: each entry of `poses` IS the
// pose (no `.pose` wrapper), and keypoints expose `.x`, `.y`, `.confidence`
// instead of poseNet's `.position.x/.y` and `.score`.
function drawKeypoints() {
  for (let i = 0; i < poses.length; i++) {
    let pose = poses[i];
    for (let j = 0; j < pose.keypoints.length; j++) {
      let keypoint = pose.keypoints[j];
      // Skip low-confidence detections to avoid jittery phantom dots.
      if (keypoint.confidence > 0.2) {
        fill(255, 0, 0);
        noStroke();
        ellipse(keypoint.x, keypoint.y, 10, 10);
      }
    }
  }
}
// Draw red lines connecting the detected keypoints of each pose.
// Updated for ml5 v1 bodyPose: poses no longer carry a per-pose `skeleton`
// array of point pairs. Instead, bodyPose.getSkeleton() returns pairs of
// keypoint INDICES, which we look up in each pose's keypoints array.
function drawSkeleton() {
  const connections = bodyPose.getSkeleton();
  for (let i = 0; i < poses.length; i++) {
    const pose = poses[i];
    for (let j = 0; j < connections.length; j++) {
      const partA = pose.keypoints[connections[j][0]];
      const partB = pose.keypoints[connections[j][1]];
      // Only draw a bone when both endpoints are confidently detected.
      if (partA.confidence > 0.2 && partB.confidence > 0.2) {
        stroke(255, 0, 0);
        line(partA.x, partA.y, partB.x, partB.y);
      }
    }
  }
}
// Overlay the clown-nose image on every detected face.
// Updated for ml5 v1 bodyPose: results are flat (`poses[i]`, no `.pose`),
// so the nose is located by name in the keypoints array.
function drawclownnose() {
  for (let i = 0; i < poses.length; i++) {
    let pose = poses[i];
    const nose = pose.keypoints.find((k) => k.name === "nose");
    // Guard against frames where the nose is missing or low-confidence.
    if (nose && nose.confidence > 0.2) {
      image(clownnose, nose.x - 120, nose.y - 125, 250, 250);
    }
  }
}
// Overlay the sunglasses image, anchored to the left eye of each pose.
// Updated for ml5 v1 bodyPose: results are flat (`poses[i]`, no `.pose`),
// and MoveNet keypoint names are snake_case ("left_eye"), so the old
// `pose.leftEye` lookup would be undefined.
function drawsunglasses() {
  for (let i = 0; i < poses.length; i++) {
    let pose = poses[i];
    const leftEye = pose.keypoints.find((k) => k.name === "left_eye");
    // Guard against frames where the eye is missing or low-confidence.
    if (leftEye && leftEye.confidence > 0.2) {
      image(sunglasses, leftEye.x - 160, leftEye.y - 100, 250, 250);
    }
  }
}
// Save one frame of the canvas to disk as a PNG named "selfie".
function takePhoto() {
saveFrames("selfie", "png", 1, 1);
}
// Toggle the blur effect: flips the global FXPorklifeState between 1 and -1
// (draw() blurs the canvas while it is -1).
function FXPorklife() {
  FXPorklifeState = -FXPorklifeState;
}