That title doesn't even tell the story right.
Just to be clear about my specs:
I'm on OS X 10.11.1
openFrameworks v0.9.0
Xcode 7.2
using these addons:
ofxCv
ofxFaceTracker
ofxGui
ofxKinect
ofxOpenCv
ofxPS3EyeGrabber
What I've done is take the example-calibrated project from the ofxFaceTracker addon and modify it so that instead of

    cam.bind();
    physicalMesh.drawFaces();
    cam.unbind();

I create my own ofImage and bind its texture around the face mesh (see the snippet right after this paragraph). I'm also grabbing from a PS3 Eye camera via the ofxPS3EyeGrabber addon (via @bakercp). Everything works fine, except that I'm displaying the final image on a screen whose lowest resolution setting is 800x600, while the PS3 Eye's native and maximum resolution is 640x480. Because of that, I've kept everything else in a 640x480 window: my ofImage is allocated at 640x480, and even example-calibrated itself runs at 640x480.
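Concretely, that part of draw() now looks like this (the same lines appear in the full listing below):

    noise.getTexture().bind();
    physicalMesh.drawFaces();
    noise.getTexture().unbind();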
The problem is that when I scale everything up to 800x600, physicalMesh.drawFaces() still draws as if it were in a 640x480 window, missing the face it is supposed to cover by whatever the (-x, -y) difference is.
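Since 640x480 and 800x600 are both 4:3, the difference works out to a uniform 1.25x scale (800/640 = 600/480 = 1.25). I suspect something like this around the mesh draw is what I'm missing, but this is just a sketch of my guess, not something I've gotten working:

    // guess: scale the mesh drawing from the 640x480 camera space up to the 800x600 window
    ofPushMatrix();
    ofScale(800.0f / 640.0f, 600.0f / 480.0f); // 1.25 on both axes
    // ...bind the texture and call physicalMesh.drawFaces() here, as in draw() below...
    ofPopMatrix();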
Here is my code:
main.cpp
#include "ofAppGlutWindow.h"
int main() {
ofAppGlutWindow window;
ofSetupOpenGL(&window, 800, 600, OF_WINDOW); // i changed this from (640, 480)
ofRunApp(new ofApp());
}
ofApp.h
#pragma once

#include "ofMain.h"
#include "ofxCv.h"
#include "ofxFaceTracker.h"
#include "ofxPS3EyeGrabber.h"

class ofApp : public ofBaseApp {
public:
    void setup();
    void updatePhysicalMesh();
    void update();
    void draw();
    void keyPressed(int key);
    void imageNoise();

    ofImage noise;
    int whiteBlack;
    int lineY;
    ofVideoGrabber ps3cam;
    ofxFaceTracker tracker;
    ofxCv::Calibration calibration;
    ofLight light;
    ofMesh physicalMesh;
    ofPixels pixelsRGB;
    bool swapBackground;
};
ofApp.cpp
#include "ofApp.h"
#include "ofMeshUtils.h"
#include "ofxPS3EyeGrabber.h"
using namespace ofxCv;
using namespace cv;
void ofApp::setup() {
ofSetVerticalSync(true);
// ps3EyeCamera
cout << "elapsed time: " << ofGetElapsedTimeMillis() << endl;
ps3cam.setGrabber(std::make_shared<ofxPS3EyeGrabber>());
ps3cam.setDesiredFrameRate(45);
ps3cam.setup(800, 600); // this is normally (640, 480)
ps3cam.getGrabber<ofxPS3EyeGrabber>()->setAutogain(true);
ps3cam.getGrabber<ofxPS3EyeGrabber>()->setAutoWhiteBalance(true);
pixelsRGB.allocate(ps3cam.getWidth(), ps3cam.getHeight(), 3);
// tracker
tracker.setup();
calibration.load("mbp-isight.yml");
glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE);
light.setPosition(100, 100, 1000);
// noise
noise.getPixels().allocate(800, 600,1); // i've changed this allocation from (640, 480)
whiteBlack = 255;
ofSetColor(255);
}
void ofApp::updatePhysicalMesh() {
    // 1. load object and image points as Point2f/3f
    vector<Point3f> objectPoints;
    vector<Point2f> imagePoints;
    for(int i = 0; i < tracker.size(); i++) {
        objectPoints.push_back(toCv(tracker.getObjectPoint(i)));
        imagePoints.push_back(toCv(tracker.getImagePoint(i)));
    }

    // 2. guess the rotation and translation of the face
    Mat cameraMatrix = calibration.getDistortedIntrinsics().getCameraMatrix();
    Mat distCoeffs = calibration.getDistCoeffs();
    Mat rvec, tvec;
    solvePnP(Mat(objectPoints), Mat(imagePoints),
             cameraMatrix, distCoeffs,
             rvec, tvec);

    // 3. reproject using the guess, and fit to the actual image location
    vector<ofVec3f> fitWorldPoints;
    Mat cameraMatrixInv = cameraMatrix.inv();
    Mat rmat;
    Rodrigues(rvec, rmat);
    for(int i = 0; i < objectPoints.size(); i++) {
        Point2d imgImg = imagePoints[i];
        Point3d objObj = objectPoints[i];
        Point3d imgHom(imgImg.x, imgImg.y, 1.); // img->hom
        Point3d imgWor = (Point3f) Mat(cameraMatrixInv * Mat(imgHom)); // hom->wor
        Point3d objWor = (Point3d) Mat(tvec + rmat * Mat(objObj)); // obj->wor
        Point3d fitWor = intersectPointRay(objWor, imgWor); // scoot it over
        // if it was projected on the wrong side, flip it over
        if(fitWor.z < 0) {
            fitWor *= -1;
        }
        fitWorldPoints.push_back(toOf(fitWor));
        // convert down to image space coordinates
        //Point3d fitHom = (Point3d) Mat(cameraMatrix * Mat(fitWor)); // wor->hom
        //Point2d fitImg(fitHom.x / fitHom.z, fitHom.y / fitHom.z); // hom->img
    }

    // 4. use the resulting 3d points to build a mesh with normals
    physicalMesh = convertFromIndices(tracker.getMesh(fitWorldPoints));
    physicalMesh.setMode(OF_PRIMITIVE_TRIANGLES);
    buildNormals(physicalMesh);
}
void ofApp::update() {
    ps3cam.update();
    if(ps3cam.isFrameNew()) {
        tracker.update(toCv(ps3cam));
        // face tracker mesh
        if(tracker.getFound()) {
            updatePhysicalMesh();
        }
    }

    // image noise
    lineY = ofRandom(ofGetHeight());
    whiteBlack = ofRandom(256);
    for (int x = 0; x < noise.getWidth(); ++x) {
        for (int y = 0; y < noise.getHeight(); ++y) {
            int index = noise.getPixels().getPixelIndex(x, y);
            if (y != lineY) {
                noise.getPixels()[index] = ofRandom(256);
            }
            else {
                noise.getPixels()[index] = whiteBlack;
            }
        }
    }
    noise.update();

    // swap time and reset time
    if (ofGetMinutes() <= 15) {
        swapBackground = false;
    }
    else if (ofGetMinutes() > 15 && ofGetMinutes() <= 30) {
        swapBackground = true;
    }
    else if (ofGetMinutes() > 30 && ofGetMinutes() <= 45) {
        swapBackground = false;
    }
    else if (ofGetMinutes() > 45 && ofGetMinutes() <= 59) {
        swapBackground = true;
    }

    if (ofGetElapsedTimeMillis() >= 10000) {
        tracker.reset();
        ofResetElapsedTimeCounter();
    }
    cout << "elapsed time: " << ofGetElapsedTimeMillis() << endl;
}
void ofApp::draw() {
    if (swapBackground == false) {
        ps3cam.draw(0, 0, 800, 600); // drawing the 640x480 ps3cam stretched to 800x600

        // overlay text (FPS readouts commented out)
        std::stringstream ss;
        // ss << " App FPS: " << ofGetFrameRate() << std::endl;
        // ss << " Cam FPS: " << ps3cam.getGrabber<ofxPS3EyeGrabber>()->getFPS() << std::endl;
        // ss << "Real FPS: " << ps3cam.getGrabber<ofxPS3EyeGrabber>()->getActualFPS() << std::endl;
        ss << "#ofNoiseFace" << std::endl;
        ss << "#openFrameworks" << std::endl;
        ss << "@mosspassion";
        ofDrawBitmapStringHighlight(ss.str(), ofPoint(10, 20));
    }
    else {
        noise.draw(0, 0); // already allocated 800x600, no need to scale

        // overlay text (FPS readouts commented out)
        std::stringstream ss;
        // ss << " App FPS: " << ofGetFrameRate() << std::endl;
        // ss << " Cam FPS: " << ps3cam.getGrabber<ofxPS3EyeGrabber>()->getFPS() << std::endl;
        // ss << "Real FPS: " << ps3cam.getGrabber<ofxPS3EyeGrabber>()->getActualFPS() << std::endl;
        ss << "#ofNoiseFace" << std::endl;
        ss << "#openFrameworks" << std::endl;
        ss << "@mosspassion";
        ofDrawBitmapStringHighlight(ss.str(), ofPoint(10, 20));
    }

    // tracker
    if(tracker.getFound()) {
        ofMesh objectMesh = tracker.getObjectMesh();
        ofMesh meanMesh = tracker.getMeanObjectMesh();
        ofSetupScreenOrtho(800, 600, -1000, 1000); // this was also (640, 480, -1000, 1000)
        ofScale(5, 5, 5);
        calibration.getDistortedIntrinsics().loadProjectionMatrix();

        // swap background and face
        if (swapBackground == false) {
            ofEnableLighting();
            light.enable();
            noise.getTexture().bind();
            physicalMesh.drawFaces();
            noise.getTexture().unbind();
            ofDisableLighting();
        }
        else {
            ps3cam.bind();
            physicalMesh.drawFaces();
            ps3cam.unbind();
        }
    }
}
void ofApp::keyPressed(int key) {
    if(key == 'r') {
        tracker.reset();
    }
    if (key == 'f') {
        ofToggleFullscreen();
    }
}
So, the result is this (on an external display, in OF_FULLSCREEN via ofToggleFullscreen()):
[screenshot: that noise mask should be drawn right on my face, but it lands offset]
My instructor, @bakercp, has pointed me to ofRectangle for scaling while keeping the aspect ratio, and from the example /openFrameworks/examples/graphics/rectangleAlignmentAndScaling it looks like it can do exactly what I need; I just have no idea how to integrate it.
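From reading that example, my rough guess at the integration is something like this (untested sketch; camRect and screenRect are just names I made up):

    // sketch: fit the 640x480 camera frame into the current window, keeping the aspect ratio
    ofRectangle camRect(0, 0, 640, 480);
    ofRectangle screenRect(0, 0, ofGetWidth(), ofGetHeight());
    camRect.scaleTo(screenRect, OF_SCALEMODE_FIT); // letterboxes if the aspect ratios differ
    ps3cam.draw(camRect.x, camRect.y, camRect.width, camRect.height);

But presumably the mesh drawing would need the same scale applied, and that's the part I can't figure out.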
Any help would be appreciated, even tips on cleaning up redundant things in my code. It's almost done!
Thank you,
Nicholas