之前的教程里,给大家讲过基于 DragonBoard 410c 开发板搭建 Kinect 应用实现自动跟随机器人,今天我们来讲讲实现人脸识别。
一、核心代码:
//------------------------------------------------------------------------------
//
// copyright (c) microsoft corporation. all rights reserved.
//
//------------------------------------------------------------------------------
// defines the entry point for the application.
//
#include "stdafx.h"
#include "SingleFace.h"
#include "EggAvatar.h"
#include <FaceTrackLib.h>
#include "FTHelper.h"
class singleface
{
public:
singleface()
: m_hinst(null)
, m_hwnd(null)
, m_hacceltable(null)
, m_pimagebuffer(null)
, m_pvideobuffer(null)
, m_depthtype(nui_image_type_depth_and_player_index)
, m_colortype(nui_image_type_color)
, m_depthres(nui_image_resolution_320x240)
, m_colorres(nui_image_resolution_640x480)
, m_bnearmode(true)
, m_bseatedskeletonmode(false)
{}
int run(hinstance hinst, pwstr lpcmdline, int ncmdshow);
protected:
bool initinstance(hinstance hinst, pwstr lpcmdline, int ncmdshow);
void parsecmdstring(pwstr lpcmdline);
void uninitinstance();
atom registerclass(pcwstr szwindowclass);
static lresult callback wndprocstatic(hwnd hwnd, uint message, wparam wparam, lparam lparam);
lresult callback wndproc(hwnd hwnd, uint message, wparam wparam, lparam lparam);
static int_ptr callback about(hwnd hwnd, uint message, wparam wparam, lparam lparam);
bool paintwindow(hdc hdc, hwnd hwnd);
bool showvideo(hdc hdc, int width, int height, int originx, int originy);
bool showeggavatar(hdc hdc, int width, int height, int originx, int originy);
static void fthelpercallingback(lpvoid lpparam);
static int const maxloadstringchars = 100;
hinstance m_hinst;
hwnd m_hwnd;
haccel m_hacceltable;
eggavatar m_eggavatar;
fthelper m_fthelper;
iftimage* m_pimagebuffer;
iftimage* m_pvideobuffer;
nui_image_type m_depthtype;
nui_image_type m_colortype;
nui_image_resolution m_depthres;
nui_image_resolution m_colorres;
bool m_bnearmode;
bool m_bseatedskeletonmode;
};
// run the singleface application.
int singleface::run(hinstance hinst, pwstr lpcmdline, int ncmdshow)
{
msg msg = {static_cast(0), static_cast(0), static_cast(-1)};
if (initinstance(hinst, lpcmdline, ncmdshow))
{
// main message loop:
while (getmessage(&msg, null, 0, 0))
{
if (!translateaccelerator(msg.hwnd, m_hacceltable, &msg))
{
translatemessage(&msg);
dispatchmessage(&msg);
}
}
}
uninitinstance();
return (int)msg.wparam;
}
// in this function, we save the instance handle, then create and display the main program window.
bool singleface::initinstance(hinstance hinstance, pwstr lpcmdline, int ncmdshow)
{
m_hinst = hinstance; // store instance handle in our global variable
parsecmdstring(lpcmdline);
wchar sztitle[maxloadstringchars]; // the title bar text
loadstring(m_hinst, ids_app_title, sztitle, arraysize(sztitle));
static const pcwstr res_map[] = { l80x60, l320x240, l640x480, l1280x960 };
static const pcwstr img_map[] = { lplayerid, lrgb, lyuv, lyuv_raw, ldepth };
// add mode params in title
wchar sztitlecomplete[max_path];
swprintf_s(sztitlecomplete, l%s -- depth:%s:%s color:%s:%s nearmode:%s, seatedskeleton:%s, sztitle,
img_map[m_depthtype], (m_depthres < 0)? lerror: res_map[m_depthres], img_map[m_colortype], (m_colorres release();
m_pimagebuffer = null;
}
if (m_pvideobuffer)
{
m_pvideobuffer->release();
m_pvideobuffer = null;
}
}
// register the window class.
atom singleface::registerclass(pcwstr szwindowclass)
{
wndclassex wcex = {0};
wcex.cbsize = sizeof(wndclassex);
wcex.style = cs_hredraw | cs_vredraw;
wcex.lpfnwndproc = &singleface::wndprocstatic;
wcex.cbclsextra = 0;
wcex.cbwndextra = 0;
wcex.hinstance = m_hinst;
wcex.hicon = loadicon(m_hinst, makeintresource(idi_singleface));
wcex.hcursor = loadcursor(null, idc_arrow);
wcex.hbrbackground = (hbrush)(color_window+1);
wcex.lpszmenuname = makeintresource(idc_singleface);
wcex.lpszclassname = szwindowclass;
return registerclassex(&wcex);
}
lresult callback singleface::wndprocstatic(hwnd hwnd, uint message, wparam wparam, lparam lparam)
{
static singleface* pthis = null; // cheating, but since there is just one window now, it will suffice.
if (wm_create == message)
{
pthis = reinterpret_cast(reinterpret_cast(lparam)->lpcreateparams);
}
return pthis ? pthis->wndproc(hwnd, message, wparam, lparam) : defwindowproc(hwnd, message, wparam, lparam);
}
// function: wndproc(hwnd, uint, wparam, lparam)
//
// purpose: processes messages for the main window.
//
// wm_command - process the application menu
// wm_keyup - exit in response to esc key
// wm_paint - paint the main window
// wm_destroy - post a quit message and return
lresult callback singleface::wndproc(hwnd hwnd, uint message, wparam wparam, lparam lparam)
{
uint wmid, wmevent;
paintstruct ps;
hdc hdc;
switch (message)
{
case wm_command:
wmid = loword(wparam);
wmevent = hiword(wparam);
// parse the menu selections:
switch (wmid)
{
case idm_about:
dialogbox(m_hinst, makeintresource(idd_aboutbox), hwnd, about);
break;
case idm_exit:
postquitmessage(0);
break;
default:
return defwindowproc(hwnd, message, wparam, lparam);
}
break;
case wm_keyup:
if (wparam == vk_escape)
{
postquitmessage(0);
}
break;
case wm_paint:
hdc = beginpaint(hwnd, &ps);
// draw the avatar window and the video window
paintwindow(hdc, hwnd);
endpaint(hwnd, &ps);
break;
case wm_destroy:
postquitmessage(0);
break;
default:
return defwindowproc(hwnd, message, wparam, lparam);
}
return 0;
}
// message handler for about box.
int_ptr callback singleface::about(hwnd hdlg, uint message, wparam wparam, lparam lparam)
{
unreferenced_parameter(lparam);
switch (message)
{
case wm_initdialog:
return (int_ptr)true;
case wm_command:
if (loword(wparam) == idok || loword(wparam) == idcancel)
{
enddialog(hdlg, loword(wparam));
return (int_ptr)true;
}
break;
}
return (int_ptr)false;
}
// drawing the video window
bool singleface::showvideo(hdc hdc, int width, int height, int originx, int originy)
{
bool ret = true;
// now, copy a fraction of the camera image into the screen.
iftimage* colorimage = m_fthelper.getcolorimage();
if (colorimage)
{
int iwidth = colorimage->getwidth();
int iheight = colorimage->getheight();
if (iwidth > 0 && iheight > 0)
{
int itop = 0;
int ibottom = iheight;
int ileft = 0;
int iright = iwidth;
// keep a separate buffer.
if (m_pvideobuffer && succeeded(m_pvideobuffer->allocate(iwidth, iheight, ftimageformat_uint8_b8g8r8a8)))
{
// copy do the video buffer while converting bytes
colorimage->copyto(m_pvideobuffer, null, 0, 0);
// compute the best approximate copy ratio.
float w1 = (float)iheight * (float)width;
float w2 = (float)iwidth * (float)height;
if (w2 > w1 && height > 0)
{
// video image too wide
float wx = w1/height;
ileft = (int)max(0, m_fthelper.getxcenterface() - wx / 2);
iright = ileft + (int)wx;
if (iright > iwidth)
{
iright = iwidth;
ileft = iright - (int)wx;
}
}
else if (w1 > w2 && width > 0)
{
// video image too narrow
float hy = w2/width;
itop = (int)max(0, m_fthelper.getycenterface() - hy / 2);
ibottom = itop + (int)hy;
if (ibottom > iheight)
{
ibottom = iheight;
itop = ibottom - (int)hy;
}
}
int const bmppixsize = m_pvideobuffer->getbytesperpixel();
setstretchbltmode(hdc, halftone);
bitmapinfo bmi = {sizeof(bitmapinfo), iwidth, iheight, 1, static_cast(bmppixsize * char_bit), bi_rgb, m_pvideobuffer->getstride() * iheight, 5000, 5000, 0, 0};
if (0 == stretchdibits(hdc, originx, originy, width, height,
ileft, ibottom, iright-ileft, itop-ibottom, m_pvideobuffer->getbuffer(), &bmi, dib_rgb_colors, srccopy))
{
ret = false;
}
}
}
}
return ret;
}
// drawing code
bool singleface::showeggavatar(hdc hdc, int width, int height, int originx, int originy)
{
static int errcount = 0;
bool ret = false;
if (m_pimagebuffer && succeeded(m_pimagebuffer->allocate(width, height, ftimageformat_uint8_b8g8r8a8)))
{
memset(m_pimagebuffer->getbuffer(), 0, m_pimagebuffer->getstride() * height); // clear to black
m_eggavatar.setscaleandtranslationtowindow(height, width);
m_eggavatar.drawimage(m_pimagebuffer);
bitmapinfo bmi = {sizeof(bitmapinfo), width, height, 1, static_cast(m_pimagebuffer->getbytesperpixel() * char_bit), bi_rgb, m_pimagebuffer->getstride() * height, 5000, 5000, 0, 0};
errcount += (0 == stretchdibits(hdc, 0, 0, width, height, 0, 0, width, height, m_pimagebuffer->getbuffer(), &bmi, dib_rgb_colors, srccopy));
ret = true;
}
return ret;
}
// draw the egg head and the camera video with the mask superimposed.
bool singleface::paintwindow(hdc hdc, hwnd hwnd)
{
static int errcount = 0;
bool ret = false;
rect rect;
getclientrect(hwnd, &rect);
int width = rect.right - rect.left;
int height = rect.bottom - rect.top;
int halfwidth = width/2;
// show the video on the right of the window
errcount += !showvideo(hdc, width - halfwidth, height, halfwidth, 0);
// draw the egg avatar on the left of the window
errcount += !showeggavatar(hdc, halfwidth, height, 0, 0);
return ret;
}
/*
* the face tracker helper class is generic. it will call back this function
* after a face has been successfully tracked. the code in the call back passes the parameters
* to the egg avatar, so it can be animated.
*/
void singleface::fthelpercallingback(pvoid pvoid)
{
singleface* papp = reinterpret_cast(pvoid);
if (papp)
{
iftresult* presult = papp->m_fthelper.getresult();
if (presult && succeeded(presult->getstatus()))
{
float* pau = null;
uint numau;
presult->getaucoefficients(&pau, &numau);
papp->m_eggavatar.setcandideau(pau, numau);
float scale;
float rotationxyz[3];
float translationxyz[3];
presult->get3dpose(&scale, rotationxyz, translationxyz);
papp->m_eggavatar.settranslations(translationxyz[0], translationxyz[1], translationxyz[2]);
papp->m_eggavatar.setrotations(rotationxyz[0], rotationxyz[1], rotationxyz[2]);
}
}
}
void singleface::parsecmdstring(pwstr lpcmdline)
{
const wchar key_depth[] = l-depth;
const wchar key_color[] = l-color;
const wchar key_near_mode[] = l-nearmode;
const wchar key_default_distance_mode[] = l-defaultdistancemode;
const wchar key_seated_skeleton_mode[] = l-seatedskeleton;
const wchar str_nui_image_type_depth[] = ldepth;
const wchar str_nui_image_type_depth_and_player_index[] = lplayerid;
const wchar str_nui_image_type_color[] = lrgb;
const wchar str_nui_image_type_color_yuv[] = lyuv;
const wchar str_nui_image_resolution_80x60[] = l80x60;
const wchar str_nui_image_resolution_320x240[] = l320x240;
const wchar str_nui_image_resolution_640x480[] = l640x480;
const wchar str_nui_image_resolution_1280x960[] = l1280x960;
enum token_enum
{
token_error,
token_depth,
token_color,
token_nearmode,
token_defaultdistancemode,
token_seatedskeleton
};
int argc = 0;
lpwstr *argv = commandlinetoargvw(lpcmdline, &argc);
for(int i = 0; i < argc; i++)
{
nui_image_type* ptype = null;
nui_image_resolution* pres = null;
token_enum tokentype = token_error;
pwchar context = null;
pwchar token = wcstok_s(argv[i], l:, &context);
if(0 == wcsncmp(token, key_depth, arraysize(key_depth)))
{
tokentype = token_depth;
ptype = &m_depthtype;
pres = &m_depthres;
}
else if(0 == wcsncmp(token, key_color, arraysize(key_color)))
{
tokentype = token_color;
ptype = &m_colortype;
pres = &m_colorres;
}
else if(0 == wcsncmp(token, key_near_mode, arraysize(key_near_mode)))
{
tokentype = token_nearmode;
m_bnearmode = true;
}
else if(0 == wcsncmp(token, key_default_distance_mode, arraysize(key_default_distance_mode)))
{
tokentype = token_defaultdistancemode;
m_bnearmode = false;
}
else if(0 == wcsncmp(token, key_seated_skeleton_mode, arraysize(key_seated_skeleton_mode)))
{
tokentype = token_seatedskeleton;
m_bseatedskeletonmode = true;
}
if(tokentype == token_depth || tokentype == token_color)
{
_assert(ptype != null && pres != null);
while((token = wcstok_s(null, l:, &context)) != null)
{
if(0 == wcsncmp(token, str_nui_image_type_depth, arraysize(str_nui_image_type_depth)))
{
*ptype = nui_image_type_depth;
}
else if(0 == wcsncmp(token, str_nui_image_type_depth_and_player_index, arraysize(str_nui_image_type_depth_and_player_index)))
{
*ptype = nui_image_type_depth_and_player_index;
}
else if(0 == wcsncmp(token, str_nui_image_type_color, arraysize(str_nui_image_type_color)))
{
*ptype = nui_image_type_color;
}
else if(0 == wcsncmp(token, str_nui_image_type_color_yuv, arraysize(str_nui_image_type_color_yuv)))
{
*ptype = nui_image_type_color_yuv;
}
else if(0 == wcsncmp(token, str_nui_image_resolution_80x60, arraysize(str_nui_image_resolution_80x60)))
{
*pres = nui_image_resolution_80x60;
}
else if(0 == wcsncmp(token, str_nui_image_resolution_320x240, arraysize(str_nui_image_resolution_320x240)))
{
*pres = nui_image_resolution_320x240;
}
else if(0 == wcsncmp(token, str_nui_image_resolution_640x480, arraysize(str_nui_image_resolution_640x480)))
{
*pres = nui_image_resolution_640x480;
}
else if(0 == wcsncmp(token, str_nui_image_resolution_1280x960, arraysize(str_nui_image_resolution_1280x960)))
{
*pres = nui_image_resolution_1280x960;
}
}
}
}
if(argv) localfree(argv);
}
// program's main entry point
int apientry wwinmain(hinstance hinstance, hinstance hprevinstance, pwstr lpcmdline, int ncmdshow)
{
unreferenced_parameter(hprevinstance);
singleface app;
heapsetinformation(null, heapenableterminationoncorruption, null, 0);
return app.run(hinstance, lpcmdline, ncmdshow);
}
二、实测效果图:(活生生把美女脸“切割”出了形状,哈哈~!)
三、例程工程分享:
http://pan.baidu.com/s/1qyiv7hu
ROHS指令(标准)中实施环境管理物质适用范围
iphone14配置确认 iphone14什么配置
华为云等保合规方案,助力企业最快30天过等保
单相电源滤波器作用 浅谈单相电源滤波器应用
2019年Q4 AMOLED智能机面板出货排行榜: 维信诺成国内No.1
基于Dragonboard 410c的kinect应用系列之五——脸部识别实现代码
LED光疗应用于医美市场备受看好
单相双值电容电动机的反转方法
存储器和新兴非易失性存储器技术的特点
人工智能如何帮助打击网络犯罪
人工智能和机器人正在攻占零售链中的各个部分
大数据的特点有哪些
芯耀辉:高速接口IP赋能数字新世界
我国动力电池如何实现清洁拆解 哪种方式更安全
佳能6D2最新消息:佳能6D Mark II规格曝光 比现款6D大
小米6最新消息:4月11日小米6携手小米平板3齐亮相,恭候你的大驾
大声开麦:简述LED显示屏的故障高发与频繁维护问题
锐意进取,炫力出彩!英特尔持续耕耘锐炫显卡
最新3D打印生物体可植入体内释放受控剂量的药物
钟鼎式分样器用途广泛,是一种准确度较高的粮食分样器