#include "CFaceRecProc.h" #include #include #include "CAppInfo.h" #include "logproc.h" #include std::shared_ptr g_faceRecProcPtr = nullptr; CFaceRecProc::CFaceRecProc() { m_baseFeaturePtr = nullptr; m_pFaceDetector = nullptr; m_pFaceLandmarker = nullptr; m_pFaceRecognizer = nullptr; m_pFaceAntiSpoofing = nullptr; m_pPoseEstimator = nullptr; QFileInfo filePath("face.cfgl"); std::string prefix_path = filePath.absolutePath().toStdString(); std::string model_path = ""; model_path = "./models/"; // myDebug()<SetThreshold(0.3, 0.90); seeta::ModelSetting esd_setting; esd_setting.set_device( SEETA_DEVICE_AUTO ); esd_setting.set_id( 0 ); esd_setting.append((model_path + "_acc905_squeezenet_v15_90_90_closs1_2DB_4class_214000_1010.sta").c_str()); m_pEyeStateDetector = new seeta::EyeStateDetector( esd_setting); if(m_pEyeStateDetector == nullptr) { myServerLog()<<"Init EyeStateDetector failed"; } seeta::ModelSetting ps_setting; ps_setting.set_device(SEETA_DEVICE_AUTO); ps_setting.append((model_path + "SeetaPoseEstimation1.1.0.sta").c_str()); m_pPoseEstimator = new seeta::PoseEstimator(ps_setting); if(m_pPoseEstimator == nullptr) { myServerLog()<<"Init PoseEstimator failed"; } // myServerLog()<<"InitHandle succeed"; return 0; } void CFaceRecProc::destroyHandle() { if(m_pFaceDetector != nullptr) { delete m_pFaceDetector; m_pFaceDetector = nullptr; } if(m_pFaceLandmarker != nullptr) { delete m_pFaceLandmarker; m_pFaceLandmarker = nullptr; } if(m_pFaceRecognizer != nullptr) { delete m_pFaceRecognizer; m_pFaceRecognizer = nullptr; } if(m_pFaceAntiSpoofing != nullptr) { delete m_pFaceAntiSpoofing; m_pFaceAntiSpoofing = nullptr; } if(m_pEyeStateDetector != nullptr) { delete m_pEyeStateDetector; m_pEyeStateDetector = nullptr; } if(m_pPoseEstimator != nullptr) { delete m_pPoseEstimator; m_pPoseEstimator = nullptr; } } bool CFaceRecProc::getFaceCount(cv::Mat faceMat, int &nFaceCount) { try { SeetaImageData simg; simg.height = faceMat.rows; simg.width = faceMat.cols; simg.channels = faceMat.channels(); simg.data = faceMat.data; SeetaFaceInfoArray faces = m_pFaceDetector->detect(simg); nFaceCount = faces.size; return true; } catch (const std::exception &e) { m_sErrMsg = QString::fromLocal8Bit("人脸比对失败,%1").arg(e.what()); // myServerLog()<detect(simg); nfaceCount = faces.size; if(faces.size <= 0) { m_sErrMsg = QString::fromLocal8Bit("未检测到人脸"); return false; } else if(faces.size > 1) { m_sErrMsg = QString::fromLocal8Bit("检测到人脸人脸数量大于1"); return false; } std::shared_ptr feature(new float[m_pFaceRecognizer->GetExtractFeatureSize()]); SeetaPointF points[5]; m_pFaceLandmarker->mark(simg, faces.data[0].pos, points); m_pFaceRecognizer->Extract(simg, points, feature.get()); SeetaImageData base_simg; base_simg.height = baseMat.rows; base_simg.width = baseMat.cols; base_simg.channels = baseMat.channels(); base_simg.data = baseMat.data; SeetaFaceInfoArray base_faces = m_pFaceDetector->detect(base_simg); if(base_faces.size <= 0) { m_sErrMsg = QString::fromLocal8Bit("未检测到底照人脸"); return false; } else if(base_faces.size > 1) { m_sErrMsg = QString::fromLocal8Bit("检测到底照人脸人脸数量大于1"); return false; } std::shared_ptr base_feature(new float[m_pFaceRecognizer->GetExtractFeatureSize()]); SeetaPointF base_points[5]; m_pFaceLandmarker->mark(base_simg, base_faces.data[0].pos, base_points); m_pFaceRecognizer->Extract(base_simg, base_points, base_feature.get()); fScore = 0; fScore = m_pFaceRecognizer->CalculateSimilarity(feature.get(), base_feature.get()); return true; } catch (const std::exception &e) { m_sErrMsg = 
QString::fromLocal8Bit("人脸比对失败,%1").arg(e.what()); // myServerLog()<detect(simg); nFaceCount = faces.size; if(faces.size <= 0) { m_sErrMsg = QString::fromLocal8Bit("未检测到人脸"); return false; } else if(faces.size > 1) { m_sErrMsg = QString::fromLocal8Bit("检测人脸数量大于1"); return false; } double Detect = cv::getTickCount(); double detecttime = (Detect- begin) / cv::getTickFrequency(); myServerLog()<Estimate(simg, faces.data[0].pos, &yaw, &pitch, &roll); fYaw = yaw; fPitch = pitch; fRoll = roll; double PoseEstimator = cv::getTickCount(); detecttime = (PoseEstimator- Detect) / cv::getTickFrequency(); myServerLog()<detect(simg); nFaceCount = faces.size; if(faces.size <= 0) { m_sErrMsg = QString::fromLocal8Bit("未检测到人脸"); return false; } else if(faces.size > 1) { m_sErrMsg = QString::fromLocal8Bit("检测人脸数量大于1"); return false; } double Detect = cv::getTickCount(); double detecttime = (Detect- begin) / cv::getTickFrequency(); myServerLog()<mark(simg, faces.data[0].pos, points); SeetaPointF pts[5]; for(int m=0; m<5; m++) { pts[m].x = points[m].x; pts[m].y = points[m].y; } seeta::EyeStateDetector::EYE_STATE leftstate,rightstate; m_pEyeStateDetector->Detect(simg, pts, leftstate, rightstate); if(leftstate == seeta::EyeStateDetector::EYE_CLOSE || rightstate == seeta::EyeStateDetector::EYE_CLOSE) { bHasStatus = true; myServerLog()<Estimate(simg, faces.data[0].pos, &yaw, &pitch, &roll); double PoseEstimator = cv::getTickCount(); detecttime = (PoseEstimator- Detect) / cv::getTickFrequency(); myServerLog()< 20) { bHasStatus = true; } else { bHasStatus = false; } } else if(nFaceStatus == SL_HEAD_UP) { if(pitch > 10) { bHasStatus = true; } else { bHasStatus = false; } } else if(nFaceStatus == SL_HEAD_DOWN) { if(pitch < -10) { bHasStatus = true; } else { bHasStatus = false; } } } return true; } catch (const std::exception &e) { m_sErrMsg = QString::fromLocal8Bit("获取人脸属性失败,%1").arg(e.what()); return false; } } bool CFaceRecProc::setBaseImage(cv::Mat baseMat) { try { SeetaImageData simg; simg.height = baseMat.rows; simg.width = baseMat.cols; simg.channels = baseMat.channels(); simg.data = baseMat.data; SeetaFaceInfoArray faces = m_pFaceDetector->detect(simg); myDebug()<<"detect"; if(faces.size <= 0) { m_sErrMsg = QString::fromLocal8Bit("未检测到底照人脸"); return false; } else if(faces.size > 1) { m_sErrMsg = QString::fromLocal8Bit("检测到底照人脸人脸数量大于1"); return false; } m_baseFeaturePtr = std::shared_ptr(new float[m_pFaceRecognizer->GetExtractFeatureSize()]); SeetaPointF points[5]; m_pFaceLandmarker->mark(simg, faces.data[0].pos, points); myDebug()<<"mark"; m_pFaceRecognizer->Extract(simg, points, m_baseFeaturePtr.get()); myDebug()<<"Extract"; return true; } catch (const std::exception &e) { m_sErrMsg = QString::fromLocal8Bit("获取底照人脸属性失败,%1").arg(e.what()); return false; } } bool CFaceRecProc::setBaseImage(QString sBaseImge) { try { cv::Mat matBaseImg = cv::imread(sBaseImge.toLocal8Bit().toStdString()); return setBaseImage(matBaseImg); } catch (const std::exception &e) { m_sErrMsg = QString::fromLocal8Bit("设置底照失败,%1").arg(e.what()); return false; } } bool CFaceRecProc::compareWithBase(cv::Mat faceMat, int &nfaceCount, float &fScore) { try { if(m_baseFeaturePtr == nullptr) { m_sErrMsg = QString::fromLocal8Bit("请先设置底照,在进行比对"); return false; } SeetaImageData simg; simg.height = faceMat.rows; simg.width = faceMat.cols; simg.channels = faceMat.channels(); simg.data = faceMat.data; SeetaFaceInfoArray faces = m_pFaceDetector->detect(simg); nfaceCount = faces.size; if(faces.size <= 0) { m_sErrMsg = QString::fromLocal8Bit("未检测到人脸"); return 
// Overload: also returns the detected face rectangle.
bool CFaceRecProc::compareWithBase(cv::Mat faceMat, int &nfaceCount, float &fScore, SeetaRect &rt) {
    try {
        if(m_baseFeaturePtr == nullptr) {
            m_sErrMsg = QString::fromLocal8Bit("Please set the base image before comparing");
            return false;
        }
        SeetaImageData simg;
        simg.height   = faceMat.rows;
        simg.width    = faceMat.cols;
        simg.channels = faceMat.channels();
        simg.data     = faceMat.data;

        SeetaFaceInfoArray faces = m_pFaceDetector->detect(simg);
        nfaceCount = faces.size;
        if(faces.size <= 0) {
            m_sErrMsg = QString::fromLocal8Bit("No face detected");
            return false;
        } else if(faces.size > 1) {
            m_sErrMsg = QString::fromLocal8Bit("More than one face detected");
            return false;
        }
        rt = faces.data[0].pos;

        std::shared_ptr<float> feature(new float[m_pFaceRecognizer->GetExtractFeatureSize()],
                                       std::default_delete<float[]>());
        SeetaPointF points[5];
        m_pFaceLandmarker->mark(simg, faces.data[0].pos, points);
        m_pFaceRecognizer->Extract(simg, points, feature.get());

        fScore = 0;
        fScore = m_pFaceRecognizer->CalculateSimilarity(feature.get(), m_baseFeaturePtr.get());
        return true;
    } catch (const std::exception &e) {
        m_sErrMsg = QString::fromLocal8Bit("Face comparison failed: %1").arg(e.what());
        return false;
    }
}

// Liveness (anti-spoofing) check on the single face in faceMat.
bool CFaceRecProc::faceRealness(cv::Mat faceMat, bool &bRealness) {
    try {
        SeetaImageData simg;
        simg.height   = faceMat.rows;
        simg.width    = faceMat.cols;
        simg.channels = faceMat.channels();
        simg.data     = faceMat.data;

        SeetaFaceInfoArray faces = m_pFaceDetector->detect(simg);
        if(faces.size <= 0) {
            m_sErrMsg = QString::fromLocal8Bit("No face detected");
            return false;
        } else if(faces.size > 1) {
            m_sErrMsg = QString::fromLocal8Bit("More than one face detected");
            return false;
        }

        SeetaPointF points[5];
        m_pFaceLandmarker->mark(simg, faces.data[0].pos, points);
        m_pFaceAntiSpoofing->SetVideoFrameCount(1);
        auto status = m_pFaceAntiSpoofing->PredictVideo(simg, faces.data[0].pos, points);
        if(status == seeta::FaceAntiSpoofing::REAL) {
            bRealness = true;
        } else {
            bRealness = false;
        }
        return true;
    } catch (const std::exception &e) {
        m_sErrMsg = QString::fromLocal8Bit("Liveness detection failed: %1").arg(e.what());
        return false;
    }
}

// Overload: comparison and eye-state check are individually enabled by the two flags.
bool CFaceRecProc::compareWithBase(cv::Mat faceMat, int &nfaceCount, float &fScore, bool &bEyeClose,
                                   bool bIsCampare, bool bIsCheckEye) {
    try {
        if(m_baseFeaturePtr == nullptr) {
            m_sErrMsg = QString::fromLocal8Bit("Please set the base image before comparing");
            return false;
        }
        SeetaImageData simg;
        simg.height   = faceMat.rows;
        simg.width    = faceMat.cols;
        simg.channels = faceMat.channels();
        simg.data     = faceMat.data;

        SeetaFaceInfoArray faces = m_pFaceDetector->detect(simg);
        nfaceCount = faces.size;
        if((bIsCampare || bIsCheckEye) && nfaceCount == 1) {
            std::shared_ptr<float> feature(new float[m_pFaceRecognizer->GetExtractFeatureSize()],
                                           std::default_delete<float[]>());
            SeetaPointF points[5];
            m_pFaceLandmarker->mark(simg, faces.data[0].pos, points);
            if(bIsCampare) {
                m_pFaceRecognizer->Extract(simg, points, feature.get());
                fScore = 0;
                fScore = m_pFaceRecognizer->CalculateSimilarity(feature.get(), m_baseFeaturePtr.get());
            }
            if(bIsCheckEye) {
                SeetaPointF pts[5];
                for(int m = 0; m < 5; m++) {
                    pts[m].x = points[m].x;
                    pts[m].y = points[m].y;
                }
                seeta::EyeStateDetector::EYE_STATE leftstate, rightstate;
                m_pEyeStateDetector->Detect(simg, pts, leftstate, rightstate);
                if(leftstate == seeta::EyeStateDetector::EYE_CLOSE || rightstate == seeta::EyeStateDetector::EYE_CLOSE) {
                    bEyeClose = true;
                } else {
                    bEyeClose = false;
                }
            }
        }
        return true;
    } catch (const std::exception &e) {
        m_sErrMsg = QString::fromLocal8Bit("Face comparison failed: %1").arg(e.what());
        myDebug()<<m_sErrMsg;
        return false;
    }
}
// Overload: compares with the base image and also reports whether either eye is
// closed (assumed signature: the original declaration was lost).
bool CFaceRecProc::compareWithBase(cv::Mat faceMat, int &nfaceCount, float &fScore, bool &bEyeClose) {
    try {
        if(m_baseFeaturePtr == nullptr) {
            m_sErrMsg = QString::fromLocal8Bit("Please set the base image before comparing");
            return false;
        }
        SeetaImageData simg;
        simg.height   = faceMat.rows;
        simg.width    = faceMat.cols;
        simg.channels = faceMat.channels();
        simg.data     = faceMat.data;

        SeetaFaceInfoArray faces = m_pFaceDetector->detect(simg);
        nfaceCount = faces.size;
        if(faces.size <= 0) {
            m_sErrMsg = QString::fromLocal8Bit("No face detected");
            return false;
        } else if(faces.size > 1) {
            m_sErrMsg = QString::fromLocal8Bit("More than one face detected");
            return false;
        }

        std::shared_ptr<float> feature(new float[m_pFaceRecognizer->GetExtractFeatureSize()],
                                       std::default_delete<float[]>());
        SeetaPointF points[5];
        m_pFaceLandmarker->mark(simg, faces.data[0].pos, points);
        m_pFaceRecognizer->Extract(simg, points, feature.get());

        fScore = 0;
        fScore = m_pFaceRecognizer->CalculateSimilarity(feature.get(), m_baseFeaturePtr.get());

        SeetaPointF pts[5];
        for(int m = 0; m < 5; m++) {
            pts[m].x = points[m].x;
            pts[m].y = points[m].y;
        }
        seeta::EyeStateDetector::EYE_STATE leftstate, rightstate;
        m_pEyeStateDetector->Detect(simg, pts, leftstate, rightstate);
        if(leftstate == seeta::EyeStateDetector::EYE_CLOSE || rightstate == seeta::EyeStateDetector::EYE_CLOSE) {
            bEyeClose = true;
        } else {
            bEyeClose = false;
        }
        return true;
    } catch (const std::exception &e) {
        m_sErrMsg = QString::fromLocal8Bit("Face comparison failed: %1").arg(e.what());
        myDebug()<<m_sErrMsg;
        return false;
    }
}

// Compares with the base image and runs the anti-spoofing check on the single
// detected face, logging the time spent in each stage (assumed name and
// signature: the original declaration was lost).
bool CFaceRecProc::compareWithBaseRealness(cv::Mat faceMat, int &nfaceCount, float &fScore, bool &bRealness) {
    double begin = cv::getTickCount();
    try {
        if(m_baseFeaturePtr == nullptr) {
            m_sErrMsg = QString::fromLocal8Bit("Please set the base image before comparing");
            return false;
        }
        SeetaImageData simg;
        simg.height   = faceMat.rows;
        simg.width    = faceMat.cols;
        simg.channels = faceMat.channels();
        simg.data     = faceMat.data;

        SeetaFaceInfoArray faces = m_pFaceDetector->detect(simg);
        nfaceCount = faces.size;
        if(faces.size <= 0) {
            m_sErrMsg = QString::fromLocal8Bit("No face detected");
            return false;
        } else if(faces.size > 1) {
            m_sErrMsg = QString::fromLocal8Bit("More than one face detected");
            return false;
        }

        double Detect = cv::getTickCount();
        double detecttime = (Detect - begin) / cv::getTickFrequency();
        myServerLog()<<detecttime;

        std::shared_ptr<float> feature(new float[m_pFaceRecognizer->GetExtractFeatureSize()],
                                       std::default_delete<float[]>());
        SeetaPointF points[5];
        m_pFaceLandmarker->mark(simg, faces.data[0].pos, points);
        m_pFaceRecognizer->Extract(simg, points, feature.get());

        double VerifyGetFeature = cv::getTickCount();
        detecttime = (VerifyGetFeature - Detect) / cv::getTickFrequency();
        myServerLog()<<detecttime;

        fScore = 0;
        fScore = m_pFaceRecognizer->CalculateSimilarity(feature.get(), m_baseFeaturePtr.get());

        double CalculateSimilarity = cv::getTickCount();
        detecttime = (CalculateSimilarity - VerifyGetFeature) / cv::getTickFrequency();
        myServerLog()<<detecttime;

        m_pFaceAntiSpoofing->SetVideoFrameCount(1);
        auto status = m_pFaceAntiSpoofing->PredictVideo(simg, faces.data[0].pos, points);
        if(status == seeta::FaceAntiSpoofing::REAL) {
            bRealness = true;
        } else {
            bRealness = false;
        }

        double FaceAntiSpoofing = cv::getTickCount();
        detecttime = (FaceAntiSpoofing - CalculateSimilarity) / cv::getTickFrequency();
        myServerLog()<<detecttime;
        return true;
    } catch (const std::exception &e) {
        m_sErrMsg = QString::fromLocal8Bit("Face comparison failed: %1").arg(e.what());
        myDebug()<<m_sErrMsg;
        return false;
    }
}

// Compares every detected face with the base image, keeps the best score and
// runs the anti-spoofing check on that best match (assumed name and signature:
// the original declaration was lost).
bool CFaceRecProc::compareWithBaseMultiFace(cv::Mat faceMat, int &nfaceCount, float &fScore, bool &bRealness) {
    double begin = cv::getTickCount();
    try {
        if(m_baseFeaturePtr == nullptr) {
            m_sErrMsg = QString::fromLocal8Bit("Please set the base image before comparing");
            return false;
        }
        SeetaImageData simg;
        simg.height   = faceMat.rows;
        simg.width    = faceMat.cols;
        simg.channels = faceMat.channels();
        simg.data     = faceMat.data;

        SeetaFaceInfoArray faces = m_pFaceDetector->detect(simg);
        nfaceCount = faces.size;
        if(faces.size <= 0) {
            return true;
        }

        double Detect = cv::getTickCount();
        double detecttime = (Detect - begin) / cv::getTickFrequency();
        myServerLog()<<detecttime;

        fScore = 0;
        bRealness = false;
        for(int i = 0; i < faces.size; i++) {
            std::shared_ptr<float> feature(new float[m_pFaceRecognizer->GetExtractFeatureSize()],
                                           std::default_delete<float[]>());
            SeetaPointF points[5];
            m_pFaceLandmarker->mark(simg, faces.data[i].pos, points);
            m_pFaceRecognizer->Extract(simg, points, feature.get());

            double VerifyGetFeature = cv::getTickCount();
            detecttime = (VerifyGetFeature - Detect) / cv::getTickFrequency();
            myServerLog()<<detecttime;

            float fTempScore = m_pFaceRecognizer->CalculateSimilarity(feature.get(), m_baseFeaturePtr.get());
            if(fTempScore > fScore) {
                fScore = fTempScore;
                m_pFaceAntiSpoofing->SetVideoFrameCount(1);
                auto status = m_pFaceAntiSpoofing->PredictVideo(simg, faces.data[i].pos, points);
                if(status == seeta::FaceAntiSpoofing::REAL) {
                    bRealness = true;
                } else {
                    bRealness = false;
                }
            }

            double CalculateSimilarity = cv::getTickCount();
            detecttime = (CalculateSimilarity - VerifyGetFeature) / cv::getTickFrequency();
            myServerLog()<<detecttime;
        }
        return true;
    } catch (const std::exception &e) {
        m_sErrMsg = QString::fromLocal8Bit("Face comparison failed: %1").arg(e.what());
        myDebug()<<m_sErrMsg;
        return false;
    }
}
// Copies an externally computed feature vector into the cached base feature
// (assumed signature and null guard: the original declaration was lost; this is
// the counterpart of getFeature() below).
void CFaceRecProc::setFeature(float *feature) {
    if(feature != nullptr) {
        m_baseFeaturePtr = std::shared_ptr<float>(new float[m_pFaceRecognizer->GetExtractFeatureSize()],
                                                  std::default_delete<float[]>());
        memcpy(m_baseFeaturePtr.get(), feature, sizeof(float) * m_pFaceRecognizer->GetExtractFeatureSize());
    }
}

float* CFaceRecProc::getFeature() {
    return m_baseFeaturePtr.get();
}

bool CFaceRecProc::hasBaseImage() {
    return m_baseFeaturePtr != nullptr;
}
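/*
 * Example usage (a minimal sketch, not part of the original file): it assumes
 * initHandle()/destroyHandle() are declared in CFaceRecProc.h as reconstructed
 * above, and that "base.jpg" and "probe.jpg" exist next to the executable. The
 * block is disabled with #if 0 so it does not affect the build.
 */
#if 0
static int runFaceCompareExample() {
    // Create the global processor and load the SeetaFace models.
    g_faceRecProcPtr = std::make_shared<CFaceRecProc>();
    if(g_faceRecProcPtr->initHandle() != 0) {
        return -1;                                   // model loading failed
    }

    // Register the reference ("base") photo once.
    if(!g_faceRecProcPtr->setBaseImage(QString("base.jpg"))) {
        return -1;                                   // no usable face in the base image
    }

    // Compare a probe frame against the cached base feature.
    cv::Mat probe = cv::imread("probe.jpg");         // BGR, 3 channels, as the class expects
    int   nFaceCount = 0;
    float fScore     = 0.0f;
    if(g_faceRecProcPtr->compareWithBase(probe, nFaceCount, fScore)) {
        bool bReal = false;
        g_faceRecProcPtr->faceRealness(probe, bReal);
        // Decide with whatever similarity threshold the application uses
        // (a hypothetical example: accept if fScore > 0.6 and bReal is true).
    }

    g_faceRecProcPtr->destroyHandle();
    return 0;
}
#endif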