from PyQt5.QtCore import QThread, pyqtSignal
import core.face_detection as ftm
from numpy import ndarray
import cv2


class CameraThread(QThread):
    """Worker thread that captures camera frames and runs face detection."""

    # Full RGB frame, emitted for live display.
    new_frame = pyqtSignal(ndarray)
    # Cropped face image (resized to 224x224), emitted when a face is found.
    storage_frame = pyqtSignal(ndarray)
    # Frame rate reported by the capture device.
    fps_signal = pyqtSignal(float)

    def __init__(self):
        super().__init__()
        # Open the default camera and record the frame rate it reports.
        self.cap = cv2.VideoCapture(0)
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.face_detector = ftm.FaceDetector()
        self.running = True

    def run(self):
        while self.running:
            ret, frame = self.cap.read()
            if ret:
                # OpenCV captures in BGR; convert to RGB before detection.
                rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                original_img, img, bboxs = self.face_detector.find_faces(rgb_image)

                # Crop the last detected face, if any.
                face_img = None
                for bbox in bboxs:
                    try:
                        x, y, w, h, confidence = bbox
                        face_img = img[y:y + h, x:x + w]
                    except Exception as e:
                        print(e)

                self.new_frame.emit(original_img)
                if face_img is not None:
                    self.fps_signal.emit(self.fps)
                    self.storage_frame.emit(cv2.resize(face_img, (224, 224)))

    def stop(self):
        # Ask the capture loop to exit, wait for the thread to finish,
        # then release the camera.
        self.running = False
        self.wait()
        self.cap.release()
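

# A minimal usage sketch (an assumption, not part of the original module): it
# shows how the thread's signals could be connected and the thread started and
# stopped from a PyQt application. The lambda callbacks are placeholders; a
# real GUI would connect these signals to widget slots instead.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    camera = CameraThread()
    camera.new_frame.connect(lambda frame: None)                   # live preview frame
    camera.storage_frame.connect(lambda face: print(face.shape))   # 224x224 face crop
    camera.fps_signal.connect(lambda fps: print(f"FPS: {fps}"))
    camera.start()                          # QThread.start() invokes run() in a new thread
    app.aboutToQuit.connect(camera.stop)    # stop the loop and release the camera on exit
    sys.exit(app.exec_())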