import datetime
import os
import time
from multiprocessing import Process, Manager
from threading import Thread

import ffmpeg
import torch
import torch.backends.cudnn as cudnn
import cv2
import numpy as np

from read_data import LoadImages, LoadStreams
from tools.draw_chinese import cv2ImgAddText
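
# SafeDetection wraps a locally stored, custom-trained YOLOv5 model and
# annotates personal protective equipment (PPE) detections (helmet, gloves,
# goggles, mask, shoes, and their missing "no_*" counterparts) on frames
# read from a video file or a live stream.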
class SafeDetection:

    time_reference = datetime.datetime.now()
    counter_frame = 0
    processed_fps = 0

    def __init__(self, video_path=None):
        # Load the custom-trained weights through the local YOLOv5 repository copy.
        self.model = torch.hub.load(os.getcwd() + "/algorithm/yolov5", 'custom',
                                    source='local', path='./weight/safe_guard.pt',
                                    force_reload=True)
        self.classes = self.model.names  # index -> class-name mapping

        self.frame = [None]

        if video_path is not None:
            self.video_name = video_path
        else:
            self.video_name = 'vid2.mp4'  # a default video file

        self.dataset = LoadImages(self.video_name)
        self.flag = 0

    def use_webcam(self, source):
        # Switch the input from the video file to a live stream (e.g. a webcam
        # index or RTSP URL) and rebuild the dataset loader accordingly.
        # self.dataset.release() # Release any existing video capture
        # self.cap = cv2.VideoCapture(0) # Open default webcam
        # print('use_webcam')
        self.source = source
        cudnn.benchmark = True  # speeds up inference for fixed-size inputs
        # self.dataset = LoadStreams(source, img_size=self.imgsz)
        self.dataset = LoadStreams(source)

    def class_to_label(self, x):
        return self.classes[int(x)]

    def get_frame(self):

        i = 0

        for im0s in self.dataset:
            # print(self.dataset.mode)
            # print(self.dataset)
            if self.dataset.mode == 'stream':
                img = im0s[0].copy()
            else:
                img = im0s.copy()

            # OpenCV frames are BGR; convert to RGB for inference, then back
            # to BGR for drawing and JPEG encoding.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            results = self.model(img, size=640)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

            # Loop through each detected object and draw a class-colored box.
            accuracy = 0
            num_problem = len(results.xyxy[0])
            bgr = (0, 255, 0)

            for obj in results.xyxy[0]:
                xmin, ymin, xmax, ymax = map(int, obj[:4])
                accuracy = obj[4]
                c = int(obj[-1])

                color = bgr  # fallback for any class without an explicit color
                if self.classes[c] == 'glove':
                    color = (255, 200, 90)
                elif self.classes[c] == 'goggles':
                    color = (0, 0, 255)
                elif self.classes[c] == 'helmet':
                    color = (0, 255, 0)
                elif self.classes[c] == 'mask':
                    color = (50, 50, 50)
                elif self.classes[c] == 'no_glove':
                    color = (255, 0, 0)
                elif self.classes[c] == 'no_goggles':
                    color = (10, 20, 30)
                elif self.classes[c] == 'no_mask':
                    color = (100, 0, 120)
                elif self.classes[c] == 'no_shoes':
                    color = (100, 100, 0)
                elif self.classes[c] == 'shoes':
                    color = (0, 0, 0)

                cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)

                img = cv2ImgAddText(img,
                                    f'{self.classes[c]}',
                                    xmax + 2,
                                    ymin - 1,
                                    (0, 250, 0),
                                    20,)
                # cv2.putText(img, f"{self.classes[c]}, {round(float(accuracy), 2)}", (xmin, ymin),
                #             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

            ret, jpeg = cv2.imencode(".jpg", img)

            resText = 'Production environment safety inspection in progress'
            # print(num_people)
            i = i + 1
            return jpeg.tobytes(), resText
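

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original pipeline): it assumes the
# local yolov5 copy under ./algorithm/yolov5, the weights file
# ./weight/safe_guard.pt, and a readable video such as the default vid2.mp4
# are present. It grabs one annotated frame and writes it to disk as a quick
# smoke test; the output file name below is illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    detector = SafeDetection()  # falls back to the default vid2.mp4
    frame_bytes, status_text = detector.get_frame()
    print(status_text)
    with open('safe_detection_sample.jpg', 'wb') as f:
        f.write(frame_bytes)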