# algorithm_system_server/algorithm/Car_recognition/car_detection.py

# -*- coding: UTF-8 -*-
import argparse
import time
import os
import cv2
import torch
from numpy import random
import copy
import numpy as np
from algorithm.Car_recognition.plate_recognition.plate_rec import get_plate_result,allFilePath,init_model,cv_imread
# from plate_recognition.plate_cls import cv_imread
from algorithm.Car_recognition.plate_recognition.double_plate_split_merge import get_split_merge
from algorithm.Car_recognition.plate_recognition.color_rec import plate_color_rec,init_color_model
from algorithm.Car_recognition.car_recognition.car_rec import init_car_rec_model,get_color_and_score
from algorithm.Car_recognition.utils.datasets import letterbox
from algorithm.Car_recognition.utils.general import check_img_size, non_max_suppression_face, scale_coords
from algorithm.Car_recognition.utils.cv_puttext import cv2ImgAddText
from read_data import LoadImages, LoadStreams
import torch.backends.cudnn as cudnn

clors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255)]
danger = ['危', '险']  # a plate containing either character is treated as a dangerous-goods plate
object_color = [(0, 255, 255), (0, 255, 0), (255, 255, 0)]
class_type = ['单层车牌', '双层车牌', '汽车']  # single-layer plate, double-layer plate, vehicle

class CarDetection():
    def __init__(self, video_path=None):
        # self.detect_model = detect_model
        # self.plate_rec_model = plate_rec_model
        # self.car_rec_model = car_rec_model
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.detect_model = torch.load('weight/traffic/best.pt', map_location=self.device)['model'].float().fuse()
        # self.detect_model = load_model(os.getcwd() + "/weight/traffic/detect.pt")  # initialise the detection model
        self.plate_rec_model = init_model(os.getcwd() + "/weight/traffic/plate_rec_color.pth")  # initialise the plate recognition model
        self.car_rec_model = init_car_rec_model(os.getcwd() + "/weight/traffic/car_rec_color.pth")  # initialise the vehicle recognition model
        time_all = 0
        time_begin = time.time()
        # self.frame = [None]

        if video_path is not None:
            self.video_name = video_path
        else:
            self.video_name = 'vid2.mp4'  # a default video file

        self.imgsz = 384
        self.dataset = LoadImages(self.video_name, self.imgsz)

    def use_webcam(self, source):
        cudnn.benchmark = True
        self.dataset = LoadStreams(source, img_size=self.imgsz)

    def get_frame(self):
        for im0s in self.dataset:
            # print(self.dataset.mode)
            # print(self.dataset)
            if self.dataset.mode == 'stream':
                img = im0s[0].copy()
            else:
                img = im0s.copy()
            dict_list = detect_Recognition_plate(self.detect_model, img, self.device, self.plate_rec_model, car_rec_model=self.car_rec_model)
            ori_img = draw_result(img, dict_list)
            ret, jpeg = cv2.imencode(".jpg", ori_img)
            txt = str(dict_list)
            return jpeg.tobytes(), txt

def order_points(pts):  # order the four corners as top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]  # top-left has the smallest x+y sum
    rect[2] = pts[np.argmax(s)]  # bottom-right has the largest x+y sum
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]  # top-right has the smallest y-x difference
    rect[3] = pts[np.argmax(diff)]  # bottom-left has the largest y-x difference
    return rect

def four_point_transform(image, pts):  # perspective-transform the quadrilateral into an upright plate crop
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
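
# Illustrative only (not used by the pipeline): warping a hand-picked quadrilateral
# with four_point_transform yields an upright crop whose size follows the longest
# edge pair of the source region. The coordinates below are made-up example values.
#   pts = np.float32([[120, 80], [260, 90], [255, 140], [115, 130]])
#   plate_crop = four_point_transform(frame, pts)  # upright BGR crop of the plate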

def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):  # map landmark coords back to the original image
    # Rescale coords (x1,y1,...,x4,y4) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2, 4, 6]] -= pad[0]  # x padding
    coords[:, [1, 3, 5, 7]] -= pad[1]  # y padding
    coords[:, :8] /= gain
    # clip_coords(coords, img0_shape)
    coords[:, 0].clamp_(0, img0_shape[1])  # x1
    coords[:, 1].clamp_(0, img0_shape[0])  # y1
    coords[:, 2].clamp_(0, img0_shape[1])  # x2
    coords[:, 3].clamp_(0, img0_shape[0])  # y2
    coords[:, 4].clamp_(0, img0_shape[1])  # x3
    coords[:, 5].clamp_(0, img0_shape[0])  # y3
    coords[:, 6].clamp_(0, img0_shape[1])  # x4
    coords[:, 7].clamp_(0, img0_shape[0])  # y4
    # coords[:, 8].clamp_(0, img0_shape[1])  # x5
    # coords[:, 9].clamp_(0, img0_shape[0])  # y5
    return coords

def get_plate_rec_landmark(img, xyxy, conf, landmarks, class_num, device, plate_rec_model, car_rec_model):
    h, w, c = img.shape
    result_dict = {}

    x1 = int(xyxy[0])
    y1 = int(xyxy[1])
    x2 = int(xyxy[2])
    y2 = int(xyxy[3])
    landmarks_np = np.zeros((4, 2))
    rect = [x1, y1, x2, y2]

    if int(class_num) == 2:  # class 2 is a whole vehicle: only colour recognition is applied
        car_roi_img = img[y1:y2, x1:x2]
        car_color, color_conf = get_color_and_score(car_rec_model, car_roi_img, device)
        result_dict['class_type'] = class_type[int(class_num)]
        result_dict['rect'] = rect                # vehicle ROI
        result_dict['score'] = conf               # detection score
        result_dict['object_no'] = int(class_num)
        result_dict['car_color'] = car_color
        result_dict['color_conf'] = color_conf
        return result_dict

    for i in range(4):
        point_x = int(landmarks[2 * i])
        point_y = int(landmarks[2 * i + 1])
        landmarks_np[i] = np.array([point_x, point_y])

    class_label = int(class_num)  # plate type: 0 single-layer, 1 double-layer
    roi_img = four_point_transform(img, landmarks_np)  # perspective transform to obtain the plate crop
    if class_label:  # a double-layer plate is split and re-merged into a single line before recognition
        roi_img = get_split_merge(roi_img)
    plate_number, plate_color = get_plate_result(roi_img, device, plate_rec_model)  # recognise plate number and colour from the crop
    for dan in danger:  # any occurrence of '危' or '险' marks a dangerous-goods plate
        if dan in plate_number:
            plate_number = '危险品'
    # cv2.imwrite("roi.jpg", roi_img)
    result_dict['class_type'] = class_type[class_label]
    result_dict['rect'] = rect                        # plate ROI
    result_dict['landmarks'] = landmarks_np.tolist()  # plate corner coordinates
    result_dict['plate_no'] = plate_number            # plate number
    result_dict['roi_height'] = roi_img.shape[0]      # plate crop height
    result_dict['plate_color'] = plate_color          # plate colour
    result_dict['object_no'] = class_label            # 0 single-layer, 1 double-layer
    result_dict['score'] = conf                       # plate detection score
    return result_dict
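
# For reference, a plate entry produced above has roughly this shape (the values
# shown are illustrative, not real output):
#   {'class_type': '单层车牌', 'rect': [x1, y1, x2, y2], 'landmarks': [[...], ...],
#    'plate_no': '京A12345', 'roi_height': 48, 'plate_color': '蓝色',
#    'object_no': 0, 'score': 0.92}
# Vehicle entries (object_no == 2) carry 'car_color' and 'color_conf' instead of
# the plate-specific fields.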

def detect_Recognition_plate(model, orgimg, device, plate_rec_model, car_rec_model=None):
    # Run detection on one frame, then recognise every detected plate / vehicle
    conf_thres = 0.3
    iou_thres = 0.5
    dict_list = []
    # orgimg = cv2.imread(image_path)  # BGR
    assert orgimg is not None, 'Image Not Found'
    img0 = copy.deepcopy(orgimg)
    img0 = np.transpose(img0, (2, 0, 1))  # HWC -> CHW
    img = torch.from_numpy(img0)
    # print(model)
    model.to(device)
    img = img.to(device)  # keep the input tensor on the same device as the model
    img = img.float()     # uint8 to fp16/32
    img /= 255.0          # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)

    # Inference
    pred = model(img)[0]

    # Apply NMS
    pred = non_max_suppression_face(pred, conf_thres, iou_thres)

    # Process detections
    # det columns: 0-3 box (xyxy), 4 confidence, 5-12 four landmark points, 13 class id
    for i, det in enumerate(pred):  # detections per image
        if len(det):
            # Rescale boxes from img_size to im0 size
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], orgimg.shape).round()

            # Print results
            for c in det[:, -1].unique():
                n = (det[:, -1] == c).sum()  # detections per class

            det[:, 5:13] = scale_coords_landmarks(img.shape[2:], det[:, 5:13], orgimg.shape).round()

            for j in range(det.size()[0]):
                xyxy = det[j, :4].view(-1).tolist()
                conf = det[j, 4].cpu().numpy()
                landmarks = det[j, 5:13].view(-1).tolist()
                class_num = det[j, 13].cpu().numpy()
                result_dict = get_plate_rec_landmark(orgimg, xyxy, conf, landmarks, class_num, device, plate_rec_model, car_rec_model)
                dict_list.append(result_dict)
    return dict_list
    # cv2.imwrite('result.jpg', orgimg)

def draw_result(orgimg, dict_list):
    # Draw boxes, landmarks and recognition results onto the original image
    result_str = ""
    for result in dict_list:
        rect_area = result['rect']
        object_no = result['object_no']

        if object_no != 2:  # a plate (single- or double-layer)
            x, y, w, h = rect_area[0], rect_area[1], rect_area[2] - rect_area[0], rect_area[3] - rect_area[1]
            padding_w = 0.05 * w
            padding_h = 0.11 * h
            rect_area[0] = max(0, int(x - padding_w))
            rect_area[1] = max(0, int(y - padding_h))
            rect_area[2] = min(orgimg.shape[1], int(rect_area[2] + padding_w))
            rect_area[3] = min(orgimg.shape[0], int(rect_area[3] + padding_h))

            height_area = int(result['roi_height'] / 2)
            landmarks = result['landmarks']
            result_p = result['plate_no']
            if result['object_no'] == 0:  # single-layer plate
                result_p += " " + result['plate_color']
            else:  # double-layer plate
                result_p += " " + result['plate_color'] + "双层"
            result_str += result_p + " "

            for i in range(4):  # plate corner points
                cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1)

            if len(result) >= 1:
                if "危险品" in result_p:  # for dangerous-goods plates, draw the text below the box
                    orgimg = cv2ImgAddText(orgimg, result_p, rect_area[0], rect_area[3], (0, 255, 0), height_area)
                else:
                    orgimg = cv2ImgAddText(orgimg, result_p, rect_area[0] - height_area, rect_area[1] - height_area - 10, (0, 255, 0), height_area)
        else:  # a whole vehicle: annotate its colour
            height_area = int((rect_area[3] - rect_area[1]) / 20)
            car_color = result['car_color']
            car_color_str = "车辆颜色:"  # "vehicle colour:"
            car_color_str += car_color
            orgimg = cv2ImgAddText(orgimg, car_color_str, rect_area[0], rect_area[1], (0, 255, 0), height_area)

        cv2.rectangle(orgimg, (rect_area[0], rect_area[1]), (rect_area[2], rect_area[3]), object_color[object_no], 2)  # draw the bounding box
    # print(result_str)
    return orgimg

def get_second(capture):
    if capture.isOpened():
        rate = capture.get(cv2.CAP_PROP_FPS)                  # frame rate
        FrameNumber = capture.get(cv2.CAP_PROP_FRAME_COUNT)   # total number of frames
        duration = FrameNumber / rate                         # duration in seconds
        return int(rate), int(FrameNumber), int(duration)
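
if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: it assumes the weight
    # files referenced in CarDetection.__init__ exist under ./weight/traffic and
    # that a sample video (here 'vid2.mp4') is present in the working directory.
    detector = CarDetection("vid2.mp4")
    frame_bytes, detections = detector.get_frame()  # one annotated JPEG frame + detection summary
    with open("result_frame.jpg", "wb") as f:
        f.write(frame_bytes)
    print(detections)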