Upload files to /
parent 05646c050a
commit 0cc1c3eef3
@ -0,0 +1,39 @@
from ultralytics import YOLO

# Define training parameters
epochs = 10        # number of training epochs
batch_size = 16    # batch size
img_size = 640     # input image size

# Load a YOLOv8 model
model = YOLO('yolov8s.yaml')  # create a new, untrained model from the config

# Start training; the COCO128 dataset referenced by 'coco128.yaml' is downloaded
# automatically by Ultralytics on first use
model.train(data='coco128.yaml', epochs=epochs, batch=batch_size, imgsz=img_size)

# Load the trained model, assuming the best weights were saved as 'best.pt'
model = YOLO('best.pt')

# Restrict processing to a single class; in this example only pedestrians are kept
class_names = model.names  # dict mapping class id -> class name
person_class_id = next(k for k, v in class_names.items() if v == 'person')

# Image (or video) to run on
img_path = 'path_to_your_image.jpg'

# Run detection
results = model(img_path)

# Process the results
for result in results:
    boxes = result.boxes
    for box in boxes:
        if int(box.cls.item()) == person_class_id:  # keep only pedestrian detections
            x1, y1, x2, y2 = box.xyxy[0].tolist()   # bounding-box coordinates
            confidence = box.conf.item()            # confidence score
            print(f"Pedestrian detected at ({x1:.2f}, {y1:.2f}) to ({x2:.2f}, {y2:.2f}), Confidence: {confidence:.2f}")
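# Note: if your Ultralytics version supports the `classes` argument of predict,
# the same person-only filtering can also be requested at inference time, e.g.
#     results = model(img_path, classes=[person_class_id])
# which returns only boxes whose class id is in the given list.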
@ -0,0 +1,105 @@
import numpy as np
import cv2
from ultralytics import YOLO


def point_to_line_distance(point, line_start, line_end):
    # Direction vector of the line
    line_dir = np.array(line_end) - np.array(line_start)
    # Vector from the line's start point to the point
    point_to_start = np.array(point) - np.array(line_start)

    # 2D cross product (scalar form, valid only in two dimensions)
    cross_product = line_dir[0] * point_to_start[1] - line_dir[1] * point_to_start[0]

    # The sign of the cross product indicates which side of the line the point lies on
    return cross_product


def check_crossing_detection_box(box, start_point, end_point):
    x1, y1, x2, y2 = box
    points = [(x1, y1), (x2, y1), (x1, y2), (x2, y2)]
    distances = [point_to_line_distance(p, start_point, end_point) for p in points]

    # The box straddles the line if its corners yield both positive and negative
    # values, i.e. they lie on opposite sides of the line
    crossing = any(dist < 0 for dist in distances) and any(dist > 0 for dist in distances)
    return crossing

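# Sanity check with hypothetical numbers: for a line from (0, 250) to (1000, 170),
# point_to_line_distance((100, 200), ...) is -42000 while point_to_line_distance((100, 300), ...)
# is +58000, so a box with corners on both sides, e.g. (100, 200, 300, 300),
# makes check_crossing_detection_box return True.
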
def process_frame(frame):
    # Scale the frame to a fixed display height while keeping the aspect ratio
    height, width = frame.shape[:2]
    desired_height = 500
    scale = desired_height / height
    resized_frame = cv2.resize(frame, (int(width * scale), desired_height))
    frame = resized_frame

    # Define the counting line across the resized frame
    height, width = frame.shape[:2]
    start_point = (0, height // 2)
    end_point = (width, height // 3)

    cv2.line(frame, start_point, end_point, (255, 0, 0), 3)

    results = model(frame, verbose=False)
    for result in results:
        boxes = result.boxes
        classes = model.names
        for box in boxes:
            x1, y1, x2, y2 = box.xyxy[0].tolist()
            class_id = int(box.cls.item())
            if classes[class_id] == 'person':
                cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
                # Re-draw in red if the person's box straddles the line
                if check_crossing_detection_box((x1, y1, x2, y2), start_point, end_point):
                    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)

    return frame


def main():
    global model
    try:
        model = YOLO('yolov8s.pt')
        print("Model loaded successfully.")
    except Exception as e:
        print(f"Error while loading the model: {e}")
        model = None

    if model is not None:
        video_path = "./image/a.mp4"
        print("Starting video capture...")
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            print("Error opening the video stream or file")
        else:
            print("Video stream opened successfully.")

            # Get the frame rate and frame size of the input video
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            # Create the VideoWriter object
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # mp4v codec
            out = cv2.VideoWriter('output.mp4', fourcc, fps, (width, height))  # output file: output.mp4

            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                processed_frame = process_frame(frame)

                # process_frame scales the frame down, so resize it back to the
                # writer's frame size before writing it to the output video
                out.write(cv2.resize(processed_frame, (width, height)))

                cv2.imshow('Processed Video', processed_frame)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break

            # Release the VideoWriter
            out.release()

            cap.release()
            cv2.destroyAllWindows()
            print("Video processing finished.")
    else:
        print("Model not loaded; cannot continue.")


if __name__ == '__main__':
    main()
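# Note: as written, the script flags, per frame, any person whose box overlaps the line.
# Counting actual crossing events over time requires per-object identity; Ultralytics'
# tracking mode (model.track with persist=True) is one way to obtain persistent ids,
# after which the side-of-line test above can be compared between frames for each track.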
@ -0,0 +1,8 @@
from ultralytics import YOLO

# Load a pretrained model
model = YOLO('yolov8n.pt')  # pretrained YOLOv8n weights (use 'yolov8n.yaml' instead to train from scratch)

# Start training with custom parameters
model.train(data='C://Users//86195//Desktop//ultralytics-main (1)//ultralytics-main//data.yaml', epochs=5, batch=20, imgsz=640)
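# For reference, the dataset YAML passed via `data` follows the Ultralytics dataset
# format and is expected to define roughly (illustrative values, adjust to your dataset):
#   path: <dataset root>
#   train: images/train
#   val: images/val
#   names:
#     0: person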
@ -0,0 +1,26 @@
# This CITATION.cff file was generated with https://bit.ly/cffinit

cff-version: 1.2.0
title: Ultralytics YOLO
message: >-
  If you use this software, please cite it using the
  metadata from this file.
type: software
authors:
  - given-names: Glenn
    family-names: Jocher
    affiliation: Ultralytics
    orcid: 'https://orcid.org/0000-0001-5950-6979'
  - given-names: Ayush
    family-names: Chaurasia
    affiliation: Ultralytics
    orcid: 'https://orcid.org/0000-0002-7603-6750'
  - family-names: Qiu
    given-names: Jing
    affiliation: Ultralytics
    orcid: 'https://orcid.org/0000-0003-3783-7069'
repository-code: 'https://github.com/ultralytics/ultralytics'
url: 'https://ultralytics.com'
license: AGPL-3.0
version: 8.0.0
date-released: '2023-01-10'