Adding a video mask in YOLO to count only objects in a specified region

We can implement this in the following ways:

1. Using an ROI (Region of Interest) mask
import cv2
import numpy as np
from ultralytics import YOLO

# Load the YOLO model
model = YOLO('yolov8n.pt')

# Build a binary mask for the ROI polygon
def create_mask(frame_shape, points):
    mask = np.zeros(frame_shape[:2], dtype=np.uint8)
    # Convert the points to a numpy array
    points = np.array(points, dtype=np.int32)
    # Fill the polygon region
    cv2.fillPoly(mask, [points], 255)
    return mask

# Process the video
def process_video(video_path, roi_points):
    cap = cv2.VideoCapture(video_path)

    # Read the first frame to create the mask
    ret, frame = cap.read()
    if not ret:
        return

    # Create the ROI mask
    mask = create_mask(frame.shape, roi_points)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Apply the mask
        masked_frame = cv2.bitwise_and(frame, frame, mask=mask)

        # Run YOLO detection on the masked frame
        results = model(masked_frame)

        # Process detection results
        for r in results:
            boxes = r.boxes
            for box in boxes:
                # Get the bounding box coordinates
                x1, y1, x2, y2 = box.xyxy[0]
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

                # Check whether the box center lies inside the ROI
                box_center = ((x1 + x2) // 2, (y1 + y2) // 2)
                if mask[box_center[1], box_center[0]] > 0:
                    # Draw the bounding box
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

                    # Show the class name and confidence
                    conf = box.conf[0]
                    cls = box.cls[0]
                    label = f'{model.names[int(cls)]} {conf:.2f}'
                    cv2.putText(frame, label, (x1, y1 - 10),
                              cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # Draw the ROI outline
        cv2.polylines(frame, [roi_points], True, (0, 0, 255), 2)

        # Show the result
        cv2.imshow('YOLO with ROI', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

# Usage example
if __name__ == "__main__":
    # Define the ROI polygon points (adjust the coordinates as needed)
    roi_points = np.array([
        [100, 100],   # top-left
        [500, 100],   # top-right
        [500, 400],   # bottom-right
        [100, 400]    # bottom-left
    ], dtype=np.int32)

    # Process the video
    video_path = "path/to/your/video.mp4"
    process_video(video_path, roi_points)
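
If you only want to filter detections (and skip masking the frame itself), cv2.pointPolygonTest can check a box center directly against the ROI polygon, without the mask lookup. A minimal sketch (the helper name center_in_roi is ours, and it reuses the roi_points array from the example above):

import cv2
import numpy as np

def center_in_roi(box_xyxy, roi_points):
    # Positive: inside, 0: on the edge, negative: outside the polygon
    x1, y1, x2, y2 = box_xyxy
    cx, cy = (x1 + x2) / 2.0, (y1 + y2) / 2.0
    return cv2.pointPolygonTest(roi_points.reshape(-1, 1, 2), (cx, cy), False) >= 0

# Example with the rectangle from the script above
roi_points = np.array([[100, 100], [500, 100], [500, 400], [100, 400]], dtype=np.int32)
print(center_in_roi((200, 200, 300, 300), roi_points))  # True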
2. Interactive ROI selection
import cv2
import numpy as np
from ultralytics import YOLO

class ROISelector:
    def __init__(self, window_name):
        self.window_name = window_name
        self.points = []

    def mouse_callback(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            # Add one polygon vertex per left click
            self.points.append([x, y])
        elif event == cv2.EVENT_MOUSEMOVE and self.points:
            # Preview the polygon outline while the mouse moves
            frame_copy = param.copy()
            points = np.array(self.points + [[x, y]], dtype=np.int32)
            cv2.polylines(frame_copy, [points], False, (0, 255, 0), 2)
            cv2.imshow(self.window_name, frame_copy)

    def select_roi(self, frame):
        cv2.imshow(self.window_name, frame)
        cv2.setMouseCallback(self.window_name, self.mouse_callback, frame)

        while True:
            key = cv2.waitKey(1) & 0xFF
            if key == ord('c'):  # confirm the selection
                break
            elif key == ord('r'):  # reset the selection
                self.points = []
                cv2.imshow(self.window_name, frame)

        return np.array(self.points, dtype=np.int32)

def main():
    # Load the YOLO model
    model = YOLO('yolov8n.pt')

    # Open the video
    video_path = "path/to/your/video.mp4"
    cap = cv2.VideoCapture(video_path)

    # Read the first frame for ROI selection
    ret, frame = cap.read()
    if not ret:
        return

    # Select the ROI
    roi_selector = ROISelector('Select ROI')
    print("Click to place ROI polygon points; press 'c' to confirm, 'r' to reset")
    roi_points = roi_selector.select_roi(frame)

    # Build the binary mask from the selected polygon
    mask = np.zeros(frame.shape[:2], dtype=np.uint8)
    cv2.fillPoly(mask, [roi_points], 255)

    # Process the video
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Apply the mask
        masked_frame = cv2.bitwise_and(frame, frame, mask=mask)

        # Run YOLO detection on the masked frame
        results = model(masked_frame)

        # Per-frame counts of objects inside the ROI
        count_dict = {}

        # Process detection results
        for r in results:
            boxes = r.boxes
            for box in boxes:
                x1, y1, x2, y2 = map(int, box.xyxy[0])

                # Check whether the box center lies inside the ROI
                center_x = (x1 + x2) // 2
                center_y = (y1 + y2) // 2

                if mask[center_y, center_x] > 0:
                    cls = int(box.cls[0])
                    conf = float(box.conf[0])

                    # Update the per-class count
                    class_name = model.names[cls]
                    count_dict[class_name] = count_dict.get(class_name, 0) + 1

                    # Draw the bounding box and label
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    label = f'{class_name} {conf:.2f}'
                    cv2.putText(frame, label, (x1, y1-10),
                              cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # Draw the ROI outline
        cv2.polylines(frame, [roi_points], True, (0, 0, 255), 2)

        # Overlay the per-class counts
        y_offset = 30
        for class_name, count in count_dict.items():
            cv2.putText(frame, f'{class_name}: {count}', (10, y_offset), 
                       cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
            y_offset += 30

        # Show the result
        cv2.imshow('YOLO Detection with ROI', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
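
Note that count_dict is rebuilt on every frame, so the numbers shown are how many objects are currently inside the ROI, not how many have passed through it over time. Counting unique objects requires tracking across frames; a rough sketch using Ultralytics' built-in tracker, assuming your version exposes model.track(..., persist=True) and boxes.id (the ROI coordinates are placeholders):

import cv2
import numpy as np
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
cap = cv2.VideoCapture("path/to/your/video.mp4")

roi_points = np.array([[100, 100], [500, 100], [500, 400], [100, 400]], dtype=np.int32)
ret, frame = cap.read()
assert ret, "could not read the first frame"
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
cv2.fillPoly(mask, [roi_points], 255)

seen_ids = set()  # track IDs that have entered the ROI at least once

while ret:
    # persist=True keeps tracker state between frames
    results = model.track(frame, persist=True, verbose=False)
    boxes = results[0].boxes
    if boxes.id is not None:  # the tracker may return no IDs on some frames
        for xyxy, track_id in zip(boxes.xyxy, boxes.id.int().tolist()):
            x1, y1, x2, y2 = map(int, xyxy)
            cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
            if mask[cy, cx] > 0:
                seen_ids.add(track_id)
    print(f"unique objects seen inside the ROI so far: {len(seen_ids)}")
    ret, frame = cap.read()

cap.release()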
Main features

ROI region selection

The ROI can be set from predefined coordinate points or selected by hand in the interactive window, and arbitrary polygon regions are supported. A rectangular shortcut is sketched below.
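
If a rectangle is all you need, OpenCV's built-in cv2.selectROI is a quicker way to pick the region than clicking polygon vertices; a small sketch that converts the rectangle into the four-point format used by the scripts above:

import cv2
import numpy as np

cap = cv2.VideoCapture("path/to/your/video.mp4")
ret, frame = cap.read()
assert ret, "could not read the first frame"

# Drag a rectangle, then press ENTER or SPACE to confirm (c cancels)
x, y, w, h = cv2.selectROI("Select ROI", frame, showCrosshair=True)
cv2.destroyWindow("Select ROI")

# Same 4-point polygon layout as roi_points above
roi_points = np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]], dtype=np.int32)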

Detection filtering

Only objects inside the ROI are counted: each detection's center point is tested against the ROI, and detections outside it are discarded. An overlap-based variant is sketched below.
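
The center-point test is cheap, but it counts an object as soon as its center crosses the boundary. If you would rather require that most of the box lie inside the region, you can measure how much of the box overlaps the mask. A sketch under that assumption (box_in_roi and the min_overlap threshold are our own, not part of the scripts above):

import numpy as np

def box_in_roi(mask, box_xyxy, min_overlap=0.5):
    # True if at least min_overlap of the box area falls on ROI pixels
    x1, y1, x2, y2 = (max(int(v), 0) for v in box_xyxy)
    box_area = max((x2 - x1) * (y2 - y1), 1)
    inside = np.count_nonzero(mask[y1:y2, x1:x2])  # mask is 0/255
    return inside / box_area >= min_overlap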

Visualization

The ROI boundary, the detected bounding boxes, the class names with confidence scores, and the per-class counts are all drawn on the frame.
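
On top of the cv2.polylines outline, a semi-transparent fill can make the counted region easier to see at a glance. An optional sketch (draw_roi_overlay and the 0.3 opacity are our own choices):

import cv2
import numpy as np

def draw_roi_overlay(frame, roi_points, color=(0, 0, 255), alpha=0.3):
    # Blend a filled copy of the ROI polygon onto the frame
    overlay = frame.copy()
    cv2.fillPoly(overlay, [roi_points], color)
    return cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)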

Real-time statistics

The number of objects of each class inside the ROI is counted and the totals are displayed on every frame.

Usage

Predefined ROI

# Define the ROI polygon points
roi_points = np.array([
    [x1, y1],
    [x2, y2],
    [x3, y3],
    [x4, y4]
], dtype=np.int32)

# Process the video
process_video("video.mp4", roi_points)

Interactive ROI selection

# Run the main program
main()
# Click with the mouse to place ROI polygon points
# Press 'c' to confirm the selection
# Press 'r' to reset the selection
# Press 'q' to quit the program
This approach lets you count objects in a specific region accurately, avoid interference from outside the region, get an intuitive visual result, and configure the detection region flexibly.
