深度解析CVZone:构建高效计算机视觉应用的实战指南
【免费下载链接】cvzone:This is a Computer vision package that makes it easy to run Image processing and AI functions. At the core it uses OpenCV and Mediapipe libraries. 项目地址: https://gitcode.com/gh_mirrors/cv/cvzone
CVZone是一个基于OpenCV和Mediapipe的计算机视觉工具包,它极大地简化了图像处理和AI功能的实现流程。在短短10分钟内,开发者就能完成从环境搭建到运行人脸检测应用的全过程,即使是零基础也能轻松掌握这个强大的计算机视觉工具包。
🏗️ 架构设计与核心模块解析
CVZone采用模块化设计理念,将复杂的计算机视觉任务分解为独立的、可复用的功能单元。这种设计使得开发者能够按需选择功能模块,避免引入不必要的依赖和性能开销。
核心架构层解析
CVZone的架构分为三个主要层次:
- 基础工具层(Utils模块):提供图像处理的基础功能,包括图像堆叠、边界框绘制、文本渲染等
- AI检测层:集成Mediapipe的预训练模型,提供人脸检测、手部追踪、姿态估计等高级功能
- 应用支持层:包括序列通信、PID控制、实时绘图等辅助功能
核心模块源码深度剖析
让我们深入分析几个关键模块的实现细节:
人脸检测模块(cvzone/FaceDetectionModule.py)基于Mediapipe的人脸检测模型,支持两种检测模式:
- 短距离检测(0-2米):适用于近距离人脸识别
- 长距离检测(0-5米):适用于监控场景
# 高级人脸检测配置示例 from cvzone.FaceDetectionModule import FaceDetector import cv2 # 创建高精度人脸检测器 detector = FaceDetector( minDetectionCon=0.7, # 提高检测置信度阈值 modelSelection=1 # 使用长距离检测模型 ) # 多尺度检测策略 def multi_scale_face_detection(img, scales=[1.0, 0.75, 0.5]): faces_all_scales = [] for scale in scales: resized = cv2.resize(img, None, fx=scale, fy=scale) _, bboxs = detector.findFaces(resized, draw=False) # 将检测框缩放回原始尺寸 for bbox in bboxs: bbox['bbox'] = [int(v/scale) for v in bbox['bbox']] bbox['center'] = (int(bbox['center'][0]/scale), int(bbox['center'][1]/scale)) faces_all_scales.append(bbox) return faces_all_scales手部追踪模块(cvzone/HandTrackingModule.py)实现了21个关键点的精确追踪,支持双手同时检测和手势识别:
# 高级手势识别与距离计算 from cvzone.HandTrackingModule import HandDetector import numpy as np detector = HandDetector( staticMode=False, maxHands=2, modelComplexity=1, detectionCon=0.8, # 提高检测置信度 minTrackCon=0.7 # 提高追踪稳定性 ) def analyze_hand_gesture(hands): """分析手部姿态并识别手势""" if not hands: return None gestures = [] for hand in hands: fingers = detector.fingersUp(hand) # 手势分类逻辑 if sum(fingers) == 5: gesture = "OPEN_HAND" elif sum(fingers) == 0: gesture = "FIST" elif fingers[0] == 0 and fingers[1:].count(1) == 1: gesture = "POINTING" else: gesture = "OTHER" # 计算手部关键点距离 lmList = hand["lmList"] if len(lmList) >= 21: thumb_tip = lmList[4][:2] index_tip = lmList[8][:2] distance = np.linalg.norm(np.array(thumb_tip) - np.array(index_tip)) gestures.append({ "type": hand["type"], "gesture": gesture, "fingers_up": fingers, "pinch_distance": distance }) return gestures🚀 高级特性与性能优化
实时图像处理流水线
CVZone的图像处理流水线经过精心优化,确保在实时应用中保持高性能。以下是一个完整的实时处理示例:
import cv2 import cvzone from cvzone.FaceDetectionModule import FaceDetector from cvzone.Utils import stackImages from cvzone.FPS import FPS class RealTimeVisionPipeline: def __init__(self): self.cap = cv2.VideoCapture(0) self.detector = FaceDetector() self.fps_reader = FPS(avgCount=30) # 设置摄像头参数优化 self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) self.cap.set(cv2.CAP_PROP_FPS, 30) def process_frame(self, img): """处理单帧图像的多步骤流水线""" # 步骤1:人脸检测 img_faces, bboxs = self.detector.findFaces(img, draw=True) # 步骤2:FPS计算 fps, img_fps = self.fps_reader.update(img_faces) # 步骤3:图像增强 img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img_gray = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2BGR) # 步骤4:边缘检测 img_canny = cv2.Canny(img, 100, 200) img_canny = cv2.cvtColor(img_canny, cv2.COLOR_GRAY2BGR) # 步骤5:图像堆叠显示 img_stack = cvzone.stackImages( [img_fps, img_gray, img_canny], cols=3, scale=0.7 ) return img_stack, len(bboxs), fps def run(self): """运行主处理循环""" while True: success, img = self.cap.read() if not success: break processed_img, face_count, fps = self.process_frame(img) # 添加统计信息 cvzone.putTextRect( processed_img, f"Faces: {face_count} | FPS: {fps:.1f}", pos=(10, 30), scale=2, thickness=2, colorR=(0, 0, 255), colorT=(255, 255, 255) ) cv2.imshow("Real-time Vision Pipeline", processed_img) if cv2.waitKey(1) & 0xFF == ord('q'): break self.cap.release() cv2.destroyAllWindows()多模态数据融合
CVZone支持多种数据源的融合处理,包括摄像头输入、图像文件和网络流:
from cvzone.Utils import downloadImageFromUrl, overlayPNG import cv2 class MultiSourceProcessor: def __init__(self): self.backgrounds = [] def load_remote_image(self, url): """从URL加载图像并保持透明度""" return downloadImageFromUrl(url, keepTransparency=True) def create_composite_image(self, base_img, overlay_img, position): """创建合成图像""" return overlayPNG(base_img, overlay_img, pos=position) def batch_process_images(self, image_paths): """批量处理图像""" processed_images = [] for path in image_paths: img = cv2.imread(path) if img is not None: # 应用多种处理效果 img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img_edges = cv2.Canny(img_gray, 50, 150) img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR) # 添加装饰性边框 img_decorated = cvzone.cornerRect( img, (50, 50, img.shape[1]-100, img.shape[0]-100), l=20, t=3, colorR=(0, 255, 255), colorC=(255, 0, 255) ) processed_images.append(img_decorated) return processed_images🎯 实战应用:构建智能监控系统
系统架构设计
基于CVZone构建的智能监控系统包含以下组件:
- 视频采集层:多路摄像头输入支持
- AI分析层:人脸检测、行为分析、异常识别
- 可视化层:实时数据显示、告警提示
- 存储层:事件记录、图像存储
import cv2 import cvzone import numpy as np from datetime import datetime from cvzone.FaceDetectionModule import FaceDetector from cvzone.HandTrackingModule import HandDetector class IntelligentSurveillanceSystem: def __init__(self, camera_indices=[0]): self.cameras = [] for idx in camera_indices: cap = cv2.VideoCapture(idx) cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) self.cameras.append(cap) self.face_detector = FaceDetector(minDetectionCon=0.6) self.hand_detector = HandDetector(detectionCon=0.7) self.event_log = [] def analyze_frame(self, frame, camera_id): """分析单帧图像,检测异常事件""" events = [] # 人脸检测与分析 frame_faces, bboxs = self.face_detector.findFaces(frame, draw=False) if bboxs: events.append({ 'type': 'face_detected', 'count': len(bboxs), 'timestamp': datetime.now(), 'camera': camera_id }) # 分析人脸属性 for bbox in bboxs: confidence = bbox['score'][0] center = bbox['center'] size = bbox['bbox'][2] * bbox['bbox'][3] if confidence > 0.8: events.append({ 'type': 'high_confidence_face', 'confidence': confidence, 'position': center, 'size': size }) # 手部检测与手势分析 hands, _ = self.hand_detector.findHands(frame, draw=False) if hands: for hand in hands: hand_type = hand["type"] fingers = self.hand_detector.fingersUp(hand) # 检测特定手势 if sum(fingers) == 1 and fingers[1] == 1: events.append({ 'type': 'pointing_gesture', 'hand': hand_type, 'timestamp': datetime.now() }) return frame_faces, events def generate_visualization(self, frames, events): """生成可视化界面""" processed_frames = [] for i, (frame, frame_events) in enumerate(zip(frames, events)): # 添加事件标记 for event in frame_events: if event['type'] == 'face_detected': cvzone.putTextRect( frame, f"Faces: {event['count']}", pos=(10, 30), colorR=(0, 0, 255), scale=1.5 ) elif event['type'] == 'high_confidence_face': cvzone.cornerRect( frame, (event['position'][0]-50, event['position'][1]-50, 100, 100), colorR=(0, 255, 0) ) processed_frames.append(frame) # 堆叠多摄像头画面 if len(processed_frames) > 1: final_display = 
cvzone.stackImages(processed_frames, cols=2, scale=0.8) else: final_display = processed_frames[0] # 添加系统状态信息 total_events = sum(len(e) for e in events) cvzone.putTextRect( final_display, f"Active Cameras: {len(self.cameras)} | Events: {total_events}", pos=(10, final_display.shape[0] - 30), colorR=(0, 0, 0), colorT=(255, 255, 255) ) return final_display def run(self): """运行监控系统""" print("智能监控系统启动...") while True: frames = [] all_events = [] # 从所有摄像头采集帧 for i, cap in enumerate(self.cameras): success, frame = cap.read() if success: processed_frame, events = self.analyze_frame(frame, i) frames.append(processed_frame) all_events.append(events) # 记录重要事件 for event in events: if event['type'] in ['high_confidence_face', 'pointing_gesture']: self.event_log.append(event) if frames: # 生成并显示可视化界面 display = self.generate_visualization(frames, all_events) cv2.imshow("Intelligent Surveillance System", display) # 按键退出 if cv2.waitKey(1) & 0xFF == ord('q'): break # 清理资源 for cap in self.cameras: cap.release() cv2.destroyAllWindows() print(f"系统运行结束,共记录 {len(self.event_log)} 个事件")🔧 性能调优与最佳实践
内存管理与性能优化
import time import psutil import cv2 from cvzone.FaceDetectionModule import FaceDetector class PerformanceOptimizer: def __init__(self): self.detector = FaceDetector() self.performance_stats = { 'frame_count': 0, 'total_time': 0, 'fps_history': [], 'memory_usage': [] } def optimize_detection_parameters(self, img_size, use_case): """根据使用场景优化检测参数""" optimizations = { 'realtime': { 'scale_factor': 0.5, 'min_neighbors': 3, 'min_size': (30, 30) }, 'high_accuracy': { 'scale_factor': 1.1, 'min_neighbors': 5, 'min_size': (50, 50) }, 'low_power': { 'scale_factor': 0.75, 'min_neighbors': 2, 'min_size': (40, 40) } } return optimizations.get(use_case, optimizations['realtime']) def adaptive_frame_skipping(self, current_fps, target_fps=30): """自适应帧跳过策略""" if current_fps < target_fps * 0.8: return 2 # 跳2帧 elif current_fps > target_fps * 1.2: return 0 # 不跳帧 else: return 1 # 跳1帧 def monitor_performance(self): """监控系统性能""" process = psutil.Process() memory_mb = process.memory_info().rss / 1024 / 1024 self.performance_stats['memory_usage'].append(memory_mb) # 保持历史记录长度 if len(self.performance_stats['fps_history']) > 100: self.performance_stats['fps_history'].pop(0) if len(self.performance_stats['memory_usage']) > 100: self.performance_stats['memory_usage'].pop(0) return { 'memory_mb': memory_mb, 'avg_fps': np.mean(self.performance_stats['fps_history']) if self.performance_stats['fps_history'] else 0, 'frame_count': self.performance_stats['frame_count'] }错误处理与鲁棒性增强
import traceback from cvzone.Utils import stackImages class RobustVisionSystem: def __init__(self): self.error_count = 0 self.max_errors = 10 def safe_image_processing(self, img, processing_func): """安全的图像处理包装器""" try: if img is None or img.size == 0: print("警告:输入图像为空") return None # 验证图像格式 if len(img.shape) != 3 or img.shape[2] != 3: print("警告:图像格式不支持,尝试转换") if len(img.shape) == 2: img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) elif img.shape[2] == 4: img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) result = processing_func(img) self.error_count = 0 # 重置错误计数 return result except Exception as e: self.error_count += 1 print(f"图像处理错误 ({self.error_count}/{self.max_errors}): {str(e)}") if self.error_count >= self.max_errors: raise RuntimeError("达到最大错误次数,系统停止") return None def graceful_degradation(self, primary_func, fallback_func, img): """优雅降级处理""" try: return primary_func(img) except Exception as e: print(f"主功能失败,使用备用功能: {e}") return fallback_func(img)📊 集成方案与扩展开发
与外部系统集成
CVZone可以轻松集成到各种应用场景中:
import json import requests from cvzone.PoseModule import PoseDetector class CVZoneIntegration: def __init__(self, api_endpoint=None): self.pose_detector = PoseDetector() self.api_endpoint = api_endpoint def analyze_workout_form(self, video_path): """分析健身动作姿势""" cap = cv2.VideoCapture(video_path) analysis_results = [] while True: success, img = cap.read() if not success: break img = self.pose_detector.findPose(img) lmList, bboxInfo = self.pose_detector.findPosition(img, draw=False) if lmList: # 分析关键角度 angles = { 'elbow_angle': self.pose_detector.findAngle( lmList[11][0:2], lmList[13][0:2], lmList[15][0:2], img=img )[0], 'knee_angle': self.pose_detector.findAngle( lmList[23][0:2], lmList[25][0:2], lmList[27][0:2], img=img )[0] } analysis_results.append({ 'frame': self.pose_detector.frame_count, 'angles': angles, 'posture_score': self.calculate_posture_score(angles) }) cap.release() return analysis_results def send_to_cloud(self, data): """发送数据到云服务""" if self.api_endpoint: try: response = requests.post( self.api_endpoint, json=data, headers={'Content-Type': 'application/json'}, timeout=5 ) return response.status_code == 200 except Exception as e: print(f"云服务发送失败: {e}") return False return True def calculate_posture_score(self, angles): """计算姿势评分""" score = 100 # 肘部角度应在80-100度之间(理想俯卧撑姿势) if not (80 <= angles['elbow_angle'] <= 100): score -= 20 # 膝盖角度应在160-180度之间(站立姿势) if not (160 <= angles['knee_angle'] <= 180): score -= 15 return max(score, 0)自定义模块开发
from cvzone.Utils import putTextRect, cornerRect import cv2 class CustomCVZoneModule: """自定义CVZone扩展模块示例""" @staticmethod def draw_advanced_annotation(img, text, bbox, confidence): """绘制高级标注""" x, y, w, h = bbox # 绘制带阴影的边界框 shadow_offset = 3 cvzone.cornerRect( img, (x + shadow_offset, y + shadow_offset, w, h), colorR=(50, 50, 50), colorC=(100, 100, 100), rt=2 ) cvzone.cornerRect( img, (x, y, w, h), colorR=(0, 255, 0), colorC=(255, 0, 255), rt=2 ) # 绘制带背景的置信度条 confidence_width = int(w * confidence) cv2.rectangle(img, (x, y-25), (x+confidence_width, y-5), (0, 255, 0), -1) cv2.rectangle(img, (x, y-25), (x+w, y-5), (100, 100, 100), 2) # 绘制文本 cvzone.putTextRect( img, f"{text} ({confidence:.1%})", (x, y-30), scale=0.8, thickness=1, colorR=(0, 0, 0), colorT=(255, 255, 255) ) return img @staticmethod def create_visualization_grid(images, titles, cols=2, scale=0.7): """创建带标题的可视化网格""" if len(images) != len(titles): raise ValueError("图像和标题数量必须相同") annotated_images = [] for img, title in zip(images, titles): # 为每个图像添加标题 img_with_title = img.copy() cvzone.putTextRect( img_with_title, title, (10, 30), scale=1, thickness=2, colorR=(0, 0, 0), colorT=(255, 255, 255) ) annotated_images.append(img_with_title) # 使用CVZone的stackImages创建网格 return cvzone.stackImages(annotated_images, cols=cols, scale=scale)🎨 可视化与调试工具
实时数据可视化
import matplotlib.pyplot as plt from cvzone.PlotModule import LivePlot import numpy as np class RealTimeVisualization: def __init__(self): # 创建多个实时图表 self.fps_plot = LivePlot(w=800, yLimit=[0, 60], interval=0.01, char='FPS') self.detection_plot = LivePlot(w=800, yLimit=[0, 10], interval=0.01, char='Detections') self.memory_plot = LivePlot(w=800, yLimit=[0, 500], interval=0.01, char='Memory(MB)') self.history = { 'fps': [], 'detections': [], 'memory': [], 'timestamps': [] } def update_plots(self, fps, detections, memory_mb): """更新所有图表""" # 更新实时图表 img_fps = self.fps_plot.update(fps) img_detections = self.detection_plot.update(detections) img_memory = self.memory_plot.update(memory_mb) # 保存历史数据 self.history['fps'].append(fps) self.history['detections'].append(detections) self.history['memory'].append(memory_mb) self.history['timestamps'].append(len(self.history['timestamps'])) # 限制历史数据长度 max_history = 1000 for key in self.history: if len(self.history[key]) > max_history: self.history[key] = self.history[key][-max_history:] return img_fps, img_detections, img_memory def generate_performance_report(self): """生成性能报告""" if not self.history['fps']: return None fig, axes = plt.subplots(3, 1, figsize=(10, 8)) # FPS图表 axes[0].plot(self.history['timestamps'], self.history['fps'], 'b-', linewidth=2) axes[0].set_title('FPS over Time') axes[0].set_ylabel('FPS') axes[0].grid(True, alpha=0.3) axes[0].axhline(y=np.mean(self.history['fps']), color='r', linestyle='--', label=f'Avg: {np.mean(self.history["fps"]):.1f}') axes[0].legend() # 检测数量图表 axes[1].plot(self.history['timestamps'], self.history['detections'], 'g-', linewidth=2) axes[1].set_title('Detections over Time') axes[1].set_ylabel('Detection Count') axes[1].grid(True, alpha=0.3) # 内存使用图表 axes[2].plot(self.history['timestamps'], self.history['memory'], 'r-', linewidth=2) axes[2].set_title('Memory Usage over Time') axes[2].set_xlabel('Frame Number') axes[2].set_ylabel('Memory (MB)') axes[2].grid(True, alpha=0.3) 
plt.tight_layout() return fig🔍 故障排除与性能调优
常见问题解决方案
检测精度不足
- 调整minDetectionCon参数提高检测置信度
- 使用图像预处理(直方图均衡化、对比度增强)
- 尝试不同的检测模型(modelSelection参数)
- 调整输入图像分辨率(可参考前文的多尺度检测策略)
性能瓶颈分析
- 使用FPS模块监控实时帧率
- 分析内存使用情况,避免内存泄漏
- 考虑使用多线程处理图像采集和分析
多摄像头同步问题
- 使用硬件同步的多摄像头设备
- 软件层面使用时间戳对齐
- 考虑使用专门的视频采集卡
高级配置建议
class AdvancedConfiguration: """高级配置管理器""" @staticmethod def get_optimal_config(hardware_profile): """根据硬件配置返回最优参数""" configs = { 'high_end': { 'resolution': (1280, 720), 'fps_target': 60, 'detection_confidence': 0.7, 'max_hands': 2, 'max_faces': 4, 'enable_segmentation': True }, 'mid_range': { 'resolution': (640, 480), 'fps_target': 30, 'detection_confidence': 0.6, 'max_hands': 2, 'max_faces': 2, 'enable_segmentation': False }, 'low_power': { 'resolution': (320, 240), 'fps_target': 15, 'detection_confidence': 0.5, 'max_hands': 1, 'max_faces': 1, 'enable_segmentation': False } } return configs.get(hardware_profile, configs['mid_range']) @staticmethod def adaptive_quality_adjustment(current_fps, target_fps, current_config): """自适应质量调整""" fps_ratio = current_fps / target_fps if fps_ratio < 0.8: # 降低质量以提高性能 new_config = current_config.copy() new_config['resolution'] = ( current_config['resolution'][0] // 2, current_config['resolution'][1] // 2 ) new_config['detection_confidence'] = min( current_config['detection_confidence'] + 0.1, 0.9 ) return new_config elif fps_ratio > 1.2: # 提高质量 new_config = current_config.copy() new_config['resolution'] = ( min(current_config['resolution'][0] * 2, 1920), min(current_config['resolution'][1] * 2, 1080) ) new_config['detection_confidence'] = max( current_config['detection_confidence'] - 0.05, 0.3 ) return new_config return current_config📈 部署与生产环境建议
容器化部署
# Dockerfile for CVZone application FROM python:3.9-slim # Install system dependencies RUN apt-get update && apt-get install -y \ libgl1-mesa-glx \ libglib2.0-0 \ libsm6 \ libxext6 \ libxrender-dev \ libgomp1 \ && rm -rf /var/lib/apt/lists/* # Set working directory WORKDIR /app # Copy requirements and install Python dependencies COPY requirements.txt . RUN pip install --no-cache-dir -r requirements.txt # Copy application code COPY . . # Create non-root user RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app USER appuser # Run the application CMD ["python", "main.py"]性能监控与日志
import logging import json from datetime import datetime class CVZoneMonitor: def __init__(self, log_file='cvzone_monitor.log'): # 配置日志 logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', handlers=[ logging.FileHandler(log_file), logging.StreamHandler() ] ) self.logger = logging.getLogger('CVZoneMonitor') # 性能指标 self.metrics = { 'start_time': datetime.now(), 'frames_processed': 0, 'errors': [], 'performance_samples': [] } def log_performance(self, fps, detections, memory_usage): """记录性能指标""" sample = { 'timestamp': datetime.now().isoformat(), 'fps': fps, 'detections': detections, 'memory_mb': memory_usage } self.metrics['performance_samples'].append(sample) self.metrics['frames_processed'] += 1 # 定期记录摘要 if self.metrics['frames_processed'] % 100 == 0: self.log_summary() def log_summary(self): """记录性能摘要""" if not self.metrics['performance_samples']: return recent_samples = self.metrics['performance_samples'][-50:] avg_fps = sum(s['fps'] for s in recent_samples) / len(recent_samples) avg_detections = sum(s['detections'] for s in recent_samples) / len(recent_samples) summary = { 'timestamp': datetime.now().isoformat(), 'total_frames': self.metrics['frames_processed'], 'avg_fps_last_50': avg_fps, 'avg_detections_last_50': avg_detections, 'total_errors': len(self.metrics['errors']), 'uptime_seconds': (datetime.now() - self.metrics['start_time']).total_seconds() } self.logger.info(f"性能摘要: {json.dumps(summary, indent=2)}") def save_metrics(self, filepath='cvzone_metrics.json'): """保存指标到文件""" with open(filepath, 'w') as f: json.dump(self.metrics, f, indent=2, default=str)🎉 结语
CVZone作为一个功能强大的计算机视觉工具包,为开发者提供了从基础图像处理到高级AI检测的完整解决方案。通过本文的深度解析,您应该已经掌握了:
- 架构理解:深入了解CVZone的模块化设计和工作原理
- 高级应用:掌握构建复杂计算机视觉系统的实战技巧
- 性能优化:学会调优和监控系统性能的方法
- 扩展开发:了解如何扩展和定制CVZone功能
- 生产部署:掌握在生产环境中部署和维护的最佳实践
无论您是构建实时监控系统、智能交互应用还是自动化检测工具,CVZone都能提供强大而灵活的支持。通过合理利用其丰富的功能和优化策略,您可以构建出高效、稳定的计算机视觉应用。
记住,成功的计算机视觉项目不仅依赖于强大的工具,更需要深入理解问题领域和持续的优化迭代。CVZone为您提供了坚实的起点,而您的创造力和工程能力将决定项目的最终高度。
【免费下载链接】cvzone:This is a Computer vision package that makes it easy to run Image processing and AI functions. At the core it uses OpenCV and Mediapipe libraries. 项目地址: https://gitcode.com/gh_mirrors/cv/cvzone
创作声明:本文部分内容由AI辅助生成(AIGC),仅供参考