
How to Implement a Face Recognition System in Python

Published: 2024-09-23 | Author: 千家信息网 editor

In this post I'll share how to implement a face recognition system in Python. Since many readers may not be familiar with the topic yet, I'm sharing this article as a reference; I hope you get a lot out of it. Let's dive in.

Basic Principles

Face recognition is not quite the same as ordinary object detection. With a conventional object detection model, the model can only find the kinds of targets it was trained on: a detector trained on plants obviously cannot detect animals. Face recognition works differently. Take your phone as an example: you enroll your face only once, with no extra training, and it still recognizes you accurately. The underlying idea is that a face recognition model extracts a feature vector (embedding) from your face, then compares the face detected in real time against the faces stored in the database; if the similarity exceeds a certain threshold, the match is considered successful. Note that this is only a simplified description: the systems in modern phones and door-access devices are far more complex and secure, and go well beyond simple 2D face matching.
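To make the comparison step concrete, here is a minimal sketch of threshold-based matching between embeddings. It assumes the embeddings have already been produced by some face recognition model; the random 128-dimensional vectors, the match_face helper, and the 0.6 threshold are all hypothetical placeholders, not part of this project's code.

import numpy as np

def cosine_similarity(a, b):
    # Cosine similarity between two feature vectors, in [-1, 1].
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def match_face(query, database, threshold=0.6):
    # Return the name of the best-scoring enrolled face above the
    # threshold, or None if nobody is similar enough.
    best_name, best_score = None, threshold
    for name, embedding in database.items():
        score = cosine_similarity(query, embedding)
        if score > best_score:
            best_name, best_score = name, score
    return best_name

# Stand-in embeddings: in a real system these come from the model.
rng = np.random.default_rng(0)
database = {"alice": rng.standard_normal(128), "bob": rng.standard_normal(128)}
live = database["alice"] + 0.1 * rng.standard_normal(128)  # noisy re-capture
print(match_face(live, database))  # -> "alice"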

In summary, the process breaks down into the following steps:

1. Upload a face to the database

2. Face detection

3. Compare against the database and return the result

I have made a simple diagram here to help you understand the process; a runnable sketch of the same three-step flow follows below.
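As a minimal sketch of the three steps above, here is how the flow could look with the open-source face_recognition library (dlib-based). This is an illustration of the idea, not the code used in this project; the file paths are hypothetical, and the snippet assumes each image actually contains a detectable face.

import face_recognition

# Step 1: enroll a face into the "database" (here, just a dict in memory).
known_image = face_recognition.load_image_file("db/alice.jpg")  # hypothetical path
known_encoding = face_recognition.face_encodings(known_image)[0]  # assumes a face was found
database = {"alice": known_encoding}

# Step 2: detect faces in a new frame and extract their encodings.
frame = face_recognition.load_image_file("capture.jpg")  # hypothetical path
encodings = face_recognition.face_encodings(frame)

# Step 3: compare against the database and return the result.
for encoding in encodings:
    matches = face_recognition.compare_faces(list(database.values()), encoding)
    names = [name for name, hit in zip(database.keys(), matches) if hit]
    print(names or "unknown")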

Code Implementation

Without further ado, here is our code implementation. I have already uploaded the code to Gitee (码云); you can download it directly from the address at the beginning of this blog post.

If you don't know how to set up a Python environment, see: How to configure an Anaconda virtual environment in PyCharm.

Create a Virtual Environment

Before creating the virtual environment, please download the Gitee source code linked at the beginning of this post.

This project requires a Python 3.7 virtual environment, created with the following commands:

conda create -n face python==3.7.3
conda activate face

Install the Required Libraries

pip install -r requirements.txt

Now you can happily start your face recognition!

Simply run the main file below:

python UI.py

Alternatively, you can run it directly from PyCharm.

First, upload the face you want to recognize to the database.

Then use the second tab's video detection feature to recognize faces in real time.

The full code is as follows:

# -*- coding: utf-8 -*-
"""
-------------------------------------------------
Project Name: yolov5-jungong
File Name: window.py
Author: chenming
Create Date: 2021/11/8
Description: GUI that runs detection on the webcam, video files and images
-------------------------------------------------
"""
# Load the model as soon as the UI starts; use a tmp directory for intermediate results
import shutil
import PyQt5.QtCore
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import threading
import argparse
import os
import sys
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
import os.path as osp

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
                           increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync


# Main window class (three tabs: image detection, video detection, about)
class MainWindow(QTabWidget):
    def __init__(self):
        # Initialize the UI
        super().__init__()
        self.setWindowTitle('Target detection system')
        self.resize(1200, 800)
        self.setWindowIcon(QIcon("images/UI/lufei.png"))
        # Display size for result images
        self.output_size = 480
        self.img2predict = ""
        self.device = 'cpu'
        # Video reading thread state
        self.vid_source = '0'  # default to the webcam
        self.stopEvent = threading.Event()
        self.webcam = True
        self.stopEvent.clear()
        self.model = self.model_load(weights="runs/train/exp_yolov5s/weights/best.pt",
                                     device="cpu")  # TODO: set the weights path and device here
        self.initUI()
        self.reset_vid()

    '''
    *** Model initialization ***
    '''
    @torch.no_grad()
    def model_load(self, weights="",  # model.pt path(s)
                   device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
                   half=False,  # use FP16 half-precision inference
                   dnn=False,  # use OpenCV DNN for ONNX inference
                   ):
        device = select_device(device)
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model = DetectMultiBackend(weights, device=device, dnn=dnn)
        stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
        # Half
        half &= pt and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
        if pt:
            model.model.half() if half else model.model.float()
        print("模型加载完成!")
        return model

    '''
    *** UI initialization ***
    '''
    def initUI(self):
        font_title = QFont('楷体', 16)
        font_main = QFont('楷体', 14)
        # Shared button stylesheet (identical for all buttons)
        button_style = ("QPushButton{color:white}"
                        "QPushButton:hover{background-color: rgb(2,110,180);}"
                        "QPushButton{background-color:rgb(48,124,208)}"
                        "QPushButton{border:2px}"
                        "QPushButton{border-radius:5px}"
                        "QPushButton{padding:5px 5px}"
                        "QPushButton{margin:5px 5px}")
        # Image detection tab: two buttons, one to upload an image and one to run detection
        img_detection_widget = QWidget()
        img_detection_layout = QVBoxLayout()
        img_detection_title = QLabel("图片识别功能")
        img_detection_title.setFont(font_title)
        mid_img_widget = QWidget()
        mid_img_layout = QHBoxLayout()
        self.left_img = QLabel()
        self.right_img = QLabel()
        self.left_img.setPixmap(QPixmap("images/UI/up.jpeg"))
        self.right_img.setPixmap(QPixmap("images/UI/right.jpeg"))
        self.left_img.setAlignment(Qt.AlignCenter)
        self.right_img.setAlignment(Qt.AlignCenter)
        mid_img_layout.addWidget(self.left_img)
        mid_img_layout.addStretch(0)
        mid_img_layout.addWidget(self.right_img)
        mid_img_widget.setLayout(mid_img_layout)
        up_img_button = QPushButton("上传图片")
        det_img_button = QPushButton("开始检测")
        up_img_button.clicked.connect(self.upload_img)
        det_img_button.clicked.connect(self.detect_img)
        up_img_button.setFont(font_main)
        det_img_button.setFont(font_main)
        up_img_button.setStyleSheet(button_style)
        det_img_button.setStyleSheet(button_style)
        img_detection_layout.addWidget(img_detection_title, alignment=Qt.AlignCenter)
        img_detection_layout.addWidget(mid_img_widget, alignment=Qt.AlignCenter)
        img_detection_layout.addWidget(up_img_button)
        img_detection_layout.addWidget(det_img_button)
        img_detection_widget.setLayout(img_detection_layout)

        # Video detection tab; the logic is simple and runs top to bottom
        vid_detection_widget = QWidget()
        vid_detection_layout = QVBoxLayout()
        vid_title = QLabel("视频检测功能")
        vid_title.setFont(font_title)
        self.vid_img = QLabel()
        self.vid_img.setPixmap(QPixmap("images/UI/up.jpeg"))
        vid_title.setAlignment(Qt.AlignCenter)
        self.vid_img.setAlignment(Qt.AlignCenter)
        self.webcam_detection_btn = QPushButton("摄像头实时监测")
        self.mp4_detection_btn = QPushButton("视频文件检测")
        self.vid_stop_btn = QPushButton("停止检测")
        self.webcam_detection_btn.setFont(font_main)
        self.mp4_detection_btn.setFont(font_main)
        self.vid_stop_btn.setFont(font_main)
        self.webcam_detection_btn.setStyleSheet(button_style)
        self.mp4_detection_btn.setStyleSheet(button_style)
        self.vid_stop_btn.setStyleSheet(button_style)
        self.webcam_detection_btn.clicked.connect(self.open_cam)
        self.mp4_detection_btn.clicked.connect(self.open_mp4)
        self.vid_stop_btn.clicked.connect(self.close_vid)
        # Add the widgets to the layout
        vid_detection_layout.addWidget(vid_title)
        vid_detection_layout.addWidget(self.vid_img)
        vid_detection_layout.addWidget(self.webcam_detection_btn)
        vid_detection_layout.addWidget(self.mp4_detection_btn)
        vid_detection_layout.addWidget(self.vid_stop_btn)
        vid_detection_widget.setLayout(vid_detection_layout)

        # About tab
        about_widget = QWidget()
        about_layout = QVBoxLayout()
        about_title = QLabel('欢迎使用目标检测系统\n\n 提供付费指导:有需要的好兄弟加下面的QQ即可')  # TODO: change the welcome message
        about_title.setFont(QFont('楷体', 18))
        about_title.setAlignment(Qt.AlignCenter)
        about_img = QLabel()
        about_img.setPixmap(QPixmap('images/UI/qq.png'))
        about_img.setAlignment(Qt.AlignCenter)
        label_super = QLabel()  # TODO: change the author info
        label_super.setText("或者你可以在这里找到我-->肆十二")
        label_super.setFont(QFont('楷体', 16))
        label_super.setOpenExternalLinks(True)
        label_super.setAlignment(Qt.AlignRight)
        about_layout.addWidget(about_title)
        about_layout.addStretch()
        about_layout.addWidget(about_img)
        about_layout.addStretch()
        about_layout.addWidget(label_super)
        about_widget.setLayout(about_layout)

        self.left_img.setAlignment(Qt.AlignCenter)
        self.addTab(img_detection_widget, '图片检测')
        self.addTab(vid_detection_widget, '视频检测')
        self.addTab(about_widget, '联系我')
        self.setTabIcon(0, QIcon('images/UI/lufei.png'))
        self.setTabIcon(1, QIcon('images/UI/lufei.png'))
        self.setTabIcon(2, QIcon('images/UI/lufei.png'))

    '''
    *** Upload image ***
    '''
    def upload_img(self):
        # Let the user pick an image file
        fileName, fileType = QFileDialog.getOpenFileName(self, 'Choose file', '', '*.jpg *.png *.tif *.jpeg')
        if fileName:
            suffix = fileName.split(".")[-1]
            save_path = osp.join("images/tmp", "tmp_upload." + suffix)
            shutil.copy(fileName, save_path)
            # Resize the image so the panels keep a uniform size
            im0 = cv2.imread(save_path)
            resize_scale = self.output_size / im0.shape[0]
            im0 = cv2.resize(im0, (0, 0), fx=resize_scale, fy=resize_scale)
            cv2.imwrite("images/tmp/upload_show_result.jpg", im0)
            self.img2predict = fileName
            self.left_img.setPixmap(QPixmap("images/tmp/upload_show_result.jpg"))
            # After a new upload, reset the result panel on the right
            self.right_img.setPixmap(QPixmap("images/UI/right.jpeg"))

    '''
    *** Detect image ***
    '''
    def detect_img(self):
        model = self.model
        output_size = self.output_size
        source = self.img2predict  # file/dir/URL/glob, 0 for webcam
        imgsz = 640  # inference size (pixels)
        conf_thres = 0.25  # confidence threshold
        iou_thres = 0.45  # NMS IOU threshold
        max_det = 1000  # maximum detections per image
        device = self.device  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img = False  # show results
        save_txt = False  # save results to *.txt
        save_conf = False  # save confidences in --save-txt labels
        save_crop = False  # save cropped prediction boxes
        nosave = False  # do not save images/videos
        classes = None  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms = False  # class-agnostic NMS
        augment = False  # augmented inference
        visualize = False  # visualize features
        line_thickness = 3  # bounding box thickness (pixels)
        hide_labels = False  # hide labels
        hide_conf = False  # hide confidences
        half = False  # use FP16 half-precision inference
        dnn = False  # use OpenCV DNN for ONNX inference
        print(source)
        if source == "":
            QMessageBox.warning(self, "请上传", "请先上传图片再进行检测")
        else:
            source = str(source)
            device = select_device(self.device)
            webcam = False
            stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
            imgsz = check_img_size(imgsz, s=stride)  # check image size
            save_img = not nosave and not source.endswith('.txt')  # save inference images
            # Dataloader
            if webcam:
                view_img = check_imshow()
                cudnn.benchmark = True  # set True to speed up constant image size inference
                dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
                bs = len(dataset)  # batch_size
            else:
                dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
                bs = 1  # batch_size
            vid_path, vid_writer = [None] * bs, [None] * bs
            # Run inference
            if pt and device.type != 'cpu':
                # warmup (imgsz is an int here, so pass it for both dimensions)
                model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.model.parameters())))
            dt, seen = [0.0, 0.0, 0.0], 0
            for path, im, im0s, vid_cap, s in dataset:
                t1 = time_sync()
                im = torch.from_numpy(im).to(device)
                im = im.half() if half else im.float()  # uint8 to fp16/32
                im /= 255  # 0 - 255 to 0.0 - 1.0
                if len(im.shape) == 3:
                    im = im[None]  # expand for batch dim
                t2 = time_sync()
                dt[0] += t2 - t1
                # Inference
                pred = model(im, augment=augment, visualize=visualize)
                t3 = time_sync()
                dt[1] += t3 - t2
                # NMS
                pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
                dt[2] += time_sync() - t3
                # Second-stage classifier (optional)
                # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
                # Process predictions
                for i, det in enumerate(pred):  # per image
                    seen += 1
                    if webcam:  # batch_size >= 1
                        p, im0, frame = path[i], im0s[i].copy(), dataset.count
                        s += f'{i}: '
                    else:
                        p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
                    p = Path(p)  # to Path
                    s += '%gx%g ' % im.shape[2:]  # print string
                    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                    imc = im0.copy() if save_crop else im0  # for save_crop
                    annotator = Annotator(im0, line_width=line_thickness, example=str(names))
                    if len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
                        # Print results
                        for c in det[:, -1].unique():
                            n = (det[:, -1] == c).sum()  # detections per class
                            s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
                        # Write results
                        for *xyxy, conf, cls in reversed(det):
                            if save_txt:  # Write to file
                                xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(
                                    -1).tolist()  # normalized xywh
                                line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                            if save_img or save_crop or view_img:  # Add bbox to image
                                c = int(cls)  # integer class
                                label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                                annotator.box_label(xyxy, label, color=colors(c, True))
                    # Print time (inference-only)
                    LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
                    # Save and show the annotated result
                    im0 = annotator.result()
                    resize_scale = output_size / im0.shape[0]
                    im0 = cv2.resize(im0, (0, 0), fx=resize_scale, fy=resize_scale)
                    cv2.imwrite("images/tmp/single_result.jpg", im0)
                    # So far this only seems to misbehave on Ubuntu; it is fine on Windows
                    self.right_img.setPixmap(QPixmap("images/tmp/single_result.jpg"))

    # Video detection follows the same logic, with two entry points:
    # one for the webcam and one for video files.
    '''
    ### Window close event ###
    '''
    def closeEvent(self, event):
        reply = QMessageBox.question(self,
                                     'quit',
                                     "Are you sure?",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)
        if reply == QMessageBox.Yes:
            self.close()
            event.accept()
        else:
            event.ignore()

    '''
    ### Start webcam detection ###
    '''
    def open_cam(self):
        self.webcam_detection_btn.setEnabled(False)
        self.mp4_detection_btn.setEnabled(False)
        self.vid_stop_btn.setEnabled(True)
        self.vid_source = '0'
        self.webcam = True
        th = threading.Thread(target=self.detect_vid)
        th.start()

    '''
    ### Start video-file detection ###
    '''
    def open_mp4(self):
        fileName, fileType = QFileDialog.getOpenFileName(self, 'Choose file', '', '*.mp4 *.avi')
        if fileName:
            self.webcam_detection_btn.setEnabled(False)
            self.mp4_detection_btn.setEnabled(False)
            self.vid_source = fileName
            self.webcam = False
            th = threading.Thread(target=self.detect_vid)
            th.start()

    '''
    ### Video detection main loop ###
    '''
    # The webcam and video-file paths share this function; only the source differs
    def detect_vid(self):
        model = self.model
        output_size = self.output_size
        imgsz = 640  # inference size (pixels)
        conf_thres = 0.25  # confidence threshold
        iou_thres = 0.45  # NMS IOU threshold
        max_det = 1000  # maximum detections per image
        view_img = False  # show results
        save_txt = False  # save results to *.txt
        save_conf = False  # save confidences in --save-txt labels
        save_crop = False  # save cropped prediction boxes
        nosave = False  # do not save images/videos
        classes = None  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms = False  # class-agnostic NMS
        augment = False  # augmented inference
        visualize = False  # visualize features
        line_thickness = 3  # bounding box thickness (pixels)
        hide_labels = False  # hide labels
        hide_conf = False  # hide confidences
        half = False  # use FP16 half-precision inference
        dnn = False  # use OpenCV DNN for ONNX inference
        source = str(self.vid_source)
        webcam = self.webcam
        device = select_device(self.device)
        stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        save_img = not nosave and not source.endswith('.txt')  # save inference images
        # Dataloader
        if webcam:
            view_img = check_imshow()
            cudnn.benchmark = True  # set True to speed up constant image size inference
            dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
            bs = len(dataset)  # batch_size
        else:
            dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
            bs = 1  # batch_size
        vid_path, vid_writer = [None] * bs, [None] * bs
        # Run inference
        if pt and device.type != 'cpu':
            # warmup (imgsz is an int here, so pass it for both dimensions)
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.model.parameters())))
        dt, seen = [0.0, 0.0, 0.0], 0
        for path, im, im0s, vid_cap, s in dataset:
            t1 = time_sync()
            im = torch.from_numpy(im).to(device)
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim
            t2 = time_sync()
            dt[0] += t2 - t1
            # Inference
            pred = model(im, augment=augment, visualize=visualize)
            t3 = time_sync()
            dt[1] += t3 - t2
            # NMS
            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
            dt[2] += time_sync() - t3
            # Process predictions
            for i, det in enumerate(pred):  # per image
                seen += 1
                if webcam:  # batch_size >= 1
                    p, im0, frame = path[i], im0s[i].copy(), dataset.count
                    s += f'{i}: '
                else:
                    p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
                p = Path(p)  # to Path
                s += '%gx%g ' % im.shape[2:]  # print string
                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                imc = im0.copy() if save_crop else im0  # for save_crop
                annotator = Annotator(im0, line_width=line_thickness, example=str(names))
                if len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
                    # Print results
                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        if save_txt:  # Write to file
                            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(
                                -1).tolist()  # normalized xywh
                            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                        if save_img or save_crop or view_img:  # Add bbox to image
                            c = int(cls)  # integer class
                            label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                            annotator.box_label(xyxy, label, color=colors(c, True))
                # Print time (inference-only)
                LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
                # Save the annotated frame and show it on the video label
                im0 = annotator.result()
                frame = im0
                resize_scale = output_size / frame.shape[0]
                frame_resized = cv2.resize(frame, (0, 0), fx=resize_scale, fy=resize_scale)
                cv2.imwrite("images/tmp/single_result_vid.jpg", frame_resized)
                self.vid_img.setPixmap(QPixmap("images/tmp/single_result_vid.jpg"))
            # waitKey doubles as a small frame delay; stop when requested
            cv2.waitKey(25)
            if self.stopEvent.is_set():
                self.stopEvent.clear()
                self.webcam_detection_btn.setEnabled(True)
                self.mp4_detection_btn.setEnabled(True)
                self.reset_vid()
                break

    '''
    ### Reset the video UI ###
    '''
    def reset_vid(self):
        self.webcam_detection_btn.setEnabled(True)
        self.mp4_detection_btn.setEnabled(True)
        self.vid_img.setPixmap(QPixmap("images/UI/up.jpeg"))
        self.vid_source = '0'
        self.webcam = True

    '''
    ### Stop video detection ###
    '''
    def close_vid(self):
        self.stopEvent.set()
        self.reset_vid()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    mainWindow = MainWindow()
    mainWindow.show()
    sys.exit(app.exec_())
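One caveat about the code above: detect_vid runs in a plain threading.Thread and calls self.vid_img.setPixmap directly, but Qt widgets are generally not safe to update from outside the GUI thread. A more robust pattern is to emit a signal from the worker and let a slot in the main thread repaint the label. Here is a minimal sketch of that pattern; it is not the project's code, and the DetectionWorker class and frame_ready signal names are made up for illustration.

from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtGui import QPixmap

class DetectionWorker(QThread):
    # Emits the path of the freshly rendered frame; the GUI thread does the painting.
    frame_ready = pyqtSignal(str)

    def run(self):
        # ... run inference and write images/tmp/single_result_vid.jpg per frame ...
        self.frame_ready.emit("images/tmp/single_result_vid.jpg")

# Hypothetical wiring inside MainWindow.__init__:
# self.worker = DetectionWorker()
# self.worker.frame_ready.connect(lambda path: self.vid_img.setPixmap(QPixmap(path)))
# self.worker.start()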

That is the full content of "How to Implement a Face Recognition System in Python". Thanks for reading! I hope you now have a good grasp of the topic and that the material shared here is helpful. If you want to learn more, feel free to follow our industry news channel!
