Single-target tracking:
For single-target tracking you can simply call the tracker implementations that OpenCV (the contrib tracking module) already wraps; a bare-bones sketch of that call pattern comes first, followed by the full wrapper script.
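Stripped to its essentials, the call pattern is: create a tracker, initialise it with one frame and an ROI, then call update() in a loop. The snippet below is a minimal sketch of mine rather than part of the original script; it assumes opencv-contrib-python is installed, and the make_kcf_tracker helper (my own name) exists only because builds from 4.5.1 onwards moved the contrib trackers under cv2.legacy.

    import cv2

    def make_kcf_tracker():
        # opencv-contrib-python >= 4.5.1 moved the contrib trackers under cv2.legacy;
        # this fallback keeps the snippet working on both layouts.
        if hasattr(cv2, "TrackerKCF_create"):
            return cv2.TrackerKCF_create()
        return cv2.legacy.TrackerKCF_create()

    video = cv2.VideoCapture(0)                    # or a video file path
    ok, frame = video.read()
    box = cv2.selectROI("select", frame, False)    # drag a rectangle, press ENTER

    tracker = make_kcf_tracker()
    tracker.init(frame, box)

    while True:
        ok, frame = video.read()
        if not ok:
            break
        ok, box = tracker.update(frame)            # box is (x, y, w, h)
        if ok:
            x, y, w, h = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow("track", frame)
        if cv2.waitKey(1) & 0xff == 27:            # ESC to quit
            break

The full wrapper script from the original post follows.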
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:50:47 2020
Chapter 4: KCF tracking
@author: youxinlin
"""
import cv2
import time
import numpy as np

from items import MessageItem

'''
Watcher module: intrusion detection and target tracking.
'''


class WatchDog(object):
    # Intrusion detector based on background subtraction.
    def __init__(self, frame=None):
        # Motion detector constructor.
        self._background = None
        if frame is not None:
            self._background = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
        self.es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))

    def isWorking(self):
        # Whether the motion detector is running.
        return self._background is not None

    def startWorking(self, frame):
        # Start the motion detector.
        if frame is not None:
            self._background = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)

    def stopWorking(self):
        # Stop the motion detector.
        self._background = None

    def analyze(self, frame):
        # Motion detection on the current frame.
        if frame is None or self._background is None:
            return
        sample_frame = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
        diff = cv2.absdiff(self._background, sample_frame)
        diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
        diff = cv2.dilate(diff, self.es, iterations=2)
        # OpenCV 3.x returns (image, contours, hierarchy); OpenCV 4.x drops the first value.
        image, cnts, hierarchy = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        coordinate = []
        bigC = None
        bigMulti = 0
        for c in cnts:
            if cv2.contourArea(c) < 1500:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            if w * h > bigMulti:
                bigMulti = w * h
                bigC = ((x, y), (x + w, y + h))
        if bigC:
            cv2.rectangle(frame, bigC[0], bigC[1], (255, 0, 0), 2, 1)
            coordinate.append(bigC)
        message = {"coord": coordinate}
        message['msg'] = None
        return MessageItem(frame, message)


class Tracker(object):
    '''
    Tracker module: follows a specified target.
    '''
    def __init__(self, tracker_type="BOOSTING", draw_coord=True):
        '''
        Choose the tracker type.
        '''
        # OpenCV version
        (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
        self.tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
        self.tracker_type = tracker_type
        self.isWorking = False
        self.draw_coord = draw_coord
        # Build the tracker; OpenCV 3.0-3.2 only has the generic factory.
        if int(major_ver) == 3 and int(minor_ver) < 3:
            self.tracker = cv2.Tracker_create(tracker_type)
        else:
            if tracker_type == 'BOOSTING':
                self.tracker = cv2.TrackerBoosting_create()
            if tracker_type == 'MIL':
                self.tracker = cv2.TrackerMIL_create()
            if tracker_type == 'KCF':
                self.tracker = cv2.TrackerKCF_create()
            if tracker_type == 'TLD':
                self.tracker = cv2.TrackerTLD_create()
            if tracker_type == 'MEDIANFLOW':
                self.tracker = cv2.TrackerMedianFlow_create()
            if tracker_type == 'GOTURN':
                self.tracker = cv2.TrackerGOTURN_create()

    def initWorking(self, frame, box):
        '''
        Initialise tracking.
        frame: the first frame
        box: the region to track
        '''
        if not self.tracker:
            raise Exception("Tracker not initialised")
        status = self.tracker.init(frame, box)
        if not status:
            raise Exception("Failed to initialise tracking")
        self.coord = box
        self.isWorking = True

    def track(self, frame):
        '''
        Track the target in the given frame.
        '''
        message = None
        if self.isWorking:
            status, self.coord = self.tracker.update(frame)
            if status:
                message = {"coord": [((int(self.coord[0]), int(self.coord[1])),
                                      (int(self.coord[0] + self.coord[2]), int(self.coord[1] + self.coord[3])))]}
                if self.draw_coord:
                    p1 = (int(self.coord[0]), int(self.coord[1]))
                    p2 = (int(self.coord[0] + self.coord[2]), int(self.coord[1] + self.coord[3]))
                    cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                message['msg'] = "is tracking"
        return MessageItem(frame, message)


class ObjectTracker(object):
    def __init__(self, dataSet):
        self.cascade = cv2.CascadeClassifier(dataSet)

    def track(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.cascade.detectMultiScale(gray, 1.03, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        return frame


if __name__ == '__main__':
    # tracker_types = ['BOOSTING', 'GOTURN']
    tracker = Tracker(tracker_type="KCF")
    # video = cv2.VideoCapture(0)
    # video = cv2.VideoCapture("complex1.mov")
    video = cv2.VideoCapture(r"/Users/youxinlin/Desktop/video_data/complex1.MOV")
    ok, frame = video.read()
    bbox = cv2.selectROI(frame, False)
    tracker.initWorking(frame, bbox)
    while True:
        ok, frame = video.read()
        if not ok:
            break
        item = tracker.track(frame)
        cv2.imshow("track", item.getFrame())
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
The accompanying items.py, placed in the same folder:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:51:04 2020
@author: youxinlin
"""
import json

from utils import IoUtil

'''
Message wrapper.
'''


class MessageItem(object):
    # Wraps a frame together with its accompanying information.
    def __init__(self, frame, message):
        self._frame = frame
        self._message = message

    def getFrame(self):
        # The image.
        return self._frame

    def getMessage(self):
        # The textual information (a dict, serialisable to JSON).
        return self._message

    def getBase64Frame(self):
        # Return the image as a base64-encoded JPEG (BGR converted to RGB).
        jpeg = IoUtil.array_to_bytes(self._frame[..., ::-1])
        return IoUtil.bytes_to_base64(jpeg)

    def getBase64FrameByte(self):
        # Return the base64-encoded image as bytes.
        return bytes(self.getBase64Frame())

    def getJson(self):
        # Return the whole item as a JSON string.
        dicdata = {"frame": self.getBase64Frame().decode(), "message": self.getMessage()}
        return json.dumps(dicdata)

    def getBinaryFrame(self):
        return IoUtil.array_to_bytes(self._frame[..., ::-1])
utils.py, also placed in the same folder:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:51:40 2020
@author: youxinlin
"""
import time
import numpy
import base64
import os
import logging
import sys
import struct
from PIL import Image
from io import BytesIO


# Utility class
class IoUtil(object):
    # Stream helpers.

    @staticmethod
    def array_to_bytes(pic, formatter="jpeg", quality=70):
        '''
        Convert a numpy array into an encoded binary stream.
        :param pic: numpy array
        :param formatter: image format
        :param quality: compression quality; stronger compression gives a shorter stream
        :return:
        '''
        stream = BytesIO()
        picture = Image.fromarray(pic)
        picture.save(stream, format=formatter, quality=quality)
        jpeg = stream.getvalue()
        stream.close()
        return jpeg

    @staticmethod
    def bytes_to_base64(byte):
        '''
        Encode bytes as base64.
        :param byte:
        :return:
        '''
        return base64.b64encode(byte)

    @staticmethod
    def transport_rgb(frame):
        '''
        Convert a BGR image to RGB, or an RGB image to BGR.
        '''
        return frame[..., ::-1]

    @staticmethod
    def byte_to_package(byte, cmd, ver=1):
        '''
        Pack the binary data of one frame into a message packet.
        :param byte: binary payload
        :param cmd: command code
        :return:
        '''
        head = [ver, len(byte), cmd]
        headPack = struct.pack("!3I", *head)
        senddata = headPack + byte
        return senddata

    @staticmethod
    def mkdir(filePath):
        '''
        Create a directory if it does not exist.
        '''
        if not os.path.exists(filePath):
            os.mkdir(filePath)

    @staticmethod
    def countCenter(box):
        '''
        Centre point of a rectangle given as two corner points.
        '''
        return (int(abs(box[0][0] - box[1][0]) * 0.5) + box[0][0],
                int(abs(box[0][1] - box[1][1]) * 0.5) + box[0][1])

    @staticmethod
    def countBox(center):
        '''
        Convert two corner points into (x, y, w, h).
        '''
        return (center[0][0], center[0][1],
                center[1][0] - center[0][0], center[1][1] - center[0][1])

    @staticmethod
    def getimagefilename():
        return time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime()) + '.png'
Multi-target tracking:
Much the same as the single-target case, except that you create a MultiTracker_create() instance and add one tracker per target; a note on where this constructor lives in newer OpenCV builds follows the script.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 18:02:33 2020
Multi-target tracking
@author: youxinlin
"""
import numpy as np
import cv2
import sys

'''
if len(sys.argv) != 2:
    print('input video name is missing')
    exit()
'''

print('Select multiple tracking targets')

cv2.namedWindow("tracking")
camera = cv2.VideoCapture(r"/Users/youxinlin/Desktop/video_data/complex6.MOV")
# camera = cv2.VideoCapture(0)

tracker = cv2.MultiTracker_create()  # multi-target tracker
init_once = False

ok, image = camera.read()
if not ok:
    print('Failed to read video')
    exit()

bbox1 = cv2.selectROI('tracking', image)
bbox2 = cv2.selectROI('tracking', image)
bbox3 = cv2.selectROI('tracking', image)

while camera.isOpened():
    ok, image = camera.read()
    if not ok:
        print('no image to read')
        break

    if not init_once:
        ok = tracker.add(cv2.TrackerKCF_create(), image, bbox1)
        ok = tracker.add(cv2.TrackerKCF_create(), image, bbox2)
        ok = tracker.add(cv2.TrackerKCF_create(), image, bbox3)
        init_once = True

    ok, boxes = tracker.update(image)

    for newbox in boxes:
        p1 = (int(newbox[0]), int(newbox[1]))
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(image, p1, p2, (255, 0, 0))

    cv2.imshow('tracking', image)
    k = cv2.waitKey(1)
    if k == 27:
        break  # esc pressed
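A compatibility note of my own, not from the original post: in opencv-contrib-python 4.5.1 and later, MultiTracker_create and the matching KCF constructor were moved under cv2.legacy, and as far as I know the legacy multi-tracker only accepts legacy tracker objects. On newer builds the creation lines above would therefore look roughly like this:

    import cv2

    # Hypothetical adaptation for opencv-contrib-python >= 4.5.1; older builds keep
    # the top-level names used in the script above.
    tracker = cv2.legacy.MultiTracker_create()
    make_kcf = cv2.legacy.TrackerKCF_create   # pair the legacy KCF with the legacy multi-tracker
    # ...then, exactly as above:
    # tracker.add(make_kcf(), image, bbox1)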
Multi-scale KCF and KCF with custom features
In some scenarios you do not want to track with the default HOG features, or you want to compare how different features affect tracking. The wrapped OpenCV tracker does not expose the feature-extraction step, so in that case you have to implement the KCF pipeline yourself in order to plug in your own features; a stripped-down sketch of that pipeline is given below.
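The original post stops at this observation without code, so the following is my own illustrative sketch rather than the author's implementation: a heavily simplified single-patch, Gaussian-kernel KCF core in the spirit of Henriques et al., written only to show where a custom feature function plugs in. Every name in it (MiniKCF, gray_features, feature_fn, the hyper-parameters) is made up for this sketch, and target padding, sub-pixel peak refinement, HOG extraction and scale search are all omitted.

    import numpy as np
    import cv2


    def gray_features(patch):
        # Hypothetical default feature: zero-mean grayscale pixels. Swap in HOG,
        # Color Names, CNN activations, ... - anything that maps the patch to an
        # H x W (or H x W x C) float array on the same grid.
        g = cv2.cvtColor(patch, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255.0
        return g - g.mean()


    class MiniKCF(object):
        # Stripped-down Gaussian-kernel KCF core with a pluggable feature function.

        def __init__(self, feature_fn=gray_features, sigma=0.5, lam=1e-4, interp=0.02):
            self.feature_fn = feature_fn
            self.sigma = sigma      # Gaussian kernel bandwidth
            self.lam = lam          # ridge-regression regularisation
            self.interp = interp    # online model update rate

        def _features(self, frame, box):
            x, y, w, h = box
            feat = self.feature_fn(frame[y:y + h, x:x + w])
            if feat.ndim == 2:
                feat = feat[..., None]
            return feat * self.window[..., None]          # cosine window per channel

        def _kernel(self, xf, x, zf, z):
            # Gaussian kernel correlation of two windowed feature maps, evaluated
            # for all cyclic shifts at once via the FFT.
            c = np.real(np.fft.ifft2(np.sum(zf * np.conj(xf), axis=2)))
            d = (x ** 2).sum() + (z ** 2).sum() - 2.0 * c
            return np.exp(-np.maximum(d, 0) / (self.sigma ** 2 * x.size))

        def init(self, frame, box):
            x, y, w, h = [int(v) for v in box]
            self.box = (x, y, w, h)
            self.window = np.outer(np.hanning(h), np.hanning(w)).astype(np.float32)
            # Gaussian regression target whose peak is wrapped to index (0, 0).
            s = np.sqrt(w * h) / 10.0
            gy, gx = np.mgrid[0:h, 0:w]
            g = np.exp(-0.5 * ((gy - h // 2) ** 2 + (gx - w // 2) ** 2) / s ** 2)
            self.yf = np.fft.fft2(np.roll(g, (-(h // 2), -(w // 2)), axis=(0, 1)))
            self.x = self._features(frame, self.box)
            self.xf = np.fft.fft2(self.x, axes=(0, 1))
            k = self._kernel(self.xf, self.x, self.xf, self.x)
            self.alphaf = self.yf / (np.fft.fft2(k) + self.lam)

        def update(self, frame):
            x, y, w, h = self.box
            z = self._features(frame, self.box)
            zf = np.fft.fft2(z, axes=(0, 1))
            k = self._kernel(self.xf, self.x, zf, z)
            response = np.real(np.fft.ifft2(self.alphaf * np.fft.fft2(k)))
            dy, dx = np.unravel_index(np.argmax(response), response.shape)
            if dy > h // 2:            # wrap-around: large indices mean negative shifts
                dy -= h
            if dx > w // 2:
                dx -= w
            x = int(np.clip(x + dx, 0, frame.shape[1] - w))
            y = int(np.clip(y + dy, 0, frame.shape[0] - h))
            self.box = (x, y, w, h)
            # Re-train at the new position and blend into the running model.
            xn = self._features(frame, self.box)
            xnf = np.fft.fft2(xn, axes=(0, 1))
            kn = self._kernel(xnf, xn, xnf, xn)
            an = self.yf / (np.fft.fft2(kn) + self.lam)
            self.x = (1 - self.interp) * self.x + self.interp * xn
            self.xf = np.fft.fft2(self.x, axes=(0, 1))
            self.alphaf = (1 - self.interp) * self.alphaf + self.interp * an
            return True, self.box

To experiment with features, pass any function that maps a BGR patch to an H x W or H x W x C float array; the Fourier-domain training and detection steps never change, which is exactly what makes a hand-rolled KCF convenient for comparing features. Multi-scale detection can then be bolted on by evaluating the detection step on a few rescaled patches (for example 0.95x, 1.0x and 1.05x the current size) and keeping the scale whose response peak is highest, which is essentially the SAMF/DSST approach.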
Summary
That covers implementing single-target, multi-target, multi-scale and custom-feature KCF tracking in Python. I hope it helps; if you have any questions, leave a comment and I will reply as soon as I can. Thanks for your support, and if you found the article useful you are welcome to repost it with attribution.