diff --git a/StereoVidComfort.py b/StereoVidComfort.py
index 19ce69b..3631a96 100644
--- a/StereoVidComfort.py
+++ b/StereoVidComfort.py
@@ -6,6 +6,7 @@
 import os
 import sys
 from matplotlib import pyplot as plt
+
 '''
 TODO:
 0: read video √
@@ -22,31 +23,33 @@ def openVid():
         print("file doesn't exist!")
         fileName = input("video path:")
     cap = cv2.VideoCapture(fileName)
-    if not cap.isOpened():
-        print("Video cannot be opened.")
-        sys.exit()
-    else:
+    if cap.isOpened():
         return cap
+    else:
+        print("cannot open video.")
+        sys.exit()
+
 
 
 def getFrameCount(cap):
-    if not cap.isOpened():
-        print("Video cannot be opened.")
-        sys.exit()
-    else:
+    if cap.isOpened():
         return cap.get(cv2.CAP_PROP_FRAME_COUNT)
+    else:
+        print("cannot open video.")
+        sys.exit()
 
 
 def getFrameRate(cap):
-    if not cap.isOpened():
-        print("Video cannot be opened.")
-        sys.exit()
-    else:
+    if cap.isOpened():
         return cap.get(cv2.CAP_PROP_FPS)
+    else:
+        print("cannot open video.")
+        sys.exit()
 
 
 if __name__ == "__main__":
     cap = openVid()
+    isDemo = int(input("is Demo(0/1)?"))
 
     frameRate = getFrameRate(cap)
     frameCount = getFrameCount(cap)
@@ -55,50 +58,56 @@ if __name__ == "__main__":
         print("video read error.")
         sys.exit()
 
+    # split the frame into left and right views
     imgL = np.split(img, 2, 1)[0]
     imgR = np.split(img, 2, 1)[1]
 
-    prvs = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
-    hsv = np.zeros_like(imgR)
-    hsv[..., 1] = 255
+    prvs = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY) # grayscale right view of the previous frame, used for motion vector computation
+    hsv = np.zeros_like(imgR) # motion vectors displayed in HSV: hue (h) encodes motion direction, value (v) encodes displacement
+    hsv[..., 1] = 255 # saturation set to maximum
 
-    cap.set(cv2.CAP_PROP_POS_FRAMES,52000)
-
-    for frameID in range(int(cap.get(cv2.CAP_PROP_POS_FRAMES)), int(frameCount), int(frameRate/4)):
+    # sample 4 frames per second for the computation
+    for frameID in range(round(cap.get(cv2.CAP_PROP_POS_FRAMES)), round(frameCount), round(frameRate/4)):
         cap.set(cv2.CAP_PROP_POS_FRAMES, frameID)
         isSuccess, img = cap.read()
         if not isSuccess:
             print("video read error.")
             sys.exit()
 
+        # split the frame into left and right views
         imgL = np.split(img, 2, 1)[0]
         imgR = np.split(img, 2, 1)[1]
 
-        next = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
-        flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
-        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
-        hsv[..., 0] = ang*180/np.pi/2
-        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
-        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
-        cv2.namedWindow("img", cv2.WINDOW_NORMAL)
-        cv2.imshow('img', img)
-        cv2.namedWindow("MotionVector",cv2.WINDOW_NORMAL)
-        cv2.imshow("MotionVector",bgr)
-        prvs = next
-
+        next = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY) # grayscale right view of the current frame, used for motion vector computation
+        flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0) # optical flow between the two frames
+        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1]) # convert motion vectors from Cartesian to polar coordinates
+        hsv[..., 0] = ang*180/np.pi/2 # angle maps to hue
+        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX) # displacement magnitude maps to value
 
-        stereo = cv2.StereoSGBM_create(numDisparities=96, blockSize=11)
+        # compute the depth map
+        stereo = cv2.StereoSGBM_create(numDisparities=64, blockSize=3)
         disparity = stereo.compute(imgL, imgR)
 
-        print("time: ", frameID/frameRate)
-        print("AVG depth: ",np.mean(disparity))
-        print("AVG motion: ",np.mean(hsv[...,2]))
+        print("time: ", round(frameID/frameRate,2))
+        print("AVG depth: ",round(np.mean(disparity),2))
+        print("AVG motion: ",round(np.mean(hsv[...,2]),2))
         print()
 
-        cv2.waitKey(1)
-        #cv2.waitKey(1)
-        #plt.title("DepthMap")
-        #plt.imshow(disparity)
-        #plt.pause(0.5)
+
+        # in demo mode, display the current frame, the motion vector map and the depth map
+        if isDemo:
+            # show the current frame
+            cv2.namedWindow("img", cv2.WINDOW_NORMAL)
+            cv2.imshow('img', img)
+            # show the HSV representation of the current frame's motion vectors
+            bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) # convert HSV to BGR for display
+            cv2.namedWindow("MotionVector",cv2.WINDOW_NORMAL)
+            cv2.imshow("MotionVector",bgr)
 
+            # show the current frame's depth map
+            plt.title("DepthMap")
+            plt.imshow(disparity)
+            plt.pause(0.5)
+
+        prvs = next # the current frame becomes the previous frame for the next iteration
 
     print("success")
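For review purposes, the measurement step inside the loop can be exercised on its own. The sketch below is not part of the patch: measure, frame_a and frame_b are hypothetical names, and it only restates the Farneback optical-flow and StereoSGBM calls with the same parameters the patch uses, applied to a single pair of side-by-side stereo frames.

# Standalone sketch (not part of the patch). frame_a / frame_b are hypothetical
# names for two side-by-side stereo frames (BGR arrays) sampled ~0.25 s apart.
import cv2
import numpy as np

def measure(frame_a, frame_b):
    # split each side-by-side frame into left and right views
    _, prev_r = np.split(frame_a, 2, 1)
    cur_l, cur_r = np.split(frame_b, 2, 1)

    # motion: Farneback optical flow on the grayscale right views (same parameters as the patch)
    prev_gray = cv2.cvtColor(prev_r, cv2.COLOR_BGR2GRAY)
    cur_gray = cv2.cvtColor(cur_r, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(prev_gray, cur_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    mag, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    avg_motion = np.mean(cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX))

    # depth: SGBM disparity between the left and right views of the current frame
    stereo = cv2.StereoSGBM_create(numDisparities=64, blockSize=3)
    avg_depth = np.mean(stereo.compute(cur_l, cur_r))

    return avg_motion, avg_depth

Given the same two frames the loop reads, avg_depth and avg_motion should match the script's "AVG depth" / "AVG motion" output, up to the uint8 rounding introduced by storing the normalized magnitude in the hsv buffer.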