我可以在 python 中将视频逐帧流式传输到 OpenGL Texture2D OK(pi3d 模块,例如 pi3d_demos/VideoWalk.py),但我注意到它逐渐泄漏内存。下面是显示问题的代码的精简版本。
有人能看到我漏水的地方吗?当 python 停止时,内存似乎会恢复。我尝试过将其明确设置为
None
或手动调用垃圾收集器。
#!/usr/bin/python
import os
import numpy as np
import subprocess
import threading
import time
import json
def get_dimensions(video_path):
probe_cmd = f'ffprobe -v error -show_entries stream=width,height,avg_frame_rate -of json "{video_path}"'
probe_result = subprocess.check_output(probe_cmd, shell=True, text=True)
video_info_list = [vinfo for vinfo in json.loads(probe_result)['streams'] if 'width' in vinfo]
if len(video_info_list) > 0:
video_info = video_info_list[0] # use first if more than one!
return(video_info['width'], video_info['height'])
else:
return None
class VideoStreamer:
def __init__(self, video_path):
self.flag = False # use to signal new texture
self.kill_thread = False
self.command = [ 'ffmpeg', '-i', video_path, '-f', 'image2pipe',
'-pix_fmt', 'rgb24', '-vcodec', 'rawvideo', '-']
dimensions = get_dimensions(video_path)
if dimensions is not None:
(self.W, self.H) = dimensions
self.P = 3
self.image = np.zeros((self.H, self.W, self.P), dtype='uint8')
self.t = threading.Thread(target=self.pipe_thread)
self.t.start()
else: # couldn't get dimensions for some reason - assume not able to read video
self.W = 240
self.H = 180
self.P = 3
self.image = np.zeros((self.H, self.W, self.P), dtype='uint8')
self.t = None
def pipe_thread(self):
pipe = None
while not self.kill_thread:
st_tm = time.time()
if pipe is None:
pipe = subprocess.Popen(self.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
self.image = np.frombuffer(pipe.stdout.read(self.H * self.W * self.P), dtype='uint8') # overwrite array
pipe.stdout.flush() # presumably nothing else has arrived since read()
pipe.stderr.flush() # ffmpeg sends commentary to stderr
if len(self.image) < self.H * self.W * self.P: # end of video, reload
pipe.terminate()
pipe = None
else:
self.image.shape = (self.H, self.W, self.P)
self.flag = True
step = time.time() - st_tm
time.sleep(max(0.04 - step, 0.0)) # adding fps info to ffmpeg doesn't seem to have any effect
if pipe is not None:
pipe.terminate()
pipe = None
def kill(self):
self.kill_thread = True
if self.t is not None:
self.t.join()
vs = None
try:
while True:
for (path, _, videos) in os.walk("/home/patrick/Pictures/videos"):
for video in videos:
print(video)
os.system("free") # shows gradually declining memory available
vs = VideoStreamer(os.path.join(path, video))
for i in range(500):
tries = 0
while not vs.flag and tries < 5:
time.sleep(0.001)
tries += 1
# at this point vs.image is a numpy array HxWxP bytes
vs.flag = False
vs.kill()
except KeyboardInterrupt:
if vs is not None:
vs.kill()
os.system("free")
好吧,有两个因素让我感到困惑。第一个是 Wayland 在 Raspberry Pi 上(至少)的书虫上使用的
pcmanfm
中的一个错误,该错误挂在图像数据上。
第二个是由于释放流句柄的时间造成的,后者可以通过使用上下文管理器来修复(即
with Popen(...stdout=PIPE) as pipe:
可能应该总是这样做,但通常不会明确显示 subprocess.Popen
。所以如上所述,但是螺纹部分:
def pipe_thread(self):
while not self.kill_thread:
with subprocess.Popen(self.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1) as pipe:
while pipe.poll() is None and not self.kill_thread:
st_tm = time.time()
self.flag = False
self.image = np.frombuffer(pipe.stdout.read(self.H * self.W * self.P), dtype='uint8') # overwrite array
self.image.shape = (self.H, self.W, self.P)
self.flag = True
step = time.time() - st_tm
time.sleep(max(0.04 - step, 0.0)) # adding fps info to ffmpeg doesn't seem to have any effect