Question
Create RTSP stream based on OpenCV images in Python
My goal is to read frames from an RTSP server, do some OpenCV manipulation, and write the manipulated frames to a new RTSP server.
I tried the following, based on Write in Gstreamer pipeline from opencv in python, but I was unable to figure out what the appropriate gst-launch-1.0 arguments should be to create the RTSP server. Can anyone assist with the proper gst-launch-1.0 arguments? The ones I tried got stuck at "Pipeline is PREROLLING" (a reconstruction of one attempt is shown after the code below).
import cv2

cap = cv2.VideoCapture("rtsp://....")
framerate = 25.0

# NOTE: this requires OpenCV built with GStreamer support; the pipeline
# string below is handed to the GStreamer backend of VideoWriter.
out = cv2.VideoWriter('appsrc ! videoconvert ! '
                      'x264enc noise-reduction=10000 speed-preset=ultrafast tune=zerolatency ! '
                      'rtph264pay config-interval=1 pt=96 ! '
                      'tcpserversink host=192.168.1.27 port=5000 sync=false',
                      0, framerate, (640, 480))

counter = 0
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        out.write(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
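For completeness, the receiving side I tried was a gst-launch-1.0 command roughly like the one below (a reconstruction; the host and port are taken from the tcpserversink above, and the exact RTP caps are my best guess). This is where it got stuck at "Pipeline is PREROLLING":

gst-launch-1.0 -v tcpclientsrc host=192.168.1.27 port=5000 \
    ! "application/x-rtp, media=video, clock-rate=90000, encoding-name=H264, payload=96" \
    ! rtph264depay ! avdec_h264 ! videoconvert ! autovideosink sync=false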
I also tried another solution, based on Write opencv frames into gstreamer rtsp server pipeline:
import cv2
import gi

gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GObject

class SensorFactory(GstRtspServer.RTSPMediaFactory):
    def __init__(self, **properties):
        super(SensorFactory, self).__init__(**properties)
        #self.cap = cv2.VideoCapture(0)
        self.cap = cv2.VideoCapture("rtsp://....")
        self.number_frames = 0
        self.fps = 30
        self.duration = 1 / self.fps * Gst.SECOND  # duration of a frame in nanoseconds
        self.launch_string = 'appsrc name=source is-live=true block=true format=GST_FORMAT_TIME ' \
                             'caps=video/x-raw,format=BGR,width=640,height=480,framerate={}/1 ' \
                             '! videoconvert ! video/x-raw,format=I420 ' \
                             '! x264enc speed-preset=ultrafast tune=zerolatency ' \
                             '! rtph264pay config-interval=1 name=pay0 pt=96'.format(self.fps)

    def on_need_data(self, src, length):
        # Called by the appsrc whenever it wants another buffer.
        if self.cap.isOpened():
            ret, frame = self.cap.read()
            if ret:
                data = frame.tostring()
                buf = Gst.Buffer.new_allocate(None, len(data), None)
                buf.fill(0, data)
                buf.duration = self.duration
                timestamp = self.number_frames * self.duration
                buf.pts = buf.dts = int(timestamp)
                buf.offset = timestamp
                self.number_frames += 1
                retval = src.emit('push-buffer', buf)
                print('pushed buffer, frame {}, duration {} ns, duration {} s'.format(
                    self.number_frames, self.duration, self.duration / Gst.SECOND))
                if retval != Gst.FlowReturn.OK:
                    print(retval)

    def do_create_element(self, url):
        return Gst.parse_launch(self.launch_string)

    def do_configure(self, rtsp_media):
        self.number_frames = 0
        appsrc = rtsp_media.get_element().get_child_by_name('source')
        appsrc.connect('need-data', self.on_need_data)

class GstServer(GstRtspServer.RTSPServer):
    def __init__(self, **properties):
        super(GstServer, self).__init__(**properties)
        self.factory = SensorFactory()
        self.factory.set_shared(True)
        self.get_mount_points().add_factory("/test", self.factory)
        self.attach(None)

GObject.threads_init()
Gst.init(None)

server = GstServer()
loop = GObject.MainLoop()
loop.run()
This solution creates the RTSP server and streams to it. I can open the resulting RTSP stream in VLC, but it keeps displaying the first frame and does not update with new frames. Does anyone know why?
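For reference, I open the stream in VLC with a command like the following (assuming the server runs on 192.168.1.27 and GstRtspServer's default port 8554; the /test mount point is set in the code above):

vlc rtsp://192.168.1.27:8554/test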
I'm looking for any solution that will enable me, with low latency, to read frames from an RTSP server into an OpenCV format, manipulate the frames, and output them to a new RTSP server (which I also need to create). If something better exists, the solution does not need to be based on GStreamer.
I am using Ubuntu 16.04 with Python 2.7 and OpenCV 3.4.1.