François Laignel 2019-12-02 10:30:07 +01:00
parent bdadf25f5c
commit e8f5191ee7
24 changed files with 6782 additions and 3221 deletions

View file

@ -20,11 +20,12 @@ gst-check = { package = "gstreamer-check", git = "https://gitlab.freedesktop.org
gst-net = { package = "gstreamer-net", git = "https://gitlab.freedesktop.org/gstreamer/gstreamer-rs" }
gst-rtp = { package = "gstreamer-rtp", git = "https://gitlab.freedesktop.org/gstreamer/gstreamer-rs" }
gstreamer-sys = { git = "https://gitlab.freedesktop.org/gstreamer/gstreamer-rs-sys" }
pin-project = "0.4"
tokio = "=0.2.0-alpha.6"
tokio-executor = { version = "=0.2.0-alpha.6", features = ["current-thread"] }
tokio-net = { version = "=0.2.0-alpha.6", features = ["tcp", "udp"] }
tokio-timer = "=0.3.0-alpha.6"
futures-preview = "0.3.0-alpha.19"
futures = "0.3"
lazy_static = "1.0"
either = "1.0"
rand = "0.7"

View file

@ -18,6 +18,8 @@
use either::Either;
use futures::channel::mpsc;
use futures::future::BoxFuture;
use futures::lock::{Mutex, MutexGuard};
use futures::prelude::*;
use glib;
@ -30,15 +32,19 @@ use gst;
use gst::prelude::*;
use gst::subclass::prelude::*;
use gst::{gst_debug, gst_element_error, gst_error, gst_error_msg, gst_log, gst_trace};
use gst::{EventView, QueryView};
use lazy_static::lazy_static;
use rand;
use std::sync::Mutex;
use std::convert::TryInto;
use std::sync::Arc;
use std::u32;
use super::iocontext::*;
use crate::block_on;
use crate::runtime::prelude::*;
use crate::runtime::{Context, PadSrc, PadSrcRef};
const DEFAULT_CONTEXT: &str = "";
const DEFAULT_CONTEXT_WAIT: u32 = 0;
@ -119,34 +125,6 @@ static PROPERTIES: [subclass::Property; 5] = [
}),
];
struct State {
io_context: Option<IOContext>,
pending_future_id: Option<PendingFutureId>,
channel: Option<mpsc::Sender<Either<gst::Buffer, gst::Event>>>,
pending_future_abort_handle: Option<future::AbortHandle>,
need_initial_events: bool,
configured_caps: Option<gst::Caps>,
}
impl Default for State {
fn default() -> State {
State {
io_context: None,
pending_future_id: None,
channel: None,
pending_future_abort_handle: None,
need_initial_events: true,
configured_caps: None,
}
}
}
struct AppSrc {
src_pad: gst::Pad,
state: Mutex<State>,
settings: Mutex<Settings>,
}
lazy_static! {
static ref CAT: gst::DebugCategory = gst::DebugCategory::new(
"ts-appsrc",
@ -155,32 +133,154 @@ lazy_static! {
);
}
impl AppSrc {
fn create_io_context_event(state: &State) -> Option<gst::Event> {
if let (&Some(ref pending_future_id), &Some(ref io_context)) =
(&state.pending_future_id, &state.io_context)
{
let s = gst::Structure::new(
"ts-io-context",
&[
("io-context", &io_context),
("pending-future-id", &*pending_future_id),
],
);
Some(gst::Event::new_custom_downstream_sticky(s).build())
} else {
None
#[derive(Debug)]
enum StreamItem {
Buffer(gst::Buffer),
Event(gst::Event),
}
#[derive(Debug)]
struct AppSrcPadHandlerInner {
need_initial_events: bool,
configured_caps: Option<gst::Caps>,
}
impl Default for AppSrcPadHandlerInner {
fn default() -> Self {
AppSrcPadHandlerInner {
need_initial_events: true,
configured_caps: None,
}
}
}
fn src_event(&self, pad: &gst::Pad, element: &gst::Element, event: gst::Event) -> bool {
use gst::EventView;
#[derive(Clone, Debug)]
struct AppSrcPadHandler(Arc<Mutex<AppSrcPadHandlerInner>>);
gst_log!(CAT, obj: pad, "Handling event {:?}", event);
impl AppSrcPadHandler {
fn new() -> Self {
AppSrcPadHandler(Arc::new(Mutex::new(AppSrcPadHandlerInner::default())))
}
#[inline]
async fn lock(&self) -> MutexGuard<'_, AppSrcPadHandlerInner> {
self.0.lock().await
}
async fn start_task(
&self,
pad: PadSrcRef<'_>,
element: &gst::Element,
receiver: mpsc::Receiver<StreamItem>,
) {
let this = self.clone();
let pad_weak = pad.downgrade();
let element = element.clone();
let receiver = Arc::new(Mutex::new(receiver));
pad.start_task(move || {
let this = this.clone();
let pad_weak = pad_weak.clone();
let element = element.clone();
let receiver = Arc::clone(&receiver);
async move {
let item = receiver.lock().await.next().await;
let pad = pad_weak.upgrade().expect("PadSrc no longer exists");
let item = match item {
Some(item) => item,
None => {
gst_log!(CAT, obj: pad.gst_pad(), "SrcPad channel aborted");
pad.pause_task().await;
return;
}
};
this.push_item(pad, &element, item).await;
}
})
.await;
}
async fn push_item(self, pad: PadSrcRef<'_>, element: &gst::Element, item: StreamItem) {
// Don't keep the `events` in scope so as to reduce the `Future`'s size
{
let mut events = Vec::new();
{
let mut inner = self.lock().await;
if inner.need_initial_events {
gst_debug!(CAT, obj: pad.gst_pad(), "Pushing initial events");
let stream_id =
format!("{:08x}{:08x}", rand::random::<u32>(), rand::random::<u32>());
events.push(
gst::Event::new_stream_start(&stream_id)
.group_id(gst::util_group_id_next())
.build(),
);
let appsrc = AppSrc::from_instance(element);
if let Some(ref caps) = appsrc.settings.lock().await.caps {
events.push(gst::Event::new_caps(&caps).build());
inner.configured_caps = Some(caps.clone());
}
events.push(
gst::Event::new_segment(&gst::FormattedSegment::<gst::format::Time>::new())
.build(),
);
inner.need_initial_events = false;
}
}
for event in events {
pad.push_event(event).await;
}
}
let res = match item {
StreamItem::Buffer(buffer) => {
gst_log!(CAT, obj: pad.gst_pad(), "Forwarding buffer {:?}", buffer);
pad.push(buffer).await
}
StreamItem::Event(event) => {
gst_log!(CAT, obj: pad.gst_pad(), "Forwarding event {:?}", event);
pad.push_event(event).await;
Ok(gst::FlowSuccess::Ok)
}
};
match res {
Ok(_) => gst_log!(CAT, obj: pad.gst_pad(), "Successfully pushed item"),
Err(gst::FlowError::Eos) => gst_debug!(CAT, obj: pad.gst_pad(), "EOS"),
Err(gst::FlowError::Flushing) => gst_debug!(CAT, obj: pad.gst_pad(), "Flushing"),
Err(err) => {
gst_error!(CAT, obj: pad.gst_pad(), "Got error {}", err);
gst_element_error!(
&element,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
}
}
}
}
impl PadSrcHandler for AppSrcPadHandler {
type ElementImpl = AppSrc;
fn src_event(
&self,
pad: PadSrcRef,
app_src: &AppSrc,
element: &gst::Element,
event: gst::Event,
) -> Either<bool, BoxFuture<'static, bool>> {
gst_log!(CAT, obj: pad.gst_pad(), "Handling event {:?}", event);
let ret = match event.view() {
EventView::FlushStart(..) => {
let _ = self.stop(element);
let _ = block_on!(app_src.pause(element));
true
}
EventView::FlushStop(..) => {
@ -188,7 +288,7 @@ impl AppSrc {
if res == Ok(gst::StateChangeSuccess::Success) && state == gst::State::Playing
|| res == Ok(gst::StateChangeSuccess::Async) && pending == gst::State::Playing
{
let _ = self.start(element);
let _ = block_on!(app_src.start(element));
}
true
}
@ -198,23 +298,22 @@ impl AppSrc {
};
if ret {
gst_log!(CAT, obj: pad, "Handled event {:?}", event);
gst_log!(CAT, obj: pad.gst_pad(), "Handled event {:?}", event);
} else {
gst_log!(CAT, obj: pad, "Didn't handle event {:?}", event);
gst_log!(CAT, obj: pad.gst_pad(), "Didn't handle event {:?}", event);
}
ret
Either::Left(ret)
}
fn src_query(
&self,
pad: &gst::Pad,
pad: PadSrcRef,
_app_src: &AppSrc,
_element: &gst::Element,
query: &mut gst::QueryRef,
) -> bool {
use gst::QueryView;
gst_log!(CAT, obj: pad, "Handling query {:?}", query);
gst_log!(CAT, obj: pad.gst_pad(), "Handling query {:?}", query);
let ret = match query.view_mut() {
QueryView::Latency(ref mut q) => {
q.set(true, 0.into(), 0.into());
@ -226,8 +325,8 @@ impl AppSrc {
true
}
QueryView::Caps(ref mut q) => {
let state = self.state.lock().unwrap();
let caps = if let Some(ref caps) = state.configured_caps {
let inner = block_on!(self.lock());
let caps = if let Some(ref caps) = inner.configured_caps {
q.get_filter()
.map(|f| f.intersect_with_mode(caps, gst::CapsIntersectMode::First))
.unwrap_or_else(|| caps.clone())
@ -245,17 +344,34 @@ impl AppSrc {
};
if ret {
gst_log!(CAT, obj: pad, "Handled query {:?}", query);
gst_log!(CAT, obj: pad.gst_pad(), "Handled query {:?}", query);
} else {
gst_log!(CAT, obj: pad, "Didn't handle query {:?}", query);
gst_log!(CAT, obj: pad.gst_pad(), "Didn't handle query {:?}", query);
}
ret
}
}
fn push_buffer(&self, element: &gst::Element, mut buffer: gst::Buffer) -> bool {
let settings = self.settings.lock().unwrap().clone();
struct State {
sender: Option<mpsc::Sender<StreamItem>>,
}
if settings.do_timestamp {
impl Default for State {
fn default() -> Self {
State { sender: None }
}
}
struct AppSrc {
src_pad: PadSrc,
src_pad_handler: AppSrcPadHandler,
state: Mutex<State>,
settings: Mutex<Settings>,
}
impl AppSrc {
async fn push_buffer(&self, element: &gst::Element, mut buffer: gst::Buffer) -> bool {
if self.settings.lock().await.do_timestamp {
if let Some(clock) = element.get_clock() {
let base_time = element.get_base_time();
let now = clock.get_time();
@ -269,229 +385,111 @@ impl AppSrc {
}
}
let mut state = self.state.lock().unwrap();
if let Some(ref mut channel) = state.channel {
match channel.try_send(Either::Left(buffer)) {
Ok(_) => true,
Err(err) => {
gst_error!(CAT, obj: element, "Failed to queue buffer: {}", err);
false
}
}
} else {
false
}
}
fn end_of_stream(&self, element: &gst::Element) -> bool {
let mut state = self.state.lock().unwrap();
if let Some(ref mut channel) = state.channel {
match channel.try_send(Either::Right(gst::Event::new_eos().build())) {
Ok(_) => true,
Err(err) => {
gst_error!(CAT, obj: element, "Failed to queue EOS: {}", err);
false
}
}
} else {
false
}
}
async fn push_item(
element: gst::Element,
item: Either<gst::Buffer, gst::Event>,
) -> Result<(), gst::FlowError> {
let appsrc = Self::from_instance(&element);
let mut events = Vec::new();
{
let mut state = appsrc.state.lock().unwrap();
if state.need_initial_events {
gst_debug!(CAT, obj: &element, "Pushing initial events");
let stream_id =
format!("{:08x}{:08x}", rand::random::<u32>(), rand::random::<u32>());
events.push(
gst::Event::new_stream_start(&stream_id)
.group_id(gst::util_group_id_next())
.build(),
);
if let Some(ref caps) = appsrc.settings.lock().unwrap().caps {
events.push(gst::Event::new_caps(&caps).build());
state.configured_caps = Some(caps.clone());
}
events.push(
gst::Event::new_segment(&gst::FormattedSegment::<gst::format::Time>::new())
.build(),
);
if let Some(event) = Self::create_io_context_event(&state) {
events.push(event);
// Get rid of reconfigure flag
appsrc.src_pad.check_reconfigure();
}
state.need_initial_events = false;
} else if appsrc.src_pad.check_reconfigure() {
if let Some(event) = Self::create_io_context_event(&state) {
events.push(event);
}
}
}
for event in events {
appsrc.src_pad.push_event(event);
}
let res = match item {
Either::Left(buffer) => {
gst_log!(CAT, obj: &element, "Forwarding buffer {:?}", buffer);
appsrc.src_pad.push(buffer).map(|_| ())
}
Either::Right(event) => {
gst_log!(CAT, obj: &element, "Forwarding event {:?}", event);
appsrc.src_pad.push_event(event);
Ok(())
}
let mut state = self.state.lock().await;
let sender = match state.sender.as_mut() {
Some(sender) => sender,
None => return false,
};
match res {
Ok(()) => gst_log!(CAT, obj: &element, "Successfully pushed item"),
Err(gst::FlowError::Eos) => gst_debug!(CAT, obj: &element, "EOS"),
Err(gst::FlowError::Flushing) => gst_debug!(CAT, obj: &element, "Flushing"),
match sender.send(StreamItem::Buffer(buffer)).await {
Ok(_) => true,
Err(err) => {
gst_error!(CAT, obj: &element, "Got error {}", err);
gst_element_error!(
&element,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
gst_error!(CAT, obj: element, "Failed to queue buffer: {}", err);
false
}
}
res?;
let abortable_drain = {
let mut state = appsrc.state.lock().unwrap();
if let State {
io_context: Some(ref io_context),
pending_future_id: Some(ref pending_future_id),
ref mut pending_future_abort_handle,
..
} = *state
{
let (abort_handle, abortable_drain) =
io_context.drain_pending_futures(*pending_future_id);
*pending_future_abort_handle = abort_handle;
abortable_drain
} else {
return Ok(());
}
};
abortable_drain.await
}
fn prepare(&self, element: &gst::Element) -> Result<(), gst::ErrorMessage> {
async fn end_of_stream(&self, element: &gst::Element) -> bool {
let mut state = self.state.lock().await;
let sender = match state.sender.as_mut() {
Some(sender) => sender,
None => return false,
};
let eos = StreamItem::Event(gst::Event::new_eos().build());
match sender.send(eos).await {
Ok(_) => true,
Err(err) => {
gst_error!(CAT, obj: element, "Failed to queue EOS: {}", err);
false
}
}
}
async fn prepare(&self, element: &gst::Element) -> Result<(), gst::ErrorMessage> {
let _state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Preparing");
let settings = self.settings.lock().unwrap().clone();
let settings = self.settings.lock().await;
let mut state = self.state.lock().unwrap();
let io_context =
IOContext::new(&settings.context, settings.context_wait).map_err(|err| {
let context =
Context::acquire(&settings.context, settings.context_wait).map_err(|err| {
gst_error_msg!(
gst::ResourceError::OpenRead,
["Failed to create IO context: {}", err]
["Failed to acquire Context: {}", err]
)
})?;
let pending_future_id = io_context.acquire_pending_future_id();
gst_debug!(
CAT,
obj: element,
"Got pending future id {:?}",
pending_future_id
);
state.io_context = Some(io_context);
state.pending_future_id = Some(pending_future_id);
self.src_pad
.prepare(context, &self.src_pad_handler)
.await
.map_err(|err| {
gst_error_msg!(
gst::ResourceError::OpenRead,
["Error preparing src_pads: {:?}", err]
)
})?;
gst_debug!(CAT, obj: element, "Prepared");
Ok(())
}
fn unprepare(&self, element: &gst::Element) -> Result<(), ()> {
async fn unprepare(&self, element: &gst::Element) -> Result<(), ()> {
let _state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Unpreparing");
// FIXME: The IO Context has to be alive longer than the other parts
// of the state. Otherwise a deadlock can happen between shutting down
// the IO context (thread join while the state lock is held) and stuff
// happening on the IO context (which might take the state lock).
let io_context = {
let mut state = self.state.lock().unwrap();
if let (&Some(ref pending_future_id), &Some(ref io_context)) =
(&state.pending_future_id, &state.io_context)
{
io_context.release_pending_future_id(*pending_future_id);
}
let io_context = state.io_context.take();
*state = State::default();
io_context
};
drop(io_context);
self.src_pad.stop_task().await;
let _ = self.src_pad.unprepare().await;
self.src_pad_handler.lock().await.configured_caps = None;
gst_debug!(CAT, obj: element, "Unprepared");
Ok(())
}
fn start(&self, element: &gst::Element) -> Result<(), ()> {
async fn start(&self, element: &gst::Element) -> Result<(), ()> {
let mut state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Starting");
let settings = self.settings.lock().unwrap().clone();
let mut state = self.state.lock().unwrap();
let State {
ref io_context,
ref mut channel,
..
} = *state;
let max_buffers = self.settings.lock().await.max_buffers.try_into().unwrap();
let (sender, receiver) = mpsc::channel(max_buffers);
state.sender = Some(sender);
let io_context = io_context.as_ref().unwrap();
self.src_pad_handler
.start_task(self.src_pad.as_ref(), element, receiver)
.await;
let (channel_sender, channel_receiver) = mpsc::channel(settings.max_buffers as usize);
let element_clone = element.clone();
let future = channel_receiver
.for_each(move |item| Self::push_item(element_clone.clone(), item).map(|_| ()));
io_context.spawn(future);
*channel = Some(channel_sender);
gst_debug!(CAT, obj: element, "Started");
Ok(())
}
fn stop(&self, element: &gst::Element) -> Result<(), ()> {
gst_debug!(CAT, obj: element, "Stopping");
let mut state = self.state.lock().unwrap();
async fn pause(&self, element: &gst::Element) -> Result<(), ()> {
let pause_completion = {
let mut state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Pausing");
let _ = state.channel.take();
let pause_completion = self.src_pad.pause_task().await;
// Prevent subsequent items from being enqueued
state.sender = None;
if let Some(abort_handle) = state.pending_future_abort_handle.take() {
abort_handle.abort();
}
pause_completion
};
gst_debug!(CAT, obj: element, "Stopped");
gst_debug!(CAT, obj: element, "Waiting for Task Pause to complete");
pause_completion.await;
gst_debug!(CAT, obj: element, "Paused");
Ok(())
}
@ -542,7 +540,7 @@ impl ObjectSubclass for AppSrc {
.expect("missing signal arg");
let appsrc = Self::from_instance(&element);
Some(appsrc.push_buffer(&element, buffer).to_value())
Some(block_on!(appsrc.push_buffer(&element, buffer)).to_value())
},
);
@ -557,32 +555,18 @@ impl ObjectSubclass for AppSrc {
.expect("signal arg")
.expect("missing signal arg");
let appsrc = Self::from_instance(&element);
Some(appsrc.end_of_stream(&element).to_value())
Some(block_on!(appsrc.end_of_stream(&element)).to_value())
},
);
}
fn new_with_class(klass: &subclass::simple::ClassStruct<Self>) -> Self {
let templ = klass.get_pad_template("src").unwrap();
let src_pad = gst::Pad::new_from_template(&templ, Some("src"));
src_pad.set_event_function(|pad, parent, event| {
AppSrc::catch_panic_pad_function(
parent,
|| false,
|queue, element| queue.src_event(pad, element, event),
)
});
src_pad.set_query_function(|pad, parent, query| {
AppSrc::catch_panic_pad_function(
parent,
|| false,
|queue, element| queue.src_query(pad, element, query),
)
});
let src_pad = PadSrc::new_from_template(&templ, Some("src"));
Self {
src_pad,
src_pad_handler: AppSrcPadHandler::new(),
state: Mutex::new(State::default()),
settings: Mutex::new(Settings::default()),
}
@ -597,26 +581,26 @@ impl ObjectImpl for AppSrc {
match *prop {
subclass::Property("context", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.context = value
.get()
.expect("type checked upstream")
.unwrap_or_else(|| "".into());
}
subclass::Property("context-wait", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.context_wait = value.get_some().expect("type checked upstream");
}
subclass::Property("caps", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.caps = value.get().expect("type checked upstream");
}
subclass::Property("max-buffers", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.max_buffers = value.get_some().expect("type checked upstream");
}
subclass::Property("do-timestamp", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.do_timestamp = value.get_some().expect("type checked upstream");
}
_ => unimplemented!(),
@ -628,23 +612,23 @@ impl ObjectImpl for AppSrc {
match *prop {
subclass::Property("context", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.context.to_value())
}
subclass::Property("context-wait", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.context_wait.to_value())
}
subclass::Property("caps", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.caps.to_value())
}
subclass::Property("max-buffers", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.max_buffers.to_value())
}
subclass::Property("do-timestamp", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.do_timestamp.to_value())
}
_ => unimplemented!(),
@ -655,7 +639,7 @@ impl ObjectImpl for AppSrc {
self.parent_constructed(obj);
let element = obj.downcast_ref::<gst::Element>().unwrap();
element.add_pad(&self.src_pad).unwrap();
element.add_pad(self.src_pad.gst_pad()).unwrap();
super::set_element_flags(element, gst::ElementFlags::SOURCE);
}
@ -671,16 +655,16 @@ impl ElementImpl for AppSrc {
match transition {
gst::StateChange::NullToReady => {
self.prepare(element).map_err(|err| {
block_on!(self.prepare(element)).map_err(|err| {
element.post_error_message(&err);
gst::StateChangeError
})?;
}
gst::StateChange::PlayingToPaused => {
self.stop(element).map_err(|_| gst::StateChangeError)?;
block_on!(self.pause(element)).map_err(|_| gst::StateChangeError)?;
}
gst::StateChange::ReadyToNull => {
self.unprepare(element).map_err(|_| gst::StateChangeError)?;
block_on!(self.unprepare(element)).map_err(|_| gst::StateChangeError)?;
}
_ => (),
}
@ -692,11 +676,12 @@ impl ElementImpl for AppSrc {
success = gst::StateChangeSuccess::NoPreroll;
}
gst::StateChange::PausedToPlaying => {
self.start(element).map_err(|_| gst::StateChangeError)?;
block_on!(self.start(element)).map_err(|_| gst::StateChangeError)?;
}
gst::StateChange::PausedToReady => {
let mut state = self.state.lock().unwrap();
state.need_initial_events = true;
block_on!(async {
self.src_pad_handler.lock().await.need_initial_events = true;
});
}
_ => (),
}

View file

@ -15,8 +15,8 @@
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
use futures::channel::oneshot;
use futures::prelude::*;
use futures::future::{self, abortable, AbortHandle};
use futures::lock::Mutex;
use gst;
use gst::gst_debug;
@ -25,15 +25,9 @@ use gst::prelude::*;
use lazy_static::lazy_static;
use std::collections::VecDeque;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
use std::sync::Arc;
use std::{u32, u64};
use tokio_executor::current_thread as tokio_current_thread;
use super::iocontext::*;
lazy_static! {
static ref DATA_QUEUE_CAT: gst::DebugCategory = gst::DebugCategory::new(
"ts-dataqueue",
@ -74,17 +68,18 @@ impl DataQueueItem {
#[derive(PartialEq, Eq, Debug)]
enum DataQueueState {
Unscheduled,
Scheduled,
Running,
Shutdown,
Paused,
Started,
Stopped,
}
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct DataQueue(Arc<Mutex<DataQueueInner>>);
#[derive(Debug)]
struct DataQueueInner {
element: gst::Element,
src_pad: gst::Pad,
state: DataQueueState,
queue: VecDeque<DataQueueItem>,
@ -95,157 +90,80 @@ struct DataQueueInner {
max_size_bytes: Option<u32>,
max_size_time: Option<u64>,
waker: Option<task::Waker>,
shutdown_receiver: Option<oneshot::Receiver<()>>,
pending_handle: Option<AbortHandle>,
}
impl DataQueueInner {
fn wake(&mut self) {
if let Some(pending_handle) = self.pending_handle.take() {
pending_handle.abort();
}
}
}
impl DataQueue {
pub fn new(
element: &gst::Element,
src_pad: &gst::Pad,
max_size_buffers: Option<u32>,
max_size_bytes: Option<u32>,
max_size_time: Option<u64>,
) -> DataQueue {
DataQueue(Arc::new(Mutex::new(DataQueueInner {
element: element.clone(),
state: DataQueueState::Unscheduled,
src_pad: src_pad.clone(),
state: DataQueueState::Stopped,
queue: VecDeque::new(),
cur_size_buffers: 0,
cur_size_bytes: 0,
max_size_buffers,
max_size_bytes,
max_size_time,
waker: None,
shutdown_receiver: None,
pending_handle: None,
})))
}
pub fn schedule<F, G, Fut>(
&self,
io_context: &IOContext,
func: F,
err_func: G,
) -> Result<(), ()>
where
F: Fn(DataQueueItem) -> Fut + Send + 'static,
Fut: Future<Output = Result<(), gst::FlowError>> + Send + 'static,
G: FnOnce(gst::FlowError) + Send + 'static,
{
// Ready->Paused
//
// Need to wait for a possible shutdown to finish first
// spawn() on the reactor, change state to Scheduled
let mut inner = self.0.lock().unwrap();
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Scheduling data queue");
if inner.state == DataQueueState::Scheduled {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue already scheduled");
return Ok(());
}
assert_eq!(inner.state, DataQueueState::Unscheduled);
inner.state = DataQueueState::Scheduled;
let (sender, receiver) = oneshot::channel();
inner.shutdown_receiver = Some(receiver);
let queue_clone = self.clone();
let element_clone = inner.element.clone();
io_context.spawn(queue_clone.try_for_each(func).then(move |res| {
gst_debug!(
DATA_QUEUE_CAT,
obj: &element_clone,
"Data queue finished: {:?}",
res
);
if let Err(err) = res {
err_func(err);
}
let _ = sender.send(());
future::ready(())
}));
Ok(())
}
pub fn unpause(&self) {
// Paused->Playing
//
// Change state to Running and signal task
let mut inner = self.0.lock().unwrap();
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Unpausing data queue");
if inner.state == DataQueueState::Running {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue already unpaused");
pub async fn start(&self) {
let mut inner = self.0.lock().await;
if inner.state == DataQueueState::Started {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue already Started");
return;
}
assert_eq!(inner.state, DataQueueState::Scheduled);
inner.state = DataQueueState::Running;
if let Some(waker) = inner.waker.take() {
waker.wake();
}
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Starting data queue");
inner.state = DataQueueState::Started;
inner.wake();
}
pub fn pause(&self) {
// Playing->Paused
//
// Change state to Scheduled and signal task
let mut inner = self.0.lock().unwrap();
pub async fn pause(&self) {
let mut inner = self.0.lock().await;
if inner.state == DataQueueState::Paused {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue already Paused");
return;
}
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Pausing data queue");
if inner.state == DataQueueState::Scheduled {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue already paused");
return;
}
assert_eq!(inner.state, DataQueueState::Running);
inner.state = DataQueueState::Scheduled;
if let Some(waker) = inner.waker.take() {
waker.wake();
}
assert_eq!(DataQueueState::Started, inner.state);
inner.state = DataQueueState::Paused;
inner.wake();
}
pub fn shutdown(&self) {
// Paused->Ready
//
// Change state to Shutdown and signal task, wait for our future to be finished
// Requires scheduled function to be unblocked! Pad must be deactivated before
let mut inner = self.0.lock().unwrap();
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Shutting down data queue");
if inner.state == DataQueueState::Unscheduled {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue already shut down");
pub async fn stop(&self) {
let mut inner = self.0.lock().await;
if inner.state == DataQueueState::Stopped {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue already Stopped");
return;
}
assert!(inner.state == DataQueueState::Scheduled || inner.state == DataQueueState::Running);
inner.state = DataQueueState::Shutdown;
if let Some(waker) = inner.waker.take() {
waker.wake();
}
let shutdown_receiver = inner.shutdown_receiver.take().unwrap();
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Waiting for data queue to shut down");
drop(inner);
tokio_current_thread::block_on_all(shutdown_receiver).expect("Already shut down");
let mut inner = self.0.lock().unwrap();
inner.state = DataQueueState::Unscheduled;
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue shut down");
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Stopping data queue");
inner.state = DataQueueState::Stopped;
inner.wake();
}
pub fn clear(&self, src_pad: &gst::Pad) {
let mut inner = self.0.lock().unwrap();
pub async fn clear(&self) {
let mut inner = self.0.lock().await;
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Clearing queue");
assert_eq!(inner.state, DataQueueState::Paused);
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Clearing data queue");
let src_pad = inner.src_pad.clone();
for item in inner.queue.drain(..) {
if let DataQueueItem::Event(event) = item {
if event.is_sticky()
@ -256,10 +174,23 @@ impl DataQueue {
}
}
}
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue cleared");
}
pub fn push(&self, item: DataQueueItem) -> Result<(), DataQueueItem> {
let mut inner = self.0.lock().unwrap();
pub async fn push(&self, item: DataQueueItem) -> Result<(), DataQueueItem> {
let mut inner = self.0.lock().await;
if inner.state != DataQueueState::Started {
gst_debug!(
DATA_QUEUE_CAT,
obj: &inner.element,
"Rejecting item {:?} in state {:?}",
item,
inner.state
);
return Err(item);
}
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Pushing item {:?}", item);
@ -299,52 +230,50 @@ impl DataQueue {
inner.cur_size_buffers += count;
inner.cur_size_bytes += bytes;
if let Some(waker) = inner.waker.take() {
waker.wake();
}
inner.wake();
Ok(())
}
}
impl Drop for DataQueueInner {
fn drop(&mut self) {
assert_eq!(self.state, DataQueueState::Unscheduled);
}
}
// Implementing `next` as an `async fn` instead of a `Stream` because of the `async` `Mutex`
// See https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/merge_requests/204#note_322774
#[allow(clippy::should_implement_trait)]
pub async fn next(&mut self) -> Option<DataQueueItem> {
loop {
let pending_fut = {
let mut inner = self.0.lock().await;
match inner.state {
DataQueueState::Started => match inner.queue.pop_front() {
None => {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue is empty");
}
Some(item) => {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Popped item {:?}", item);
impl Stream for DataQueue {
type Item = Result<DataQueueItem, gst::FlowError>;
let (count, bytes) = item.size();
inner.cur_size_buffers -= count;
inner.cur_size_bytes -= bytes;
fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
let mut inner = self.0.lock().unwrap();
if inner.state == DataQueueState::Shutdown {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue shutting down");
return Poll::Ready(None);
} else if inner.state == DataQueueState::Scheduled {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue not running");
inner.waker = Some(cx.waker().clone());
return Poll::Pending;
}
return Some(item);
}
},
DataQueueState::Paused => {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue Paused");
return None;
}
DataQueueState::Stopped => {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue Stopped");
return None;
}
}
assert_eq!(inner.state, DataQueueState::Running);
let (pending_fut, abort_handle) = abortable(future::pending::<()>());
inner.pending_handle = Some(abort_handle);
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Trying to read data");
match inner.queue.pop_front() {
None => {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Data queue is empty");
inner.waker = Some(cx.waker().clone());
Poll::Pending
}
Some(item) => {
gst_debug!(DATA_QUEUE_CAT, obj: &inner.element, "Popped item {:?}", item);
pending_fut
};
let (count, bytes) = item.size();
inner.cur_size_buffers -= count;
inner.cur_size_bytes -= bytes;
Poll::Ready(Some(Ok(item)))
}
let _ = pending_fut.await;
}
}
}

File diff suppressed because it is too large Load diff

View file

@ -15,17 +15,26 @@
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
//! A collection of GStreamer plugins which leverage the `threadshare` [`runtime`].
//!
//! [`runtime`]: runtime/index.html
// Needed for `select!` in `Socket::next`
// see https://docs.rs/futures/0.3.1/futures/macro.select.html
#![recursion_limit = "1024"]
#![crate_type = "cdylib"]
mod runtime;
use runtime::executor as iocontext;
pub use tokio_executor;
mod socket;
#[macro_use]
pub mod runtime;
pub mod socket;
mod tcpclientsrc;
mod udpsrc;
mod appsrc;
mod dataqueue;
pub mod dataqueue;
mod jitterbuffer;
mod proxy;
mod queue;

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -15,8 +15,29 @@
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
use futures::channel::{mpsc, oneshot};
use futures::future::{AbortHandle, Abortable, BoxFuture};
//! The `Executor` for the `threadshare` GStreamer plugins framework.
//!
//! The [`threadshare`]'s `Executor` consists in a set of [`Context`]s. Each [`Context`] is
//! identified by a `name` and runs a loop in a dedicated `thread`. Users can use the [`Context`]
//! to spawn `Future`s. `Future`s are asynchronous processings which allow waiting for resources
//! in a non-blocking way. Examples of non-blocking operations are:
//!
//! * Waiting for an incoming packet on a Socket.
//! * Waiting for an asynchronous `Mutex` `lock` to succeed.
//! * Waiting for a `Timeout` to be elapsed.
//!
//! [`Context`]s instantiators define the minimum time between two iterations of the [`Context`]
//! loop, which acts as a throttle, saving CPU usage when no operations are to be executed.
//!
//! `Element` implementations should use [`PadSrc`] & [`PadSink`] which provides high-level features.
//!
//! [`threadshare`]: ../index.html
//! [`Context`]: struct.Context.html
//! [`PadSrc`]: struct.PadSrc.html
//! [`PadSink`]: struct.PadSink.html
use futures::channel::mpsc as future_mpsc;
use futures::future::BoxFuture;
use futures::prelude::*;
use futures::ready;
use futures::stream::futures_unordered::FuturesUnordered;
@ -34,78 +55,79 @@ use std::collections::{BinaryHeap, HashMap};
use std::io;
use std::mem;
use std::pin::Pin;
use std::sync::atomic;
use std::sync::{Arc, Mutex, Weak};
use std::task::{self, Poll};
use std::sync::mpsc as sync_mpsc;
use std::sync::{atomic, Arc, Mutex, Weak};
use std::task::Poll;
use std::thread;
use std::time;
use std::time::{Duration, Instant};
use tokio_executor::current_thread as tokio_current_thread;
use tokio_executor::park::Unpark;
use super::RUNTIME_CAT;
// We are bound to using `sync` for the `runtime` `Mutex`es. Attempts to use `async` `Mutex`es
// lead to the following issues:
//
// * `CONTEXTS`: can't `spawn` a `Future` when called from a `Context` thread via `ffi`.
// * `timers`: can't automatically `remove` the timer from `BinaryHeap` because `async drop`
// is not available.
// * `task_queues`: can't `add` a pending task when called from a `Context` thread via `ffi`.
//
// Also, we want to be able to `acquire` a `Context` outside of an `async` context.
// These `Mutex`es must be `lock`ed for a short period.
lazy_static! {
static ref CONTEXTS: Mutex<HashMap<String, Weak<IOContextInner>>> = Mutex::new(HashMap::new());
static ref CONTEXT_CAT: gst::DebugCategory = gst::DebugCategory::new(
"ts-context",
gst::DebugColorFlags::empty(),
Some("Thread-sharing Context"),
);
static ref CONTEXTS: Mutex<HashMap<String, Weak<ContextInner>>> = Mutex::new(HashMap::new());
}
// Our own simplified implementation of reactor::Background to allow hooking into its internals
const RUNNING: usize = 0;
const SHUTDOWN_NOW: usize = 1;
struct IOContextRunner {
struct ContextThread {
name: String,
shutdown: Arc<atomic::AtomicUsize>,
shutdown: Arc<atomic::AtomicBool>,
}
impl IOContextRunner {
impl ContextThread {
fn start(
name: &str,
wait: u32,
reactor: tokio_net::driver::Reactor,
timers: Arc<Mutex<BinaryHeap<TimerEntry>>>,
) -> (tokio_current_thread::Handle, IOContextShutdown) {
let handle = reactor.handle().clone();
let shutdown = Arc::new(atomic::AtomicUsize::new(RUNNING));
) -> (tokio_current_thread::Handle, ContextShutdown) {
let handle = reactor.handle();
let shutdown = Arc::new(atomic::AtomicBool::new(false));
let shutdown_clone = shutdown.clone();
let name_clone = name.into();
let mut runner = IOContextRunner {
let mut context_thread = ContextThread {
shutdown: shutdown_clone,
name: name_clone,
};
let (sender, receiver) = oneshot::channel();
let (sender, receiver) = sync_mpsc::channel();
let join = thread::spawn(move || {
runner.run(wait, reactor, sender, timers);
context_thread.spawn(wait, reactor, sender, timers);
});
let shutdown = IOContextShutdown {
let shutdown = ContextShutdown {
name: name.into(),
shutdown,
handle,
join: Some(join),
};
let runtime_handle =
tokio_current_thread::block_on_all(receiver).expect("Runtime init failed");
let thread_handle = receiver.recv().expect("Context thread init failed");
(runtime_handle, shutdown)
(thread_handle, shutdown)
}
fn run(
fn spawn(
&mut self,
wait: u32,
reactor: tokio_net::driver::Reactor,
sender: oneshot::Sender<tokio_current_thread::Handle>,
sender: sync_mpsc::Sender<tokio_current_thread::Handle>,
timers: Arc<Mutex<BinaryHeap<TimerEntry>>>,
) {
use std::time::{Duration, Instant};
gst_debug!(CONTEXT_CAT, "Started reactor thread '{}'", self.name);
gst_debug!(RUNTIME_CAT, "Started context thread '{}'", self.name);
let wait = Duration::from_millis(wait as u64);
@ -117,7 +139,7 @@ impl IOContextRunner {
sender
.send(current_thread.handle())
.expect("Couldn't send Runtime handle");
.expect("Couldn't send context thread handle");
let _timer_guard = tokio_timer::set_default(&timer_handle);
let _reactor_guard = tokio_net::driver::set_default(&handle);
@ -125,11 +147,12 @@ impl IOContextRunner {
let mut now = Instant::now();
loop {
if self.shutdown.load(atomic::Ordering::SeqCst) > RUNNING {
if self.shutdown.load(atomic::Ordering::SeqCst) {
gst_debug!(RUNTIME_CAT, "Shutting down loop");
break;
}
gst_trace!(CONTEXT_CAT, "Elapsed {:?} since last loop", now.elapsed());
gst_trace!(RUNTIME_CAT, "Elapsed {:?} since last loop", now.elapsed());
// Handle timers
{
@ -171,33 +194,33 @@ impl IOContextRunner {
}
}
gst_trace!(CONTEXT_CAT, "Turning current thread '{}'", self.name);
gst_trace!(RUNTIME_CAT, "Turning thread '{}'", self.name);
while current_thread
.turn(Some(time::Duration::from_millis(0)))
.turn(Some(Duration::from_millis(0)))
.unwrap()
.has_polled()
{}
gst_trace!(CONTEXT_CAT, "Turned current thread '{}'", self.name);
gst_trace!(RUNTIME_CAT, "Turned thread '{}'", self.name);
// We have to check again after turning in case we're supposed to shut down now
// and already handled the unpark above
if self.shutdown.load(atomic::Ordering::SeqCst) > RUNNING {
gst_debug!(CONTEXT_CAT, "Shutting down loop");
if self.shutdown.load(atomic::Ordering::SeqCst) {
gst_debug!(RUNTIME_CAT, "Shutting down loop");
break;
}
let elapsed = now.elapsed();
gst_trace!(CONTEXT_CAT, "Elapsed {:?} after handling futures", elapsed);
gst_trace!(RUNTIME_CAT, "Elapsed {:?} after handling futures", elapsed);
if wait == time::Duration::from_millis(0) {
if wait == Duration::from_millis(0) {
let timers = timers.lock().unwrap();
let wait = match timers.peek().map(|entry| entry.time) {
None => None,
Some(time) => Some({
let tmp = time::Instant::now();
let tmp = Instant::now();
if time < tmp {
time::Duration::from_millis(0)
Duration::from_millis(0)
} else {
time.duration_since(tmp)
}
@ -205,19 +228,19 @@ impl IOContextRunner {
};
drop(timers);
gst_trace!(CONTEXT_CAT, "Sleeping for up to {:?}", wait);
gst_trace!(RUNTIME_CAT, "Sleeping for up to {:?}", wait);
current_thread.turn(wait).unwrap();
gst_trace!(CONTEXT_CAT, "Slept for {:?}", now.elapsed());
now = time::Instant::now();
gst_trace!(RUNTIME_CAT, "Slept for {:?}", now.elapsed());
now = Instant::now();
} else {
if elapsed < wait {
gst_trace!(
CONTEXT_CAT,
RUNTIME_CAT,
"Waiting for {:?} before polling again",
wait - elapsed
);
thread::sleep(wait - elapsed);
gst_trace!(CONTEXT_CAT, "Slept for {:?}", now.elapsed());
gst_trace!(RUNTIME_CAT, "Slept for {:?}", now.elapsed());
}
now += wait;
@ -226,26 +249,33 @@ impl IOContextRunner {
}
}
impl Drop for IOContextRunner {
impl Drop for ContextThread {
fn drop(&mut self) {
gst_debug!(CONTEXT_CAT, "Shut down reactor thread '{}'", self.name);
gst_debug!(RUNTIME_CAT, "Terminated: context thread '{}'", self.name);
}
}
struct IOContextShutdown {
#[derive(Debug)]
struct ContextShutdown {
name: String,
shutdown: Arc<atomic::AtomicUsize>,
shutdown: Arc<atomic::AtomicBool>,
handle: tokio_net::driver::Handle,
join: Option<thread::JoinHandle<()>>,
}
impl Drop for IOContextShutdown {
impl Drop for ContextShutdown {
fn drop(&mut self) {
use tokio_executor::park::Unpark;
gst_debug!(CONTEXT_CAT, "Shutting down reactor thread '{}'", self.name);
self.shutdown.store(SHUTDOWN_NOW, atomic::Ordering::SeqCst);
gst_trace!(CONTEXT_CAT, "Waiting for reactor '{}' shutdown", self.name);
gst_debug!(
RUNTIME_CAT,
"Shutting down context thread thread '{}'",
self.name
);
self.shutdown.store(true, atomic::Ordering::SeqCst);
gst_trace!(
RUNTIME_CAT,
"Waiting for context thread '{}' to shutdown",
self.name
);
// After being unparked, the next turn() is guaranteed to finish immediately,
// as such there is no race condition between checking for shutdown and setting
// shutdown.
@ -254,172 +284,236 @@ impl Drop for IOContextShutdown {
}
}
#[derive(Clone)]
pub struct IOContext(Arc<IOContextInner>);
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug)]
pub struct TaskQueueId(u64);
impl glib::subclass::boxed::BoxedType for IOContext {
const NAME: &'static str = "TsIOContext";
impl glib::subclass::boxed::BoxedType for TaskQueueId {
const NAME: &'static str = "TsTaskQueueId";
glib_boxed_type!();
}
glib_boxed_derive_traits!(IOContext);
glib_boxed_derive_traits!(TaskQueueId);
pub type PendingFuturesOutput = Result<(), gst::FlowError>;
type PendingFutureQueue = FuturesUnordered<BoxFuture<'static, PendingFuturesOutput>>;
pub type TaskOutput = Result<(), gst::FlowError>;
type TaskQueue = FuturesUnordered<BoxFuture<'static, TaskOutput>>;
struct IOContextInner {
#[derive(Debug)]
struct ContextInner {
name: String,
runtime_handle: Mutex<tokio_current_thread::Handle>,
thread_handle: Mutex<tokio_current_thread::Handle>,
reactor_handle: tokio_net::driver::Handle,
timers: Arc<Mutex<BinaryHeap<TimerEntry>>>,
// Only used for dropping
_shutdown: IOContextShutdown,
pending_futures: Mutex<(u64, HashMap<u64, PendingFutureQueue>)>,
_shutdown: ContextShutdown,
task_queues: Mutex<(u64, HashMap<u64, TaskQueue>)>,
}
impl Drop for IOContextInner {
impl Drop for ContextInner {
fn drop(&mut self) {
let mut contexts = CONTEXTS.lock().unwrap();
gst_debug!(CONTEXT_CAT, "Finalizing context '{}'", self.name);
gst_debug!(RUNTIME_CAT, "Finalizing context '{}'", self.name);
contexts.remove(&self.name);
}
}
impl IOContext {
pub fn new(name: &str, wait: u32) -> Result<Self, io::Error> {
#[derive(Clone, Debug)]
pub struct ContextWeak(Weak<ContextInner>);
impl ContextWeak {
pub fn upgrade(&self) -> Option<Context> {
self.0.upgrade().map(Context)
}
}
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// See the [module-level documentation](index.html) for more.
///
/// [`PadSrc`]: ../struct.PadSrc.html
/// [`PadSink`]: ../struct.PadSink.html
#[derive(Clone, Debug)]
pub struct Context(Arc<ContextInner>);
impl Context {
pub fn acquire(context_name: &str, wait: u32) -> Result<Self, io::Error> {
let mut contexts = CONTEXTS.lock().unwrap();
if let Some(context) = contexts.get(name) {
if let Some(context) = context.upgrade() {
gst_debug!(CONTEXT_CAT, "Reusing existing context '{}'", name);
return Ok(IOContext(context));
if let Some(inner_weak) = contexts.get(context_name) {
if let Some(inner_strong) = inner_weak.upgrade() {
gst_debug!(RUNTIME_CAT, "Joining Context '{}'", inner_strong.name);
return Ok(Context(inner_strong));
}
}
let reactor = tokio_net::driver::Reactor::new()?;
let reactor_handle = reactor.handle().clone();
let reactor_handle = reactor.handle();
let timers = Arc::new(Mutex::new(BinaryHeap::new()));
let (runtime_handle, shutdown) =
IOContextRunner::start(name, wait, reactor, timers.clone());
let (thread_handle, shutdown) =
ContextThread::start(context_name, wait, reactor, timers.clone());
let context = Arc::new(IOContextInner {
name: name.into(),
runtime_handle: Mutex::new(runtime_handle),
let context = Context(Arc::new(ContextInner {
name: context_name.into(),
thread_handle: Mutex::new(thread_handle),
reactor_handle,
timers,
_shutdown: shutdown,
pending_futures: Mutex::new((0, HashMap::new())),
});
contexts.insert(name.into(), Arc::downgrade(&context));
task_queues: Mutex::new((0, HashMap::new())),
}));
contexts.insert(context_name.into(), Arc::downgrade(&context.0));
gst_debug!(CONTEXT_CAT, "Created new context '{}'", name);
Ok(IOContext(context))
gst_debug!(RUNTIME_CAT, "New Context '{}'", context.0.name);
Ok(context)
}
pub fn spawn<Fut>(&self, future: Fut)
where
Fut: Future<Output = ()> + Send + 'static,
{
self.0.runtime_handle.lock().unwrap().spawn(future).unwrap();
pub fn downgrade(&self) -> ContextWeak {
ContextWeak(Arc::downgrade(&self.0))
}
pub fn acquire_task_queue_id(&self) -> TaskQueueId {
let mut task_queues = self.0.task_queues.lock().unwrap();
let id = task_queues.0;
task_queues.0 += 1;
task_queues.1.insert(id, FuturesUnordered::new());
TaskQueueId(id)
}
pub fn name(&self) -> &str {
self.0.name.as_str()
}
pub fn reactor_handle(&self) -> &tokio_net::driver::Handle {
&self.0.reactor_handle
}
pub fn acquire_pending_future_id(&self) -> PendingFutureId {
let mut pending_futures = self.0.pending_futures.lock().unwrap();
let id = pending_futures.0;
pending_futures.0 += 1;
pending_futures.1.insert(id, FuturesUnordered::new());
PendingFutureId(id)
pub fn spawn<Fut>(&self, future: Fut)
where
Fut: Future<Output = ()> + Send + 'static,
{
self.0.thread_handle.lock().unwrap().spawn(future).unwrap();
}
pub fn release_pending_future_id(&self, id: PendingFutureId) {
let mut pending_futures = self.0.pending_futures.lock().unwrap();
if let Some(fs) = pending_futures.1.remove(&id.0) {
self.spawn(fs.try_for_each(|_| future::ok(())).map(|_| ()));
pub fn release_task_queue(&self, id: TaskQueueId) -> Option<TaskQueue> {
let mut task_queues = self.0.task_queues.lock().unwrap();
task_queues.1.remove(&id.0)
}
pub fn add_task<T>(&self, id: TaskQueueId, task: T) -> Result<(), ()>
where
T: Future<Output = TaskOutput> + Send + 'static,
{
let mut task_queues = self.0.task_queues.lock().unwrap();
match task_queues.1.get_mut(&id.0) {
Some(task_queue) => {
task_queue.push(task.boxed());
Ok(())
}
None => Err(()),
}
}
pub fn add_pending_future<F>(&self, id: PendingFutureId, future: F)
where
F: Future<Output = PendingFuturesOutput> + Send + 'static,
{
let mut pending_futures = self.0.pending_futures.lock().unwrap();
let fs = pending_futures.1.get_mut(&id.0).unwrap();
fs.push(future.boxed())
pub fn clear_task_queue(&self, id: TaskQueueId) {
let mut task_queues = self.0.task_queues.lock().unwrap();
let task_queue = task_queues.1.get_mut(&id.0).unwrap();
*task_queue = FuturesUnordered::new();
}
pub fn drain_pending_futures(
&self,
id: PendingFutureId,
) -> (
Option<AbortHandle>,
future::Either<
BoxFuture<'static, PendingFuturesOutput>,
future::Ready<PendingFuturesOutput>,
>,
) {
let mut pending_futures = self.0.pending_futures.lock().unwrap();
let fs = pending_futures.1.get_mut(&id.0).unwrap();
pub fn drain_task_queue(&self, id: TaskQueueId) -> Option<impl Future<Output = TaskOutput>> {
let task_queue = {
let mut task_queues = self.0.task_queues.lock().unwrap();
let task_queue = task_queues.1.get_mut(&id.0).unwrap();
let pending_futures = mem::replace(fs, FuturesUnordered::new());
mem::replace(task_queue, FuturesUnordered::new())
};
if !pending_futures.is_empty() {
if !task_queue.is_empty() {
gst_log!(
CONTEXT_CAT,
"Scheduling {} pending futures for context '{}' with pending future id {:?}",
pending_futures.len(),
self.0.name,
RUNTIME_CAT,
"Scheduling {} tasks from {:?} on '{}'",
task_queue.len(),
id,
self.0.name,
);
let (abort_handle, abort_registration) = AbortHandle::new_pair();
let abortable = Abortable::new(
pending_futures.try_for_each(|_| future::ok(())),
abort_registration,
)
.map(|res| {
res.unwrap_or_else(|_| {
gst_trace!(CONTEXT_CAT, "Aborting");
Err(gst::FlowError::Flushing)
})
})
.boxed()
.left_future();
(Some(abort_handle), abortable)
Some(task_queue.try_for_each(|_| future::ok(())))
} else {
(None, future::ok(()).right_future())
None
}
}
pub fn add_timer(
&self,
time: Instant,
interval: Option<Duration>,
) -> future_mpsc::UnboundedReceiver<()> {
let (sender, receiver) = future_mpsc::unbounded();
let mut timers = self.0.timers.lock().unwrap();
let entry = TimerEntry {
time,
id: TIMER_ENTRY_ID.fetch_add(1, atomic::Ordering::Relaxed),
interval,
sender,
};
timers.push(entry);
self.0.reactor_handle.unpark();
receiver
}
pub fn new_interval(&self, interval: Duration) -> Interval {
Interval::new(&self, interval)
}
/// Builds a `Future` to execute an `action` at [`Interval`]s.
///
/// [`Interval`]: struct.Interval.html
pub fn interval<F, Fut>(&self, interval: Duration, f: F) -> impl Future<Output = Fut::Output>
where
F: Fn() -> Fut + Send + Sync + 'static,
Fut: Future<Output = Result<(), ()>> + Send + 'static,
{
let f = Arc::new(f);
self.new_interval(interval).try_for_each(move |_| {
let f = Arc::clone(&f);
f()
})
}
pub fn new_timeout(&self, timeout: Duration) -> Timeout {
Timeout::new(&self, timeout)
}
/// Builds a `Future` to execute an action after the given `delay` has elapsed.
pub fn delay_for<F, Fut>(&self, delay: Duration, f: F) -> impl Future<Output = Fut::Output>
where
F: FnOnce() -> Fut + Send + Sync + 'static,
Fut: Future<Output = ()> + Send + 'static,
{
self.new_timeout(delay).then(move |_| f())
}
}
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug)]
pub struct PendingFutureId(u64);
impl glib::subclass::boxed::BoxedType for PendingFutureId {
const NAME: &'static str = "TsPendingFutureId";
glib_boxed_type!();
}
glib_boxed_derive_traits!(PendingFutureId);
static TIMER_ENTRY_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
// Ad-hoc interval timer implementation for our throttled event loop above
pub struct TimerEntry {
time: time::Instant,
#[derive(Debug)]
struct TimerEntry {
time: Instant,
id: usize, // for producing a total order
interval: Option<time::Duration>,
sender: mpsc::UnboundedSender<()>,
interval: Option<Duration>,
sender: future_mpsc::UnboundedSender<()>,
}
impl PartialEq for TimerEntry {
@ -445,68 +539,292 @@ impl Ord for TimerEntry {
}
}
#[allow(unused)]
/// A `Stream` that yields a tick at `interval`s.
#[derive(Debug)]
pub struct Interval {
receiver: mpsc::UnboundedReceiver<()>,
receiver: future_mpsc::UnboundedReceiver<()>,
}
impl Interval {
#[allow(unused)]
pub fn new(context: &IOContext, interval: time::Duration) -> Self {
use tokio_executor::park::Unpark;
let (sender, receiver) = mpsc::unbounded();
let mut timers = context.0.timers.lock().unwrap();
let entry = TimerEntry {
time: time::Instant::now(),
id: TIMER_ENTRY_ID.fetch_add(1, atomic::Ordering::Relaxed),
interval: Some(interval),
sender,
};
timers.push(entry);
context.reactor_handle().unpark();
Self { receiver }
fn new(context: &Context, interval: Duration) -> Self {
Self {
receiver: context.add_timer(Instant::now(), Some(interval)),
}
}
}
impl Stream for Interval {
type Item = ();
type Item = Result<(), ()>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
self.receiver.poll_next_unpin(cx)
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context,
) -> Poll<Option<Self::Item>> {
self.receiver
.poll_next_unpin(cx)
.map(|item_opt| item_opt.map(Ok))
}
}
/// A `Future` that completes after a `timeout` is elapsed.
#[derive(Debug)]
pub struct Timeout {
receiver: mpsc::UnboundedReceiver<()>,
receiver: future_mpsc::UnboundedReceiver<()>,
}
impl Timeout {
pub fn new(context: &IOContext, timeout: time::Duration) -> Self {
let (sender, receiver) = mpsc::unbounded();
let mut timers = context.0.timers.lock().unwrap();
let entry = TimerEntry {
time: time::Instant::now() + timeout,
id: TIMER_ENTRY_ID.fetch_add(1, atomic::Ordering::Relaxed),
interval: None,
sender,
};
timers.push(entry);
Self { receiver }
fn new(context: &Context, timeout: Duration) -> Self {
Self {
receiver: context.add_timer(Instant::now() + timeout, None),
}
}
}
impl Future for Timeout {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll<Self::Output> {
match ready!(self.receiver.poll_next_unpin(cx)) {
Some(_) => Poll::Ready(()),
None => unreachable!(),
}
}
}
#[cfg(test)]
mod tests {
use futures::channel::{mpsc, oneshot};
use futures::future::Aborted;
use futures::lock::Mutex;
use gst;
use std::sync::Arc;
use std::time::{Duration, Instant};
use crate::block_on;
use crate::runtime::future::abortable_waitable;
use super::*;
type Item = i32;
const SLEEP_DURATION: u32 = 2;
const INTERVAL: Duration = Duration::from_millis(100 * SLEEP_DURATION as u64);
#[test]
fn user_drain_pending_tasks() {
// Setup
gst::init().unwrap();
let context = Context::acquire("user_drain_task_queue", SLEEP_DURATION).unwrap();
let queue_id = context.acquire_task_queue_id();
let (sender, mut receiver) = mpsc::channel(1);
let sender: Arc<Mutex<mpsc::Sender<Item>>> = Arc::new(Mutex::new(sender));
let ctx_weak = context.downgrade();
let queue_id_clone = queue_id.clone();
let add_task = move |item| {
let sender_task = Arc::clone(&sender);
let context = ctx_weak.upgrade().unwrap();
context.add_task(queue_id_clone, async move {
sender_task
.lock()
.await
.send(item)
.await
.map_err(|_| gst::FlowError::Error)
})
};
// Tests
assert!(context.drain_task_queue(queue_id).is_none());
add_task(0).unwrap();
receiver.try_next().unwrap_err();
let drain = context.drain_task_queue(queue_id).unwrap();
// User triggered drain
receiver.try_next().unwrap_err();
block_on!(drain).unwrap();
assert_eq!(receiver.try_next().unwrap(), Some(0));
add_task(1).unwrap();
receiver.try_next().unwrap_err();
}
#[test]
fn delay_for() {
gst::init().unwrap();
let context = Context::acquire("delay_for", SLEEP_DURATION).unwrap();
let (sender, receiver) = oneshot::channel();
let start = Instant::now();
let delayed_by_fut = context.delay_for(INTERVAL, move || async {
sender.send(42).unwrap();
});
context.spawn(delayed_by_fut);
let _ = block_on!(receiver).unwrap();
let delta = Instant::now() - start;
assert!(delta >= INTERVAL);
assert!(delta < INTERVAL * 2);
}
#[test]
fn delay_for_abort() {
gst::init().unwrap();
let context = Context::acquire("delay_for_abort", SLEEP_DURATION).unwrap();
let (sender, receiver) = oneshot::channel();
let delay_for_fut = context.delay_for(INTERVAL, move || async {
sender.send(42).unwrap();
});
let (abortable_delay_for, abort_handle) = abortable_waitable(delay_for_fut);
context.spawn(abortable_delay_for.map(move |res| {
if let Err(Aborted) = res {
gst_debug!(RUNTIME_CAT, "Aborted delay_for");
}
}));
block_on!(abort_handle.abort_and_wait()).unwrap();
block_on!(receiver).unwrap_err();
}
#[test]
fn interval_ok() {
gst::init().unwrap();
let context = Context::acquire("interval_ok", SLEEP_DURATION).unwrap();
let (sender, mut receiver) = mpsc::channel(1);
let sender: Arc<Mutex<mpsc::Sender<Instant>>> = Arc::new(Mutex::new(sender));
let interval_fut = context.interval(INTERVAL, move || {
let sender = Arc::clone(&sender);
async move {
let instant = Instant::now();
sender.lock().await.send(instant).await.map_err(drop)
}
});
context.spawn(interval_fut.map(drop));
block_on!(async {
let mut idx: u32 = 0;
let mut first = Instant::now();
while let Some(instant) = receiver.next().await {
if idx > 0 {
let delta = instant - first;
assert!(delta > INTERVAL * (idx - 1));
assert!(delta < INTERVAL * (idx + 1));
} else {
first = instant;
}
if idx == 3 {
break;
}
idx += 1;
}
});
}
#[test]
fn interval_err() {
gst::init().unwrap();
let context = Context::acquire("interval_err", SLEEP_DURATION).unwrap();
let (sender, mut receiver) = mpsc::channel(1);
let sender: Arc<Mutex<mpsc::Sender<Instant>>> = Arc::new(Mutex::new(sender));
let interval_idx: Arc<Mutex<Item>> = Arc::new(Mutex::new(0));
let interval_fut = context.interval(INTERVAL, move || {
let sender = Arc::clone(&sender);
let interval_idx = Arc::clone(&interval_idx);
async move {
let instant = Instant::now();
let mut idx = interval_idx.lock().await;
sender.lock().await.send(instant).await.unwrap();
*idx += 1;
if *idx < 3 {
Ok(())
} else {
Err(())
}
}
});
context.spawn(interval_fut.map(drop));
block_on!(async {
let mut idx: u32 = 0;
let mut first = Instant::now();
while let Some(instant) = receiver.next().await {
if idx > 0 {
let delta = instant - first;
assert!(delta > INTERVAL * (idx - 1));
assert!(delta < INTERVAL * (idx + 1));
} else {
first = instant;
}
idx += 1;
}
assert_eq!(idx, 3);
});
}
#[test]
fn interval_abort() {
gst::init().unwrap();
let context = Context::acquire("interval_abort", SLEEP_DURATION).unwrap();
let (sender, mut receiver) = mpsc::channel(1);
let sender: Arc<Mutex<mpsc::Sender<Instant>>> = Arc::new(Mutex::new(sender));
let interval_fut = context.interval(INTERVAL, move || {
let sender = Arc::clone(&sender);
async move {
let instant = Instant::now();
sender.lock().await.send(instant).await.map_err(drop)
}
});
let (abortable_interval, abort_handle) = abortable_waitable(interval_fut);
context.spawn(abortable_interval.map(move |res| {
if let Err(Aborted) = res {
gst_debug!(RUNTIME_CAT, "Aborted timeout");
}
}));
block_on!(async {
let mut idx: u32 = 0;
let mut first = Instant::now();
while let Some(instant) = receiver.next().await {
if idx > 0 {
let delta = instant - first;
assert!(delta > INTERVAL * (idx - 1));
assert!(delta < INTERVAL * (idx + 1));
} else {
first = instant;
}
if idx == 3 {
abort_handle.abort_and_wait().await.unwrap();
break;
}
idx += 1;
}
assert_eq!(receiver.next().await, None);
});
}
}

View file

@ -0,0 +1,198 @@
// Copyright (C) 2019 François Laignel <fengalin@free.fr>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
use futures::future::{self, AbortHandle, Abortable};
use futures::prelude::*;
use super::{waitable, WaitError, WaitHandle, Waitable};
pub type AbortableWaitable<Fut> = Waitable<Abortable<Fut>>;
/// Builds an [`Abortable`] and [`Waitable`] `Future` from the provided `Future`.
///
/// See [`AbortWaitHandle`].
///
/// [`Abortable`]: https://rust-lang-nursery.github.io/futures-api-docs/0.3.0-alpha.19/futures/future/struct.Abortable.html
/// [`Waitable`]: struct.Waitable.html
/// [`AbortWaitHandle`]: struct.AbortWaitHandle.html
pub fn abortable_waitable<Fut: Future>(future: Fut) -> (AbortableWaitable<Fut>, AbortWaitHandle) {
let (abortable, abort_handle) = future::abortable(future);
let (abortable_waitable, wait_handle) = waitable(abortable);
(
abortable_waitable,
AbortWaitHandle::new(abort_handle, wait_handle),
)
}
/// A handle to an [`Abortable`] and [`Waitable`] `Future`.
///
/// The handle allows checking for the `Future` state, canceling the `Future` and waiting until
/// the `Future` completes.
///
/// [`Abortable`]: https://rust-lang-nursery.github.io/futures-api-docs/0.3.0-alpha.19/futures/future/struct.Abortable.html
/// [`Waitable`]: struct.Waitable.html
#[derive(Debug)]
pub struct AbortWaitHandle {
abort_handle: AbortHandle,
wait_handle: WaitHandle,
}
impl AbortWaitHandle {
fn new(abort_handle: AbortHandle, wait_handle: WaitHandle) -> Self {
AbortWaitHandle {
abort_handle,
wait_handle,
}
}
pub fn is_terminated(&mut self) -> bool {
self.wait_handle.is_terminated()
}
pub fn is_cancelled(&mut self) -> bool {
self.wait_handle.is_cancelled()
}
pub async fn wait(self) -> Result<(), WaitError> {
self.wait_handle.wait().await
}
pub fn abort(&self) {
self.abort_handle.abort();
}
pub async fn abort_and_wait(mut self) -> Result<(), WaitError> {
if self.wait_handle.is_terminated() {
if self.wait_handle.is_cancelled() {
return Err(WaitError::Cancelled);
}
return Ok(());
}
self.abort_handle.abort();
self.wait_handle.wait().await
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::channel::{mpsc, oneshot};
#[derive(Debug, PartialEq)]
enum State {
Released,
Terminated,
Triggered,
}
#[tokio::test]
async fn abort_wait_async_non_blocking_task() {
let (trigger_sender, trigger_receiver) = oneshot::channel::<()>();
let (_release_sender, release_receiver) = oneshot::channel::<()>();
let (mut state_sender_abrt, mut state_receiver_abrt) = mpsc::channel(1);
let (shared, mut handle) = abortable_waitable(async move {
let _ = trigger_receiver.await;
state_sender_abrt.send(State::Triggered).await.unwrap();
let _ = release_receiver.await;
state_sender_abrt.send(State::Released).await.unwrap();
});
let (mut state_sender_spawn, mut state_receiver_spawn) = mpsc::channel(1);
tokio::spawn(async move {
let _ = shared.await;
state_sender_spawn.send(State::Terminated).await.unwrap();
});
drop(trigger_sender);
assert_eq!(state_receiver_abrt.next().await, Some(State::Triggered));
assert!(!handle.is_terminated());
assert_eq!(handle.abort_and_wait().await, Ok(()));
assert_eq!(state_receiver_spawn.next().await, Some(State::Terminated));
assert_eq!(state_receiver_abrt.next().await, None);
}
#[tokio::test]
async fn abort_wait_blocking_task() {
let (trigger_sender, trigger_receiver) = oneshot::channel::<()>();
let (release_sender, release_receiver) = oneshot::channel::<()>();
let (mut state_sender_abrt, mut state_receiver_abrt) = mpsc::channel(1);
let (shared, mut handle) = abortable_waitable(async move {
let _ = trigger_receiver.await;
state_sender_abrt.send(State::Triggered).await.unwrap();
let _ = release_receiver.await;
state_sender_abrt.send(State::Released).await.unwrap();
});
let (mut state_sender_spawn, mut state_receiver_spawn) = mpsc::channel(1);
tokio::spawn(async move {
let _ = shared.await;
state_sender_spawn.send(State::Terminated).await.unwrap();
});
drop(trigger_sender);
assert_eq!(state_receiver_abrt.next().await, Some(State::Triggered));
assert!(!handle.is_terminated());
drop(release_sender);
assert_eq!(state_receiver_abrt.next().await, Some(State::Released));
assert_eq!(handle.abort_and_wait().await, Ok(()));
assert_eq!(state_receiver_spawn.next().await, Some(State::Terminated));
assert_eq!(state_receiver_abrt.next().await, None);
}
#[tokio::test]
async fn abort_only() {
let (trigger_sender, trigger_receiver) = oneshot::channel::<()>();
let (_release_sender, release_receiver) = oneshot::channel::<()>();
let (mut state_sender_abrt, mut state_receiver_abrt) = mpsc::channel(1);
let (shared, mut handle) = abortable_waitable(async move {
let _ = trigger_receiver.await;
state_sender_abrt.send(State::Triggered).await.unwrap();
let _ = release_receiver.await;
state_sender_abrt.send(State::Released).await.unwrap();
});
let (mut state_sender_spawn, mut state_receiver_spawn) = mpsc::channel(1);
tokio::spawn(async move {
let _ = shared.await;
state_sender_spawn.send(State::Terminated).await.unwrap();
});
assert!(!handle.is_terminated());
drop(trigger_sender);
assert_eq!(state_receiver_abrt.next().await, Some(State::Triggered));
assert!(!handle.is_terminated());
handle.abort();
assert_eq!(state_receiver_spawn.next().await, Some(State::Terminated));
}
}

View file

@ -0,0 +1,24 @@
// Copyright (C) 2019 François Laignel <fengalin@free.fr>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
//! `Future`s combinators which help implementing statefull asynchronous `Processor`s.
mod abortable_waitable;
pub use abortable_waitable::{abortable_waitable, AbortWaitHandle, AbortableWaitable};
mod waitable;
pub use waitable::{waitable, WaitError, WaitHandle, Waitable};

View file

@ -0,0 +1,224 @@
// Copyright (C) 2019 François Laignel <fengalin@free.fr>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
use futures::channel::oneshot;
use futures::prelude::*;
use futures::ready;
use pin_project::pin_project;
use std::pin::Pin;
use std::task::{self, Poll};
/// Wraps `future` in a [`Waitable`], also returning its [`WaitHandle`].
///
/// The handle can be used to check for, or await, completion of `future`.
///
/// [`Waitable`]: struct.Waitable.html
/// [`WaitHandle`]: struct.WaitHandle.html
pub fn waitable<Fut>(future: Fut) -> (Waitable<Fut>, WaitHandle)
where
    Fut: Future,
{
    Waitable::new(future)
}
/// A `Waitable` `Future`.
///
/// Wraps a `Future` and signals its completion over a oneshot channel,
/// which the associated [`WaitHandle`] observes.
///
/// [`WaitHandle`]: struct.WaitHandle.html
#[pin_project]
#[derive(Debug)]
pub struct Waitable<Fut> {
    // The wrapped `Future`; structurally pinned via `pin_project`.
    #[pin]
    inner: Fut,
    // Completion notifier: `Some` until `inner` resolves, then taken and
    // fired exactly once (see the `Future` impl below).
    sender: Option<oneshot::Sender<()>>,
}
impl<Fut> Waitable<Fut> {
    /// Wraps `inner`, returning the `Waitable` and the handle observing its completion.
    pub fn new(inner: Fut) -> (Waitable<Fut>, WaitHandle) {
        let (sender, receiver) = oneshot::channel();
        let waitable = Waitable {
            inner,
            sender: Some(sender),
        };
        let handle = WaitHandle::new(receiver);
        (waitable, handle)
    }
}
impl<Fut: Future> Future for Waitable<Fut> {
    type Output = Fut::Output;
    // Polls the wrapped `Future`; once it resolves, fires the completion
    // signal (at most once) before yielding the output.
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        let this = self.project();
        // `ready!` returns `Poll::Pending` early while `inner` is not done.
        let output = ready!(this.inner.poll(cx));
        if let Some(sender) = this.sender.take() {
            // The receiver may already be dropped; that's fine, ignore the error.
            let _ = sender.send(());
        }
        Poll::Ready(output)
    }
}
/// Error returned by [`WaitHandle::wait`] when the `Waitable` was dropped
/// before completing.
///
/// [`WaitHandle::wait`]: struct.WaitHandle.html#method.wait
#[derive(Debug, Eq, PartialEq)]
pub enum WaitError {
    /// The `Waitable` was dropped without completing.
    Cancelled,
}
/// A handle to a [`Waitable`] `Future`.
///
/// The handle allows checking for the `Future` completion and waiting until the `Future`
/// completes.
///
/// [`Waitable`]: struct.Waitable.html
#[derive(Debug)]
pub struct WaitHandle {
    // Receives the completion signal sent by the `Waitable`.
    receiver: oneshot::Receiver<()>,
    // Cached observation: the `Waitable` finished (completed or cancelled).
    is_terminated: bool,
    // Cached observation: the `Waitable` was dropped before completing.
    is_cancelled: bool,
}
impl WaitHandle {
    /// Builds a handle observing the completion signal on `receiver`.
    fn new(receiver: oneshot::Receiver<()>) -> WaitHandle {
        WaitHandle {
            receiver,
            is_terminated: false,
            is_cancelled: false,
        }
    }

    /// Returns `true` if the `Waitable` has terminated (completed or cancelled).
    pub fn is_terminated(&mut self) -> bool {
        self.check_state();
        self.is_terminated
    }

    /// Returns `true` if the `Waitable` was dropped before completing.
    pub fn is_cancelled(&mut self) -> bool {
        self.check_state();
        self.is_cancelled
    }

    /// Waits until the `Waitable` completes.
    ///
    /// Returns `Err(WaitError::Cancelled)` if it was dropped before completing.
    pub async fn wait(self) -> Result<(), WaitError> {
        if self.is_terminated {
            // Outcome already observed: answer without awaiting.
            return if self.is_cancelled {
                Err(WaitError::Cancelled)
            } else {
                Ok(())
            };
        }
        self.receiver.await.map_err(|_| WaitError::Cancelled)
    }

    // Refreshes the cached `is_terminated` / `is_cancelled` flags from the channel.
    fn check_state(&mut self) {
        if self.is_terminated {
            // Terminal state is sticky: nothing more to observe.
            return;
        }
        match self.receiver.try_recv() {
            Ok(Some(())) => self.is_terminated = true,
            Ok(None) => (),
            Err(_) => {
                // Sender dropped without signalling: the `Waitable` was cancelled.
                self.is_terminated = true;
                self.is_cancelled = true;
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use futures::channel::{mpsc, oneshot};
    use futures::future;
    use std::time::Duration;
    use tokio::future::FutureExt;
    use super::*;
    // Milestones reported by the wrapped future / spawned task so the tests
    // can assert the exact ordering of events.
    #[derive(Debug, PartialEq)]
    enum State {
        Released,
        Terminated,
        Triggered,
    }
    // Awaiting `WaitHandle::wait` must block until the wrapped future finishes.
    #[tokio::test]
    async fn wait() {
        let (trigger_sender, trigger_receiver) = oneshot::channel::<()>();
        let (mut state_sender_wait, mut state_receiver_wait) = mpsc::channel(1);
        let (shared, mut handle) = waitable(async move {
            let _ = trigger_receiver.await;
            state_sender_wait.send(State::Triggered).await.unwrap();
            // Keep the future alive ~1s so `handle.wait()` genuinely waits.
            let _ = future::pending::<()>()
                .timeout(Duration::from_secs(1))
                .await;
            state_sender_wait.send(State::Released).await.unwrap();
        });
        let (mut state_sender_spawn, mut state_receiver_spawn) = mpsc::channel(1);
        tokio::spawn(async move {
            let _ = shared.await;
            state_sender_spawn.send(State::Terminated).await.unwrap();
        });
        // Dropping the sender unblocks `trigger_receiver.await` in the future.
        drop(trigger_sender);
        assert_eq!(state_receiver_wait.next().await, Some(State::Triggered));
        assert!(!handle.is_terminated());
        assert_eq!(handle.wait().await, Ok(()));
        assert_eq!(state_receiver_wait.next().await, Some(State::Released));
        assert_eq!(state_receiver_spawn.next().await, Some(State::Terminated));
        assert_eq!(state_receiver_wait.next().await, None);
    }
    // The handle must report termination correctly when `wait` is never called.
    #[tokio::test]
    async fn no_wait() {
        let (trigger_sender, trigger_receiver) = oneshot::channel::<()>();
        let (mut state_sender_wait, mut state_receiver_wait) = mpsc::channel(1);
        let (shared, mut handle) = waitable(async move {
            let _ = trigger_receiver.await;
            state_sender_wait.send(State::Triggered).await.unwrap();
            state_sender_wait.send(State::Released).await.unwrap();
        });
        let (mut state_sender_spawn, mut state_receiver_spawn) = mpsc::channel(1);
        tokio::spawn(async move {
            let _ = shared.await;
            state_sender_spawn.send(State::Terminated).await.unwrap();
        });
        drop(trigger_sender);
        assert_eq!(state_receiver_wait.next().await, Some(State::Triggered));
        assert!(!handle.is_terminated());
        assert_eq!(state_receiver_wait.next().await, Some(State::Released));
        assert_eq!(state_receiver_spawn.next().await, Some(State::Terminated));
        assert_eq!(state_receiver_wait.next().await, None);
        assert!(handle.is_terminated());
        assert!(!handle.is_cancelled());
    }
}

View file

@ -0,0 +1,25 @@
// Copyright (C) 2019 François Laignel <fengalin@free.fr>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
//! A set of `macro`s to ease the implementation of asynchronous processings.
/// Blocks the current thread on `$future` using a dedicated
/// current-thread `tokio` executor.
///
/// Intended for synchronous code (e.g. GStreamer state-change handlers)
/// that must run an `async` operation to completion.
#[macro_export]
macro_rules! block_on {
    ($future:expr) => {
        $crate::tokio_executor::current_thread::CurrentThread::new().block_on($future)
    };
}

View file

@ -15,4 +15,59 @@
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
//! A `runtime` for the `threadshare` GStreamer plugins framework.
//!
//! Many `GStreamer` `Element`s internally spawn OS `thread`s. For most applications, this is not an
//! issue. However, in applications which process many `Stream`s in parallel, the high number of
//! `threads` leads to reduced efficiency due to:
//!
//! * context switches,
//! * scheduler overhead,
//! * most of the threads waiting for some resources to be available.
//!
//! The `threadshare` `runtime` is a framework to build `Element`s for such applications. It
//! uses light-weight threading to allow multiple `Element`s to share a reduced number of OS `thread`s.
//!
//! See this [talk] ([slides]) for a presentation of the motivations and principles.
//!
//! Current implementation uses the crate [`tokio`].
//!
//! Most `Element`s implementations should use the high-level features provided by [`PadSrc`] &
//! [`PadSink`].
//!
//! [talk]: https://gstconf.ubicast.tv/videos/when-adding-more-threads-adds-more-problems-thread-sharing-between-elements-in-gstreamer/
//! [slides]: https://gstreamer.freedesktop.org/data/events/gstreamer-conference/2018/Sebastian%20Dr%C3%B6ge%20-%20When%20adding%20more%20threads%20adds%20more%20problems:%20Thread-sharing%20between%20elements%20in%20GStreamer.pdf
//! [`tokio`]: https://crates.io/crates/tokio
//! [`PadSrc`]: pad/struct.PadSrc.html
//! [`PadSink`]: pad/struct.PadSink.html
pub mod executor;
pub use executor::{Context, Interval, TaskOutput, Timeout};
pub mod future;
#[macro_use]
pub mod macros;
pub mod pad;
pub use pad::{PadSink, PadSinkRef, PadSrc, PadSrcRef, PadSrcWeak};
pub mod pad_context;
pub use pad_context::{PadContext, PadContextWeak};
/// Commonly used traits, intended for glob import by `Element` implementations.
pub mod prelude {
    pub use super::pad::{PadSinkHandler, PadSrcHandler};
}
pub mod task;
use gst;
use lazy_static::lazy_static;
lazy_static! {
    // Debug category shared by all `runtime` modules for GStreamer logging.
    static ref RUNTIME_CAT: gst::DebugCategory = gst::DebugCategory::new(
        "ts-runtime",
        gst::DebugColorFlags::empty(),
        Some("Thread-sharing Runtime"),
    );
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,332 @@
// Copyright (C) 2019 François Laignel <fengalin@free.fr>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
//! A wrapper on a [`Context`] with additional features for [`PadSrc`] & [`PadSink`].
//!
//! [`Context`]: ../executor/struct.Context.html
//! [`PadSrc`]: ../pad/struct.PadSrc.html
//! [`PadSink`]: ../pad/struct.PadSink.html
use futures::prelude::*;
use glib;
use glib::{glib_boxed_derive_traits, glib_boxed_type};
use std::marker::PhantomData;
use std::time::Duration;
use super::executor::{Context, ContextWeak, Interval, TaskOutput, TaskQueueId, Timeout};
/// A weak reference to a [`PadContext`]: a weak `Context` reference plus the
/// pending-task queue id the `PadContext` owns.
///
/// [`PadContext`]: struct.PadContext.html
#[derive(Clone)]
pub struct PadContextWeak {
    context_weak: ContextWeak,
    queue_id: TaskQueueId,
}
impl PadContextWeak {
    /// Attempts to upgrade to a [`PadContextRef`].
    ///
    /// Returns `None` when the underlying `Context` no longer exists.
    ///
    /// [`PadContextRef`]: struct.PadContextRef.html
    pub fn upgrade(&self) -> Option<PadContextRef> {
        let context = self.context_weak.upgrade()?;
        Some(PadContextRef::new(context, self.queue_id))
    }
}
// Registers `PadContextWeak` as a GLib boxed type so it can travel inside a
// `gst::Structure` (see `PadContext::new_sticky_event`).
impl glib::subclass::boxed::BoxedType for PadContextWeak {
    const NAME: &'static str = "TsPadContext";
    glib_boxed_type!();
}
glib_boxed_derive_traits!(PadContextWeak);
// Manual `Debug`: resolves the weak `Context` reference so the output can show
// the context name when it is still alive.
impl std::fmt::Debug for PadContextWeak {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self.context_weak.upgrade() {
            // Fixed: the format string had a stray `)` after the context name
            // (`'{}')`), which has no matching `(` — compare with the balanced
            // `Context('{}')` form used by `PadContextStrong`'s `Display`.
            Some(context) => write!(
                f,
                "PadContext {{ context: '{}', {:?} }}",
                context.name(),
                self.queue_id
            ),
            None => write!(
                f,
                "PadContext {{ context: _NO LONGER AVAILABLE_, {:?} }}",
                self.queue_id
            ),
        }
    }
}
/// A borrowed view on a `PadContext`.
///
/// The `PhantomData` lifetime ties the ref to its originating borrow even
/// though it internally holds a strong `Context` clone.
#[derive(Debug)]
pub struct PadContextRef<'a> {
    strong: PadContextStrong,
    phantom: PhantomData<&'a PadContextStrong>,
}
impl<'a> PadContextRef<'a> {
    // Builds a ref from a strong `Context` and the pad's task queue id.
    fn new(context: Context, queue_id: TaskQueueId) -> Self {
        PadContextRef {
            strong: PadContextStrong { context, queue_id },
            phantom: PhantomData,
        }
    }
}
// All methods delegate to the shared `PadContextStrong` / its `Context`.
impl<'a> PadContextRef<'a> {
    /// Downgrades to a `PadContextWeak` suitable for storing in events.
    pub fn downgrade(&self) -> PadContextWeak {
        self.strong.downgrade()
    }
    /// Spawns `future` on the underlying `Context`.
    pub fn spawn<Fut>(&self, future: Fut)
    where
        Fut: Future<Output = ()> + Send + 'static,
    {
        self.strong.context.spawn(future);
    }
    /// Queues `task` on this pad's pending-task queue.
    pub fn add_pending_task<T>(&self, task: T)
    where
        T: Future<Output = TaskOutput> + Send + 'static,
    {
        self.strong.add_pending_task(task);
    }
    /// Takes the queued pending tasks, if any, as a single `Future`.
    pub fn drain_pending_tasks(&self) -> Option<impl Future<Output = TaskOutput>> {
        self.strong.drain_pending_tasks()
    }
    /// Discards all queued pending tasks.
    pub fn clear_pending_tasks(&self) {
        self.strong.clear_pending_tasks();
    }
    /// Returns the underlying `Context`.
    pub fn context(&self) -> &Context {
        &self.strong.context
    }
    /// Builds an [`Interval`] ticking on the underlying `Context`.
    ///
    /// [`Interval`]: struct.Interval.html
    pub fn new_interval(&self, interval: Duration) -> Interval {
        self.strong.new_interval(interval)
    }
    /// Builds a `Future` to execute an `action` at [`Interval`]s.
    ///
    /// [`Interval`]: struct.Interval.html
    pub fn interval<F, Fut>(&self, interval: Duration, f: F) -> impl Future<Output = Fut::Output>
    where
        F: Fn() -> Fut + Send + Sync + 'static,
        Fut: Future<Output = Result<(), ()>> + Send + 'static,
    {
        self.strong.interval(interval, f)
    }
    /// Builds a [`Timeout`] on the underlying `Context`.
    ///
    /// [`Timeout`]: struct.Timeout.html
    pub fn new_timeout(&self, timeout: Duration) -> Timeout {
        self.strong.new_timeout(timeout)
    }
    /// Builds a `Future` to execute an action after the given `delay` has elapsed.
    pub fn delay_for<F, Fut>(&self, delay: Duration, f: F) -> impl Future<Output = Fut::Output>
    where
        F: FnOnce() -> Fut + Send + Sync + 'static,
        Fut: Future<Output = ()> + Send + 'static,
    {
        self.strong.delay_for(delay, f)
    }
}
impl std::fmt::Display for PadContextRef<'_> {
    /// Renders exactly as the inner `PadContextStrong` does.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "{}", self.strong)
    }
}
/// The strong inner state shared by `PadContext` and `PadContextRef`:
/// a strong `Context` reference and the pad's pending-task queue id.
#[derive(Debug)]
struct PadContextStrong {
    context: Context,
    queue_id: TaskQueueId,
}
impl PadContextStrong {
    #[inline]
    pub fn downgrade(&self) -> PadContextWeak {
        PadContextWeak {
            context_weak: self.context.downgrade(),
            queue_id: self.queue_id,
        }
    }
    #[inline]
    fn add_pending_task<T>(&self, task: T)
    where
        T: Future<Output = TaskOutput> + Send + 'static,
    {
        self.context
            .add_task(self.queue_id, task)
            // `queue_id` was acquired from this `Context` and released only on
            // `PadContext::drop`, so the queue must exist.
            .expect("TaskQueueId controlled by TaskContext")
    }
    #[inline]
    fn drain_pending_tasks(&self) -> Option<impl Future<Output = TaskOutput>> {
        self.context.drain_task_queue(self.queue_id)
    }
    #[inline]
    fn clear_pending_tasks(&self) {
        self.context.clear_task_queue(self.queue_id);
    }
    #[inline]
    fn new_interval(&self, interval: Duration) -> Interval {
        self.context.new_interval(interval)
    }
    #[inline]
    fn interval<F, Fut>(&self, interval: Duration, f: F) -> impl Future<Output = Fut::Output>
    where
        F: Fn() -> Fut + Send + Sync + 'static,
        Fut: Future<Output = Result<(), ()>> + Send + 'static,
    {
        self.context.interval(interval, f)
    }
    #[inline]
    fn new_timeout(&self, timeout: Duration) -> Timeout {
        self.context.new_timeout(timeout)
    }
    #[inline]
    pub fn delay_for<F, Fut>(&self, delay: Duration, f: F) -> impl Future<Output = Fut::Output>
    where
        F: FnOnce() -> Fut + Send + Sync + 'static,
        Fut: Future<Output = ()> + Send + 'static,
    {
        self.context.delay_for(delay, f)
    }
}
impl std::fmt::Display for PadContextStrong {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "Context('{}'), {:?}", self.context.name(), self.queue_id)
    }
}
/// A wrapper on a [`Context`] with additional features for [`PadSrc`] & [`PadSink`].
///
/// Owns a pending-task queue on the `Context` (released on drop) and can embed
/// a weak self-reference in a custom sticky event for downstream elements.
///
/// [`Context`]: ../executor/struct.Context.html
/// [`PadSrc`]: ../pad/struct.PadSrc.html
/// [`PadSink`]: ../pad/struct.PadSink.html
#[derive(Debug)]
pub struct PadContext(PadContextStrong);
impl PadContext {
    /// Wraps `context`, acquiring a dedicated pending-task queue on it.
    pub fn new(context: Context) -> Self {
        PadContext(PadContextStrong {
            queue_id: context.acquire_task_queue_id(),
            context,
        })
    }
    /// Downgrades to a `PadContextWeak` suitable for storing in events.
    pub fn downgrade(&self) -> PadContextWeak {
        self.0.downgrade()
    }
    // NOTE(review): inherent `as_ref` shadows the `AsRef` convention but
    // returns a `PadContextRef`, not a plain reference.
    pub fn as_ref(&self) -> PadContextRef<'_> {
        PadContextRef::new(self.0.context.clone(), self.0.queue_id)
    }
    /// Spawns `future` on the underlying `Context`.
    pub fn spawn<Fut>(&self, future: Fut)
    where
        Fut: Future<Output = ()> + Send + 'static,
    {
        self.0.context.spawn(future);
    }
    /// Takes the queued pending tasks, if any, as a single `Future`.
    pub fn drain_pending_tasks(&self) -> Option<impl Future<Output = TaskOutput>> {
        self.0.drain_pending_tasks()
    }
    /// Discards all queued pending tasks.
    pub fn clear_pending_tasks(&self) {
        self.0.clear_pending_tasks();
    }
    /// Builds an [`Interval`] ticking on the underlying `Context`.
    ///
    /// [`Interval`]: struct.Interval.html
    pub fn new_interval(&self, interval: Duration) -> Interval {
        self.0.new_interval(interval)
    }
    /// Builds a `Future` to execute an `action` at [`Interval`]s.
    ///
    /// [`Interval`]: struct.Interval.html
    pub fn interval<F, Fut>(&self, interval: Duration, f: F) -> impl Future<Output = Fut::Output>
    where
        F: Fn() -> Fut + Send + Sync + 'static,
        Fut: Future<Output = Result<(), ()>> + Send + 'static,
    {
        self.0.interval(interval, f)
    }
    /// Builds a [`Timeout`] on the underlying `Context`.
    ///
    /// [`Timeout`]: struct.Timeout.html
    pub fn new_timeout(&self, timeout: Duration) -> Timeout {
        self.0.new_timeout(timeout)
    }
    /// Builds a `Future` to execute an action after the given `delay` has elapsed.
    pub fn delay_for<F, Fut>(&self, delay: Duration, f: F) -> impl Future<Output = Fut::Output>
    where
        F: FnOnce() -> Fut + Send + Sync + 'static,
        Fut: Future<Output = ()> + Send + 'static,
    {
        self.0.delay_for(delay, f)
    }
    // Builds the custom downstream sticky event which carries a weak
    // reference to this `PadContext` in a `gst::Structure`.
    pub(super) fn new_sticky_event(&self) -> gst::Event {
        let s = gst::Structure::new("ts-pad-context", &[("pad-context", &self.downgrade())]);
        gst::Event::new_custom_downstream_sticky(s).build()
    }
    /// Returns `true` if `event` is a `PadContext` sticky event.
    // Panics if the event has no structure; custom events built by
    // `new_sticky_event` always carry one.
    #[inline]
    pub fn is_pad_context_sticky_event(event: &gst::event::CustomDownstreamSticky) -> bool {
        event.get_structure().unwrap().get_name() == "ts-pad-context"
    }
    /// Extracts the `PadContextWeak` from `event` if it is a `PadContext`
    /// sticky event, `None` otherwise.
    pub fn check_pad_context_event(event: &gst::Event) -> Option<PadContextWeak> {
        if let gst::EventView::CustomDownstreamSticky(e) = event.view() {
            if Self::is_pad_context_sticky_event(&e) {
                let s = e.get_structure().unwrap();
                let pad_context = s
                    .get::<&PadContextWeak>("pad-context")
                    .expect("event field")
                    .expect("missing event field")
                    .clone();
                Some(pad_context)
            } else {
                None
            }
        } else {
            None
        }
    }
}
impl Drop for PadContext {
    fn drop(&mut self) {
        // Give the pending-task queue back to the `Context`; `queue_id` is
        // owned exclusively by this `PadContext` (acquired in `new`).
        self.0.context.release_task_queue(self.0.queue_id);
    }
}
impl std::fmt::Display for PadContext {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        self.0.fmt(f)
    }
}

View file

@ -0,0 +1,272 @@
// Copyright (C) 2019 François Laignel <fengalin@free.fr>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
//! An execution loop to run asynchronous processing on a [`Context`].
//!
//! [`Context`]: ../executor/struct.Context.html
use futures::channel::oneshot;
use futures::future::{self, BoxFuture};
use futures::lock::Mutex;
use futures::prelude::*;
use gst::TaskState;
use gst::{gst_debug, gst_log, gst_trace, gst_warning};
use std::fmt;
use std::sync::Arc;
use super::future::{abortable_waitable, AbortWaitHandle};
use super::{Context, RUNTIME_CAT};
/// Error returned by `Task` operations attempted while the task is active.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum TaskError {
    /// The operation requires the `Task` to be `Stopped`.
    ActiveTask,
}
impl fmt::Display for TaskError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let msg = match self {
            TaskError::ActiveTask => "The task is still active",
        };
        f.write_str(msg)
    }
}
impl std::error::Error for TaskError {}
/// Shared mutable state of a `Task`, behind an async `Mutex`.
#[derive(Debug)]
struct TaskInner {
    // `Some` between `prepare` and `unprepare`; the `Context` the loop runs on.
    context: Option<Context>,
    state: TaskState,
    // Notified (by drop) when the loop observes a pause/stop request.
    loop_end_sender: Option<oneshot::Sender<()>>,
    // Handle used by `stop` to abort the loop and await its termination.
    loop_handle: Option<AbortWaitHandle>,
}
impl Default for TaskInner {
    fn default() -> Self {
        TaskInner {
            context: None,
            state: TaskState::Stopped,
            loop_end_sender: None,
            loop_handle: None,
        }
    }
}
impl Drop for TaskInner {
    fn drop(&mut self) {
        // Check invariant which can't be held automatically in `Task`
        // because `drop` can't be `async`
        if self.state != TaskState::Stopped {
            panic!("Missing call to `Task::stop`");
        }
    }
}
/// A `Task` operating on a `threadshare` [`Context`].
///
/// Runs a user-provided `async` closure in a loop on the `Context`, with
/// `prepare` / `start` / `pause` / `stop` lifecycle control.
///
/// [`Context`]: struct.Context.html
#[derive(Debug)]
pub struct Task(Arc<Mutex<TaskInner>>);
impl Default for Task {
    fn default() -> Self {
        Task(Arc::new(Mutex::new(TaskInner::default())))
    }
}
impl Task {
    /// Assigns the `Context` the loop will run on.
    ///
    /// Fails with `TaskError::ActiveTask` unless the `Task` is `Stopped`.
    pub async fn prepare(&self, context: Context) -> Result<(), TaskError> {
        let mut inner = self.0.lock().await;
        if inner.state != TaskState::Stopped {
            return Err(TaskError::ActiveTask);
        }
        inner.context = Some(context);
        Ok(())
    }
    /// Releases the `Context`.
    ///
    /// Fails with `TaskError::ActiveTask` unless the `Task` is `Stopped`.
    pub async fn unprepare(&self) -> Result<(), TaskError> {
        let mut inner = self.0.lock().await;
        if inner.state != TaskState::Stopped {
            return Err(TaskError::ActiveTask);
        }
        inner.context = None;
        Ok(())
    }
    /// Returns the current `TaskState`.
    pub async fn state(&self) -> TaskState {
        self.0.lock().await.state
    }
    /// `Starts` the `Task`.
    ///
    /// The `Task` will loop on the provided @func.
    /// The execution occurs on the `Task`'s context.
    pub async fn start<F, Fut>(&self, mut func: F)
    where
        F: (FnMut() -> Fut) + Send + 'static,
        Fut: Future<Output = ()> + Send + 'static,
    {
        let inner_clone = Arc::clone(&self.0);
        let mut inner = self.0.lock().await;
        match inner.state {
            TaskState::Started => {
                gst_log!(RUNTIME_CAT, "Task already Started");
                return;
            }
            TaskState::Paused | TaskState::Stopped => (),
            other => unreachable!("Unexpected Task state {:?}", other),
        }
        gst_debug!(RUNTIME_CAT, "Starting Task");
        // The loop re-checks the state after each iteration of `func`; a
        // pause/stop request takes effect at the next iteration boundary.
        let (loop_fut, loop_handle) = abortable_waitable(async move {
            loop {
                func().await;
                let mut inner = inner_clone.lock().await;
                match inner.state {
                    TaskState::Started => (),
                    TaskState::Paused | TaskState::Stopped => {
                        inner.loop_handle = None;
                        // Dropping the sender wakes the `pause` completion
                        // future (its `receiver.await` result is ignored).
                        inner.loop_end_sender.take();
                        break;
                    }
                    other => unreachable!("Unexpected Task state {:?}", other),
                }
            }
        });
        inner
            .context
            .as_ref()
            .expect("Context not set")
            .spawn(loop_fut.map(drop));
        inner.loop_handle = Some(loop_handle);
        inner.state = TaskState::Started;
        gst_debug!(RUNTIME_CAT, "Task Started");
    }
    /// Pauses the `Started` `Task`.
    // Returns a `Future` which resolves once the loop has actually observed
    // the pause (i.e. after the in-flight iteration completes).
    pub async fn pause(&self) -> BoxFuture<'static, ()> {
        let mut inner = self.0.lock().await;
        match inner.state {
            TaskState::Started => {
                gst_log!(RUNTIME_CAT, "Pausing Task");
                inner.state = TaskState::Paused;
                let (sender, receiver) = oneshot::channel();
                inner.loop_end_sender = Some(sender);
                async move {
                    let _ = receiver.await;
                    gst_log!(RUNTIME_CAT, "Task Paused");
                }
                .boxed()
            }
            TaskState::Paused => {
                gst_trace!(RUNTIME_CAT, "Task already Paused");
                future::ready(()).boxed()
            }
            other => {
                gst_warning!(RUNTIME_CAT, "Attempting to pause Task in state {:?}", other,);
                future::ready(()).boxed()
            }
        }
    }
    /// Stops the `Task`, aborting the loop and waiting for its termination.
    pub async fn stop(&self) {
        let mut inner = self.0.lock().await;
        if inner.state == TaskState::Stopped {
            gst_log!(RUNTIME_CAT, "Task already stopped");
            return;
        }
        gst_debug!(RUNTIME_CAT, "Stopping Task");
        if let Some(loop_handle) = inner.loop_handle.take() {
            let _ = loop_handle.abort_and_wait().await;
        }
        inner.state = TaskState::Stopped;
        gst_debug!(RUNTIME_CAT, "Task Stopped");
    }
}
#[cfg(test)]
mod tests {
    use futures::channel::mpsc;
    use futures::lock::Mutex;
    use std::sync::Arc;
    use crate::runtime::Context;
    use super::*;
    // Exercises the full prepare -> start -> pause -> stop lifecycle, driving
    // the loop iterations through an mpsc channel.
    #[tokio::test]
    async fn task() {
        gst::init().unwrap();
        let context = Context::acquire("task", 2).unwrap();
        let task = Task::default();
        task.prepare(context).await.unwrap();
        let (mut sender, receiver) = mpsc::channel(0);
        let receiver = Arc::new(Mutex::new(receiver));
        gst_debug!(RUNTIME_CAT, "task test: starting");
        // Each loop iteration blocks on the channel until an item arrives or
        // the sender is dropped.
        task.start(move || {
            let receiver = Arc::clone(&receiver);
            async move {
                gst_debug!(RUNTIME_CAT, "task test: awaiting receiver");
                match receiver.lock().await.next().await {
                    Some(_) => gst_debug!(RUNTIME_CAT, "task test: item received"),
                    None => gst_debug!(RUNTIME_CAT, "task test: channel complete"),
                }
            }
        })
        .await;
        gst_debug!(RUNTIME_CAT, "task test: sending item");
        sender.send(()).await.unwrap();
        gst_debug!(RUNTIME_CAT, "task test: item sent");
        gst_debug!(RUNTIME_CAT, "task test: pausing");
        let pause_completion = task.pause().await;
        gst_debug!(RUNTIME_CAT, "task test: dropping sender");
        // Dropping the sender unblocks the in-flight iteration so the pause
        // can complete.
        drop(sender);
        gst_debug!(RUNTIME_CAT, "task test: awaiting pause completion");
        pause_completion.await;
        gst_debug!(RUNTIME_CAT, "task test: stopping");
        task.stop().await;
        gst_debug!(RUNTIME_CAT, "task test: stopped");
    }
}

View file

@ -17,7 +17,9 @@
// Boston, MA 02110-1335, USA.
use either::Either;
use futures::{channel::oneshot, prelude::*};
use futures::future::{abortable, AbortHandle, Aborted, BoxFuture};
use futures::lock::Mutex;
use gst;
use gst::prelude::*;
@ -26,13 +28,7 @@ use gst::{gst_debug, gst_error};
use lazy_static::lazy_static;
use std::io;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
use tokio_executor::current_thread as tokio_current_thread;
use super::iocontext::*;
use std::sync::Arc;
lazy_static! {
static ref SOCKET_CAT: gst::DebugCategory = gst::DebugCategory::new(
@ -42,185 +38,116 @@ lazy_static! {
);
}
#[derive(Debug)]
pub struct Socket<T: SocketRead + 'static>(Arc<Mutex<SocketInner<T>>>);
#[derive(PartialEq, Eq, Debug)]
enum SocketState {
Unscheduled,
Scheduled,
Running,
Shutdown,
}
pub trait SocketRead: Send + Unpin {
const DO_TIMESTAMP: bool;
fn poll_read(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<(usize, Option<std::net::SocketAddr>)>>;
fn read<'buf>(
&self,
buffer: &'buf mut [u8],
) -> BoxFuture<'buf, io::Result<(usize, Option<std::net::SocketAddr>)>>;
}
#[derive(PartialEq, Eq, Debug)]
enum SocketState {
Paused,
Prepared,
Started,
Unprepared,
}
#[derive(Debug)]
struct SocketInner<T: SocketRead + 'static> {
element: gst::Element,
state: SocketState,
reader: Pin<Box<T>>,
element: gst::Element,
reader: T,
buffer_pool: gst::BufferPool,
waker: Option<task::Waker>,
shutdown_receiver: Option<oneshot::Receiver<()>>,
clock: Option<gst::Clock>,
base_time: Option<gst::ClockTime>,
read_handle: Option<AbortHandle>,
}
impl<T: SocketRead + 'static> Socket<T> {
pub fn new(element: &gst::Element, reader: T, buffer_pool: gst::BufferPool) -> Self {
Socket(Arc::new(Mutex::new(SocketInner::<T> {
state: SocketState::Unprepared,
element: element.clone(),
state: SocketState::Unscheduled,
reader: Pin::new(Box::new(reader)),
reader,
buffer_pool,
waker: None,
shutdown_receiver: None,
clock: None,
base_time: None,
read_handle: None,
})))
}
pub fn schedule<F, G, Fut>(
&self,
io_context: &IOContext,
func: F,
err_func: G,
) -> Result<(), ()>
where
F: Fn((gst::Buffer, Option<std::net::SocketAddr>)) -> Fut + Send + 'static,
Fut: Future<Output = Result<(), gst::FlowError>> + Send + 'static,
G: FnOnce(Either<gst::FlowError, io::Error>) + Send + 'static,
{
// Ready->Paused
//
// Need to wait for a possible shutdown to finish first
// spawn() on the reactor, change state to Scheduled
let stream = SocketStream::<T>(self.clone(), None);
pub async fn prepare(&self) -> Result<SocketStream<T>, ()> {
// Null->Ready
let mut inner = self.0.lock().await;
if inner.state != SocketState::Unprepared {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already prepared");
return Ok(SocketStream::<T>::new(self));
}
gst_debug!(SOCKET_CAT, obj: &inner.element, "Preparing socket");
let mut inner = self.0.lock().unwrap();
gst_debug!(SOCKET_CAT, obj: &inner.element, "Scheduling socket");
if inner.state == SocketState::Scheduled {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already scheduled");
inner.buffer_pool.set_active(true).map_err(|err| {
gst_error!(SOCKET_CAT, obj: &inner.element, "Failed to prepare socket: {}", err);
})?;
inner.state = SocketState::Prepared;
Ok(SocketStream::<T>::new(self))
}
pub async fn start(&self, clock: Option<gst::Clock>, base_time: Option<gst::ClockTime>) {
// Paused->Playing
let mut inner = self.0.lock().await;
assert_ne!(SocketState::Unprepared, inner.state);
if inner.state == SocketState::Started {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already started");
return;
}
gst_debug!(SOCKET_CAT, obj: &inner.element, "Starting socket");
inner.clock = clock;
inner.base_time = base_time;
inner.state = SocketState::Started;
}
pub async fn pause(&self) {
// Playing->Paused
let mut inner = self.0.lock().await;
assert_ne!(SocketState::Unprepared, inner.state);
if inner.state != SocketState::Started {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket not started");
return;
}
gst_debug!(SOCKET_CAT, obj: &inner.element, "Pausing socket");
inner.clock = None;
inner.base_time = None;
inner.state = SocketState::Paused;
if let Some(read_handle) = inner.read_handle.take() {
read_handle.abort();
}
}
pub async fn unprepare(&self) -> Result<(), ()> {
// Ready->Null
let mut inner = self.0.lock().await;
assert_ne!(SocketState::Started, inner.state);
if inner.state == SocketState::Unprepared {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already unprepared");
return Ok(());
}
assert_eq!(inner.state, SocketState::Unscheduled);
inner.state = SocketState::Scheduled;
if inner.buffer_pool.set_active(true).is_err() {
gst_error!(SOCKET_CAT, obj: &inner.element, "Failed to activate buffer pool");
return Err(());
}
inner.buffer_pool.set_active(false).map_err(|err| {
gst_error!(SOCKET_CAT, obj: &inner.element, "Failed to unprepare socket: {}", err);
})?;
inner.state = SocketState::Unprepared;
let (sender, receiver) = oneshot::channel();
inner.shutdown_receiver = Some(receiver);
let element_clone = inner.element.clone();
io_context.spawn(
stream
.try_for_each(move |(buffer, saddr)| {
func((buffer, saddr)).into_future().map_err(Either::Left)
})
.then(move |res| {
gst_debug!(
SOCKET_CAT,
obj: &element_clone,
"Socket finished: {:?}",
res
);
if let Err(err) = res {
err_func(err);
}
let _ = sender.send(());
future::ready(())
}),
);
Ok(())
}
pub fn unpause(&self, clock: Option<gst::Clock>, base_time: Option<gst::ClockTime>) {
// Paused->Playing
//
// Change state to Running and signal task
let mut inner = self.0.lock().unwrap();
gst_debug!(SOCKET_CAT, obj: &inner.element, "Unpausing socket");
if inner.state == SocketState::Running {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already unpaused");
return;
}
assert_eq!(inner.state, SocketState::Scheduled);
inner.state = SocketState::Running;
inner.clock = clock;
inner.base_time = base_time;
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
pub fn pause(&self) {
// Playing->Paused
//
// Change state to Scheduled and signal task
let mut inner = self.0.lock().unwrap();
gst_debug!(SOCKET_CAT, obj: &inner.element, "Pausing socket");
if inner.state == SocketState::Scheduled {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already paused");
return;
}
assert_eq!(inner.state, SocketState::Running);
inner.state = SocketState::Scheduled;
inner.clock = None;
inner.base_time = None;
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
pub fn shutdown(&self) {
// Paused->Ready
//
// Change state to Shutdown and signal task, wait for our future to be finished
// Requires scheduled function to be unblocked! Pad must be deactivated before
let mut inner = self.0.lock().unwrap();
gst_debug!(SOCKET_CAT, obj: &inner.element, "Shutting down socket");
if inner.state == SocketState::Unscheduled {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket already shut down");
return;
}
assert!(inner.state == SocketState::Scheduled || inner.state == SocketState::Running);
inner.state = SocketState::Shutdown;
if let Some(waker) = inner.waker.take() {
waker.wake();
}
let shutdown_receiver = inner.shutdown_receiver.take().unwrap();
gst_debug!(SOCKET_CAT, obj: &inner.element, "Waiting for socket to shut down");
drop(inner);
tokio_current_thread::block_on_all(shutdown_receiver).expect("Already shut down");
let mut inner = self.0.lock().unwrap();
inner.state = SocketState::Unscheduled;
let _ = inner.buffer_pool.set_active(false);
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket shut down");
}
}
impl<T: SocketRead + Unpin + 'static> Clone for Socket<T> {
@ -229,91 +156,93 @@ impl<T: SocketRead + Unpin + 'static> Clone for Socket<T> {
}
}
impl<T: SocketRead + 'static> Drop for SocketInner<T> {
fn drop(&mut self) {
assert_eq!(self.state, SocketState::Unscheduled);
}
pub type SocketStreamItem =
Result<(gst::Buffer, Option<std::net::SocketAddr>), Either<gst::FlowError, io::Error>>;
#[derive(Debug)]
pub struct SocketStream<T: SocketRead + 'static> {
socket: Socket<T>,
mapped_buffer: Option<gst::MappedBuffer<gst::buffer::Writable>>,
}
struct SocketStream<T: SocketRead + 'static>(
Socket<T>,
Option<gst::MappedBuffer<gst::buffer::Writable>>,
);
impl<T: SocketRead + 'static> Stream for SocketStream<T> {
type Item =
Result<(gst::Buffer, Option<std::net::SocketAddr>), Either<gst::FlowError, io::Error>>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
// take the mapped_buffer before locking the socket so as to please the mighty borrow checker
let mut mapped_buffer = self.1.take();
let mut inner = (self.0).0.lock().unwrap();
if inner.state == SocketState::Shutdown {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket shutting down");
return Poll::Ready(None);
} else if inner.state == SocketState::Scheduled {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Socket not running");
inner.waker = Some(cx.waker().clone());
drop(inner);
self.1 = mapped_buffer;
return Poll::Pending;
impl<T: SocketRead + 'static> SocketStream<T> {
fn new(socket: &Socket<T>) -> Self {
SocketStream {
socket: socket.clone(),
mapped_buffer: None,
}
}
assert_eq!(inner.state, SocketState::Running);
// Implementing `next` as an `async fn` instead of a `Stream` because of the `async` `Mutex`
// See https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/merge_requests/204#note_322774
#[allow(clippy::should_implement_trait)]
pub async fn next(&mut self) -> Option<SocketStreamItem> {
// take the mapped_buffer before locking the socket so as to please the mighty borrow checker
let read_fut = {
let mut inner = self.socket.0.lock().await;
if inner.state != SocketState::Started {
gst_debug!(SOCKET_CAT, obj: &inner.element, "DataQueue is not Started");
return None;
}
gst_debug!(SOCKET_CAT, obj: &inner.element, "Trying to read data");
let (len, saddr, time) = {
let buffer = match mapped_buffer {
Some(ref mut buffer) => buffer,
None => match inner.buffer_pool.acquire_buffer(None) {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Trying to read data");
if self.mapped_buffer.is_none() {
match inner.buffer_pool.acquire_buffer(None) {
Ok(buffer) => {
mapped_buffer = Some(buffer.into_mapped_buffer_writable().unwrap());
mapped_buffer.as_mut().unwrap()
self.mapped_buffer = Some(buffer.into_mapped_buffer_writable().unwrap());
}
Err(err) => {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Failed to acquire buffer {:?}", err);
return Poll::Ready(Some(Err(Either::Left(err))));
return Some(Err(Either::Left(err)));
}
},
};
match inner.reader.as_mut().poll_read(cx, buffer.as_mut_slice()) {
Poll::Pending => {
gst_debug!(SOCKET_CAT, obj: &inner.element, "No data available");
inner.waker = Some(cx.waker().clone());
drop(inner);
self.1 = mapped_buffer;
return Poll::Pending;
}
Poll::Ready(Err(err)) => {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Read error {:?}", err);
return Poll::Ready(Some(Err(Either::Right(err))));
}
Poll::Ready(Ok((len, saddr))) => {
let dts = if T::DO_TIMESTAMP {
let time = inner.clock.as_ref().unwrap().get_time();
let running_time = time - inner.base_time.unwrap();
gst_debug!(SOCKET_CAT, obj: &inner.element, "Read {} bytes at {} (clock {})", len, running_time, time);
running_time
} else {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Read {} bytes", len);
gst::CLOCK_TIME_NONE
};
(len, saddr, dts)
}
}
let (read_fut, abort_handle) = abortable(
inner
.reader
.read(self.mapped_buffer.as_mut().unwrap().as_mut_slice()),
);
inner.read_handle = Some(abort_handle);
read_fut
};
let mut buffer = mapped_buffer.unwrap().into_buffer();
{
let buffer = buffer.get_mut().unwrap();
if len < buffer.get_size() {
buffer.set_size(len);
}
buffer.set_dts(time);
}
match read_fut.await {
Ok(Ok((len, saddr))) => {
let inner = self.socket.0.lock().await;
Poll::Ready(Some(Ok((buffer, saddr))))
let dts = if T::DO_TIMESTAMP {
let time = inner.clock.as_ref().unwrap().get_time();
let running_time = time - inner.base_time.unwrap();
gst_debug!(SOCKET_CAT, obj: &inner.element, "Read {} bytes at {} (clock {})", len, running_time, time);
running_time
} else {
gst_debug!(SOCKET_CAT, obj: &inner.element, "Read {} bytes", len);
gst::CLOCK_TIME_NONE
};
let mut buffer = self.mapped_buffer.take().unwrap().into_buffer();
{
let buffer = buffer.get_mut().unwrap();
if len < buffer.get_size() {
buffer.set_size(len);
}
buffer.set_dts(dts);
}
Some(Ok((buffer, saddr)))
}
Ok(Err(err)) => {
gst_debug!(SOCKET_CAT, obj: &self.socket.0.lock().await.element, "Read error {:?}", err);
Some(Err(Either::Right(err)))
}
Err(Aborted) => {
gst_debug!(SOCKET_CAT, obj: &self.socket.0.lock().await.element, "Read Aborted");
None
}
}
}
}

View file

@ -17,8 +17,9 @@
// Boston, MA 02110-1335, USA.
use either::Either;
use futures::ready;
use futures::{future::BoxFuture, prelude::*};
use futures::future::BoxFuture;
use futures::lock::{Mutex, MutexGuard};
use futures::prelude::*;
use glib;
use glib::prelude::*;
@ -30,21 +31,24 @@ use gst;
use gst::prelude::*;
use gst::subclass::prelude::*;
use gst::{gst_debug, gst_element_error, gst_error, gst_error_msg, gst_log, gst_trace};
use gst::{EventView, QueryView};
use lazy_static::lazy_static;
use rand;
use std::io;
use std::pin::Pin;
use std::sync::Mutex;
use std::task::{Context, Poll};
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::u16;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt;
use super::iocontext::*;
use super::socket::*;
use crate::block_on;
use crate::runtime::prelude::*;
use crate::runtime::{Context, PadSrc, PadSrcRef};
use super::socket::{Socket, SocketRead, SocketStream};
const DEFAULT_ADDRESS: Option<&str> = Some("127.0.0.1");
const DEFAULT_PORT: u32 = 5000;
@ -139,92 +143,227 @@ static PROPERTIES: [subclass::Property; 6] = [
}),
];
pub struct TcpClientReader {
connect_future: BoxFuture<'static, io::Result<tokio::net::TcpStream>>,
struct TcpClientReaderInner {
connect_future: Option<BoxFuture<'static, io::Result<tokio::net::TcpStream>>>,
socket: Option<tokio::net::TcpStream>,
}
impl TcpClientReaderInner {
fn new<Fut>(connect_future: Fut) -> Self
where
Fut: Future<Output = io::Result<tokio::net::TcpStream>> + Send + 'static,
{
Self {
connect_future: Some(connect_future.boxed()),
socket: None,
}
}
}
pub struct TcpClientReader(Arc<Mutex<TcpClientReaderInner>>);
impl TcpClientReader {
pub fn new<Fut>(connect_future: Fut) -> Self
where
Fut: Future<Output = io::Result<tokio::net::TcpStream>> + Send + 'static,
{
Self {
connect_future: connect_future.boxed(),
socket: None,
}
TcpClientReader(Arc::new(Mutex::new(TcpClientReaderInner::new(
connect_future,
))))
}
}
impl SocketRead for TcpClientReader {
const DO_TIMESTAMP: bool = false;
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<(usize, Option<std::net::SocketAddr>)>> {
let socket = match self.socket {
Some(ref mut socket) => socket,
None => {
let stream = ready!(self.connect_future.as_mut().poll(cx))?;
self.socket = Some(stream);
self.socket.as_mut().unwrap()
}
};
fn read<'buf>(
&self,
buffer: &'buf mut [u8],
) -> BoxFuture<'buf, io::Result<(usize, Option<std::net::SocketAddr>)>> {
let this = Arc::clone(&self.0);
Pin::new(socket)
.as_mut()
.poll_read(cx, buf)
.map_ok(|read_size| (read_size, None))
async move {
let mut this = this.lock().await;
let socket = match this.socket {
Some(ref mut socket) => socket,
None => {
let stream = this.connect_future.take().unwrap().await?;
this.socket = Some(stream);
this.socket.as_mut().unwrap()
}
};
socket.read(buffer).await.map(|read_size| (read_size, None))
}
.boxed()
}
}
struct State {
io_context: Option<IOContext>,
pending_future_id: Option<PendingFutureId>,
socket: Option<Socket<TcpClientReader>>,
struct TcpClientSrcPadHandlerInner {
socket_stream: Option<SocketStream<TcpClientReader>>,
need_initial_events: bool,
configured_caps: Option<gst::Caps>,
pending_future_abort_handle: Option<future::AbortHandle>,
}
impl Default for State {
fn default() -> State {
State {
io_context: None,
pending_future_id: None,
socket: None,
impl Default for TcpClientSrcPadHandlerInner {
fn default() -> Self {
TcpClientSrcPadHandlerInner {
socket_stream: None,
need_initial_events: true,
configured_caps: None,
pending_future_abort_handle: None,
}
}
}
struct TcpClientSrc {
src_pad: gst::Pad,
state: Mutex<State>,
settings: Mutex<Settings>,
#[derive(Clone)]
struct TcpClientSrcPadHandler(Arc<Mutex<TcpClientSrcPadHandlerInner>>);
impl TcpClientSrcPadHandler {
fn new() -> Self {
TcpClientSrcPadHandler(Arc::new(Mutex::new(TcpClientSrcPadHandlerInner::default())))
}
#[inline]
async fn lock(&self) -> MutexGuard<'_, TcpClientSrcPadHandlerInner> {
self.0.lock().await
}
async fn start_task(&self, pad: PadSrcRef<'_>, element: &gst::Element) {
let this = self.clone();
let pad_weak = pad.downgrade();
let element = element.clone();
pad.start_task(move || {
let this = this.clone();
let pad_weak = pad_weak.clone();
let element = element.clone();
async move {
let item = this
.lock()
.await
.socket_stream
.as_mut()
.expect("Missing SocketStream")
.next()
.await;
let pad = pad_weak.upgrade().expect("PadSrc no longer exists");
let buffer = match item {
Some(Ok((buffer, _))) => buffer,
Some(Err(err)) => {
gst_error!(CAT, obj: &element, "Got error {}", err);
match err {
Either::Left(gst::FlowError::CustomError) => (),
Either::Left(err) => {
gst_element_error!(
element,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
}
Either::Right(err) => {
gst_element_error!(
element,
gst::StreamError::Failed,
("I/O error"),
["streaming stopped, I/O error {}", err]
);
}
}
return;
}
None => {
gst_log!(CAT, obj: pad.gst_pad(), "SocketStream Stopped");
pad.pause_task().await;
return;
}
};
this.push_buffer(pad, &element, buffer).await;
}
})
.await;
}
async fn push_buffer(&self, pad: PadSrcRef<'_>, element: &gst::Element, buffer: gst::Buffer) {
{
let mut events = Vec::new();
{
let mut inner = self.lock().await;
if inner.need_initial_events {
gst_debug!(CAT, obj: pad.gst_pad(), "Pushing initial events");
let stream_id =
format!("{:08x}{:08x}", rand::random::<u32>(), rand::random::<u32>());
events.push(
gst::Event::new_stream_start(&stream_id)
.group_id(gst::util_group_id_next())
.build(),
);
let tcpclientsrc = TcpClientSrc::from_instance(element);
if let Some(ref caps) = tcpclientsrc.settings.lock().await.caps {
events.push(gst::Event::new_caps(&caps).build());
inner.configured_caps = Some(caps.clone());
}
events.push(
gst::Event::new_segment(&gst::FormattedSegment::<gst::format::Time>::new())
.build(),
);
inner.need_initial_events = false;
}
if buffer.get_size() == 0 {
events.push(gst::Event::new_eos().build());
}
}
for event in events {
pad.push_event(event).await;
}
}
match pad.push(buffer).await {
Ok(_) => {
gst_log!(CAT, obj: pad.gst_pad(), "Successfully pushed buffer");
}
Err(gst::FlowError::Flushing) => {
gst_debug!(CAT, obj: pad.gst_pad(), "Flushing");
pad.pause_task().await;
}
Err(gst::FlowError::Eos) => {
gst_debug!(CAT, obj: pad.gst_pad(), "EOS");
pad.pause_task().await;
}
Err(err) => {
gst_error!(CAT, obj: pad.gst_pad(), "Got error {}", err);
gst_element_error!(
element,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
}
}
}
}
lazy_static! {
static ref CAT: gst::DebugCategory = gst::DebugCategory::new(
"ts-tcpclientsrc",
gst::DebugColorFlags::empty(),
Some("Thread-sharing TCP Client source"),
);
}
impl PadSrcHandler for TcpClientSrcPadHandler {
type ElementImpl = TcpClientSrc;
impl TcpClientSrc {
fn src_event(&self, pad: &gst::Pad, element: &gst::Element, event: gst::Event) -> bool {
use gst::EventView;
gst_log!(CAT, obj: pad, "Handling event {:?}", event);
fn src_event(
&self,
pad: PadSrcRef,
tcpclientsrc: &TcpClientSrc,
element: &gst::Element,
event: gst::Event,
) -> Either<bool, BoxFuture<'static, bool>> {
gst_log!(CAT, obj: pad.gst_pad(), "Handling event {:?}", event);
let ret = match event.view() {
EventView::FlushStart(..) => {
let _ = self.stop(element);
let _ = block_on!(tcpclientsrc.pause(element));
true
}
EventView::FlushStop(..) => {
@ -232,7 +371,7 @@ impl TcpClientSrc {
if res == Ok(gst::StateChangeSuccess::Success) && state == gst::State::Playing
|| res == Ok(gst::StateChangeSuccess::Async) && pending == gst::State::Playing
{
let _ = self.start(element);
let _ = block_on!(tcpclientsrc.start(element));
}
true
}
@ -242,22 +381,22 @@ impl TcpClientSrc {
};
if ret {
gst_log!(CAT, obj: pad, "Handled event {:?}", event);
gst_log!(CAT, obj: pad.gst_pad(), "Handled event {:?}", event);
} else {
gst_log!(CAT, obj: pad, "Didn't handle event {:?}", event);
gst_log!(CAT, obj: pad.gst_pad(), "Didn't handle event {:?}", event);
}
ret
Either::Left(ret)
}
fn src_query(
&self,
pad: &gst::Pad,
pad: PadSrcRef,
_tcpclientsrc: &TcpClientSrc,
_element: &gst::Element,
query: &mut gst::QueryRef,
) -> bool {
use gst::QueryView;
gst_log!(CAT, obj: pad, "Handling query {:?}", query);
gst_log!(CAT, obj: pad.gst_pad(), "Handling query {:?}", query);
let ret = match query.view_mut() {
QueryView::Latency(ref mut q) => {
q.set(false, 0.into(), 0.into());
@ -269,8 +408,8 @@ impl TcpClientSrc {
true
}
QueryView::Caps(ref mut q) => {
let state = self.state.lock().unwrap();
let caps = if let Some(ref caps) = state.configured_caps {
let inner = block_on!(self.lock());
let caps = if let Some(ref caps) = inner.configured_caps {
q.get_filter()
.map(|f| f.intersect_with_mode(caps, gst::CapsIntersectMode::First))
.unwrap_or_else(|| caps.clone())
@ -288,143 +427,46 @@ impl TcpClientSrc {
};
if ret {
gst_log!(CAT, obj: pad, "Handled query {:?}", query);
gst_log!(CAT, obj: pad.gst_pad(), "Handled query {:?}", query);
} else {
gst_log!(CAT, obj: pad, "Didn't handle query {:?}", query);
gst_log!(CAT, obj: pad.gst_pad(), "Didn't handle query {:?}", query);
}
ret
}
}
fn create_io_context_event(state: &State) -> Option<gst::Event> {
if let (&Some(ref pending_future_id), &Some(ref io_context)) =
(&state.pending_future_id, &state.io_context)
{
let s = gst::Structure::new(
"ts-io-context",
&[
("io-context", &io_context),
("pending-future-id", &*pending_future_id),
],
);
Some(gst::Event::new_custom_downstream_sticky(s).build())
} else {
None
}
struct State {
socket: Option<Socket<TcpClientReader>>,
}
impl Default for State {
fn default() -> State {
State { socket: None }
}
}
async fn push_buffer(element: gst::Element, buffer: gst::Buffer) -> Result<(), gst::FlowError> {
let tcpclientsrc = Self::from_instance(&element);
let mut events = Vec::new();
{
let mut state = tcpclientsrc.state.lock().unwrap();
if state.need_initial_events {
gst_debug!(CAT, obj: &element, "Pushing initial events");
struct TcpClientSrc {
src_pad: PadSrc,
src_pad_handler: TcpClientSrcPadHandler,
state: Mutex<State>,
settings: Mutex<Settings>,
}
let stream_id =
format!("{:08x}{:08x}", rand::random::<u32>(), rand::random::<u32>());
events.push(
gst::Event::new_stream_start(&stream_id)
.group_id(gst::util_group_id_next())
.build(),
);
if let Some(ref caps) = tcpclientsrc.settings.lock().unwrap().caps {
events.push(gst::Event::new_caps(&caps).build());
state.configured_caps = Some(caps.clone());
}
events.push(
gst::Event::new_segment(&gst::FormattedSegment::<gst::format::Time>::new())
.build(),
);
if let Some(event) = Self::create_io_context_event(&state) {
events.push(event);
// Get rid of reconfigure flag
tcpclientsrc.src_pad.check_reconfigure();
}
state.need_initial_events = false;
} else if tcpclientsrc.src_pad.check_reconfigure() {
if let Some(event) = Self::create_io_context_event(&state) {
events.push(event);
}
}
if buffer.get_size() == 0 {
events.push(gst::Event::new_eos().build());
}
}
for event in events {
tcpclientsrc.src_pad.push_event(event);
}
match tcpclientsrc.src_pad.push(buffer) {
Ok(_) => gst_log!(CAT, obj: &element, "Successfully pushed buffer"),
Err(gst::FlowError::Flushing) => {
gst_debug!(CAT, obj: &element, "Flushing");
let state = tcpclientsrc.state.lock().unwrap();
if let Some(ref socket) = state.socket {
socket.pause();
}
}
Err(gst::FlowError::Eos) => {
gst_debug!(CAT, obj: &element, "EOS");
let state = tcpclientsrc.state.lock().unwrap();
if let Some(ref socket) = state.socket {
socket.pause();
}
}
Err(err) => {
gst_error!(CAT, obj: &element, "Got error {}", err);
gst_element_error!(
element,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
return Err(gst::FlowError::CustomError);
}
}
let abortable_drain = {
let mut state = tcpclientsrc.state.lock().unwrap();
if let State {
io_context: Some(ref io_context),
pending_future_id: Some(ref pending_future_id),
ref mut pending_future_abort_handle,
..
} = *state
{
let (cancel, abortable_drain) =
io_context.drain_pending_futures(*pending_future_id);
*pending_future_abort_handle = cancel;
abortable_drain
} else {
return Ok(());
}
};
abortable_drain.await
}
fn prepare(&self, element: &gst::Element) -> Result<(), gst::ErrorMessage> {
use std::net::{IpAddr, SocketAddr};
lazy_static! {
static ref CAT: gst::DebugCategory = gst::DebugCategory::new(
"ts-tcpclientsrc",
gst::DebugColorFlags::empty(),
Some("Thread-sharing TCP Client source"),
);
}
impl TcpClientSrc {
async fn prepare(&self, element: &gst::Element) -> Result<(), gst::ErrorMessage> {
let mut state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Preparing");
let settings = self.settings.lock().unwrap().clone();
let mut state = self.state.lock().unwrap();
let io_context =
IOContext::new(&settings.context, settings.context_wait).map_err(|err| {
gst_error_msg!(
gst::ResourceError::OpenRead,
["Failed to create IO context: {}", err]
)
})?;
let settings = self.settings.lock().await;
let addr: IpAddr = match settings.address {
None => {
@ -445,6 +487,14 @@ impl TcpClientSrc {
};
let port = settings.port;
let context =
Context::acquire(&settings.context, settings.context_wait).map_err(|err| {
gst_error_msg!(
gst::ResourceError::OpenRead,
["Failed to acquire Context: {}", err]
)
})?;
let saddr = SocketAddr::new(addr, port as u16);
gst_debug!(CAT, obj: element, "Connecting to {:?}", saddr);
let socket = tokio::net::TcpStream::connect(saddr);
@ -465,111 +515,80 @@ impl TcpClientSrc {
buffer_pool,
);
let element_clone = element.clone();
let element_clone2 = element.clone();
socket
.schedule(
&io_context,
move |(buffer, _)| Self::push_buffer(element_clone.clone(), buffer),
move |err| {
gst_error!(CAT, obj: &element_clone2, "Got error {}", err);
match err {
Either::Left(gst::FlowError::CustomError) => (),
Either::Left(err) => {
gst_element_error!(
element_clone2,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
}
Either::Right(err) => {
gst_element_error!(
element_clone2,
gst::StreamError::Failed,
("I/O error"),
["streaming stopped, I/O error {}", err]
);
}
}
},
)
.map_err(|_| {
gst_error_msg!(gst::ResourceError::OpenRead, ["Failed to schedule socket"])
let socket_stream = socket.prepare().await.map_err(|_| {
gst_error_msg!(gst::ResourceError::OpenRead, ["Failed to prepare socket"])
})?;
self.src_pad_handler.lock().await.socket_stream = Some(socket_stream);
self.src_pad
.prepare(context, &self.src_pad_handler)
.await
.map_err(|err| {
gst_error_msg!(
gst::ResourceError::OpenRead,
["Error preparing src_pads: {:?}", err]
)
})?;
let pending_future_id = io_context.acquire_pending_future_id();
gst_debug!(
CAT,
obj: element,
"Got pending future id {:?}",
pending_future_id
);
state.socket = Some(socket);
state.io_context = Some(io_context);
state.pending_future_id = Some(pending_future_id);
gst_debug!(CAT, obj: element, "Prepared");
Ok(())
}
fn unprepare(&self, element: &gst::Element) -> Result<(), ()> {
async fn unprepare(&self, element: &gst::Element) -> Result<(), ()> {
let mut state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Unpreparing");
// FIXME: The IO Context has to be alive longer than the queue,
// otherwise the queue can't finish any remaining work
let (mut socket, io_context) = {
let mut state = self.state.lock().unwrap();
self.src_pad.stop_task().await;
if let (&Some(ref pending_future_id), &Some(ref io_context)) =
(&state.pending_future_id, &state.io_context)
{
io_context.release_pending_future_id(*pending_future_id);
}
let socket = state.socket.take();
let io_context = state.io_context.take();
*state = State::default();
(socket, io_context)
};
if let Some(ref socket) = socket.take() {
socket.shutdown();
{
let socket = state.socket.take().unwrap();
socket.unprepare().await.unwrap();
}
drop(io_context);
let _ = self.src_pad.unprepare().await;
self.src_pad_handler.lock().await.configured_caps = None;
gst_debug!(CAT, obj: element, "Unprepared");
Ok(())
}
fn start(&self, element: &gst::Element) -> Result<(), ()> {
async fn start(&self, element: &gst::Element) -> Result<(), ()> {
let state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Starting");
let state = self.state.lock().unwrap();
if let Some(ref socket) = state.socket {
socket.unpause(None, None);
socket
.start(element.get_clock(), Some(element.get_base_time()))
.await;
}
self.src_pad_handler
.start_task(self.src_pad.as_ref(), element)
.await;
gst_debug!(CAT, obj: element, "Started");
Ok(())
}
fn stop(&self, element: &gst::Element) -> Result<(), ()> {
gst_debug!(CAT, obj: element, "Stopping");
let mut state = self.state.lock().unwrap();
async fn pause(&self, element: &gst::Element) -> Result<(), ()> {
let pause_completion = {
let state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Pausing");
if let Some(ref socket) = state.socket {
socket.pause();
}
let pause_completion = self.src_pad.pause_task().await;
state.socket.as_ref().unwrap().pause().await;
if let Some(abort_handle) = state.pending_future_abort_handle.take() {
abort_handle.abort();
}
pause_completion
};
gst_debug!(CAT, obj: element, "Stopped");
gst_debug!(CAT, obj: element, "Waiting for Task Pause to complete");
pause_completion.await;
gst_debug!(CAT, obj: element, "Paused");
Ok(())
}
@ -606,25 +625,11 @@ impl ObjectSubclass for TcpClientSrc {
fn new_with_class(klass: &subclass::simple::ClassStruct<Self>) -> Self {
let templ = klass.get_pad_template("src").unwrap();
let src_pad = gst::Pad::new_from_template(&templ, Some("src"));
src_pad.set_event_function(|pad, parent, event| {
TcpClientSrc::catch_panic_pad_function(
parent,
|| false,
|tcpclientsrc, element| tcpclientsrc.src_event(pad, element, event),
)
});
src_pad.set_query_function(|pad, parent, query| {
TcpClientSrc::catch_panic_pad_function(
parent,
|| false,
|tcpclientsrc, element| tcpclientsrc.src_query(pad, element, query),
)
});
let src_pad = PadSrc::new_from_template(&templ, Some("src"));
Self {
src_pad,
src_pad_handler: TcpClientSrcPadHandler::new(),
state: Mutex::new(State::default()),
settings: Mutex::new(Settings::default()),
}
@ -639,30 +644,30 @@ impl ObjectImpl for TcpClientSrc {
match *prop {
subclass::Property("address", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.address = value.get().expect("type checked upstream");
}
subclass::Property("port", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.port = value.get_some().expect("type checked upstream");
}
subclass::Property("caps", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.caps = value.get().expect("type checked upstream");
}
subclass::Property("chunk-size", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.chunk_size = value.get_some().expect("type checked upstream");
}
subclass::Property("context", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.context = value
.get()
.expect("type checked upstream")
.unwrap_or_else(|| "".into());
}
subclass::Property("context-wait", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.context_wait = value.get_some().expect("type checked upstream");
}
_ => unimplemented!(),
@ -674,27 +679,27 @@ impl ObjectImpl for TcpClientSrc {
match *prop {
subclass::Property("address", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.address.to_value())
}
subclass::Property("port", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.port.to_value())
}
subclass::Property("caps", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.caps.to_value())
}
subclass::Property("chunk-size", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.chunk_size.to_value())
}
subclass::Property("context", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.context.to_value())
}
subclass::Property("context-wait", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.context_wait.to_value())
}
_ => unimplemented!(),
@ -705,7 +710,7 @@ impl ObjectImpl for TcpClientSrc {
self.parent_constructed(obj);
let element = obj.downcast_ref::<gst::Element>().unwrap();
element.add_pad(&self.src_pad).unwrap();
element.add_pad(self.src_pad.gst_pad()).unwrap();
super::set_element_flags(element, gst::ElementFlags::SOURCE);
}
@ -721,17 +726,20 @@ impl ElementImpl for TcpClientSrc {
match transition {
gst::StateChange::NullToReady => {
self.prepare(element)
block_on!(self.prepare(element))
.map_err(|err| {
element.post_error_message(&err);
gst::StateChangeError
})
.and_then(|_| self.start(element).map_err(|_| gst::StateChangeError))?;
.and_then(|_| {
block_on!(self.start(element)).map_err(|_| gst::StateChangeError)
})?;
}
gst::StateChange::PlayingToPaused => {
self.stop(element)
.and_then(|_| self.unprepare(element))
.map_err(|_| gst::StateChangeError)?;
block_on!(self.pause(element)).map_err(|_| gst::StateChangeError)?;
}
gst::StateChange::ReadyToNull => {
block_on!(self.unprepare(element)).map_err(|_| gst::StateChangeError)?;
}
_ => (),
}
@ -743,8 +751,8 @@ impl ElementImpl for TcpClientSrc {
success = gst::StateChangeSuccess::Success;
}
gst::StateChange::PausedToReady => {
let mut state = self.state.lock().unwrap();
state.need_initial_events = true;
let mut src_pad_handler = block_on!(self.src_pad_handler.lock());
src_pad_handler.need_initial_events = true;
}
_ => (),
}

View file

@ -16,6 +16,9 @@
// Boston, MA 02110-1335, USA.
use either::Either;
use futures::future::BoxFuture;
use futures::lock::{Mutex, MutexGuard};
use futures::prelude::*;
use gio;
@ -33,6 +36,7 @@ use gst;
use gst::prelude::*;
use gst::subclass::prelude::*;
use gst::{gst_debug, gst_element_error, gst_error, gst_error_msg, gst_log, gst_trace};
use gst::{EventView, QueryView};
use gst_net::*;
use lazy_static::lazy_static;
@ -40,9 +44,8 @@ use lazy_static::lazy_static;
use rand;
use std::io;
use std::pin::Pin;
use std::sync::Mutex;
use std::task::{Context, Poll};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::sync::Arc;
use std::u16;
#[cfg(unix)]
@ -51,7 +54,11 @@ use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
use super::{iocontext::*, socket::*};
use crate::block_on;
use crate::runtime::prelude::*;
use crate::runtime::{Context, PadSrc, PadSrcRef};
use super::socket::{Socket, SocketRead, SocketStream};
const DEFAULT_ADDRESS: Option<&str> = Some("127.0.0.1");
const DEFAULT_PORT: u32 = 5000;
@ -286,76 +293,220 @@ static PROPERTIES: [subclass::Property; 10] = [
}),
];
pub struct UdpReader {
#[derive(Debug)]
struct UdpReaderInner {
socket: tokio::net::udp::UdpSocket,
}
#[derive(Debug)]
pub struct UdpReader(Arc<Mutex<UdpReaderInner>>);
impl UdpReader {
fn new(socket: tokio::net::udp::UdpSocket) -> Self {
Self { socket }
UdpReader(Arc::new(Mutex::new(UdpReaderInner { socket })))
}
}
impl SocketRead for UdpReader {
const DO_TIMESTAMP: bool = true;
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<(usize, Option<std::net::SocketAddr>)>> {
Pin::new(&mut self.socket.recv_from(buf).boxed())
.as_mut()
.poll(cx)
.map(|res| res.map(|(read_size, saddr)| (read_size, Some(saddr))))
fn read<'buf>(
&self,
buffer: &'buf mut [u8],
) -> BoxFuture<'buf, io::Result<(usize, Option<std::net::SocketAddr>)>> {
let this = Arc::clone(&self.0);
async move {
this.lock()
.await
.socket
.recv_from(buffer)
.await
.map(|(read_size, saddr)| (read_size, Some(saddr)))
}
.boxed()
}
}
struct State {
io_context: Option<IOContext>,
pending_future_id: Option<PendingFutureId>,
socket: Option<Socket<UdpReader>>,
#[derive(Debug)]
struct UdpSrcPadHandlerInner {
retrieve_sender_address: bool,
socket_stream: Option<SocketStream<UdpReader>>,
need_initial_events: bool,
configured_caps: Option<gst::Caps>,
pending_future_abort_handle: Option<future::AbortHandle>,
}
impl Default for State {
fn default() -> State {
State {
io_context: None,
pending_future_id: None,
socket: None,
impl Default for UdpSrcPadHandlerInner {
fn default() -> Self {
UdpSrcPadHandlerInner {
retrieve_sender_address: true,
socket_stream: None,
need_initial_events: true,
configured_caps: None,
pending_future_abort_handle: None,
}
}
}
struct UdpSrc {
src_pad: gst::Pad,
state: Mutex<State>,
settings: Mutex<Settings>,
#[derive(Clone, Debug)]
struct UdpSrcPadHandler(Arc<Mutex<UdpSrcPadHandlerInner>>);
impl UdpSrcPadHandler {
fn new() -> Self {
UdpSrcPadHandler(Arc::new(Mutex::new(UdpSrcPadHandlerInner::default())))
}
#[inline]
async fn lock(&self) -> MutexGuard<'_, UdpSrcPadHandlerInner> {
self.0.lock().await
}
async fn start_task(&self, pad: PadSrcRef<'_>, element: &gst::Element) {
let this = self.clone();
let pad_weak = pad.downgrade();
let element = element.clone();
pad.start_task(move || {
let this = this.clone();
let pad_weak = pad_weak.clone();
let element = element.clone();
async move {
let item = this
.lock()
.await
.socket_stream
.as_mut()
.expect("Missing SocketStream")
.next()
.await;
let pad = pad_weak.upgrade().expect("PadSrc no longer exists");
let (mut buffer, saddr) = match item {
Some(Ok((buffer, saddr))) => (buffer, saddr),
Some(Err(err)) => {
gst_error!(CAT, obj: &element, "Got error {}", err);
match err {
Either::Left(gst::FlowError::CustomError) => (),
Either::Left(err) => {
gst_element_error!(
element,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
}
Either::Right(err) => {
gst_element_error!(
element,
gst::StreamError::Failed,
("I/O error"),
["streaming stopped, I/O error {}", err]
);
}
}
return;
}
None => {
gst_log!(CAT, obj: pad.gst_pad(), "SocketStream Stopped");
pad.pause_task().await;
return;
}
};
if let Some(saddr) = saddr {
if this.lock().await.retrieve_sender_address {
let inet_addr = match saddr.ip() {
IpAddr::V4(ip) => gio::InetAddress::new_from_bytes(
gio::InetAddressBytes::V4(&ip.octets()),
),
IpAddr::V6(ip) => gio::InetAddress::new_from_bytes(
gio::InetAddressBytes::V6(&ip.octets()),
),
};
let inet_socket_addr =
&gio::InetSocketAddress::new(&inet_addr, saddr.port());
NetAddressMeta::add(buffer.get_mut().unwrap(), inet_socket_addr);
}
}
this.push_buffer(pad, &element, buffer).await;
}
})
.await;
}
async fn push_buffer(&self, pad: PadSrcRef<'_>, element: &gst::Element, buffer: gst::Buffer) {
{
let mut events = Vec::new();
{
let mut inner = self.lock().await;
if inner.need_initial_events {
gst_debug!(CAT, obj: pad.gst_pad(), "Pushing initial events");
let stream_id =
format!("{:08x}{:08x}", rand::random::<u32>(), rand::random::<u32>());
events.push(
gst::Event::new_stream_start(&stream_id)
.group_id(gst::util_group_id_next())
.build(),
);
let udpsrc = UdpSrc::from_instance(element);
if let Some(ref caps) = udpsrc.settings.lock().await.caps {
events.push(gst::Event::new_caps(&caps).build());
inner.configured_caps = Some(caps.clone());
}
events.push(
gst::Event::new_segment(&gst::FormattedSegment::<gst::format::Time>::new())
.build(),
);
inner.need_initial_events = false;
}
}
for event in events {
pad.push_event(event).await;
}
}
match pad.push(buffer).await {
Ok(_) => {
gst_log!(CAT, obj: pad.gst_pad(), "Successfully pushed buffer");
}
Err(gst::FlowError::Flushing) => {
gst_debug!(CAT, obj: pad.gst_pad(), "Flushing");
pad.pause_task().await;
}
Err(gst::FlowError::Eos) => {
gst_debug!(CAT, obj: pad.gst_pad(), "EOS");
pad.pause_task().await;
}
Err(err) => {
gst_error!(CAT, obj: pad.gst_pad(), "Got error {}", err);
gst_element_error!(
element,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
}
}
}
}
lazy_static! {
static ref CAT: gst::DebugCategory = gst::DebugCategory::new(
"ts-udpsrc",
gst::DebugColorFlags::empty(),
Some("Thread-sharing UDP source"),
);
}
impl PadSrcHandler for UdpSrcPadHandler {
type ElementImpl = UdpSrc;
impl UdpSrc {
fn src_event(&self, pad: &gst::Pad, element: &gst::Element, event: gst::Event) -> bool {
use gst::EventView;
gst_log!(CAT, obj: pad, "Handling event {:?}", event);
fn src_event(
&self,
pad: PadSrcRef,
udpsrc: &UdpSrc,
element: &gst::Element,
event: gst::Event,
) -> Either<bool, BoxFuture<'static, bool>> {
gst_log!(CAT, obj: pad.gst_pad(), "Handling event {:?}", event);
let ret = match event.view() {
EventView::FlushStart(..) => {
let _ = self.stop(element);
let _ = block_on!(udpsrc.pause(element));
true
}
EventView::FlushStop(..) => {
@ -363,7 +514,7 @@ impl UdpSrc {
if res == Ok(gst::StateChangeSuccess::Success) && state == gst::State::Playing
|| res == Ok(gst::StateChangeSuccess::Async) && pending == gst::State::Playing
{
let _ = self.start(element);
let _ = block_on!(udpsrc.start(element));
}
true
}
@ -373,22 +524,23 @@ impl UdpSrc {
};
if ret {
gst_log!(CAT, obj: pad, "Handled event {:?}", event);
gst_log!(CAT, obj: pad.gst_pad(), "Handled event {:?}", event);
} else {
gst_log!(CAT, obj: pad, "Didn't handle event {:?}", event);
gst_log!(CAT, obj: pad.gst_pad(), "Didn't handle event {:?}", event);
}
ret
Either::Left(ret)
}
fn src_query(
&self,
pad: &gst::Pad,
pad: PadSrcRef,
_udpsrc: &UdpSrc,
_element: &gst::Element,
query: &mut gst::QueryRef,
) -> bool {
use gst::QueryView;
gst_log!(CAT, obj: pad.gst_pad(), "Handling query {:?}", query);
gst_log!(CAT, obj: pad, "Handling query {:?}", query);
let ret = match query.view_mut() {
QueryView::Latency(ref mut q) => {
q.set(true, 0.into(), 0.into());
@ -400,8 +552,8 @@ impl UdpSrc {
true
}
QueryView::Caps(ref mut q) => {
let state = self.state.lock().unwrap();
let caps = if let Some(ref caps) = state.configured_caps {
let inner = block_on!(self.lock());
let caps = if let Some(ref caps) = inner.configured_caps {
q.get_filter()
.map(|f| f.intersect_with_mode(caps, gst::CapsIntersectMode::First))
.unwrap_or_else(|| caps.clone())
@ -419,139 +571,54 @@ impl UdpSrc {
};
if ret {
gst_log!(CAT, obj: pad, "Handled query {:?}", query);
gst_log!(CAT, obj: pad.gst_pad(), "Handled query {:?}", query);
} else {
gst_log!(CAT, obj: pad, "Didn't handle query {:?}", query);
gst_log!(CAT, obj: pad.gst_pad(), "Didn't handle query {:?}", query);
}
ret
}
}
fn create_io_context_event(state: &State) -> Option<gst::Event> {
if let (&Some(ref pending_future_id), &Some(ref io_context)) =
(&state.pending_future_id, &state.io_context)
{
let s = gst::Structure::new(
"ts-io-context",
&[
("io-context", &io_context),
("pending-future-id", &*pending_future_id),
],
);
Some(gst::Event::new_custom_downstream_sticky(s).build())
} else {
None
}
/// Runtime state of the `UdpSrc` element.
#[derive(Debug)]
struct State {
    // `None` until `prepare` succeeds; taken back out in `unprepare`.
    socket: Option<Socket<UdpReader>>,
}
impl Default for State {
    fn default() -> State {
        State { socket: None }
    }
}
async fn push_buffer(element: gst::Element, buffer: gst::Buffer) -> Result<(), gst::FlowError> {
let udpsrc = Self::from_instance(&element);
let mut events = Vec::new();
{
let mut state = udpsrc.state.lock().unwrap();
if state.need_initial_events {
gst_debug!(CAT, obj: &element, "Pushing initial events");
/// Thread-sharing UDP source element implementation.
#[derive(Debug)]
struct UdpSrc {
    /// Source pad driven by a task on the shared runtime Context.
    src_pad: PadSrc,
    /// Handler pushing buffers/events on `src_pad`.
    src_pad_handler: UdpSrcPadHandler,
    /// Socket state, populated between `prepare` and `unprepare`.
    state: Mutex<State>,
    /// Element properties (address, port, caps, context, ...).
    settings: Mutex<Settings>,
}
let stream_id =
format!("{:08x}{:08x}", rand::random::<u32>(), rand::random::<u32>());
events.push(
gst::Event::new_stream_start(&stream_id)
.group_id(gst::util_group_id_next())
.build(),
);
if let Some(ref caps) = udpsrc.settings.lock().unwrap().caps {
events.push(gst::Event::new_caps(&caps).build());
state.configured_caps = Some(caps.clone());
}
events.push(
gst::Event::new_segment(&gst::FormattedSegment::<gst::format::Time>::new())
.build(),
);
if let Some(event) = Self::create_io_context_event(&state) {
events.push(event);
// Get rid of reconfigure flag
udpsrc.src_pad.check_reconfigure();
}
state.need_initial_events = false;
} else if udpsrc.src_pad.check_reconfigure() {
if let Some(event) = Self::create_io_context_event(&state) {
events.push(event);
}
}
}
for event in events {
udpsrc.src_pad.push_event(event);
}
match udpsrc.src_pad.push(buffer) {
Ok(_) => {
gst_log!(CAT, obj: &element, "Successfully pushed buffer");
}
Err(gst::FlowError::Flushing) => {
gst_debug!(CAT, obj: &element, "Flushing");
let state = udpsrc.state.lock().unwrap();
if let Some(ref socket) = state.socket {
socket.pause();
}
}
Err(gst::FlowError::Eos) => {
gst_debug!(CAT, obj: &element, "EOS");
let state = udpsrc.state.lock().unwrap();
if let Some(ref socket) = state.socket {
socket.pause();
}
}
Err(err) => {
gst_error!(CAT, obj: &element, "Got error {}", err);
gst_element_error!(
element,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
return Err(gst::FlowError::CustomError);
}
}
let abortable_drain = {
let mut state = udpsrc.state.lock().unwrap();
if let State {
io_context: Some(ref io_context),
pending_future_id: Some(ref pending_future_id),
ref mut pending_future_abort_handle,
..
} = *state
{
let (cancel, abortable_drain) =
io_context.drain_pending_futures(*pending_future_id);
*pending_future_abort_handle = cancel;
abortable_drain
} else {
return Ok(());
}
};
abortable_drain.await
}
fn prepare(&self, element: &gst::Element) -> Result<(), gst::ErrorMessage> {
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
lazy_static! {
static ref CAT: gst::DebugCategory = gst::DebugCategory::new(
"ts-udpsrc",
gst::DebugColorFlags::empty(),
Some("Thread-sharing UDP source"),
);
}
impl UdpSrc {
async fn prepare(&self, element: &gst::Element) -> Result<(), gst::ErrorMessage> {
let mut state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Preparing");
let settings = self.settings.lock().unwrap().clone();
let mut settings = self.settings.lock().await.clone();
let mut state = self.state.lock().unwrap();
let io_context =
IOContext::new(&settings.context, settings.context_wait).map_err(|err| {
let context =
Context::acquire(&settings.context, settings.context_wait).map_err(|err| {
gst_error_msg!(
gst::ResourceError::OpenRead,
["Failed to create IO context: {}", err]
["Failed to acquire Context: {}", err]
)
})?;
@ -569,7 +636,7 @@ impl UdpSrc {
socket = wrapped_socket.get()
}
let socket = tokio::net::UdpSocket::from_std(socket, io_context.reactor_handle())
let socket = tokio::net::UdpSocket::from_std(socket, context.reactor_handle())
.map_err(|err| {
gst_error_msg!(
gst::ResourceError::OpenRead,
@ -577,7 +644,7 @@ impl UdpSrc {
)
})?;
self.settings.lock().unwrap().used_socket = Some(wrapped_socket.clone());
settings.used_socket = Some(wrapped_socket.clone());
socket
} else {
@ -664,7 +731,7 @@ impl UdpSrc {
)
})?;
let socket = tokio::net::UdpSocket::from_std(socket, io_context.reactor_handle())
let socket = tokio::net::UdpSocket::from_std(socket, context.reactor_handle())
.map_err(|err| {
gst_error_msg!(
gst::ResourceError::OpenRead,
@ -719,7 +786,7 @@ impl UdpSrc {
)
})?;
let wrapper = GioSocketWrapper::new(&gio_socket);
self.settings.lock().unwrap().used_socket = Some(wrapper);
settings.used_socket = Some(wrapper);
}
#[cfg(windows)]
unsafe {
@ -745,7 +812,7 @@ impl UdpSrc {
)
})?;
let wrapper = GioSocketWrapper::new(&gio_socket);
self.settings.lock().unwrap().used_socket = Some(wrapper);
settings.used_socket = Some(wrapper);
}
socket
@ -762,72 +829,27 @@ impl UdpSrc {
})?;
let socket = Socket::new(element.upcast_ref(), UdpReader::new(socket), buffer_pool);
let socket_stream = socket.prepare().await.map_err(|_| {
gst_error_msg!(gst::ResourceError::OpenRead, ["Failed to prepare socket"])
})?;
let element_clone = element.clone();
let element_clone2 = element.clone();
{
let mut src_pad_handler = self.src_pad_handler.lock().await;
src_pad_handler.retrieve_sender_address = settings.retrieve_sender_address;
src_pad_handler.socket_stream = Some(socket_stream);
}
let retrieve_sender_address = self.settings.lock().unwrap().retrieve_sender_address;
socket
.schedule(
&io_context,
move |(mut buffer, saddr)| {
if let Some(saddr) = saddr {
if retrieve_sender_address {
let inet_addr = match saddr.ip() {
IpAddr::V4(ip) => gio::InetAddress::new_from_bytes(
gio::InetAddressBytes::V4(&ip.octets()),
),
IpAddr::V6(ip) => gio::InetAddress::new_from_bytes(
gio::InetAddressBytes::V6(&ip.octets()),
),
};
let inet_socket_addr =
&gio::InetSocketAddress::new(&inet_addr, saddr.port());
NetAddressMeta::add(buffer.get_mut().unwrap(), inet_socket_addr);
}
}
Self::push_buffer(element_clone.clone(), buffer)
},
move |err| {
gst_error!(CAT, obj: &element_clone2, "Got error {}", err);
match err {
Either::Left(gst::FlowError::CustomError) => (),
Either::Left(err) => {
gst_element_error!(
element_clone2,
gst::StreamError::Failed,
("Internal data stream error"),
["streaming stopped, reason {}", err]
);
}
Either::Right(err) => {
gst_element_error!(
element_clone2,
gst::StreamError::Failed,
("I/O error"),
["streaming stopped, I/O error {}", err]
);
}
}
},
)
.map_err(|_| {
gst_error_msg!(gst::ResourceError::OpenRead, ["Failed to schedule socket"])
self.src_pad
.prepare(context, &self.src_pad_handler)
.await
.map_err(|err| {
gst_error_msg!(
gst::ResourceError::OpenRead,
["Error preparing src_pads: {:?}", err]
)
})?;
let pending_future_id = io_context.acquire_pending_future_id();
gst_debug!(
CAT,
obj: element,
"Got pending future id {:?}",
pending_future_id
);
state.socket = Some(socket);
state.io_context = Some(io_context);
state.pending_future_id = Some(pending_future_id);
gst_debug!(CAT, obj: element, "Prepared");
drop(state);
@ -837,63 +859,60 @@ impl UdpSrc {
Ok(())
}
fn unprepare(&self, element: &gst::Element) -> Result<(), ()> {
async fn unprepare(&self, element: &gst::Element) -> Result<(), ()> {
let mut state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Unpreparing");
self.settings.lock().unwrap().used_socket = None;
self.settings.lock().await.used_socket = None;
// FIXME: The IO Context has to be alive longer than the queue,
// otherwise the queue can't finish any remaining work
let (mut socket, io_context) = {
let mut state = self.state.lock().unwrap();
self.src_pad.stop_task().await;
if let (&Some(ref pending_future_id), &Some(ref io_context)) =
(&state.pending_future_id, &state.io_context)
{
io_context.release_pending_future_id(*pending_future_id);
}
let socket = state.socket.take();
let io_context = state.io_context.take();
*state = State::default();
(socket, io_context)
};
if let Some(ref socket) = socket.take() {
socket.shutdown();
{
let socket = state.socket.take().unwrap();
socket.unprepare().await.unwrap();
}
drop(io_context);
let _ = self.src_pad.unprepare().await;
self.src_pad_handler.lock().await.configured_caps = None;
gst_debug!(CAT, obj: element, "Unprepared");
Ok(())
}
fn start(&self, element: &gst::Element) -> Result<(), ()> {
async fn start(&self, element: &gst::Element) -> Result<(), ()> {
let state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Starting");
let state = self.state.lock().unwrap();
if let Some(ref socket) = state.socket {
socket.unpause(element.get_clock(), Some(element.get_base_time()));
socket
.start(element.get_clock(), Some(element.get_base_time()))
.await;
}
self.src_pad_handler
.start_task(self.src_pad.as_ref(), element)
.await;
gst_debug!(CAT, obj: element, "Started");
Ok(())
}
fn stop(&self, element: &gst::Element) -> Result<(), ()> {
gst_debug!(CAT, obj: element, "Stopping");
let mut state = self.state.lock().unwrap();
async fn pause(&self, element: &gst::Element) -> Result<(), ()> {
let pause_completion = {
let state = self.state.lock().await;
gst_debug!(CAT, obj: element, "Pausing");
if let Some(ref socket) = state.socket {
socket.pause();
}
let pause_completion = self.src_pad.pause_task().await;
state.socket.as_ref().unwrap().pause().await;
if let Some(abort_handle) = state.pending_future_abort_handle.take() {
abort_handle.abort();
}
pause_completion
};
gst_debug!(CAT, obj: element, "Stopped");
gst_debug!(CAT, obj: element, "Waiting for Task Pause to complete");
pause_completion.await;
gst_debug!(CAT, obj: element, "Paused");
Ok(())
}
@ -946,25 +965,11 @@ impl ObjectSubclass for UdpSrc {
fn new_with_class(klass: &subclass::simple::ClassStruct<Self>) -> Self {
let templ = klass.get_pad_template("src").unwrap();
let src_pad = gst::Pad::new_from_template(&templ, Some("src"));
src_pad.set_event_function(|pad, parent, event| {
UdpSrc::catch_panic_pad_function(
parent,
|| false,
|udpsrc, element| udpsrc.src_event(pad, element, event),
)
});
src_pad.set_query_function(|pad, parent, query| {
UdpSrc::catch_panic_pad_function(
parent,
|| false,
|udpsrc, element| udpsrc.src_query(pad, element, query),
)
});
let src_pad = PadSrc::new_from_template(&templ, Some("src"));
Self {
src_pad,
src_pad_handler: UdpSrcPadHandler::new(),
state: Mutex::new(State::default()),
settings: Mutex::new(Settings::default()),
}
@ -979,27 +984,27 @@ impl ObjectImpl for UdpSrc {
match *prop {
subclass::Property("address", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.address = value.get().expect("type checked upstream");
}
subclass::Property("port", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.port = value.get_some().expect("type checked upstream");
}
subclass::Property("reuse", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.reuse = value.get_some().expect("type checked upstream");
}
subclass::Property("caps", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.caps = value.get().expect("type checked upstream");
}
subclass::Property("mtu", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.mtu = value.get_some().expect("type checked upstream");
}
subclass::Property("socket", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.socket = value
.get::<gio::Socket>()
.expect("type checked upstream")
@ -1009,18 +1014,18 @@ impl ObjectImpl for UdpSrc {
unreachable!();
}
subclass::Property("context", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.context = value
.get()
.expect("type checked upstream")
.unwrap_or_else(|| "".into());
}
subclass::Property("context-wait", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.context_wait = value.get_some().expect("type checked upstream");
}
subclass::Property("retrieve-sender-address", ..) => {
let mut settings = self.settings.lock().unwrap();
let mut settings = block_on!(self.settings.lock());
settings.retrieve_sender_address = value.get_some().expect("type checked upstream");
}
_ => unimplemented!(),
@ -1032,27 +1037,27 @@ impl ObjectImpl for UdpSrc {
match *prop {
subclass::Property("address", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.address.to_value())
}
subclass::Property("port", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.port.to_value())
}
subclass::Property("reuse", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.reuse.to_value())
}
subclass::Property("caps", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.caps.to_value())
}
subclass::Property("mtu", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.mtu.to_value())
}
subclass::Property("socket", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings
.socket
.as_ref()
@ -1060,7 +1065,7 @@ impl ObjectImpl for UdpSrc {
.to_value())
}
subclass::Property("used-socket", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings
.used_socket
.as_ref()
@ -1068,15 +1073,15 @@ impl ObjectImpl for UdpSrc {
.to_value())
}
subclass::Property("context", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.context.to_value())
}
subclass::Property("context-wait", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.context_wait.to_value())
}
subclass::Property("retrieve-sender-address", ..) => {
let settings = self.settings.lock().unwrap();
let settings = block_on!(self.settings.lock());
Ok(settings.retrieve_sender_address.to_value())
}
_ => unimplemented!(),
@ -1087,7 +1092,7 @@ impl ObjectImpl for UdpSrc {
self.parent_constructed(obj);
let element = obj.downcast_ref::<gst::Element>().unwrap();
element.add_pad(&self.src_pad).unwrap();
element.add_pad(self.src_pad.gst_pad()).unwrap();
super::set_element_flags(element, gst::ElementFlags::SOURCE);
}
}
@ -1102,16 +1107,16 @@ impl ElementImpl for UdpSrc {
match transition {
gst::StateChange::NullToReady => {
self.prepare(element).map_err(|err| {
block_on!(self.prepare(element)).map_err(|err| {
element.post_error_message(&err);
gst::StateChangeError
})?;
}
gst::StateChange::PlayingToPaused => {
self.stop(element).map_err(|_| gst::StateChangeError)?;
block_on!(self.pause(element)).map_err(|_| gst::StateChangeError)?;
}
gst::StateChange::ReadyToNull => {
self.unprepare(element).map_err(|_| gst::StateChangeError)?;
block_on!(self.unprepare(element)).map_err(|_| gst::StateChangeError)?;
}
_ => (),
}
@ -1123,11 +1128,12 @@ impl ElementImpl for UdpSrc {
success = gst::StateChangeSuccess::NoPreroll;
}
gst::StateChange::PausedToPlaying => {
self.start(element).map_err(|_| gst::StateChangeError)?;
block_on!(self.start(element)).map_err(|_| gst::StateChangeError)?;
}
gst::StateChange::PausedToReady => {
let mut state = self.state.lock().unwrap();
state.need_initial_events = true;
block_on!(async {
self.src_pad_handler.lock().await.need_initial_events = true;
});
}
_ => (),
}

View file

@ -76,17 +76,18 @@ fn test_push() {
use gst::EventView;
let event = h.pull_event().unwrap();
// The StickyEvent for the TaskContext is pushed first
match event.view() {
EventView::StreamStart(..) => {
assert_eq!(n_events, 0);
assert_eq!(n_events, 1);
}
EventView::Caps(ev) => {
assert_eq!(n_events, 1);
assert_eq!(n_events, 2);
let event_caps = ev.get_caps();
assert_eq!(caps.as_ref(), event_caps);
}
EventView::Segment(..) => {
assert_eq!(n_events, 2);
assert_eq!(n_events, 3);
}
EventView::Eos(..) => {
break;

View file

@ -0,0 +1,830 @@
// Copyright (C) 2019 François Laignel <fengalin@free.fr>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Library General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public
// License along with this library; if not, write to the
// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
// Boston, MA 02110-1335, USA.
use either::Either;
use futures::channel::mpsc;
use futures::future::BoxFuture;
use futures::lock::Mutex;
use futures::prelude::*;
use glib;
use glib::{glib_boxed_derive_traits, glib_boxed_type, glib_object_impl, glib_object_subclass};
use gst;
use gst::prelude::*;
use gst::subclass::prelude::*;
use gst::EventView;
use gst::{gst_debug, gst_error_msg, gst_log};
use lazy_static::lazy_static;
use std::boxed::Box;
use std::sync::Arc;
use gstthreadshare::block_on;
use gstthreadshare::runtime::prelude::*;
use gstthreadshare::runtime::{Context, PadContext, PadSink, PadSinkRef, PadSrc, PadSrcRef};
const DEFAULT_CONTEXT: &str = "";
const SLEEP_DURATION: u32 = 2;
/// Initializes GStreamer and registers the plugin under test, exactly once
/// per process no matter how many tests call it.
fn init() {
    use std::sync::Once;

    static INIT_ONCE: Once = Once::new();
    let register = || {
        gst::init().unwrap();
        gstthreadshare::plugin_register_static().expect("gstthreadshare pad test");
    };
    INIT_ONCE.call_once(register);
}
// Src
// Properties exposed by the test src element: only the context name.
static SRC_PROPERTIES: [glib::subclass::Property; 1] =
    [glib::subclass::Property("context", |name| {
        glib::ParamSpec::string(
            name,
            "Context",
            "Context name to share threads with",
            Some(DEFAULT_CONTEXT),
            glib::ParamFlags::READWRITE,
        )
    })];

/// Settings backing the "context" property.
#[derive(Debug)]
struct Settings {
    context: String,
}

// Debug category used by the test src element.
lazy_static! {
    static ref SRC_CAT: gst::DebugCategory = gst::DebugCategory::new(
        "ts-element-src-test",
        gst::DebugColorFlags::empty(),
        Some("Thread-sharing Test Src Element"),
    );
}
/// Stateless handler driving the test src pad's task.
#[derive(Clone, Debug)]
struct PadSrcHandlerTest;
impl PadSrcHandlerTest {
    /// Spawns the pad task: each iteration pulls one `Item` from `receiver`
    /// and forwards it downstream; the task pauses itself when the channel
    /// is closed.
    async fn start_task(&self, pad: PadSrcRef<'_>, receiver: mpsc::Receiver<Item>) {
        let pad_weak = pad.downgrade();
        // The closure below is re-invoked for every task iteration, so the
        // receiver is shared behind an async Mutex.
        let receiver = Arc::new(Mutex::new(receiver));
        pad.start_task(move || {
            let pad_weak = pad_weak.clone();
            let receiver = Arc::clone(&receiver);
            async move {
                let item = receiver.lock().await.next().await;
                let pad = pad_weak.upgrade().expect("PadSrc no longer exists");
                let item = match item {
                    Some(item) => item,
                    None => {
                        // Sender side dropped: nothing more will come.
                        gst_debug!(SRC_CAT, obj: pad.gst_pad(), "SrcPad channel aborted");
                        pad.pause_task().await;
                        return;
                    }
                };
                Self::push_item(pad, item).await;
            }
        })
        .await;
    }

    /// Forwards a single queued item (event, buffer or buffer list) downstream.
    async fn push_item(pad: PadSrcRef<'_>, item: Item) {
        match item {
            Item::Event(event) => {
                pad.push_event(event).await;
            }
            Item::Buffer(buffer) => {
                pad.push(buffer).await.unwrap();
            }
            Item::BufferList(list) => {
                pad.push_list(list).await.unwrap();
            }
        }
        gst_debug!(SRC_CAT, obj: pad.gst_pad(), "SrcPad handled an Item");
    }
}
impl PadSrcHandler for PadSrcHandlerTest {
    type ElementImpl = ElementSrcTest;

    /// Accepts any scheduling mode; activation itself is handled elsewhere.
    fn src_activatemode(
        &self,
        _pad: PadSrcRef,
        _elem_src_test: &ElementSrcTest,
        _element: &gst::Element,
        mode: gst::PadMode,
        active: bool,
    ) -> Result<(), gst::LoggableError> {
        gst_debug!(SRC_CAT, "SrcPad activatemode {:?}, {}", mode, active);
        Ok(())
    }

    /// Handles pad events: FlushStart pauses the element, FlushStop restarts
    /// it when the element is (or is transitioning to) Playing.
    fn src_event(
        &self,
        pad: PadSrcRef,
        elem_src_test: &ElementSrcTest,
        element: &gst::Element,
        event: gst::Event,
    ) -> Either<bool, BoxFuture<'static, bool>> {
        gst_log!(SRC_CAT, obj: pad.gst_pad(), "Handling event {:?}", event);
        let ret = match event.view() {
            EventView::FlushStart(..) => {
                let _ = block_on!(elem_src_test.pause(element));
                true
            }
            EventView::FlushStop(..) => {
                // Only restart when we are effectively heading to Playing.
                let (res, state, pending) = element.get_state(0.into());
                if res == Ok(gst::StateChangeSuccess::Success) && state == gst::State::Playing
                    || res == Ok(gst::StateChangeSuccess::Async) && pending == gst::State::Playing
                {
                    let _ = block_on!(elem_src_test.start(element));
                }
                true
            }
            _ => false,
        };
        if ret {
            gst_log!(SRC_CAT, obj: pad.gst_pad(), "Handled event {:?}", event);
        } else {
            gst_log!(SRC_CAT, obj: pad.gst_pad(), "Didn't handle event {:?}", event);
        }
        // Event handling here is synchronous, hence Left.
        Either::Left(ret)
    }
}
/// Mutable state of the test src element.
#[derive(Debug)]
struct ElementSrcState {
    /// Queue endpoint used by `try_push`; `Some` only while started.
    sender: Option<mpsc::Sender<Item>>,
}
impl Default for ElementSrcState {
fn default() -> Self {
ElementSrcState { sender: None }
}
}
/// Test source element: items handed over via `try_push` are forwarded
/// downstream by the pad task running on the shared Context.
#[derive(Debug)]
struct ElementSrcTest {
    src_pad: PadSrc,
    src_pad_handler: PadSrcHandlerTest,
    state: Mutex<ElementSrcState>,
    settings: Mutex<Settings>,
}
impl ElementSrcTest {
    /// Tries to enqueue an item without blocking; hands the item back to the
    /// caller if the element is not started or the channel is full.
    async fn try_push(&self, item: Item) -> Result<(), Item> {
        match self.state.lock().await.sender.as_mut() {
            Some(sender) => sender
                .try_send(item)
                .map_err(mpsc::TrySendError::into_inner),
            None => Err(item),
        }
    }

    /// Acquires the shared Context and prepares the src pad on it.
    async fn prepare(&self, element: &gst::Element) -> Result<(), gst::ErrorMessage> {
        // Hold the state lock for the duration of the transition.
        let _state = self.state.lock().await;
        gst_debug!(SRC_CAT, obj: element, "Preparing");
        let settings = self.settings.lock().await;
        let context = Context::acquire(&settings.context, SLEEP_DURATION).map_err(|err| {
            gst_error_msg!(
                gst::ResourceError::OpenRead,
                ["Failed to acquire Context: {}", err]
            )
        })?;
        self.src_pad
            .prepare(context, &self.src_pad_handler)
            .await
            .map_err(|err| {
                gst_error_msg!(
                    gst::ResourceError::OpenRead,
                    ["Error joining Context: {:?}", err]
                )
            })?;
        gst_debug!(SRC_CAT, obj: element, "Prepared");
        Ok(())
    }

    /// Stops the pad task and detaches the pad from the Context.
    async fn unprepare(&self, element: &gst::Element) -> Result<(), ()> {
        let _state = self.state.lock().await;
        gst_debug!(SRC_CAT, obj: element, "Unpreparing");
        self.src_pad.stop_task().await;
        let _ = self.src_pad.unprepare().await;
        gst_debug!(SRC_CAT, obj: element, "Unprepared");
        Ok(())
    }

    /// Creates the item channel and starts the pad task consuming it.
    async fn start(&self, element: &gst::Element) -> Result<(), ()> {
        let mut state = self.state.lock().await;
        gst_debug!(SRC_CAT, obj: element, "Starting");
        let (sender, receiver) = mpsc::channel(1);
        state.sender = Some(sender);
        self.src_pad_handler
            .start_task(self.src_pad.as_ref(), receiver)
            .await;
        gst_debug!(SRC_CAT, obj: element, "Started");
        Ok(())
    }

    /// Requests a task pause and drops the sender so nothing new can be
    /// enqueued, then awaits pause completion *outside* the state lock.
    async fn pause(&self, element: &gst::Element) -> Result<(), ()> {
        let pause_completion = {
            let mut state = self.state.lock().await;
            gst_debug!(SRC_CAT, obj: element, "Pausing");
            let pause_completion = self.src_pad.pause_task().await;
            // Prevent subsequent items from being enqueued
            state.sender = None;
            pause_completion
        };
        gst_debug!(SRC_CAT, obj: element, "Waiting for Task Pause to complete");
        pause_completion.await;
        gst_debug!(SRC_CAT, obj: element, "Paused");
        Ok(())
    }
}
impl ObjectSubclass for ElementSrcTest {
    const NAME: &'static str = "RsTsElementSrcTest";
    type ParentType = gst::Element;
    type Instance = gst::subclass::ElementInstanceStruct<Self>;
    type Class = glib::subclass::simple::ClassStruct<Self>;

    glib_object_subclass!();

    /// Registers metadata, the always-present "src" pad template and the
    /// element properties.
    fn class_init(klass: &mut glib::subclass::simple::ClassStruct<Self>) {
        klass.set_metadata(
            "Thread-sharing Test Src Element",
            "Generic",
            "Src Element for Pad Src Test",
            "François Laignel <fengalin@free.fr>",
        );
        let caps = gst::Caps::new_any();
        let src_pad_template = gst::PadTemplate::new(
            "src",
            gst::PadDirection::Src,
            gst::PadPresence::Always,
            &caps,
        )
        .unwrap();
        klass.add_pad_template(src_pad_template);
        klass.install_properties(&SRC_PROPERTIES);
    }

    /// Builds the instance with its PadSrc and default state/settings.
    fn new_with_class(klass: &glib::subclass::simple::ClassStruct<Self>) -> Self {
        let templ = klass.get_pad_template("src").unwrap();
        let src_pad = PadSrc::new_from_template(&templ, Some("src"));
        let settings = Settings {
            context: String::new(),
        };
        ElementSrcTest {
            src_pad,
            src_pad_handler: PadSrcHandlerTest {},
            state: Mutex::new(ElementSrcState::default()),
            settings: Mutex::new(settings),
        }
    }
}
impl ObjectImpl for ElementSrcTest {
    glib_object_impl!();

    /// Only the "context" property is supported; the settings lock is taken
    /// synchronously via block_on!.
    fn set_property(&self, _obj: &glib::Object, id: usize, value: &glib::Value) {
        let prop = &SRC_PROPERTIES[id];
        match *prop {
            glib::subclass::Property("context", ..) => {
                let context = value
                    .get()
                    .expect("type checked upstream")
                    .unwrap_or_else(|| "".into());
                block_on!(self.settings.lock()).context = context;
            }
            _ => unimplemented!(),
        }
    }

    fn constructed(&self, obj: &glib::Object) {
        self.parent_constructed(obj);
        // Expose the wrapped gst::Pad on the element.
        let element = obj.downcast_ref::<gst::Element>().unwrap();
        element.add_pad(self.src_pad.gst_pad()).unwrap();
    }
}
impl ElementImpl for ElementSrcTest {
    /// Maps GStreamer state transitions onto the element's async
    /// prepare/pause/unprepare/start helpers, blocking on each.
    fn change_state(
        &self,
        element: &gst::Element,
        transition: gst::StateChange,
    ) -> Result<gst::StateChangeSuccess, gst::StateChangeError> {
        gst_log!(SRC_CAT, obj: element, "Changing state {:?}", transition);
        match transition {
            gst::StateChange::NullToReady => {
                block_on!(self.prepare(element)).map_err(|err| {
                    element.post_error_message(&err);
                    gst::StateChangeError
                })?;
            }
            gst::StateChange::PlayingToPaused => {
                block_on!(self.pause(element)).map_err(|_| gst::StateChangeError)?;
            }
            gst::StateChange::ReadyToNull => {
                block_on!(self.unprepare(element)).map_err(|_| gst::StateChangeError)?;
            }
            _ => (),
        }
        let mut success = self.parent_change_state(element, transition)?;
        match transition {
            gst::StateChange::PausedToPlaying => {
                block_on!(self.start(element)).map_err(|_| gst::StateChangeError)?;
            }
            gst::StateChange::ReadyToPaused => {
                // Live-style source: report NoPreroll.
                success = gst::StateChangeSuccess::NoPreroll;
            }
            _ => (),
        }
        Ok(success)
    }
}
// Sink

/// Item exchanged between the test elements and the test harness.
#[derive(Debug)]
enum Item {
    Buffer(gst::Buffer),
    BufferList(gst::BufferList),
    Event(gst::Event),
}

/// Boxed wrapper so an mpsc sender can be passed as a GObject property value.
#[derive(Clone, Debug)]
struct ItemSender {
    sender: mpsc::Sender<Item>,
}
impl glib::subclass::boxed::BoxedType for ItemSender {
    const NAME: &'static str = "TsTestItemSender";

    glib_boxed_type!();
}

glib_boxed_derive_traits!(ItemSender);

// Properties of the test sink element: only the write-only item sender.
static SINK_PROPERTIES: [glib::subclass::Property; 1] =
    [glib::subclass::Property("sender", |name| {
        glib::ParamSpec::boxed(
            name,
            "Sender",
            "Channel sender to forward the incoming items to",
            ItemSender::get_type(),
            glib::ParamFlags::WRITABLE,
        )
    })];

/// Stateless handler for the test sink pad.
#[derive(Clone, Debug)]
struct PadSinkHandlerTest;
impl PadSinkHandler for PadSinkHandlerTest {
    type ElementImpl = ElementSinkTest;

    // NOTE(review): "Fowarding" in the debug messages below is a typo,
    // deliberately left untouched here (runtime log text).

    /// Forwards each incoming buffer to the harness channel.
    fn sink_chain(
        &self,
        pad: PadSinkRef,
        elem_sink_test: &ElementSinkTest,
        _element: &gst::Element,
        buffer: gst::Buffer,
    ) -> BoxFuture<'static, Result<gst::FlowSuccess, gst::FlowError>> {
        let pad_weak = pad.downgrade();
        let sender = Arc::clone(&elem_sink_test.sender);
        async move {
            let pad = pad_weak
                .upgrade()
                .expect("PadSink no longer exists in sink_chain");
            gst_debug!(SINK_CAT, obj: pad.gst_pad(), "Fowarding {:?}", buffer);
            sender
                .lock()
                .await
                .as_mut()
                .expect("ItemSender not set")
                .send(Item::Buffer(buffer))
                .await
                .map(|_| gst::FlowSuccess::Ok)
                .map_err(|_| gst::FlowError::CustomError)
        }
        .boxed()
    }

    /// Forwards each incoming buffer list to the harness channel.
    fn sink_chain_list(
        &self,
        pad: PadSinkRef,
        elem_sink_test: &ElementSinkTest,
        _element: &gst::Element,
        list: gst::BufferList,
    ) -> BoxFuture<'static, Result<gst::FlowSuccess, gst::FlowError>> {
        let pad_weak = pad.downgrade();
        let sender = Arc::clone(&elem_sink_test.sender);
        async move {
            let pad = pad_weak
                .upgrade()
                .expect("PadSink no longer exists in sink_chain_list");
            gst_debug!(SINK_CAT, obj: pad.gst_pad(), "Fowarding {:?}", list);
            sender
                .lock()
                .await
                .as_mut()
                .expect("ItemSender not set")
                .send(Item::BufferList(list))
                .await
                .map(|_| gst::FlowSuccess::Ok)
                .map_err(|_| gst::FlowError::CustomError)
        }
        .boxed()
    }

    /// Forwards events to the harness: serialized events asynchronously
    /// (awaiting channel capacity), non-serialized ones via a non-blocking
    /// try_send.
    fn sink_event(
        &self,
        pad: PadSinkRef,
        elem_sink_test: &ElementSinkTest,
        _element: &gst::Element,
        event: gst::Event,
    ) -> Either<bool, BoxFuture<'static, bool>> {
        if event.is_serialized() {
            let pad_weak = pad.downgrade();
            let sender = Arc::clone(&elem_sink_test.sender);
            Either::Right(async move {
                let pad = pad_weak
                    .upgrade()
                    .expect("PadSink no longer exists in sink_event");
                gst_debug!(SINK_CAT, obj: pad.gst_pad(), "Fowarding serialized event {:?}", event);
                sender
                    .lock()
                    .await
                    .as_mut()
                    .expect("ItemSender not set")
                    .send(Item::Event(event))
                    .await
                    .is_ok()
            }.boxed())
        } else {
            gst_debug!(SINK_CAT, obj: pad.gst_pad(), "Fowarding non-serialized event {:?}", event);
            Either::Left(
                block_on!(elem_sink_test.sender.lock())
                    .as_mut()
                    .expect("ItemSender not set")
                    .try_send(Item::Event(event))
                    .is_ok(),
            )
        }
    }
}
/// Test sink element: everything reaching its sink pad is forwarded to the
/// harness through the `sender` channel.
#[derive(Debug)]
struct ElementSinkTest {
    sink_pad: PadSink,
    /// Installed via the "sender" property before items flow.
    sender: Arc<Mutex<Option<mpsc::Sender<Item>>>>,
}

// Debug category used by the test sink element.
lazy_static! {
    static ref SINK_CAT: gst::DebugCategory = gst::DebugCategory::new(
        "ts-element-sink-test",
        gst::DebugColorFlags::empty(),
        Some("Thread-sharing Test Sink Element"),
    );
}
impl ObjectSubclass for ElementSinkTest {
    const NAME: &'static str = "RsTsElementSinkTest";
    type ParentType = gst::Element;
    type Instance = gst::subclass::ElementInstanceStruct<Self>;
    type Class = glib::subclass::simple::ClassStruct<Self>;

    glib_object_subclass!();

    /// Registers metadata, the sink pad template, the "sender" property and
    /// two action signals letting tests inject flush events via the sink pad.
    fn class_init(klass: &mut glib::subclass::simple::ClassStruct<Self>) {
        klass.set_metadata(
            "Thread-sharing Test Sink Element",
            "Generic",
            "Sink Element for Pad Test",
            "François Laignel <fengalin@free.fr>",
        );
        let caps = gst::Caps::new_any();
        let sink_pad_template = gst::PadTemplate::new(
            "sink",
            gst::PadDirection::Sink,
            gst::PadPresence::Always,
            &caps,
        )
        .unwrap();
        klass.add_pad_template(sink_pad_template);
        klass.install_properties(&SINK_PROPERTIES);

        // "flush-start" action signal: pushes a FlushStart event through the
        // sink pad and returns the push result.
        klass.add_signal_with_class_handler(
            "flush-start",
            glib::SignalFlags::RUN_LAST | glib::SignalFlags::ACTION,
            &[],
            bool::static_type(),
            |_, args| {
                let element = args[0]
                    .get::<gst::Element>()
                    .expect("signal arg")
                    .expect("missing signal arg");
                let this = Self::from_instance(&element);
                Some(
                    this.sink_pad
                        .gst_pad()
                        .push_event(gst::Event::new_flush_start().build())
                        .to_value(),
                )
            },
        );

        // "flush-stop" action signal: pushes a resetting FlushStop event.
        klass.add_signal_with_class_handler(
            "flush-stop",
            glib::SignalFlags::RUN_LAST | glib::SignalFlags::ACTION,
            &[],
            bool::static_type(),
            |_, args| {
                let element = args[0]
                    .get::<gst::Element>()
                    .expect("signal arg")
                    .expect("missing signal arg");
                let this = Self::from_instance(&element);
                Some(
                    this.sink_pad
                        .gst_pad()
                        .push_event(gst::Event::new_flush_stop(true).build())
                        .to_value(),
                )
            },
        );
    }

    fn new_with_class(klass: &glib::subclass::simple::ClassStruct<Self>) -> Self {
        let templ = klass.get_pad_template("sink").unwrap();
        let sink_pad = PadSink::new_from_template(&templ, Some("sink"));
        ElementSinkTest {
            sink_pad,
            sender: Arc::new(Mutex::new(None)),
        }
    }
}
impl ObjectImpl for ElementSinkTest {
    glib_object_impl!();

    // Handles writes to the element's GObject properties. Only "sender" is
    // supported: it installs the channel through which received items are
    // forwarded to the test harness.
    fn set_property(&self, _obj: &glib::Object, id: usize, value: &glib::Value) {
        if let glib::subclass::Property("sender", ..) = SINK_PROPERTIES[id] {
            let item_sender = value
                .get::<&ItemSender>()
                .expect("type checked upstream")
                .expect("ItemSender not found")
                .clone();
            *block_on!(self.sender.lock()) = Some(item_sender.sender);
        } else {
            unimplemented!()
        }
    }

    // Adds the sink pad to the element once the GObject is constructed.
    fn constructed(&self, obj: &glib::Object) {
        self.parent_constructed(obj);
        obj.downcast_ref::<gst::Element>()
            .unwrap()
            .add_pad(self.sink_pad.gst_pad())
            .unwrap();
    }
}
impl ElementImpl for ElementSinkTest {
    // Prepares the sink pad when leaving NULL and unprepares it when
    // returning to NULL, then delegates to the parent class.
    fn change_state(
        &self,
        element: &gst::Element,
        transition: gst::StateChange,
    ) -> Result<gst::StateChangeSuccess, gst::StateChangeError> {
        gst_log!(SINK_CAT, obj: element, "Changing state {:?}", transition);

        if let gst::StateChange::NullToReady = transition {
            block_on!(self.sink_pad.prepare(&PadSinkHandlerTest {}));
        } else if let gst::StateChange::ReadyToNull = transition {
            block_on!(self.sink_pad.unprepare());
        }

        self.parent_change_state(element, transition)
    }
}
/// Builds a test pipeline: an `ElementSrcTest` linked to an `ElementSinkTest`.
///
/// The source is bound to the context named `context_name`; items reaching
/// the sink are forwarded through the returned `mpsc::Receiver`.
fn setup(
    context_name: &str,
) -> (
    gst::Pipeline,
    gst::Element,
    gst::Element,
    mpsc::Receiver<Item>,
) {
    init();

    let pipeline = gst::Pipeline::new(None);
    let (sender, receiver) = mpsc::channel::<Item>(10);

    // Source element, scheduled on the requested context.
    let src = glib::Object::new(ElementSrcTest::get_type(), &[])
        .unwrap()
        .downcast::<gst::Element>()
        .unwrap();
    src.set_property("context", &context_name).unwrap();

    // Sink element, forwarding received items through `sender`.
    let sink = glib::Object::new(ElementSinkTest::get_type(), &[])
        .unwrap()
        .downcast::<gst::Element>()
        .unwrap();
    sink.set_property("sender", &ItemSender { sender }).unwrap();

    pipeline.add_many(&[&src, &sink]).unwrap();
    src.link(&sink).unwrap();

    (pipeline, src, sink, receiver)
}
#[test]
// End-to-end dataflow scenario for the PadSrc/PadSink test elements:
// push events / buffers / buffer lists through the src element's task and
// check they come out of the sink's channel, across Playing / Paused /
// flush / EOS / Ready transitions.
fn task() {
    let (pipeline, src_element, sink_element, mut receiver) = setup("task");
    let elem_src_test = ElementSrcTest::from_instance(&src_element);
    pipeline.set_state(gst::State::Playing).unwrap();
    // Initial events
    block_on!(elem_src_test.try_push(Item::Event(
        gst::Event::new_stream_start("stream_id_task_test")
            .group_id(gst::util_group_id_next())
            .build(),
    )))
    .unwrap();
    // The PadContext sticky event is forwarded before the application events.
    match block_on!(receiver.next()).unwrap() {
        Item::Event(event) => match event.view() {
            gst::EventView::CustomDownstreamSticky(e) => {
                assert!(PadContext::is_pad_context_sticky_event(&e))
            }
            other => panic!("Unexpected event {:?}", other),
        },
        other => panic!("Unexpected item {:?}", other),
    }
    match block_on!(receiver.next()).unwrap() {
        Item::Event(event) => match event.view() {
            gst::EventView::StreamStart(_) => (),
            other => panic!("Unexpected event {:?}", other),
        },
        other => panic!("Unexpected item {:?}", other),
    }
    block_on!(elem_src_test.try_push(Item::Event(
        gst::Event::new_segment(&gst::FormattedSegment::<gst::format::Time>::new()).build(),
    )))
    .unwrap();
    match block_on!(receiver.next()).unwrap() {
        Item::Event(event) => match event.view() {
            gst::EventView::Segment(_) => (),
            other => panic!("Unexpected event {:?}", other),
        },
        other => panic!("Unexpected item {:?}", other),
    }
    // Buffer
    block_on!(elem_src_test.try_push(Item::Buffer(gst::Buffer::from_slice(vec![1, 2, 3, 4]))))
        .unwrap();
    match block_on!(receiver.next()).unwrap() {
        Item::Buffer(buffer) => {
            let data = buffer.map_readable().unwrap();
            assert_eq!(data.as_slice(), vec![1, 2, 3, 4].as_slice());
        }
        other => panic!("Unexpected item {:?}", other),
    }
    // BufferList
    let mut list = gst::BufferList::new();
    list.get_mut()
        .unwrap()
        .add(gst::Buffer::from_slice(vec![1, 2, 3, 4]));
    block_on!(elem_src_test.try_push(Item::BufferList(list))).unwrap();
    match block_on!(receiver.next()).unwrap() {
        Item::BufferList(_) => (),
        other => panic!("Unexpected item {:?}", other),
    }
    // Pause the Pad task
    pipeline.set_state(gst::State::Paused).unwrap();
    // Items no longer accepted while Paused
    block_on!(elem_src_test.try_push(Item::Buffer(gst::Buffer::from_slice(vec![1, 2, 3, 4]))))
        .unwrap_err();
    // Nothing forwarded
    receiver.try_next().unwrap_err();
    // Switch back the Pad task to Started
    pipeline.set_state(gst::State::Playing).unwrap();
    // Still nothing forwarded until a flush resumes the dataflow
    receiver.try_next().unwrap_err();
    // Flush, via the sink's "flush-start" / "flush-stop" action signals
    assert!(sink_element
        .emit("flush-start", &[])
        .unwrap()
        .unwrap()
        .get_some::<bool>()
        .unwrap());
    assert!(sink_element
        .emit("flush-stop", &[])
        .unwrap()
        .unwrap()
        .get_some::<bool>()
        .unwrap());
    // EOS
    block_on!(elem_src_test.try_push(Item::Event(gst::Event::new_eos().build()))).unwrap();
    match block_on!(receiver.next()).unwrap() {
        Item::Event(event) => match event.view() {
            gst::EventView::Eos(_) => (),
            other => panic!("Unexpected event {:?}", other),
        },
        other => panic!("Unexpected item {:?}", other),
    }
    // Stop the Pad task
    pipeline.set_state(gst::State::Ready).unwrap();
    // Receiver was dropped when stopping => can't send anymore
    block_on!(elem_src_test.try_push(Item::Event(
        gst::Event::new_stream_start("stream_id_task_test_past_stop")
            .group_id(gst::util_group_id_next())
            .build(),
    )))
    .unwrap_err();
}

View file

@ -16,9 +16,23 @@
// Boston, MA 02110-1335, USA.
use gst;
use gst::prelude::*;
use gst::{gst_debug, gst_error};
use lazy_static::lazy_static;
use std::sync::mpsc;
use gstthreadshare;
lazy_static! {
static ref CAT: gst::DebugCategory = gst::DebugCategory::new(
"ts-test",
gst::DebugColorFlags::empty(),
Some("Thread-sharing test"),
);
}
fn init() {
use std::sync::Once;
static INIT: Once = Once::new();
@ -30,9 +44,140 @@ fn init() {
}
#[test]
fn test_multiple_contexts() {
use gst::prelude::*;
// Spreads SRC_NB `ts-udpsrc` + `ts-queue` + `appsink` chains over CONTEXT_NB
// thread-sharing contexts, sends BUFFER_NB UDP datagrams to each source and
// waits for every buffer to be notified by its appsink.
fn multiple_contexts_queue() {
    use std::net;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
    use std::sync::mpsc;
    init();
    const CONTEXT_NB: u32 = 2;
    const SRC_NB: u16 = 4;
    const CONTEXT_WAIT: u32 = 1;
    const BUFFER_NB: u32 = 3;
    const FIRST_PORT: u16 = 40000;
    let l = glib::MainLoop::new(None, false);
    let pipeline = gst::Pipeline::new(None);
    let (sender, receiver) = mpsc::channel();
    for i in 0..SRC_NB {
        // Sources and queues alternate between the contexts (i % CONTEXT_NB).
        let src =
            gst::ElementFactory::make("ts-udpsrc", Some(format!("src-{}", i).as_str())).unwrap();
        src.set_property("context", &format!("context-{}", (i as u32) % CONTEXT_NB))
            .unwrap();
        src.set_property("context-wait", &CONTEXT_WAIT).unwrap();
        src.set_property("port", &((FIRST_PORT + i) as u32))
            .unwrap();
        let queue =
            gst::ElementFactory::make("ts-queue", Some(format!("queue-{}", i).as_str())).unwrap();
        queue
            .set_property("context", &format!("context-{}", (i as u32) % CONTEXT_NB))
            .unwrap();
        queue.set_property("context-wait", &CONTEXT_WAIT).unwrap();
        let sink =
            gst::ElementFactory::make("appsink", Some(format!("sink-{}", i).as_str())).unwrap();
        sink.set_property("sync", &false).unwrap();
        sink.set_property("async", &false).unwrap();
        sink.set_property("emit-signals", &true).unwrap();
        pipeline.add_many(&[&src, &queue, &sink]).unwrap();
        gst::Element::link_many(&[&src, &queue, &sink]).unwrap();
        let appsink = sink.dynamic_cast::<gst_app::AppSink>().unwrap();
        let sender_clone = sender.clone();
        // Notify the scenario thread for each sample that reaches a sink.
        appsink.connect_new_sample(move |appsink| {
            let _sample = appsink
                .emit("pull-sample", &[])
                .unwrap()
                .unwrap()
                .get::<gst::Sample>()
                .unwrap()
                .unwrap();
            sender_clone.send(()).unwrap();
            Ok(gst::FlowSuccess::Ok)
        });
    }
    let pipeline_clone = pipeline.clone();
    let l_clone = l.clone();
    // Scenario run on its own thread once the pipeline reaches Playing.
    let mut test_scenario = Some(move || {
        let buffer = [0; 160];
        let socket = net::UdpSocket::bind("0.0.0.0:0").unwrap();
        let ipaddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
        let destinations = (FIRST_PORT..(FIRST_PORT + SRC_NB))
            .map(|port| SocketAddr::new(ipaddr, port))
            .collect::<Vec<_>>();
        for _ in 0..BUFFER_NB {
            for dest in &destinations {
                gst_debug!(CAT, "multiple_contexts_queue: sending buffer to {:?}", dest);
                socket.send_to(&buffer, dest).unwrap();
                std::thread::sleep(std::time::Duration::from_millis(CONTEXT_WAIT as u64));
            }
        }
        gst_debug!(
            CAT,
            "multiple_contexts_queue: waiting for all buffers notifications"
        );
        // One notification per buffer per source.
        for _ in 0..(BUFFER_NB * (SRC_NB as u32)) {
            receiver.recv().unwrap();
        }
        pipeline_clone.set_state(gst::State::Null).unwrap();
        l_clone.quit();
    });
    let bus = pipeline.get_bus().unwrap();
    let l_clone = l.clone();
    bus.add_watch(move |_, msg| {
        use gst::MessageView;
        match msg.view() {
            // Spawn the scenario when the pipeline transitions Paused -> Playing.
            MessageView::StateChanged(state_changed) => {
                if let Some(source) = state_changed.get_src() {
                    if source.get_type() == gst::Pipeline::static_type() {
                        if state_changed.get_old() == gst::State::Paused
                            && state_changed.get_current() == gst::State::Playing
                        {
                            if let Some(test_scenario) = test_scenario.take() {
                                std::thread::spawn(test_scenario);
                            }
                        }
                    }
                }
            }
            MessageView::Error(err) => {
                gst_error!(
                    CAT,
                    "multiple_contexts_queue: Error from {:?}: {} ({:?})",
                    err.get_src().map(|s| s.get_path_string()),
                    err.get_error(),
                    err.get_debug()
                );
                l_clone.quit();
            }
            _ => (),
        };
        glib::Continue(true)
    });
    pipeline.set_state(gst::State::Playing).unwrap();
    gst_debug!(CAT, "Starting main loop for multiple_contexts_queue...");
    l.run();
    gst_debug!(CAT, "Stopping main loop for multiple_contexts_queue...");
}
#[test]
fn multiple_contexts_proxy() {
use std::net;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
@ -42,48 +187,135 @@ fn test_multiple_contexts() {
const SRC_NB: u16 = 4;
const CONTEXT_WAIT: u32 = 1;
const BUFFER_NB: u32 = 3;
// Don't overlap with `multiple_contexts_queue`
const OFFSET: u16 = 10;
const FIRST_PORT: u16 = 40000 + OFFSET;
let l = glib::MainLoop::new(None, false);
let pipeline = gst::Pipeline::new(None);
let mut src_list = Vec::<gst::Element>::new();
let (sender, receiver) = mpsc::channel();
for i in 0..SRC_NB {
let src =
gst::ElementFactory::make("ts-udpsrc", Some(format!("src-{}", i).as_str())).unwrap();
let pipeline_index = i + OFFSET;
let src = gst::ElementFactory::make(
"ts-udpsrc",
Some(format!("src-{}", pipeline_index).as_str()),
)
.unwrap();
src.set_property("context", &format!("context-{}", (i as u32) % CONTEXT_NB))
.unwrap();
src.set_property("context-wait", &CONTEXT_WAIT).unwrap();
src.set_property("port", &(40000u32 + (i as u32))).unwrap();
let queue =
gst::ElementFactory::make("ts-queue", Some(format!("queue-{}", i).as_str())).unwrap();
queue
.set_property("context", &format!("context-{}", (i as u32) % CONTEXT_NB))
src.set_property("port", &((FIRST_PORT + i) as u32))
.unwrap();
queue.set_property("context-wait", &CONTEXT_WAIT).unwrap();
let fakesink =
gst::ElementFactory::make("fakesink", Some(format!("sink-{}", i).as_str())).unwrap();
fakesink.set_property("sync", &false).unwrap();
fakesink.set_property("async", &false).unwrap();
let proxysink = gst::ElementFactory::make(
"ts-proxysink",
Some(format!("proxysink-{}", pipeline_index).as_str()),
)
.unwrap();
proxysink
.set_property("proxy-context", &format!("proxy-{}", pipeline_index))
.unwrap();
let proxysrc = gst::ElementFactory::make(
"ts-proxysrc",
Some(format!("proxysrc-{}", pipeline_index).as_str()),
)
.unwrap();
proxysrc
.set_property(
"context",
&format!("context-{}", (pipeline_index as u32) % CONTEXT_NB),
)
.unwrap();
proxysrc
.set_property("proxy-context", &format!("proxy-{}", pipeline_index))
.unwrap();
pipeline.add_many(&[&src, &queue, &fakesink]).unwrap();
src.link(&queue).unwrap();
queue.link(&fakesink).unwrap();
let sink =
gst::ElementFactory::make("appsink", Some(format!("sink-{}", pipeline_index).as_str()))
.unwrap();
sink.set_property("sync", &false).unwrap();
sink.set_property("async", &false).unwrap();
sink.set_property("emit-signals", &true).unwrap();
src_list.push(src);
pipeline
.add_many(&[&src, &proxysink, &proxysrc, &sink])
.unwrap();
src.link(&proxysink).unwrap();
proxysrc.link(&sink).unwrap();
let appsink = sink.dynamic_cast::<gst_app::AppSink>().unwrap();
let sender_clone = sender.clone();
appsink.connect_new_sample(move |appsink| {
let _sample = appsink
.emit("pull-sample", &[])
.unwrap()
.unwrap()
.get::<gst::Sample>()
.unwrap()
.unwrap();
sender_clone.send(()).unwrap();
Ok(gst::FlowSuccess::Ok)
});
}
let pipeline_clone = pipeline.clone();
let l_clone = l.clone();
let mut test_scenario = Some(move || {
let buffer = [0; 160];
let socket = net::UdpSocket::bind("0.0.0.0:0").unwrap();
let ipaddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
let destinations = (FIRST_PORT..(FIRST_PORT + SRC_NB))
.map(|port| SocketAddr::new(ipaddr, port))
.collect::<Vec<_>>();
for _ in 0..BUFFER_NB {
for dest in &destinations {
gst_debug!(CAT, "multiple_contexts_proxy: sending buffer to {:?}", dest);
socket.send_to(&buffer, dest).unwrap();
std::thread::sleep(std::time::Duration::from_millis(CONTEXT_WAIT as u64));
}
}
gst_debug!(
CAT,
"multiple_contexts_proxy: waiting for all buffers notifications"
);
for _ in 0..(BUFFER_NB * (SRC_NB as u32)) {
receiver.recv().unwrap();
}
pipeline_clone.set_state(gst::State::Null).unwrap();
l_clone.quit();
});
let bus = pipeline.get_bus().unwrap();
let l_clone = l.clone();
bus.add_watch(move |_, msg| {
use gst::MessageView;
match msg.view() {
MessageView::StateChanged(state_changed) => {
if let Some(source) = state_changed.get_src() {
if source.get_type() == gst::Pipeline::static_type() {
if state_changed.get_old() == gst::State::Paused
&& state_changed.get_current() == gst::State::Playing
{
if let Some(test_scenario) = test_scenario.take() {
std::thread::spawn(test_scenario);
}
}
}
}
}
MessageView::Error(err) => {
println!(
"Error from {:?}: {} ({:?})",
gst_error!(
CAT,
"multiple_contexts_proxy: Error from {:?}: {} ({:?})",
err.get_src().map(|s| s.get_path_string()),
err.get_error(),
err.get_debug()
@ -96,94 +328,121 @@ fn test_multiple_contexts() {
glib::Continue(true)
});
let pipeline_clone = pipeline.clone();
let l_clone = l.clone();
std::thread::spawn(move || {
// Sleep to allow the pipeline to be ready
std::thread::sleep(std::time::Duration::from_millis(50));
let buffer = [0; 160];
let socket = net::UdpSocket::bind("0.0.0.0:0").unwrap();
let ipaddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
let destinations = (40000..(40000 + SRC_NB))
.map(|port| SocketAddr::new(ipaddr, port))
.collect::<Vec<_>>();
let wait = std::time::Duration::from_millis(CONTEXT_WAIT as u64);
for _ in 0..BUFFER_NB {
let now = std::time::Instant::now();
for dest in &destinations {
socket.send_to(&buffer, dest).unwrap();
}
let elapsed = now.elapsed();
if elapsed < wait {
std::thread::sleep(wait - elapsed);
}
}
std::thread::sleep(std::time::Duration::from_millis(50));
pipeline_clone.set_state(gst::State::Null).unwrap();
l_clone.quit();
});
pipeline.set_state(gst::State::Playing).unwrap();
println!("starting...");
gst_debug!(CAT, "Starting main loop for multiple_contexts_proxy...");
l.run();
gst_debug!(CAT, "Stopping main loop for multiple_contexts_proxy...");
}
#[test]
fn test_premature_shutdown() {
use gst::prelude::*;
fn eos() {
const CONTEXT: &str = "test_eos";
init();
const CONTEXT_NAME: &str = "pipeline-context";
const CONTEXT_WAIT: u32 = 1;
const QUEUE_BUFFER_CAPACITY: u32 = 1;
const BURST_NB: u32 = 2;
let l = glib::MainLoop::new(None, false);
let pipeline = gst::Pipeline::new(None);
let caps = gst::Caps::new_simple("foo/bar", &[]);
let appsrc = gst::ElementFactory::make("ts-appsrc", None).unwrap();
appsrc.set_property("caps", &caps).unwrap();
appsrc.set_property("do-timestamp", &true).unwrap();
appsrc.set_property("context", &CONTEXT_NAME).unwrap();
appsrc.set_property("context-wait", &CONTEXT_WAIT).unwrap();
let src = gst::ElementFactory::make("ts-appsrc", Some("src-eos")).unwrap();
src.set_property("caps", &caps).unwrap();
src.set_property("do-timestamp", &true).unwrap();
src.set_property("context", &CONTEXT).unwrap();
let queue = gst::ElementFactory::make("ts-queue", None).unwrap();
queue.set_property("context", &CONTEXT_NAME).unwrap();
queue.set_property("context-wait", &CONTEXT_WAIT).unwrap();
queue
.set_property("max-size-buffers", &QUEUE_BUFFER_CAPACITY)
.unwrap();
let queue = gst::ElementFactory::make("ts-queue", Some("queue-eos")).unwrap();
queue.set_property("context", &CONTEXT).unwrap();
let fakesink = gst::ElementFactory::make("fakesink", None).unwrap();
fakesink.set_property("sync", &false).unwrap();
fakesink.set_property("async", &false).unwrap();
let appsink = gst::ElementFactory::make("appsink", Some("sink-eos")).unwrap();
pipeline.add_many(&[&appsrc, &queue, &fakesink]).unwrap();
appsrc.link(&queue).unwrap();
queue.link(&fakesink).unwrap();
pipeline.add_many(&[&src, &queue, &appsink]).unwrap();
gst::Element::link_many(&[&src, &queue, &appsink]).unwrap();
let bus = pipeline.get_bus().unwrap();
appsink.set_property("sync", &false).unwrap();
appsink.set_property("async", &false).unwrap();
appsink.set_property("emit-signals", &true).unwrap();
let (sample_notifier, sample_notif_rcv) = mpsc::channel();
let (eos_notifier, eos_notif_rcv) = mpsc::channel();
let appsink = appsink.dynamic_cast::<gst_app::AppSink>().unwrap();
appsink.connect_new_sample(move |appsink| {
gst_debug!(CAT, obj: appsink, "eos: pulling sample");
let _ = appsink
.emit("pull-sample", &[])
.unwrap()
.unwrap()
.get::<gst::Sample>()
.unwrap()
.unwrap();
sample_notifier.send(()).unwrap();
Ok(gst::FlowSuccess::Ok)
});
appsink.connect_eos(move |_appsink| eos_notifier.send(()).unwrap());
fn push_buffer(src: &gst::Element) -> bool {
gst_debug!(CAT, obj: src, "eos: pushing buffer");
src.emit("push-buffer", &[&gst::Buffer::from_slice(vec![0; 1024])])
.unwrap()
.unwrap()
.get_some::<bool>()
.unwrap()
}
let pipeline_clone = pipeline.clone();
let l_clone = l.clone();
bus.add_watch(move |_, msg| {
let mut scenario = Some(move || {
// Initialize the dataflow
assert!(push_buffer(&src));
sample_notif_rcv.recv().unwrap();
assert!(src
.emit("end-of-stream", &[])
.unwrap()
.unwrap()
.get_some::<bool>()
.unwrap());
eos_notif_rcv.recv().unwrap();
assert!(push_buffer(&src));
std::thread::sleep(std::time::Duration::from_millis(50));
assert_eq!(
sample_notif_rcv.try_recv().unwrap_err(),
mpsc::TryRecvError::Empty
);
pipeline_clone.set_state(gst::State::Null).unwrap();
l_clone.quit();
});
let l_clone = l.clone();
pipeline.get_bus().unwrap().add_watch(move |_, msg| {
use gst::MessageView;
match msg.view() {
MessageView::StateChanged(state_changed) => {
if let Some(source) = state_changed.get_src() {
if source.get_type() != gst::Pipeline::static_type() {
return glib::Continue(true);
}
if state_changed.get_old() == gst::State::Paused
&& state_changed.get_current() == gst::State::Playing
{
if let Some(scenario) = scenario.take() {
std::thread::spawn(scenario);
}
}
}
}
MessageView::Error(err) => {
println!(
"Error from {:?}: {} ({:?})",
gst_error!(
CAT,
"eos: Error from {:?}: {} ({:?})",
err.get_src().map(|s| s.get_path_string()),
err.get_error(),
err.get_debug()
@ -196,40 +455,157 @@ fn test_premature_shutdown() {
glib::Continue(true)
});
pipeline.set_state(gst::State::Playing).unwrap();
gst_debug!(CAT, "Starting main loop for eos...");
l.run();
gst_debug!(CAT, "Stopping main loop for eos...");
}
#[test]
fn premature_shutdown() {
init();
const APPSRC_CONTEXT_WAIT: u32 = 0;
const QUEUE_CONTEXT_WAIT: u32 = 1;
const QUEUE_ITEMS_CAPACITY: u32 = 1;
let l = glib::MainLoop::new(None, false);
let pipeline = gst::Pipeline::new(None);
let caps = gst::Caps::new_simple("foo/bar", &[]);
let src = gst::ElementFactory::make("ts-appsrc", Some("src-ps")).unwrap();
src.set_property("caps", &caps).unwrap();
src.set_property("do-timestamp", &true).unwrap();
src.set_property("context", &"appsrc-context").unwrap();
src.set_property("context-wait", &APPSRC_CONTEXT_WAIT)
.unwrap();
let queue = gst::ElementFactory::make("ts-queue", Some("queue-ps")).unwrap();
queue.set_property("context", &"queue-context").unwrap();
queue
.set_property("context-wait", &QUEUE_CONTEXT_WAIT)
.unwrap();
queue
.set_property("max-size-buffers", &QUEUE_ITEMS_CAPACITY)
.unwrap();
let appsink = gst::ElementFactory::make("appsink", Some("sink-ps")).unwrap();
pipeline.add_many(&[&src, &queue, &appsink]).unwrap();
gst::Element::link_many(&[&src, &queue, &appsink]).unwrap();
appsink.set_property("emit-signals", &true).unwrap();
appsink.set_property("sync", &false).unwrap();
appsink.set_property("async", &false).unwrap();
let (sender, receiver) = mpsc::channel();
let appsink = appsink.dynamic_cast::<gst_app::AppSink>().unwrap();
appsink.connect_new_sample(move |appsink| {
gst_debug!(CAT, obj: appsink, "premature_shutdown: pulling sample");
let _sample = appsink
.emit("pull-sample", &[])
.unwrap()
.unwrap()
.get::<gst::Sample>()
.unwrap()
.unwrap();
sender.send(()).unwrap();
Ok(gst::FlowSuccess::Ok)
});
fn push_buffer(src: &gst::Element) -> bool {
gst_debug!(CAT, obj: src, "premature_shutdown: pushing buffer");
src.emit("push-buffer", &[&gst::Buffer::from_slice(vec![0; 1024])])
.unwrap()
.unwrap()
.get_some::<bool>()
.unwrap()
}
let pipeline_clone = pipeline.clone();
let l_clone = l.clone();
std::thread::spawn(move || {
// Sleep to allow the pipeline to be ready
std::thread::sleep(std::time::Duration::from_millis(10));
let mut scenario = Some(move || {
gst_debug!(CAT, "premature_shutdown: STEP 1: Playing");
// Initialize the dataflow
assert!(push_buffer(&src));
// Fill up the queue then pause a bit and push again
let mut burst_idx = 0;
loop {
let was_pushed = appsrc
.emit("push-buffer", &[&gst::Buffer::from_slice(vec![0; 1024])])
.unwrap()
.unwrap()
.get_some::<bool>()
.unwrap();
// Wait for the buffer to reach AppSink
receiver.recv().unwrap();
assert_eq!(receiver.try_recv().unwrap_err(), mpsc::TryRecvError::Empty);
if !was_pushed {
if burst_idx < BURST_NB {
burst_idx += 1;
// Sleep a bit to let a few buffers go through
std::thread::sleep(std::time::Duration::from_micros(500));
} else {
pipeline_clone.set_state(gst::State::Null).unwrap();
break;
}
}
}
assert!(push_buffer(&src));
pipeline_clone.set_state(gst::State::Paused).unwrap();
// Paused -> can't push_buffer
assert!(!push_buffer(&src));
gst_debug!(CAT, "premature_shutdown: STEP 2: Paused -> Playing");
pipeline_clone.set_state(gst::State::Playing).unwrap();
gst_debug!(CAT, "premature_shutdown: STEP 3: Playing");
receiver.recv().unwrap();
assert!(push_buffer(&src));
receiver.recv().unwrap();
// Fill up the (dataqueue) and abruptly shutdown
assert!(push_buffer(&src));
assert!(push_buffer(&src));
gst_debug!(CAT, "premature_shutdown: STEP 4: Shutdown");
pipeline_clone.set_state(gst::State::Null).unwrap();
assert!(!push_buffer(&src));
l_clone.quit();
});
let l_clone = l.clone();
pipeline.get_bus().unwrap().add_watch(move |_, msg| {
use gst::MessageView;
match msg.view() {
MessageView::StateChanged(state_changed) => {
if let Some(source) = state_changed.get_src() {
if source.get_type() != gst::Pipeline::static_type() {
return glib::Continue(true);
}
if state_changed.get_old() == gst::State::Paused
&& state_changed.get_current() == gst::State::Playing
{
if let Some(scenario) = scenario.take() {
std::thread::spawn(scenario);
}
}
}
}
MessageView::Error(err) => {
gst_error!(
CAT,
"premature_shutdown: Error from {:?}: {} ({:?})",
err.get_src().map(|s| s.get_path_string()),
err.get_error(),
err.get_debug()
);
l_clone.quit();
}
_ => (),
};
glib::Continue(true)
});
pipeline.set_state(gst::State::Playing).unwrap();
println!("starting...");
gst_debug!(CAT, "Starting main loop for premature_shutdown...");
l.run();
gst_debug!(CAT, "Stopped main loop for premature_shutdown...");
}

View file

@ -87,7 +87,7 @@ fn test_push() {
eos = true;
break;
}
MessageView::Error(..) => unreachable!(),
MessageView::Error(err) => unreachable!("proxy::test_push {:?}", err),
_ => (),
}
}

View file

@ -82,17 +82,18 @@ fn test_push() {
use gst::EventView;
let event = h.pull_event().unwrap();
// The StickyEvent for the TaskContext is pushed first
match event.view() {
EventView::StreamStart(..) => {
assert_eq!(n_events, 0);
assert_eq!(n_events, 1);
}
EventView::Caps(ev) => {
assert_eq!(n_events, 1);
assert_eq!(n_events, 2);
let event_caps = ev.get_caps();
assert_eq!(caps.as_ref(), event_caps);
}
EventView::Segment(..) => {
assert_eq!(n_events, 2);
assert_eq!(n_events, 3);
break;
}
_ => (),