// Copyright (C) 2022 LTN Global Communications, Inc.
// Contact: Jan Alexander Steffens (heftig) <jan.steffens@ltnglobal.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0.
// If a copy of the MPL was not distributed with this file, You can obtain one at
// <https://mozilla.org/MPL/2.0/>.
//
// SPDX-License-Identifier: MPL-2.0

use gst::glib::once_cell::sync::Lazy;
use gst::{
    glib::{self, translate::IntoGlib},
    prelude::*,
    subclass::prelude::*,
};
use parking_lot::{Condvar, Mutex, MutexGuard};
use std::{collections::VecDeque, sync::mpsc};

/// Offset for the segment in single-segment mode, to handle negative DTS
const SEGMENT_OFFSET: gst::ClockTime = gst::ClockTime::from_seconds(60 * 60 * 1000);
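// (60 * 60 * 1000 seconds = 1000 hours, comfortably larger than any
// negative DTS we expect to encounter.)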

static CAT: Lazy<gst::DebugCategory> = Lazy::new(|| {
    gst::DebugCategory::new(
        "livesync",
        gst::DebugColorFlags::empty(),
        Some("debug category for the livesync element"),
    )
});

fn audio_info_from_caps(
    caps: &gst::CapsRef,
) -> Result<Option<gst_audio::AudioInfo>, glib::BoolError> {
    caps.structure(0)
        .map_or(false, |s| s.has_name("audio/x-raw"))
        .then(|| gst_audio::AudioInfo::from_caps(caps))
        .transpose()
}
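// Non-audio caps yield Ok(None); malformed "audio/x-raw" caps yield Err,
// which the CAPS event handler in sink_event logs and rejects.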

fn duration_from_caps(caps: &gst::CapsRef) -> Option<gst::ClockTime> {
    caps.structure(0)
        .filter(|s| s.name().starts_with("video/") || s.name().starts_with("image/"))
        .and_then(|s| s.get::<gst::Fraction>("framerate").ok())
        .filter(|framerate| framerate.denom() > 0 && framerate.numer() > 0)
        .and_then(|framerate| {
            gst::ClockTime::SECOND.mul_div_round(framerate.denom() as u64, framerate.numer() as u64)
        })
}
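// e.g. framerate 30/1 gives SECOND * 1/30, i.e. about 33.33ms per frame;
// a missing or non-positive framerate gives None.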

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum BufferLateness {
    OnTime,
    LateUnderThreshold,
    LateOverThreshold,
}

#[derive(Debug)]
enum Item {
    Buffer(gst::Buffer, BufferLateness),
    Event(gst::Event),
    // SAFETY: Item needs to wait until the query and the receiver have returned
    Query(std::ptr::NonNull<gst::QueryRef>, mpsc::SyncSender<bool>),
}

// SAFETY: Need to be able to pass *mut gst::QueryRef
unsafe impl Send for Item {}
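// The Send impl is sound only because the sinkpad thread blocks in
// sink_query until the src loop has answered via the SyncSender, keeping
// the QueryRef alive for as long as the raw pointer is in use.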

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Timestamps {
    start: gst::ClockTime,
    end: gst::ClockTime,
}

#[derive(Debug)]
pub struct LiveSync {
    state: Mutex<State>,
    cond: Condvar,
    sinkpad: gst::Pad,
    srcpad: gst::Pad,
}

#[derive(Debug)]
struct State {
    /// See `PROP_LATENCY`
    latency: gst::ClockTime,

    /// See `PROP_LATE_THRESHOLD`
    late_threshold: Option<gst::ClockTime>,

    /// See `PROP_SINGLE_SEGMENT`
    single_segment: bool,

    /// Latency reported by upstream
    upstream_latency: Option<gst::ClockTime>,

    /// Whether we're in PLAYING state
    playing: bool,

    /// Whether our sinkpad is EOS
    eos: bool,

    /// Flow state of our srcpad
    srcresult: Result<gst::FlowSuccess, gst::FlowError>,

    /// Wait operation for our next buffer
    clock_id: Option<gst::SingleShotClockId>,

    /// Segment of our sinkpad
    in_segment: Option<gst::FormattedSegment<gst::ClockTime>>,

    /// Segment to be applied to the srcpad on the next queued buffer
    pending_segment: Option<gst::FormattedSegment<gst::ClockTime>>,

    /// Segment of our srcpad
    out_segment: Option<gst::FormattedSegment<gst::ClockTime>>,

    /// Caps of our sinkpad
    in_caps: Option<gst::Caps>,

    /// Caps to be applied to the srcpad on the next queued buffer
    pending_caps: Option<gst::Caps>,

    /// Audio format of our sinkpad
    in_audio_info: Option<gst_audio::AudioInfo>,

    /// Audio format of our srcpad
    out_audio_info: Option<gst_audio::AudioInfo>,

    /// Duration from caps on our sinkpad
    in_duration: Option<gst::ClockTime>,

    /// Duration from caps on our srcpad
    out_duration: Option<gst::ClockTime>,

    /// Queue between sinkpad and srcpad.
    ///
    /// May hold an unlimited number of events or queries, but only one
    /// buffer at a time: the src loop needs the next buffer to be available
    /// immediately after pushing the previous one (otherwise it inserts a
    /// repeat), and letting non-buffer items block upstream's push would
    /// almost guarantee the buffer arrives too late.
    queue: VecDeque<Item>,

    /// Whether our queue currently holds a buffer. We only allow one!
    buffer_queued: bool,

    /// Current buffer of our srcpad
    out_buffer: Option<gst::Buffer>,

    /// Whether our last output buffer was a duplicate
    out_buffer_duplicate: bool,

    /// Running timestamp of our sinkpad
    in_timestamp: Option<Timestamps>,

    /// Running timestamp of our srcpad
    out_timestamp: Option<Timestamps>,

    /// See `PROP_IN`
    num_in: u64,

    /// See `PROP_DROP`
    num_drop: u64,

    /// See `PROP_OUT`
    num_out: u64,

    /// See `PROP_DUPLICATE`
    num_duplicate: u64,
}

const PROP_LATENCY: &str = "latency";
const PROP_LATE_THRESHOLD: &str = "late-threshold";
const PROP_SINGLE_SEGMENT: &str = "single-segment";

const PROP_IN: &str = "in";
const PROP_DROP: &str = "drop";
const PROP_OUT: &str = "out";
const PROP_DUPLICATE: &str = "duplicate";

const DEFAULT_LATENCY: gst::ClockTime = gst::ClockTime::ZERO;
const MINIMUM_DURATION: gst::ClockTime = gst::ClockTime::from_mseconds(8);
const DEFAULT_DURATION: gst::ClockTime = gst::ClockTime::from_mseconds(100);
const MAXIMUM_DURATION: gst::ClockTime = gst::ClockTime::from_seconds(10);
const MINIMUM_LATE_THRESHOLD: gst::ClockTime = gst::ClockTime::ZERO;
const DEFAULT_LATE_THRESHOLD: Option<gst::ClockTime> = Some(gst::ClockTime::from_seconds(2));
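// MINIMUM/DEFAULT/MAXIMUM_DURATION presumably clamp the fallback frame
// duration derived via duration_from_caps (see update_fallback_duration).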

impl Default for State {
    fn default() -> Self {
        Self {
            latency: DEFAULT_LATENCY,
            late_threshold: DEFAULT_LATE_THRESHOLD,
            single_segment: false,
            upstream_latency: None,
            playing: false,
            eos: false,
            srcresult: Err(gst::FlowError::Flushing),
            clock_id: None,
            in_segment: None,
            pending_segment: None,
            out_segment: None,
            in_caps: None,
            pending_caps: None,
            in_duration: None,
            out_duration: None,
            in_audio_info: None,
            out_audio_info: None,
            queue: VecDeque::with_capacity(32),
            buffer_queued: false,
            out_buffer: None,
            out_buffer_duplicate: false,
            in_timestamp: None,
            out_timestamp: None,
            num_in: 0,
            num_drop: 0,
            num_out: 0,
            num_duplicate: 0,
        }
    }
}

#[glib::object_subclass]
impl ObjectSubclass for LiveSync {
    const NAME: &'static str = "GstLiveSync";
    type Type = super::LiveSync;
    type ParentType = gst::Element;

    fn with_class(class: &Self::Class) -> Self {
        let sinkpad = gst::Pad::builder_from_template(&class.pad_template("sink").unwrap())
            .activatemode_function(|pad, parent, mode, active| {
                Self::catch_panic_pad_function(
                    parent,
                    || Err(gst::loggable_error!(CAT, "sink_activatemode panicked")),
                    |livesync| livesync.sink_activatemode(pad, mode, active),
                )
            })
            .event_function(|pad, parent, event| {
                Self::catch_panic_pad_function(
                    parent,
                    || false,
                    |livesync| livesync.sink_event(pad, event),
                )
            })
            .query_function(|pad, parent, query| {
                Self::catch_panic_pad_function(
                    parent,
                    || false,
                    |livesync| livesync.sink_query(pad, query),
                )
            })
            .chain_function(|pad, parent, buffer| {
                Self::catch_panic_pad_function(
                    parent,
                    || Err(gst::FlowError::Error),
                    |livesync| livesync.sink_chain(pad, buffer),
                )
            })
            .flags(
                gst::PadFlags::PROXY_CAPS
                    | gst::PadFlags::PROXY_ALLOCATION
                    | gst::PadFlags::PROXY_SCHEDULING,
            )
            .build();

        let srcpad = gst::Pad::builder_from_template(&class.pad_template("src").unwrap())
            .activatemode_function(|pad, parent, mode, active| {
                Self::catch_panic_pad_function(
                    parent,
                    || Err(gst::loggable_error!(CAT, "src_activatemode panicked")),
                    |livesync| livesync.src_activatemode(pad, mode, active),
                )
            })
            .event_function(|pad, parent, event| {
                Self::catch_panic_pad_function(
                    parent,
                    || false,
                    |livesync| livesync.src_event(pad, event),
                )
            })
            .query_function(|pad, parent, query| {
                Self::catch_panic_pad_function(
                    parent,
                    || false,
                    |livesync| livesync.src_query(pad, query),
                )
            })
            .flags(
                gst::PadFlags::PROXY_CAPS
                    | gst::PadFlags::PROXY_ALLOCATION
                    | gst::PadFlags::PROXY_SCHEDULING,
            )
            .build();

        Self {
            state: Default::default(),
            cond: Condvar::new(),
            sinkpad,
            srcpad,
        }
    }
}

impl ObjectImpl for LiveSync {
    fn properties() -> &'static [glib::ParamSpec] {
        static PROPERTIES: Lazy<[glib::ParamSpec; 7]> = Lazy::new(|| {
            [
                glib::ParamSpecUInt64::builder(PROP_LATENCY)
                    .nick("Latency")
                    .blurb(
                        "Additional latency to allow upstream to take longer to \
                         produce buffers for the current position (in nanoseconds)",
                    )
                    .maximum(i64::MAX as u64)
                    .default_value(DEFAULT_LATENCY.into_glib())
                    .mutable_playing()
                    .build(),
                glib::ParamSpecUInt64::builder(PROP_LATE_THRESHOLD)
                    .nick("Late threshold")
                    .blurb(
                        "Maximum time spent (in nanoseconds) before \
                         accepting one late buffer; -1 = never",
                    )
                    .minimum(MINIMUM_LATE_THRESHOLD.into_glib())
                    .default_value(DEFAULT_LATE_THRESHOLD.into_glib())
                    .mutable_playing()
                    .build(),
                glib::ParamSpecBoolean::builder(PROP_SINGLE_SEGMENT)
                    .nick("Single segment")
                    .blurb("Timestamp buffers and eat segments so as to appear as one segment")
                    .mutable_ready()
                    .build(),
                glib::ParamSpecUInt64::builder(PROP_IN)
                    .nick("Frames input")
                    .blurb("Number of incoming frames accepted")
                    .read_only()
                    .build(),
                glib::ParamSpecUInt64::builder(PROP_DROP)
                    .nick("Frames dropped")
                    .blurb("Number of incoming frames dropped")
                    .read_only()
                    .build(),
                glib::ParamSpecUInt64::builder(PROP_OUT)
                    .nick("Frames output")
                    .blurb("Number of outgoing frames produced")
                    .read_only()
                    .build(),
                glib::ParamSpecUInt64::builder(PROP_DUPLICATE)
                    .nick("Frames duplicated")
                    .blurb("Number of outgoing frames duplicated")
                    .read_only()
                    .build(),
            ]
        });

        PROPERTIES.as_ref()
    }

    fn constructed(&self) {
        self.parent_constructed();

        let obj = self.obj();
        obj.add_pad(&self.sinkpad).unwrap();
        obj.add_pad(&self.srcpad).unwrap();
        obj.set_element_flags(gst::ElementFlags::PROVIDE_CLOCK | gst::ElementFlags::REQUIRE_CLOCK);
    }

    fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) {
        let mut state = self.state.lock();
        match pspec.name() {
            PROP_LATENCY => {
                state.latency = value.get().unwrap();
                let _ = self.obj().post_message(gst::message::Latency::new());
            }

            PROP_LATE_THRESHOLD => {
                state.late_threshold = value.get().unwrap();
            }

            PROP_SINGLE_SEGMENT => {
                state.single_segment = value.get().unwrap();
            }

            _ => unimplemented!(),
        }
    }

    fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value {
        let state = self.state.lock();
        match pspec.name() {
            PROP_LATENCY => state.latency.to_value(),
            PROP_LATE_THRESHOLD => state.late_threshold.to_value(),
            PROP_SINGLE_SEGMENT => state.single_segment.to_value(),
            PROP_IN => state.num_in.to_value(),
            PROP_DROP => state.num_drop.to_value(),
            PROP_OUT => state.num_out.to_value(),
            PROP_DUPLICATE => state.num_duplicate.to_value(),
            _ => unimplemented!(),
        }
    }
}
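// Note: late-threshold is exposed as a u64 property where u64::MAX (shown
// as -1) maps to `None`, the usual GStreamer convention for an "infinite"
// ClockTime.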

impl GstObjectImpl for LiveSync {}

impl ElementImpl for LiveSync {
    fn metadata() -> Option<&'static gst::subclass::ElementMetadata> {
        static ELEMENT_METADATA: Lazy<gst::subclass::ElementMetadata> = Lazy::new(|| {
            gst::subclass::ElementMetadata::new(
                "Live Synchronizer",
                "Filter",
                "Outputs livestream, inserting gap frames when input lags",
                "Jan Alexander Steffens (heftig) <jan.steffens@ltnglobal.com>",
            )
        });

        Some(&*ELEMENT_METADATA)
    }
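    // Illustrative use only (pipeline assumed, not taken from this repo):
    //   gst-launch-1.0 videotestsrc is-live=true ! livesync latency=2000000000 \
    //       ! autovideosink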

    fn pad_templates() -> &'static [gst::PadTemplate] {
        static PAD_TEMPLATES: Lazy<[gst::PadTemplate; 2]> = Lazy::new(|| {
            let caps = gst::Caps::new_any();

            [
                gst::PadTemplate::new(
                    "sink",
                    gst::PadDirection::Sink,
                    gst::PadPresence::Always,
                    &caps,
                )
                .unwrap(),
                gst::PadTemplate::new(
                    "src",
                    gst::PadDirection::Src,
                    gst::PadPresence::Always,
                    &caps,
                )
                .unwrap(),
            ]
        });

        PAD_TEMPLATES.as_ref()
    }

    fn change_state(
        &self,
        transition: gst::StateChange,
    ) -> Result<gst::StateChangeSuccess, gst::StateChangeError> {
        gst::trace!(CAT, imp: self, "Changing state {:?}", transition);

        if transition == gst::StateChange::PausedToPlaying {
            let mut state = self.state.lock();
            state.playing = true;
            self.cond.notify_all();
        }

        let success = self.parent_change_state(transition)?;

        match transition {
            gst::StateChange::PlayingToPaused => {
                let mut state = self.state.lock();
                state.playing = false;
            }

            gst::StateChange::PausedToReady => {
                let mut state = self.state.lock();
                state.num_in = 0;
                state.num_drop = 0;
                state.num_out = 0;
                state.num_duplicate = 0;
            }

            _ => {}
        }

        match (transition, success) {
            (
                gst::StateChange::ReadyToPaused | gst::StateChange::PlayingToPaused,
                gst::StateChangeSuccess::Success,
            ) => Ok(gst::StateChangeSuccess::NoPreroll),
            (_, s) => Ok(s),
        }
    }
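    // As a live element, change_state above reports NO_PREROLL when going
    // to PAUSED, and provide_clock below supplies the system clock we sync
    // against.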

    fn provide_clock(&self) -> Option<gst::Clock> {
        Some(gst::SystemClock::obtain())
    }
}

impl State {
    /// Calculate the running time the buffer covers, including latency
    fn ts_range(
        &self,
        buf: &gst::BufferRef,
        segment: &gst::FormattedSegment<gst::ClockTime>,
    ) -> Option<Timestamps> {
        let mut timestamp_start = buf.dts_or_pts()?;

        if !self.single_segment {
            timestamp_start = segment
                .to_running_time(timestamp_start)
                .unwrap_or(gst::ClockTime::ZERO);
            timestamp_start += self.latency + self.upstream_latency.unwrap();
        } else {
            timestamp_start += self.upstream_latency.unwrap();
            timestamp_start = timestamp_start.saturating_sub(SEGMENT_OFFSET);
        }

        Some(Timestamps {
            start: timestamp_start,
            end: timestamp_start + buf.duration().unwrap(),
        })
    }
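    // ts_range example (illustrative numbers): a 20ms buffer at running
    // time 1.000s with latency=30ms and an upstream latency of 100ms covers
    // 1.130s..1.150s in non-single-segment mode.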

    fn pending_events(&self) -> bool {
        self.pending_caps.is_some() || self.pending_segment.is_some()
    }
}

impl LiveSync {
    fn sink_activatemode(
        &self,
        pad: &gst::Pad,
        mode: gst::PadMode,
        active: bool,
    ) -> Result<(), gst::LoggableError> {
        if mode != gst::PadMode::Push {
            return Err(gst::loggable_error!(CAT, "Wrong scheduling mode"));
        }

        if !active {
            self.set_flushing(&mut self.state.lock());

            let lock = pad.stream_lock();
            self.sink_reset(&mut self.state.lock());
            drop(lock);
        }

        Ok(())
    }

    fn src_activatemode(
        &self,
        pad: &gst::Pad,
        mode: gst::PadMode,
        active: bool,
    ) -> Result<(), gst::LoggableError> {
        if mode != gst::PadMode::Push {
            return Err(gst::loggable_error!(CAT, "Wrong scheduling mode"));
        }

        if active {
            self.start_src_task(&mut self.state.lock())
                .map_err(|e| gst::LoggableError::new(*CAT, e))?;
        } else {
            let mut state = self.state.lock();
            self.set_flushing(&mut state);
            self.src_reset(&mut state);
            drop(state);

            pad.stop_task()?;
        }

        Ok(())
    }

    fn set_flushing(&self, state: &mut State) {
        state.srcresult = Err(gst::FlowError::Flushing);
        if let Some(clock_id) = state.clock_id.take() {
            clock_id.unschedule();
        }

        // Ensure we drop any query response sender to unblock the sinkpad
        state.queue.clear();
        state.buffer_queued = false;

        self.cond.notify_all();
    }

    fn sink_reset(&self, state: &mut State) {
        state.eos = false;
        state.in_segment = None;
        state.in_caps = None;
        state.in_audio_info = None;
        state.in_duration = None;
        state.in_timestamp = None;
    }

    fn src_reset(&self, state: &mut State) {
        state.pending_segment = None;
        state.out_segment = None;
        state.pending_caps = None;
        state.out_audio_info = None;
        state.out_duration = None;
        state.out_buffer = None;
        state.out_buffer_duplicate = false;
        state.out_timestamp = None;
    }
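    // In single-segment mode downstream timestamps are shifted by `latency`,
    // so sink_event adds that amount to the running-time offset of incoming
    // events (and src_event subtracts it again on the way back upstream).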
fn sink_event(&self, pad: &gst::Pad, mut event: gst::Event) -> bool {
|
|
|
|
{
|
|
|
|
let state = self.state.lock();
|
|
|
|
if state.single_segment {
|
|
|
|
let event = event.make_mut();
|
|
|
|
let latency = state.latency.nseconds() as i64;
|
|
|
|
event.set_running_time_offset(event.running_time_offset() + latency);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-08 19:33:29 +00:00
|
|
|
let mut is_restart = false;
|
|
|
|
let mut is_eos = false;
|
|
|
|
|
2022-10-12 13:35:11 +00:00
|
|
|
match event.view() {
|
|
|
|
gst::EventView::FlushStart(_) => {
|
|
|
|
let ret = self.srcpad.push_event(event);
|
|
|
|
|
2023-10-24 21:53:17 +00:00
|
|
|
self.set_flushing(&mut self.state.lock());
|
|
|
|
|
|
|
|
if let Err(e) = self.srcpad.pause_task() {
|
|
|
|
gst::error!(CAT, imp: self, "Failed to pause task: {e}");
|
|
|
|
return false;
|
2022-10-12 13:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
gst::EventView::FlushStop(_) => {
|
|
|
|
let ret = self.srcpad.push_event(event);
|
|
|
|
|
|
|
|
let mut state = self.state.lock();
|
2023-10-24 21:53:17 +00:00
|
|
|
self.sink_reset(&mut state);
|
|
|
|
self.src_reset(&mut state);
|
|
|
|
|
|
|
|
if let Err(e) = self.start_src_task(&mut state) {
|
|
|
|
gst::error!(CAT, imp: self, "Failed to start task: {e}");
|
|
|
|
return false;
|
|
|
|
}
|
2022-10-12 13:35:11 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-02-08 19:33:29 +00:00
|
|
|
gst::EventView::StreamStart(_) => is_restart = true,
|
2022-10-12 13:35:11 +00:00
|
|
|
|
|
|
|
gst::EventView::Segment(e) => {
|
2023-02-08 19:33:29 +00:00
|
|
|
is_restart = true;
|
|
|
|
|
2022-10-12 13:35:11 +00:00
|
|
|
let segment = match e.segment().downcast_ref() {
|
|
|
|
Some(s) => s,
|
|
|
|
None => {
|
|
|
|
gst::error!(CAT, imp: self, "Got non-TIME segment");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut state = self.state.lock();
|
livesync: Fix queueing
The logic of the element requires the next buffer to be available
immediately after we are done pushing the previous, otherwise we insert
a repeat.
Making the src loop handle events and queries broke this, as upstream is
almost guaranteed not to deliver a buffer in time if we allow non-buffer
items to block upstream's push.
To fix this, replace our single-item `Option` with a `VecDeque` that we
allow to hold an unlimited number of events or queries, but only one
buffer at a time.
In addition, the code was confused about the current caps and segment.
This wasn't an issue before making the src loop handle events and
queries, as only the sinkpad cared about the current segment, using it
to buffers received, and only the srcpad cared about the current caps,
sending it just before sending the next received buffer.
Now the sinkpad cares about caps (through `update_fallback_duration`)
and the srcpad cares about the segment (when not in single-segment
mode).
Fix this by
- making `in_caps` always hold the current caps of the sinkpad,
- adding `pending_caps`, which is used by the srcpad to store
caps to be sent with the next received buffer,
- adding `in_segment`, holding the current segment of the sinkpad,
- adding `pending_segment`, which is used by the srcpad to store
the segment to be sent with the next received buffer,
- adding `out_segment`, holding the current segment of the srcpad.
Maybe a fix for
https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/issues/298.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/merge_requests/1082>
2023-02-09 09:04:47 +00:00
|
|
|
state.in_segment = Some(segment.clone());
|
2022-10-12 13:35:11 +00:00
|
|
|
}
|
|
|
|
|
2023-02-08 19:33:29 +00:00
|
|
|
gst::EventView::Eos(_) => is_eos = true,
|
2022-10-12 13:35:11 +00:00
|
|
|
|
|
|
|
gst::EventView::Caps(c) => {
|
|
|
|
let caps = c.caps_owned();
|
|
|
|
|
2023-02-08 15:41:53 +00:00
|
|
|
let audio_info = match audio_info_from_caps(&caps) {
|
2022-10-12 13:35:11 +00:00
|
|
|
Ok(ai) => ai,
|
|
|
|
Err(e) => {
|
|
|
|
gst::error!(CAT, imp: self, "Failed to parse audio caps: {}", e);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2023-10-25 16:26:58 +00:00
|
|
|
let duration = duration_from_caps(&caps);
|
|
|
|
|
2022-10-12 13:35:11 +00:00
|
|
|
let mut state = self.state.lock();
|
|
|
|
state.in_caps = Some(caps);
|
|
|
|
state.in_audio_info = audio_info;
|
2023-10-25 16:26:58 +00:00
|
|
|
state.in_duration = duration;
|
2022-10-12 13:35:11 +00:00
|
|
|
}
|
|
|
|
|
2023-02-09 09:03:46 +00:00
|
|
|
gst::EventView::Gap(_) => {
|
|
|
|
gst::debug!(CAT, imp: self, "Got gap event");
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2022-10-12 13:35:11 +00:00
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
|
livesync: Fix queueing
The logic of the element requires the next buffer to be available
immediately after we are done pushing the previous, otherwise we insert
a repeat.
Making the src loop handle events and queries broke this, as upstream is
almost guaranteed not to deliver a buffer in time if we allow non-buffer
items to block upstream's push.
To fix this, replace our single-item `Option` with a `VecDeque` that we
allow to hold an unlimited number of events or queries, but only one
buffer at a time.
In addition, the code was confused about the current caps and segment.
This wasn't an issue before making the src loop handle events and
queries, as only the sinkpad cared about the current segment, using it
to buffers received, and only the srcpad cared about the current caps,
sending it just before sending the next received buffer.
Now the sinkpad cares about caps (through `update_fallback_duration`)
and the srcpad cares about the segment (when not in single-segment
mode).
Fix this by
- making `in_caps` always hold the current caps of the sinkpad,
- adding `pending_caps`, which is used by the srcpad to store
caps to be sent with the next received buffer,
- adding `in_segment`, holding the current segment of the sinkpad,
- adding `pending_segment`, which is used by the srcpad to store
the segment to be sent with the next received buffer,
- adding `out_segment`, holding the current segment of the srcpad.
Maybe a fix for
https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/issues/298.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/merge_requests/1082>
2023-02-09 09:04:47 +00:00
|
|
|
if !event.is_serialized() {
|
|
|
|
return gst::Pad::event_default(pad, Some(&*self.obj()), event);
|
|
|
|
}
|
2022-10-12 13:35:11 +00:00
|
|
|
|
livesync: Fix queueing
The logic of the element requires the next buffer to be available
immediately after we are done pushing the previous, otherwise we insert
a repeat.
Making the src loop handle events and queries broke this, as upstream is
almost guaranteed not to deliver a buffer in time if we allow non-buffer
items to block upstream's push.
To fix this, replace our single-item `Option` with a `VecDeque` that we
allow to hold an unlimited number of events or queries, but only one
buffer at a time.
In addition, the code was confused about the current caps and segment.
This wasn't an issue before making the src loop handle events and
queries, as only the sinkpad cared about the current segment, using it
to buffers received, and only the srcpad cared about the current caps,
sending it just before sending the next received buffer.
Now the sinkpad cares about caps (through `update_fallback_duration`)
and the srcpad cares about the segment (when not in single-segment
mode).
Fix this by
- making `in_caps` always hold the current caps of the sinkpad,
- adding `pending_caps`, which is used by the srcpad to store
caps to be sent with the next received buffer,
- adding `in_segment`, holding the current segment of the sinkpad,
- adding `pending_segment`, which is used by the srcpad to store
the segment to be sent with the next received buffer,
- adding `out_segment`, holding the current segment of the srcpad.
Maybe a fix for
https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/issues/298.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/merge_requests/1082>
2023-02-09 09:04:47 +00:00
|
|
|
let mut state = self.state.lock();
|
2023-02-08 19:33:29 +00:00
|
|
|
|
|
|
|
if is_restart {
|
2023-10-24 21:53:17 +00:00
|
|
|
state.eos = false;
|
|
|
|
|
2023-02-08 19:33:29 +00:00
|
|
|
if state.srcresult == Err(gst::FlowError::Eos) {
|
2023-10-24 21:53:17 +00:00
|
|
|
if let Err(e) = self.start_src_task(&mut state) {
|
|
|
|
gst::error!(CAT, imp: self, "Failed to start task: {e}");
|
|
|
|
return false;
|
|
|
|
}
|
2023-02-08 19:33:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if state.eos {
|
|
|
|
gst::trace!(CAT, imp: self, "Refusing event, we are EOS: {:?}", event);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if is_eos {
|
|
|
|
state.eos = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Err(err) = state.srcresult {
|
|
|
|
// Following GstQueue's behavior:
|
|
|
|
// > For EOS events, that are not followed by data flow, we still
|
|
|
|
// > return FALSE here though and report an error.
|
|
|
|
if is_eos && !matches!(err, gst::FlowError::Flushing | gst::FlowError::Eos) {
|
|
|
|
self.flow_error(err);
|
|
|
|
}
|
|
|
|
|
livesync: Fix queueing
The logic of the element requires the next buffer to be available
immediately after we are done pushing the previous, otherwise we insert
a repeat.
Making the src loop handle events and queries broke this, as upstream is
almost guaranteed not to deliver a buffer in time if we allow non-buffer
items to block upstream's push.
To fix this, replace our single-item `Option` with a `VecDeque` that we
allow to hold an unlimited number of events or queries, but only one
buffer at a time.
In addition, the code was confused about the current caps and segment.
This wasn't an issue before making the src loop handle events and
queries, as only the sinkpad cared about the current segment, using it
to buffers received, and only the srcpad cared about the current caps,
sending it just before sending the next received buffer.
Now the sinkpad cares about caps (through `update_fallback_duration`)
and the srcpad cares about the segment (when not in single-segment
mode).
Fix this by
- making `in_caps` always hold the current caps of the sinkpad,
- adding `pending_caps`, which is used by the srcpad to store
caps to be sent with the next received buffer,
- adding `in_segment`, holding the current segment of the sinkpad,
- adding `pending_segment`, which is used by the srcpad to store
the segment to be sent with the next received buffer,
- adding `out_segment`, holding the current segment of the srcpad.
Maybe a fix for
https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/issues/298.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/merge_requests/1082>
2023-02-09 09:04:47 +00:00
|
|
|
return false;
|
|
|
|
}
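
        // The queue may hold any number of pending events and queries, but
        // only one buffer at a time (see `buffer_queued` in `sink_chain`).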
        gst::trace!(CAT, imp: self, "Queueing {:?}", event);
        state.queue.push_back(Item::Event(event));
        self.cond.notify_all();
        true
    }

    fn src_event(&self, pad: &gst::Pad, mut event: gst::Event) -> bool {
        {
            let state = self.state.lock();
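            // In single-segment mode the buffers we push are delayed by the
            // configured latency (see `sink_chain`), so adjust the
            // running-time offset of upstream-bound events to compensate.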
            if state.single_segment {
                let event = event.make_mut();
                let latency = state.latency.nseconds() as i64;
                event.set_running_time_offset(event.running_time_offset() - latency);
            }
        }

        match event.view() {
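            // A reconfigure while we sit in NOT_LINKED is the signal to try
            // restarting the paused src task.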
            gst::EventView::Reconfigure(_) => {
                {
                    let mut state = self.state.lock();
                    if state.srcresult == Err(gst::FlowError::NotLinked) {
                        if let Err(e) = self.start_src_task(&mut state) {
                            gst::error!(CAT, imp: self, "Failed to start task: {e}");
                        }
                    }
                }

                self.sinkpad.push_event(event)
            }

            _ => gst::Pad::event_default(pad, Some(&*self.obj()), event),
        }
    }

    fn sink_query(&self, pad: &gst::Pad, query: &mut gst::QueryRef) -> bool {
        if query.is_serialized() {
            let (sender, receiver) = mpsc::sync_channel(1);

            let mut state = self.state.lock();
            if state.srcresult.is_err() {
                return false;
            }

            gst::trace!(CAT, imp: self, "Queueing {:?}", query);
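            // Serialized queries must stay ordered with buffers and events,
            // so hand this one to the src loop and block until it sends the
            // peer's answer back over the channel.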
            state
                .queue
                .push_back(Item::Query(std::ptr::NonNull::from(query), sender));
            self.cond.notify_all();
            drop(state);

            // If the sender gets dropped, we will also unblock
            receiver.recv().unwrap_or(false)
        } else {
            gst::Pad::query_default(pad, Some(&*self.obj()), query)
        }
    }

    fn src_query(&self, pad: &gst::Pad, query: &mut gst::QueryRef) -> bool {
        match query.view_mut() {
            gst::QueryViewMut::Latency(_) => {
                if !gst::Pad::query_default(pad, Some(&*self.obj()), query) {
                    return false;
                }

                let q = match query.view_mut() {
                    gst::QueryViewMut::Latency(q) => q,
                    _ => unreachable!(),
                };

                let mut state = self.state.lock();
                let latency = state.latency;

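                // Report live and add our own latency on top of upstream's;
                // remember upstream's minimum so the sink side does not have
                // to re-query it.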
                let (_live, min, max) = q.result();
                q.set(true, min + latency, max.map(|max| max + latency));

                state.upstream_latency = Some(min);
                true
            }

            _ => gst::Pad::query_default(pad, Some(&*self.obj()), query),
        }
    }

    fn sink_chain(
        &self,
        _pad: &gst::Pad,
        mut buffer: gst::Buffer,
    ) -> Result<gst::FlowSuccess, gst::FlowError> {
        gst::trace!(CAT, imp: self, "Incoming {:?}", buffer);

        let mut state = self.state.lock();

        if state.eos {
            gst::debug!(CAT, imp: self, "Refusing buffer, we are EOS");
            return Err(gst::FlowError::Eos);
        }

        if state.upstream_latency.is_none() {
            gst::debug!(CAT, imp: self, "Have no upstream latency yet, querying");
            let mut q = gst::query::Latency::new();
            if MutexGuard::unlocked(&mut state, || self.sinkpad.peer_query(&mut q)) {
                let (live, min, max) = q.result();

                gst::debug!(
                    CAT,
                    imp: self,
                    "Latency query response: live {} min {} max {}",
                    live,
                    min,
                    max.display()
                );

                state.upstream_latency = Some(min);
            } else {
                gst::warning!(
                    CAT,
                    imp: self,
                    "Can't query upstream latency -- assuming zero"
                );
                state.upstream_latency = Some(gst::ClockTime::ZERO);
            }
        }
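
        // Allow only one buffer in the queue at a time: block upstream here
        // until the src loop has taken the previous one, so the next buffer
        // is already available right after the last one was pushed.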
        while state.srcresult.is_ok() && state.buffer_queued {
            self.cond.wait(&mut state);
        }
        state.srcresult?;

        let buf_mut = buffer.make_mut();

        if buf_mut.pts().is_none() {
            gst::warning!(CAT, imp: self, "Incoming buffer has no timestamps");
        }

        if let Some(audio_info) = &state.in_audio_info {
            let Some(calc_duration) = audio_info
                .convert::<Option<gst::ClockTime>>(gst::format::Bytes::from_usize(buf_mut.size()))
                .flatten()
            else {
                gst::error!(
                    CAT,
                    imp: self,
                    "Failed to calculate duration of {:?}",
                    buf_mut,
                );
                return Err(gst::FlowError::Error);
            };
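
            // Compare the claimed duration against the one implied by the
            // buffer size and warn if they differ by more than one sample;
            // the computed duration is what ends up on the buffer either way.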
            if let Some(buf_duration) = buf_mut.duration() {
                let diff = if buf_duration < calc_duration {
                    calc_duration - buf_duration
                } else {
                    buf_duration - calc_duration
                };

                let sample_duration = gst::ClockTime::SECOND
                    .mul_div_round(1, audio_info.rate().into())
                    .unwrap();

                if diff > sample_duration {
                    gst::warning!(
                        CAT,
                        imp: self,
                        "Correcting duration on audio buffer from {} to {}",
                        buf_duration,
                        calc_duration,
                    );
                }
            } else {
                gst::debug!(CAT, imp: self, "Patching incoming buffer with duration {calc_duration}");
            }

            buf_mut.set_duration(calc_duration);
        } else if buf_mut.duration().is_none() {
            let duration = state.in_duration.map_or(DEFAULT_DURATION, |dur| {
                dur.clamp(MINIMUM_DURATION, MAXIMUM_DURATION)
            });

            gst::debug!(CAT, imp: self, "Patching incoming buffer with duration {duration}");
            buf_mut.set_duration(duration);
        }

        // At this stage we should really really have a segment
        let segment = state.in_segment.as_ref().ok_or_else(|| {
            gst::error!(CAT, imp: self, "Missing segment");
            gst::FlowError::Error
        })?;
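
        // In single-segment mode, rewrite DTS/PTS as running time plus
        // SEGMENT_OFFSET and delay both by the configured latency; a PTS
        // that cannot be mapped falls back to the current running time.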
        if state.single_segment {
            let dts = segment
                .to_running_time_full(buf_mut.dts())
                .map(|r| r + SEGMENT_OFFSET)
                .and_then(|r| r.positive());
            let pts = segment
                .to_running_time_full(buf_mut.pts())
                .map(|r| r + SEGMENT_OFFSET)
                .and_then(|r| r.positive())
                .or_else(|| {
                    self.obj()
                        .current_running_time()
                        .map(|r| r + SEGMENT_OFFSET)
                });

            buf_mut.set_dts(dts.map(|t| t + state.latency));
            buf_mut.set_pts(pts.map(|t| t + state.latency));
        }

        let timestamp = state.ts_range(buf_mut, segment);
        let lateness = self.buffer_is_backwards(&state, timestamp);

        if lateness == BufferLateness::LateUnderThreshold {
            gst::debug!(CAT, imp: self, "Discarding late {:?}", buf_mut);
            state.num_drop += 1;
            return Ok(gst::FlowSuccess::Ok);
        }

        gst::trace!(CAT, imp: self, "Queueing {:?} ({:?})", buffer, lateness);
        state.queue.push_back(Item::Buffer(buffer, lateness));
        state.buffer_queued = true;
        state.in_timestamp = timestamp;
        self.cond.notify_all();

        Ok(gst::FlowSuccess::Ok)
    }

    fn start_src_task(&self, state: &mut State) -> Result<(), glib::BoolError> {
        state.srcresult = Ok(gst::FlowSuccess::Ok);

        let imp = self.ref_counted();
        let ret = self.srcpad.start_task(move || imp.src_loop());

        if ret.is_err() {
            state.srcresult = Err(gst::FlowError::Error);
        }

        ret
    }

    fn src_loop(&self) {
        let Err(mut err) = self.src_loop_inner() else {
            return;
        };
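
        // The loop only exits on error; record our flow return for other
        // threads unless one of them already set it (e.g. to Flushing), and
        // wake up anyone waiting on the condvar.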
        let eos = {
            let mut state = self.state.lock();

            match state.srcresult {
                // Can be set to Flushing by another thread
                Err(e) => err = e,

                // Communicate our flow return
                Ok(_) => state.srcresult = Err(err),
            }
            state.clock_id = None;
            self.cond.notify_all();

            state.eos
        };

        // Following GstQueue's behavior:
        // > let app know about us giving up if upstream is not expected to do so
        // > EOS is already taken care of elsewhere
        if eos && !matches!(err, gst::FlowError::Flushing | gst::FlowError::Eos) {
            self.flow_error(err);
            self.srcpad.push_event(gst::event::Eos::new());
        }

        gst::log!(CAT, imp: self, "Loop stopping");
        let _ = self.srcpad.pause_task();
    }

    fn src_loop_inner(&self) -> Result<gst::FlowSuccess, gst::FlowError> {
        let mut state = self.state.lock();
        while state.srcresult.is_ok()
            && (!state.playing || (state.queue.is_empty() && state.out_buffer.is_none()))
        {
            self.cond.wait(&mut state);
        }
        state.srcresult?;
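
        // Once we have output a buffer, pace ourselves: wait until the clock
        // reaches the end of its timestamp range before producing the next
        // item, so a missing buffer turns into a repeat at the right time.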
        if let Some(out_timestamp) = state.out_timestamp {
            let sync_ts = out_timestamp.end;

            let element = self.obj();

            let base_time = element.base_time().ok_or_else(|| {
                gst::error!(CAT, imp: self, "Missing base time");
                gst::FlowError::Flushing
            })?;

            let clock = element.clock().ok_or_else(|| {
                gst::error!(CAT, imp: self, "Missing clock");
                gst::FlowError::Flushing
            })?;

            let clock_id = clock.new_single_shot_id(base_time + sync_ts);
            state.clock_id = Some(clock_id.clone());

            gst::trace!(
                CAT,
                imp: self,
                "Waiting for clock to reach {}",
                clock_id.time(),
            );

            let (res, jitter) = MutexGuard::unlocked(&mut state, || clock_id.wait());
            gst::trace!(CAT, imp: self, "Clock returned {res:?} {}{}",
                if jitter.is_negative() {"-"} else {""},
                gst::ClockTime::from_nseconds(jitter.unsigned_abs()));

            if res == Err(gst::ClockError::Unscheduled) {
                return Err(gst::FlowError::Flushing);
            }

            state.srcresult?;
            state.clock_id = None;
        }
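
        // Take the next item; `None` here means upstream did not deliver in
        // time and we will have to repeat the previous output buffer.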
        let in_item = state.queue.pop_front();
        gst::trace!(CAT, imp: self, "Unqueueing {:?}", in_item);

        let in_buffer = match in_item {
            None => None,

            Some(Item::Buffer(buffer, lateness)) => {
                if self.buffer_is_early(&state, state.in_timestamp) {
                    // Try this buffer again on the next iteration
                    state.queue.push_front(Item::Buffer(buffer, lateness));
                    None
                } else {
                    state.buffer_queued = false;
                    self.cond.notify_all();
                    Some((buffer, lateness))
                }
            }

            Some(Item::Event(event)) => {
                let mut push = true;

                match event.view() {
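                    // Caps and segment events are not forwarded right away;
                    // they are stashed as pending and pushed just ahead of
                    // the next buffer that needs them.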
                    gst::EventView::Segment(e) => {
                        let segment = e.segment().downcast_ref().unwrap();
                        gst::debug!(CAT, imp: self, "pending {segment:?}");
                        state.pending_segment = Some(segment.clone());
                        push = false;
                    }

                    gst::EventView::Eos(_) => {
                        state.out_buffer = None;
                        state.out_buffer_duplicate = false;
                        state.out_timestamp = None;
                        state.srcresult = Err(gst::FlowError::Eos);
                    }

                    gst::EventView::Caps(e) => {
                        state.pending_caps = Some(e.caps_owned());
                        push = false;
                    }

                    _ => {}
                }

                self.cond.notify_all();
                drop(state);

                if push {
                    self.srcpad.push_event(event);
                }

                return Ok(gst::FlowSuccess::Ok);
            }

            Some(Item::Query(mut query, sender)) => {
                self.cond.notify_all();
                drop(state);

                // SAFETY: The other thread is waiting for us to handle the query
                let res = self.srcpad.peer_query(unsafe { query.as_mut() });
                sender.send(res).ok();

                return Ok(gst::FlowSuccess::Ok);
            }
        };

        let mut caps = None;
        let mut segment = None;

        match in_buffer {
            Some((mut buffer, BufferLateness::OnTime)) => {
                state.num_in += 1;

                if state.out_buffer.is_none() || state.out_buffer_duplicate {
                    // We are just starting or done bridging a gap
                    buffer.make_mut().set_flags(gst::BufferFlags::DISCONT);
                }

                state.out_buffer = Some(buffer);
                state.out_buffer_duplicate = false;
                state.out_timestamp = state.in_timestamp;

                caps = state.pending_caps.take();
                segment = state.pending_segment.take();
            }

            Some((buffer, BufferLateness::LateOverThreshold)) if !state.pending_events() => {
                gst::debug!(CAT, imp: self, "Accepting late {:?}", buffer);
                state.num_in += 1;

                self.patch_output_buffer(&mut state, Some(buffer))?;
            }

            Some((buffer, BufferLateness::LateOverThreshold)) => {
                // Cannot accept late-over-threshold buffers while we have pending events
                gst::debug!(CAT, imp: self, "Discarding late {:?}", buffer);
                state.num_drop += 1;

                self.patch_output_buffer(&mut state, None)?;
            }

            None => {
                self.patch_output_buffer(&mut state, None)?;
            }

            Some((_, BufferLateness::LateUnderThreshold)) => {
                // Is discarded before queueing
                unreachable!();
            }
        }

        let buffer = state.out_buffer.clone().unwrap();
        let sync_ts = state
            .out_timestamp
            .map_or(gst::ClockTime::ZERO, |t| t.start);

        if let Some(caps) = caps {
            gst::debug!(CAT, imp: self, "Sending new caps: {}", caps);

            let event = gst::event::Caps::new(&caps);
            MutexGuard::unlocked(&mut state, || self.srcpad.push_event(event));
            state.srcresult?;

            state.out_audio_info = audio_info_from_caps(&caps).unwrap();
            state.out_duration = duration_from_caps(&caps);
        }

        if let Some(mut segment) = segment {
            if !state.single_segment {
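                // Keep the outgoing segment open-ended; repeats may have to
                // run past the upstream segment's stop position.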
                if let Some(stop) = segment.stop() {
                    gst::debug!(CAT, imp: self, "Removing stop {} from outgoing segment", stop);
                    segment.set_stop(gst::ClockTime::NONE);
                }

                gst::debug!(CAT, imp: self, "Forwarding segment: {:?}", segment);

                let event = gst::event::Segment::new(&segment);
                MutexGuard::unlocked(&mut state, || self.srcpad.push_event(event));
                state.srcresult?;
            } else if state.out_segment.is_none() {
                // Create live segment
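                // Base and time start at the first output timestamp, while
                // start and position carry the SEGMENT_OFFSET applied to the
                // rewritten buffer timestamps.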
                let mut live_segment = gst::FormattedSegment::<gst::ClockTime>::new();
                live_segment.set_start(sync_ts + SEGMENT_OFFSET);
                live_segment.set_base(sync_ts);
                live_segment.set_time(sync_ts);
                live_segment.set_position(sync_ts + SEGMENT_OFFSET);

                gst::debug!(CAT, imp: self, "Sending new segment: {:?}", live_segment);

                let event = gst::event::Segment::new(&live_segment);
                MutexGuard::unlocked(&mut state, || self.srcpad.push_event(event));
                state.srcresult?;
            }

            state.out_segment = Some(segment);
        }

        state.num_out += 1;

        drop(state);

        gst::trace!(CAT, imp: self, "Pushing {buffer:?}");
        self.srcpad.push(buffer)
    }

    fn buffer_is_backwards(&self, state: &State, timestamp: Option<Timestamps>) -> BufferLateness {
        let timestamp = match timestamp {
            Some(t) => t,
            None => return BufferLateness::OnTime,
        };

        let out_timestamp = match state.out_timestamp {
            Some(t) => t,
            None => return BufferLateness::OnTime,
        };

        if timestamp.end > out_timestamp.end {
            return BufferLateness::OnTime;
        }
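
        // The buffer ends at or before what we already pushed, so it is
        // late; decide whether it can still be used to bridge the gap
        // (over threshold) or has to be dropped (under threshold).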
        gst::debug!(
            CAT,
            imp: self,
            "Timestamp regresses: buffer ends at {}, expected {}",
            timestamp.end,
            out_timestamp.end,
        );

        let late_threshold = match state.late_threshold {
            Some(gst::ClockTime::ZERO) => return BufferLateness::LateOverThreshold,
            Some(t) => t,
            None => return BufferLateness::LateUnderThreshold,
        };

        let in_timestamp = match state.in_timestamp {
            Some(t) => t,
            None => return BufferLateness::LateUnderThreshold,
        };

        if timestamp.start > in_timestamp.end + late_threshold {
            BufferLateness::LateOverThreshold
        } else {
            BufferLateness::LateUnderThreshold
        }
    }

    fn buffer_is_early(&self, state: &State, timestamp: Option<Timestamps>) -> bool {
        let timestamp = match timestamp {
            Some(t) => t,
            None => return false,
        };

        let out_timestamp = match state.out_timestamp {
            Some(t) => t,
            None => return false,
        };

        // When out_timestamp is set, we also have an out_buffer
        let slack = state.out_buffer.as_deref().unwrap().duration().unwrap();

        if timestamp.start < out_timestamp.end + slack {
            return false;
        }

        // This buffer would start beyond another buffer duration after our
        // last emitted buffer ended
        gst::debug!(
            CAT,
            imp: self,
            "Timestamp is too early: buffer starts at {}, expected {}",
            timestamp.start,
            out_timestamp.end,
        );

        true
    }

    /// Produces a message like GST_ELEMENT_FLOW_ERROR does
    fn flow_error(&self, err: gst::FlowError) {
        let details = gst::Structure::builder("details")
            .field("flow-return", err.into_glib())
            .build();
        gst::element_imp_error!(
            self,
            gst::StreamError::Failed,
            ("Internal data flow error."),
            ["streaming task paused, reason {} ({:?})", err, err],
            details: details
        );
    }

    /// Patches the output buffer for repeating, setting out_buffer, out_buffer_duplicate and
    /// out_timestamp
    fn patch_output_buffer(
        &self,
        state: &mut State,
        source: Option<gst::Buffer>,
    ) -> Result<(), gst::FlowError> {
        let out_buffer = state.out_buffer.as_mut().unwrap();
        let mut duplicate = state.out_buffer_duplicate;
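
        // The repeat continues seamlessly where the previous buffer ended:
        // advance both timestamps by exactly one buffer duration.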
        let duration = out_buffer.duration().unwrap();
        let dts = out_buffer.dts().map(|t| t + duration);
        let pts = out_buffer.pts().map(|t| t + duration);

        if let Some(source) = source {
            gst::debug!(CAT, imp: self, "Repeating {:?} using {:?}", out_buffer, source);
            *out_buffer = source;
            duplicate = false;
        } else {
            gst::debug!(CAT, imp: self, "Repeating {:?}", out_buffer);
        }

        let buffer = out_buffer.make_mut();

        if !duplicate {
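            // First repeat of this buffer: normalize its duration if needed
            // and, for raw audio, overwrite the payload with silence.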
            let duration_is_valid =
                (MINIMUM_DURATION..=MAXIMUM_DURATION).contains(&buffer.duration().unwrap());

            if state.out_duration.is_some() || !duration_is_valid {
                // Resize the buffer if caps gave us a duration
                // or the current duration is unreasonable
                let duration = state.out_duration.map_or(DEFAULT_DURATION, |dur| {
                    dur.clamp(MINIMUM_DURATION, MAXIMUM_DURATION)
                });

                if let Some(audio_info) = &state.out_audio_info {
                    let Some(size) = audio_info
                        .convert::<Option<gst::format::Bytes>>(duration)
                        .flatten()
                        .and_then(|bytes| usize::try_from(bytes).ok())
                    else {
                        gst::error!(CAT, imp: self, "Failed to calculate size of repeat buffer");
                        return Err(gst::FlowError::Error);
                    };

                    buffer.replace_all_memory(gst::Memory::with_size(size));
                }

                buffer.set_duration(duration);
                gst::debug!(CAT, imp: self, "Patched output buffer duration to {duration}");
            }

            if let Some(audio_info) = &state.out_audio_info {
                let mut map_info = buffer.map_writable().map_err(|e| {
                    gst::error!(CAT, imp: self, "Failed to map buffer: {}", e);
                    gst::FlowError::Error
                })?;
                audio_info
                    .format_info()
                    .fill_silence(map_info.as_mut_slice());
            }
        }

        buffer.set_dts(dts);
        buffer.set_pts(pts);
        buffer.set_flags(gst::BufferFlags::GAP);
        buffer.unset_flags(gst::BufferFlags::DISCONT);

        state.out_buffer_duplicate = true;
        state.out_timestamp = state.ts_range(
            state.out_buffer.as_ref().unwrap(),
            state.out_segment.as_ref().unwrap(),
        );
        state.num_duplicate += 1;
        Ok(())
    }
}