diff --git a/src/main.rs b/src/main.rs
index ceec708..f52f5c6 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -42,7 +42,8 @@ async fn main() -> eyre::Result<()> {
         StreamingRecognitionConfig {
             config: Some(RecognitionConfig {
                 encoding: AudioEncoding::Flac.into(), // matching current example file
-                sample_rate_hertz: 48000, // matching current example file
+                sample_rate_hertz: 44_100, // matching current example file
+                audio_channel_count: 2,
                 language_code: "en-US".to_string(), // we only support en-US to start with
                 model: "video".to_string(), // dictate does not set this option
                 use_enhanced: true, // dictate does not set this option
@@ -59,18 +60,19 @@ async fn main() -> eyre::Result<()> {
         yield request;
         let file = tokio::fs::File::open("some-audio.flac").await.unwrap();
         let mut audio_file = tokio::io::BufReader::new(file);
-        // read file chunk
-        let mut buffer = [0; 1024 * 5];
-        while audio_file.read(&mut buffer).await.is_ok() {
-            // send to server
-            let request = StreamingRecognizeRequest {
-                streaming_request: Some(StreamingRequest::AudioContent(
-                    BytesMut::from(buffer.as_slice()).freeze(),
-                )),
-            };
-            yield request;
-            debug!("added a buffer to the sender queue");
-        }
+        // read file chunk
+        let mut buffer = [0; 1024 * 50];
+        while let Ok(n) = audio_file.read(&mut buffer[..]).await {
+            // send to server
+            let request = StreamingRecognizeRequest {
+                streaming_request: Some(StreamingRequest::AudioContent(
+                    BytesMut::from(&buffer.as_slice()[..n]).freeze(),
+                )),
+            };
+            yield request;
+            // debug!("added a buffer to the sender queue: {} bytes", n);
+            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+        }
     };
     let response = client
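
A hedged aside on the new read loop, not part of the change above: tokio's AsyncReadExt::read returns Ok(0) once the file is exhausted, so `while let Ok(n) = ...` never terminates and keeps yielding empty 100 ms-paced chunks after EOF. Below is a minimal, self-contained sketch of a loop that stops at end of file. The file name and the 50 KiB chunk size are taken from the diff; the standalone main and the println! are purely illustrative, since the real code would build StreamingRecognizeRequest values and yield them inside the stream block instead.

// Minimal sketch: read a file in 50 KiB chunks and stop at EOF.
// Assumes the `tokio` (with the "full" feature set) and `bytes` crates.
use bytes::BytesMut;
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let file = tokio::fs::File::open("some-audio.flac").await?;
    let mut audio_file = tokio::io::BufReader::new(file);
    let mut buffer = [0u8; 1024 * 50];
    loop {
        let n = audio_file.read(&mut buffer[..]).await?;
        if n == 0 {
            // `read` returning 0 bytes means end of file: stop streaming.
            break;
        }
        // Only the bytes actually read belong in this chunk.
        let chunk = BytesMut::from(&buffer[..n]).freeze();
        println!("read a {}-byte chunk", chunk.len());
        // Pace the chunks roughly like the diff does, one every 100 ms.
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    }
    Ok(())
}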