Move Ding to DriverConnect

2023-03-12 13:10:05 -04:00
parent f6020bb859
commit 7797dbf89a

@@ -4,16 +4,18 @@ use serenity::{async_trait,
     model::prelude::{ChannelId, Guild},
     prelude::{Context, Mutex}};
 use songbird::{EventHandler, Event, EventContext,
-    model::payload::{Speaking, ClientDisconnect}, ffmpeg, create_player, Call, CoreEvent};
+    model::payload::{Speaking, ClientDisconnect}, ffmpeg, create_player, Call, CoreEvent, events::context_data::ConnectData};
 
-struct Receiver;
+struct Receiver {
+    call : Arc<Mutex<Call>>,
+}
 
 impl Receiver {
-    pub fn new() -> Self {
+    pub fn new(call: Arc<Mutex<Call>>) -> Self {
         // You can manage state here, such as a buffer of audio packet bytes so
         // you can later store them in intervals.
         Self {
+            call
         }
     }
 }
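
The comment in `Receiver::new` notes that per-receiver state (for example buffered audio, or the SSRC-to-user map discussed in the handler below) can live on this struct now that it is no longer a unit struct. A minimal sketch of that idea, using the same serenity and songbird types as the commit; `StatefulReceiver` and `ssrc_to_user` are hypothetical names, not part of this change:

use std::{collections::HashMap, sync::Arc};
use serenity::prelude::Mutex;
use songbird::Call;

// Hypothetical extension of the Receiver from this commit: alongside the call
// handle, hold a shared SSRC -> user id map so that later RTP packets (which
// carry only the SSRC) can be attributed to a user.
struct StatefulReceiver {
    call: Arc<Mutex<Call>>,
    ssrc_to_user: Arc<Mutex<HashMap<u32, u64>>>,
}

impl StatefulReceiver {
    pub fn new(call: Arc<Mutex<Call>>, ssrc_to_user: Arc<Mutex<HashMap<u32, u64>>>) -> Self {
        Self { call, ssrc_to_user }
    }
}

Because `join` below registers a separate receiver per event class, cloning one shared map handle into each of them would let the SpeakingStateUpdate handler record the mapping and the VoicePacket handler read it.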
@@ -22,76 +24,91 @@ impl Receiver {
 impl EventHandler for Receiver {
     #[allow(unused_variables)]
     async fn act(&self, ctx: &EventContext<'_>) -> Option<Event> {
         use EventContext as Ctx;
         match ctx {
             Ctx::SpeakingStateUpdate(
                 Speaking {speaking, ssrc, user_id, ..}
             ) => {
                 // Discord voice calls use RTP, where every sender uses a randomly allocated
                 // *Synchronisation Source* (SSRC) to allow receivers to tell which audio
                 // stream a received packet belongs to. As this number is not derived from
                 // the sender's user_id, only Discord Voice Gateway messages like this one
                 // inform us about which random SSRC a user has been allocated. Future voice
                 // packets will contain *only* the SSRC.
                 //
                 // You can implement logic here so that you can differentiate users'
                 // SSRCs and map the SSRC to the User ID and maintain this state.
                 // Using this map, you can map the `ssrc` in `voice_packet`
                 // to the user ID and handle their audio packets separately.
                 println!(
                     "Speaking state update: user {:?} has SSRC {:?}, using {:?}",
                     user_id,
                     ssrc,
                     speaking,
                 );
             },
             Ctx::SpeakingUpdate(data) => {
                 // You can implement logic here which reacts to a user starting
                 // or stopping speaking, and to map their SSRC to User ID.
                 println!(
                     "Source {} has {} speaking.",
                     data.ssrc,
                     if data.speaking {"started"} else {"stopped"},
                 );
             },
             Ctx::VoicePacket(data) => {
                 // An event which fires for every received audio packet,
                 // containing the decoded data.
                 if let Some(audio) = data.audio {
                     println!("Audio packet's first 5 samples: {:?}", audio.get(..5.min(audio.len())));
                     println!(
                         "Audio packet sequence {:05} has {:04} bytes (decompressed from {}), SSRC {}",
                         data.packet.sequence.0,
                         audio.len() * std::mem::size_of::<i16>(),
                         data.packet.payload.len(),
                         data.packet.ssrc,
                     );
                 } else {
                     println!("RTP packet, but no audio. Driver may not be configured to decode.");
                 }
             },
             Ctx::RtcpPacket(data) => {
                 // An event which fires for every received rtcp packet,
                 // containing the call statistics and reporting information.
                 println!("RTCP packet received: {:?}", data.packet);
             },
             Ctx::ClientDisconnect(
                 ClientDisconnect {user_id, ..}
             ) => {
                 // You can implement your own logic here to handle a user who has left the
                 // voice channel e.g., finalise processing of statistics etc.
                 // You will typically need to map the User ID to their SSRC; observed when
                 // first speaking.
                 println!("Client disconnected: user {:?}", user_id);
             },
+            Ctx::DriverConnect(
+                ConnectData { channel_id, ..}
+            ) => {
+                match channel_id {
+                    Some(chan) => {
+                        let ding_src =
+                            std::env::var("DING_SOUND").expect("DING not found in DING_SOUND");
+                        let ding = ffmpeg(ding_src).await.expect("no ding.");
+                        let (audio, handle) = create_player(ding);
+                        let mut call = self.call.lock().await;
+                        call.play(audio);
+                    },
+                    None => {}
+                }
+            },
             _ => {
                 // We won't be registering this struct for any more event classes.
                 unimplemented!()
             }
         }
         None
     }
 }
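
In the new DriverConnect arm, the ding source is resolved and decoded inside the event handler, and each `expect` will panic the handler task if `DING_SOUND` is unset or the file cannot be opened. A small sketch of the same loading step written to fail quietly instead; `load_ding` is a hypothetical helper, not part of this commit:

use songbird::{create_player, ffmpeg, tracks::{Track, TrackHandle}};

// Sketch only: load the ding source named by the DING_SOUND environment
// variable into a playable track, returning None instead of panicking when
// the variable or the file is missing.
async fn load_ding() -> Option<(Track, TrackHandle)> {
    let path = std::env::var("DING_SOUND").ok()?;
    let source = ffmpeg(path).await.ok()?;
    Some(create_player(source))
}

The handler arm could then lock the call and play the track only when this returns `Some`, leaving the driver connected but silent when the sound is missing.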
@@ -99,46 +116,46 @@ pub async fn join(ctx: Context, guild: Guild, cid: ChannelId) -> Option<Arc<Mute
     let manager = songbird::get(&ctx).await.expect("Songbird: intialization");
     let (call, status) = manager.join(guild.id, cid).await;
     match status {
         Ok(_) => {
+            let call_handle = call.clone();
             {
                 let mut call = call.lock().await;
                 call.add_global_event(
                     CoreEvent::SpeakingUpdate.into(),
-                    Receiver::new(),
+                    Receiver::new(call_handle.clone()),
                 );
                 call.add_global_event(
                     CoreEvent::VoicePacket.into(),
-                    Receiver::new(),
+                    Receiver::new(call_handle.clone()),
                 );
                 call.add_global_event(
                     CoreEvent::RtcpPacket.into(),
-                    Receiver::new(),
+                    Receiver::new(call_handle.clone()),
                 );
                 call.add_global_event(
                     CoreEvent::ClientDisconnect.into(),
-                    Receiver::new(),
+                    Receiver::new(call_handle.clone()),
                 );
                 call.add_global_event(
                     CoreEvent::ClientDisconnect.into(),
-                    Receiver::new(),
+                    Receiver::new(call_handle.clone()),
                 );
-                let ding_src =
-                    std::env::var("DING_SOUND").expect("DING not found in DING_SOUND");
-                let ding = ffmpeg(ding_src).await.expect("no ding.");
-                let (audio, handle) = create_player(ding);
-                call.play(audio);
+                call.add_global_event(
+                    CoreEvent::DriverConnect.into(),
+                    Receiver::new(call_handle.clone()),
+                );
             }
             return Some(call);
         }
         Err(_err) => {
             println!("Error joining channel");
         }
     }
     None
 }
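
For reference, a hypothetical call site for `join` as declared above, with the guild and voice channel already resolved by the caller (for example inside a serenity command handler); `handle_join_command` is an illustrative name only:

use serenity::{model::prelude::{ChannelId, Guild}, prelude::Context};

// Hypothetical caller: joins the given voice channel and relies on the
// DriverConnect event registered in `join` to play the ding once connected.
async fn handle_join_command(ctx: Context, guild: Guild, voice_channel: ChannelId) {
    match join(ctx, guild, voice_channel).await {
        Some(_call) => println!("Joined; receiver events registered."),
        None => println!("Could not join the voice channel."),
    }
}

With the DriverConnect registration added above, the ding plays once the driver reports that the voice connection is up, rather than immediately inside `join`.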