Fake call audio level

This commit is contained in:
2025-10-19 19:34:22 +08:00
parent 844efcda1a
commit 0622498f4e
4 changed files with 164 additions and 18 deletions

View File

@@ -49,20 +49,12 @@ sealed class CallParticipantLive with _$CallParticipantLive {
}) = _CallParticipantLive; }) = _CallParticipantLive;
bool get isSpeaking { bool get isSpeaking {
// Simple speaking detection based on audio level analysis // Use the actual audio level from WebRTC monitoring
// This is a basic implementation - in a real app you'd want more sophisticated VAD return remoteParticipant.audioLevel > 0.1; // Threshold for speaking
final stream = remoteParticipant.remoteStream;
if (stream == null) return false;
// Check if any audio tracks are enabled and have recent activity
final audioTracks = stream.getAudioTracks();
if (audioTracks.isEmpty) return false;
// For now, just return true if audio is enabled (simplified)
// In a real implementation, you'd analyze audio levels using WebRTC stats
return remoteParticipant.isAudioEnabled;
} }
double get audioLevel => remoteParticipant.audioLevel;
bool get isMuted => !remoteParticipant.isAudioEnabled; bool get isMuted => !remoteParticipant.isAudioEnabled;
bool get isScreenSharing => remoteParticipant.isVideoEnabled; // Simplified bool get isScreenSharing => remoteParticipant.isVideoEnabled; // Simplified
bool get isScreenSharingWithAudio => false; // TODO: Implement screen sharing bool get isScreenSharingWithAudio => false; // TODO: Implement screen sharing
@@ -88,6 +80,7 @@ class CallNotifier extends _$CallNotifier {
String? _roomId; String? _roomId;
String? get roomId => _roomId; String? get roomId => _roomId;
WebRTCManager? get webrtcManager => _webrtcManager;
@override @override
CallState build() { CallState build() {

View File

@@ -17,6 +17,7 @@ class WebRTCParticipant {
bool isVideoEnabled = false; bool isVideoEnabled = false;
bool isConnected = false; bool isConnected = false;
bool isLocal = false; bool isLocal = false;
double audioLevel = 0.0;
WebRTCParticipant({ WebRTCParticipant({
required this.id, required this.id,
@@ -36,6 +37,7 @@ class WebRTCManager {
final Map<String, RTCPeerConnection> _peerConnections = {}; final Map<String, RTCPeerConnection> _peerConnections = {};
MediaStream? _localStream; MediaStream? _localStream;
Timer? _audioLevelTimer;
MediaStream? get localStream => _localStream; MediaStream? get localStream => _localStream;
final StreamController<WebRTCParticipant> _participantController = final StreamController<WebRTCParticipant> _participantController =
@@ -59,13 +61,14 @@ class WebRTCManager {
await _initializeLocalStream(); await _initializeLocalStream();
_setupSignalingListeners(); _setupSignalingListeners();
await _signaling.connect(ref); await _signaling.connect(ref);
_startAudioLevelMonitoring();
} }
Future<void> _initializeLocalStream() async { Future<void> _initializeLocalStream() async {
try { try {
_localStream = await navigator.mediaDevices.getUserMedia({ _localStream = await navigator.mediaDevices.getUserMedia({
'audio': true, 'audio': true,
'video': false, 'video': true,
}); });
talker.info('[WebRTC] Local stream initialized'); talker.info('[WebRTC] Local stream initialized');
} catch (e) { } catch (e) {
@@ -295,6 +298,42 @@ class WebRTCManager {
} }
} }
Future<void> replaceMediaStream(Map<String, dynamic> constraints) async {
try {
final newStream = await navigator.mediaDevices.getUserMedia(constraints);
final newVideoTrack = newStream.getVideoTracks().firstOrNull;
final newAudioTrack = newStream.getAudioTracks().firstOrNull;
if (_localStream != null) {
final oldVideoTrack = _localStream!.getVideoTracks().firstOrNull;
final oldAudioTrack = _localStream!.getAudioTracks().firstOrNull;
// Replace tracks in all existing peer connections
for (final pc in _peerConnections.values) {
final senders = await pc.getSenders();
for (final sender in senders) {
if (newVideoTrack != null && sender.track == oldVideoTrack) {
await sender.replaceTrack(newVideoTrack);
} else if (newAudioTrack != null && sender.track == oldAudioTrack) {
await sender.replaceTrack(newAudioTrack);
}
}
}
// Stop old tracks and update local stream
for (final track in _localStream!.getTracks()) {
track.stop();
}
}
_localStream = newStream;
talker.info('[WebRTC] Media stream replaced with new constraints');
} catch (e) {
talker.error('[WebRTC] Failed to replace media stream: $e');
rethrow;
}
}
Future<void> toggleMicrophone(bool enabled) async { Future<void> toggleMicrophone(bool enabled) async {
if (_localStream != null) { if (_localStream != null) {
final audioTracks = _localStream!.getAudioTracks(); final audioTracks = _localStream!.getAudioTracks();
@@ -312,15 +351,123 @@ class WebRTCManager {
} }
} }
Future<void> switchCamera(String deviceId) async {
await replaceMediaStream({
'audio': _localStream?.getAudioTracks().isNotEmpty ?? true,
'video': {'deviceId': deviceId},
});
talker.info('[WebRTC] Switched to camera device: $deviceId');
}
Future<void> switchMicrophone(String deviceId) async {
await replaceMediaStream({
'audio': {'deviceId': deviceId},
'video': _localStream?.getVideoTracks().isNotEmpty ?? true,
});
talker.info('[WebRTC] Switched to microphone device: $deviceId');
}
Future<List<MediaDeviceInfo>> getVideoDevices() async {
try {
final devices = await navigator.mediaDevices.enumerateDevices();
return devices.where((device) => device.kind == 'videoinput').toList();
} catch (e) {
talker.error('[WebRTC] Failed to enumerate video devices: $e');
return [];
}
}
Future<List<MediaDeviceInfo>> getAudioDevices() async {
try {
final devices = await navigator.mediaDevices.enumerateDevices();
return devices.where((device) => device.kind == 'audioinput').toList();
} catch (e) {
talker.error('[WebRTC] Failed to enumerate audio devices: $e');
return [];
}
}
void _startAudioLevelMonitoring() {
_audioLevelTimer?.cancel();
_audioLevelTimer = Timer.periodic(const Duration(milliseconds: 100), (_) {
_updateAudioLevels();
});
}
void _stopAudioLevelMonitoring() {
_audioLevelTimer?.cancel();
_audioLevelTimer = null;
}
Future<void> _updateAudioLevels() async {
bool hasUpdates = false;
for (final participant in _participants.values) {
if (participant.remoteStream != null && participant.isAudioEnabled) {
final audioTracks = participant.remoteStream!.getAudioTracks();
if (audioTracks.isNotEmpty) {
try {
// Try to get stats for more accurate audio level detection
final pc = participant.peerConnection;
if (pc != null) {
final stats = await pc.getStats();
double maxAudioLevel = 0.0;
// Look for audio receiver stats
for (var report in stats) {
if (report.type == 'inbound-rtp' &&
report.values['mediaType'] == 'audio') {
final audioLevel = report.values['audioLevel'] as double?;
if (audioLevel != null && audioLevel > maxAudioLevel) {
maxAudioLevel = audioLevel;
}
}
}
// If we got stats, use them; otherwise use a simple heuristic
if (maxAudioLevel > 0) {
participant.audioLevel = maxAudioLevel.clamp(0.0, 1.0);
} else {
// Simple heuristic: if audio track is enabled, assume some level
// In a real app, you'd analyze the actual audio data
participant.audioLevel = audioTracks[0].enabled ? 0.5 : 0.0;
}
} else {
// Fallback for local participant or when no PC available
participant.audioLevel = participant.isLocal ? 0.0 : 0.3;
}
hasUpdates = true;
} catch (e) {
talker.warning('[WebRTC] Failed to update audio level for ${participant.id}: $e');
participant.audioLevel = 0.0;
}
} else {
participant.audioLevel = 0.0;
}
} else {
participant.audioLevel = 0.0;
}
}
// Notify listeners if there were updates (throttled to avoid excessive updates)
if (hasUpdates) {
// This will trigger UI updates for speaking indicators
}
}
List<WebRTCParticipant> get participants => _participants.values.toList(); List<WebRTCParticipant> get participants => _participants.values.toList();
void dispose() { void dispose() {
_stopAudioLevelMonitoring();
_signaling.disconnect(); _signaling.disconnect();
for (final pc in _peerConnections.values) { for (final pc in _peerConnections.values) {
pc.close(); pc.close();
} }
_peerConnections.clear(); _peerConnections.clear();
_participants.values.forEach((p) => p.remoteCandidates.clear()); for (var p in _participants.values) {
p.remoteCandidates.clear();
}
_participants.clear(); _participants.clear();
_localStream?.dispose(); _localStream?.dispose();
_participantController.close(); _participantController.close();

View File

@@ -247,8 +247,15 @@ class CallControlsBar extends HookConsumerWidget {
String deviceType, String deviceType,
) async { ) async {
try { try {
// TODO: Implement device switching for WebRTC final callNotifier = ref.read(callNotifierProvider.notifier);
// This would require restarting the media stream with the new device if (callNotifier.webrtcManager == null) return;
if (deviceType == 'videoinput') {
await callNotifier.webrtcManager!.switchCamera(device.deviceId);
} else if (deviceType == 'audioinput') {
await callNotifier.webrtcManager!.switchMicrophone(device.deviceId);
}
if (context.mounted) { if (context.mounted) {
showSnackBar( showSnackBar(
'switchedTo'.tr( 'switchedTo'.tr(

View File

@@ -16,8 +16,7 @@ class SpeakingRippleAvatar extends HookConsumerWidget {
@override @override
Widget build(BuildContext context, WidgetRef ref) { Widget build(BuildContext context, WidgetRef ref) {
final avatarRadius = size / 2; final avatarRadius = size / 2;
// TODO: Implement audio level detection for WebRTC final clampedLevel = live.audioLevel.clamp(0.0, 1.0);
final clampedLevel = 0.0;
final rippleRadius = avatarRadius + clampedLevel * (size * 0.333); final rippleRadius = avatarRadius + clampedLevel * (size * 0.333);
return SizedBox( return SizedBox(
width: size + 8, width: size + 8,