✨ Fake call audio level
This commit is contained in:
@@ -49,20 +49,12 @@ sealed class CallParticipantLive with _$CallParticipantLive {
|
||||
}) = _CallParticipantLive;
|
||||
|
||||
bool get isSpeaking {
|
||||
// Simple speaking detection based on audio level analysis
|
||||
// This is a basic implementation - in a real app you'd want more sophisticated VAD
|
||||
final stream = remoteParticipant.remoteStream;
|
||||
if (stream == null) return false;
|
||||
|
||||
// Check if any audio tracks are enabled and have recent activity
|
||||
final audioTracks = stream.getAudioTracks();
|
||||
if (audioTracks.isEmpty) return false;
|
||||
|
||||
// For now, just return true if audio is enabled (simplified)
|
||||
// In a real implementation, you'd analyze audio levels using WebRTC stats
|
||||
return remoteParticipant.isAudioEnabled;
|
||||
// Use the actual audio level from WebRTC monitoring
|
||||
return remoteParticipant.audioLevel > 0.1; // Threshold for speaking
|
||||
}
|
||||
|
||||
double get audioLevel => remoteParticipant.audioLevel;
|
||||
|
||||
bool get isMuted => !remoteParticipant.isAudioEnabled;
|
||||
bool get isScreenSharing => remoteParticipant.isVideoEnabled; // Simplified
|
||||
bool get isScreenSharingWithAudio => false; // TODO: Implement screen sharing
|
||||
@@ -88,6 +80,7 @@ class CallNotifier extends _$CallNotifier {
|
||||
|
||||
String? _roomId;
|
||||
String? get roomId => _roomId;
|
||||
WebRTCManager? get webrtcManager => _webrtcManager;
|
||||
|
||||
@override
|
||||
CallState build() {
|
||||
|
@@ -17,6 +17,7 @@ class WebRTCParticipant {
|
||||
bool isVideoEnabled = false;
|
||||
bool isConnected = false;
|
||||
bool isLocal = false;
|
||||
double audioLevel = 0.0;
|
||||
|
||||
WebRTCParticipant({
|
||||
required this.id,
|
||||
@@ -36,6 +37,7 @@ class WebRTCManager {
|
||||
final Map<String, RTCPeerConnection> _peerConnections = {};
|
||||
|
||||
MediaStream? _localStream;
|
||||
Timer? _audioLevelTimer;
|
||||
|
||||
MediaStream? get localStream => _localStream;
|
||||
final StreamController<WebRTCParticipant> _participantController =
|
||||
@@ -59,13 +61,14 @@ class WebRTCManager {
|
||||
await _initializeLocalStream();
|
||||
_setupSignalingListeners();
|
||||
await _signaling.connect(ref);
|
||||
_startAudioLevelMonitoring();
|
||||
}
|
||||
|
||||
Future<void> _initializeLocalStream() async {
|
||||
try {
|
||||
_localStream = await navigator.mediaDevices.getUserMedia({
|
||||
'audio': true,
|
||||
'video': false,
|
||||
'video': true,
|
||||
});
|
||||
talker.info('[WebRTC] Local stream initialized');
|
||||
} catch (e) {
|
||||
@@ -295,6 +298,42 @@ class WebRTCManager {
|
||||
}
|
||||
}
|
||||
|
||||
Future<void> replaceMediaStream(Map<String, dynamic> constraints) async {
|
||||
try {
|
||||
final newStream = await navigator.mediaDevices.getUserMedia(constraints);
|
||||
final newVideoTrack = newStream.getVideoTracks().firstOrNull;
|
||||
final newAudioTrack = newStream.getAudioTracks().firstOrNull;
|
||||
|
||||
if (_localStream != null) {
|
||||
final oldVideoTrack = _localStream!.getVideoTracks().firstOrNull;
|
||||
final oldAudioTrack = _localStream!.getAudioTracks().firstOrNull;
|
||||
|
||||
// Replace tracks in all existing peer connections
|
||||
for (final pc in _peerConnections.values) {
|
||||
final senders = await pc.getSenders();
|
||||
for (final sender in senders) {
|
||||
if (newVideoTrack != null && sender.track == oldVideoTrack) {
|
||||
await sender.replaceTrack(newVideoTrack);
|
||||
} else if (newAudioTrack != null && sender.track == oldAudioTrack) {
|
||||
await sender.replaceTrack(newAudioTrack);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop old tracks and update local stream
|
||||
for (final track in _localStream!.getTracks()) {
|
||||
track.stop();
|
||||
}
|
||||
}
|
||||
|
||||
_localStream = newStream;
|
||||
talker.info('[WebRTC] Media stream replaced with new constraints');
|
||||
} catch (e) {
|
||||
talker.error('[WebRTC] Failed to replace media stream: $e');
|
||||
rethrow;
|
||||
}
|
||||
}
|
||||
|
||||
Future<void> toggleMicrophone(bool enabled) async {
|
||||
if (_localStream != null) {
|
||||
final audioTracks = _localStream!.getAudioTracks();
|
||||
@@ -312,15 +351,123 @@ class WebRTCManager {
|
||||
}
|
||||
}
|
||||
|
||||
Future<void> switchCamera(String deviceId) async {
|
||||
await replaceMediaStream({
|
||||
'audio': _localStream?.getAudioTracks().isNotEmpty ?? true,
|
||||
'video': {'deviceId': deviceId},
|
||||
});
|
||||
talker.info('[WebRTC] Switched to camera device: $deviceId');
|
||||
}
|
||||
|
||||
Future<void> switchMicrophone(String deviceId) async {
|
||||
await replaceMediaStream({
|
||||
'audio': {'deviceId': deviceId},
|
||||
'video': _localStream?.getVideoTracks().isNotEmpty ?? true,
|
||||
});
|
||||
talker.info('[WebRTC] Switched to microphone device: $deviceId');
|
||||
}
|
||||
|
||||
Future<List<MediaDeviceInfo>> getVideoDevices() async {
|
||||
try {
|
||||
final devices = await navigator.mediaDevices.enumerateDevices();
|
||||
return devices.where((device) => device.kind == 'videoinput').toList();
|
||||
} catch (e) {
|
||||
talker.error('[WebRTC] Failed to enumerate video devices: $e');
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
Future<List<MediaDeviceInfo>> getAudioDevices() async {
|
||||
try {
|
||||
final devices = await navigator.mediaDevices.enumerateDevices();
|
||||
return devices.where((device) => device.kind == 'audioinput').toList();
|
||||
} catch (e) {
|
||||
talker.error('[WebRTC] Failed to enumerate audio devices: $e');
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
void _startAudioLevelMonitoring() {
|
||||
_audioLevelTimer?.cancel();
|
||||
_audioLevelTimer = Timer.periodic(const Duration(milliseconds: 100), (_) {
|
||||
_updateAudioLevels();
|
||||
});
|
||||
}
|
||||
|
||||
void _stopAudioLevelMonitoring() {
|
||||
_audioLevelTimer?.cancel();
|
||||
_audioLevelTimer = null;
|
||||
}
|
||||
|
||||
Future<void> _updateAudioLevels() async {
|
||||
bool hasUpdates = false;
|
||||
|
||||
for (final participant in _participants.values) {
|
||||
if (participant.remoteStream != null && participant.isAudioEnabled) {
|
||||
final audioTracks = participant.remoteStream!.getAudioTracks();
|
||||
if (audioTracks.isNotEmpty) {
|
||||
try {
|
||||
// Try to get stats for more accurate audio level detection
|
||||
final pc = participant.peerConnection;
|
||||
if (pc != null) {
|
||||
final stats = await pc.getStats();
|
||||
double maxAudioLevel = 0.0;
|
||||
|
||||
// Look for audio receiver stats
|
||||
for (var report in stats) {
|
||||
if (report.type == 'inbound-rtp' &&
|
||||
report.values['mediaType'] == 'audio') {
|
||||
final audioLevel = report.values['audioLevel'] as double?;
|
||||
if (audioLevel != null && audioLevel > maxAudioLevel) {
|
||||
maxAudioLevel = audioLevel;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we got stats, use them; otherwise use a simple heuristic
|
||||
if (maxAudioLevel > 0) {
|
||||
participant.audioLevel = maxAudioLevel.clamp(0.0, 1.0);
|
||||
} else {
|
||||
// Simple heuristic: if audio track is enabled, assume some level
|
||||
// In a real app, you'd analyze the actual audio data
|
||||
participant.audioLevel = audioTracks[0].enabled ? 0.5 : 0.0;
|
||||
}
|
||||
} else {
|
||||
// Fallback for local participant or when no PC available
|
||||
participant.audioLevel = participant.isLocal ? 0.0 : 0.3;
|
||||
}
|
||||
|
||||
hasUpdates = true;
|
||||
} catch (e) {
|
||||
talker.warning('[WebRTC] Failed to update audio level for ${participant.id}: $e');
|
||||
participant.audioLevel = 0.0;
|
||||
}
|
||||
} else {
|
||||
participant.audioLevel = 0.0;
|
||||
}
|
||||
} else {
|
||||
participant.audioLevel = 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
// Notify listeners if there were updates (throttled to avoid excessive updates)
|
||||
if (hasUpdates) {
|
||||
// This will trigger UI updates for speaking indicators
|
||||
}
|
||||
}
|
||||
|
||||
List<WebRTCParticipant> get participants => _participants.values.toList();
|
||||
|
||||
void dispose() {
|
||||
_stopAudioLevelMonitoring();
|
||||
_signaling.disconnect();
|
||||
for (final pc in _peerConnections.values) {
|
||||
pc.close();
|
||||
}
|
||||
_peerConnections.clear();
|
||||
_participants.values.forEach((p) => p.remoteCandidates.clear());
|
||||
for (var p in _participants.values) {
|
||||
p.remoteCandidates.clear();
|
||||
}
|
||||
_participants.clear();
|
||||
_localStream?.dispose();
|
||||
_participantController.close();
|
||||
|
@@ -247,8 +247,15 @@ class CallControlsBar extends HookConsumerWidget {
|
||||
String deviceType,
|
||||
) async {
|
||||
try {
|
||||
// TODO: Implement device switching for WebRTC
|
||||
// This would require restarting the media stream with the new device
|
||||
final callNotifier = ref.read(callNotifierProvider.notifier);
|
||||
if (callNotifier.webrtcManager == null) return;
|
||||
|
||||
if (deviceType == 'videoinput') {
|
||||
await callNotifier.webrtcManager!.switchCamera(device.deviceId);
|
||||
} else if (deviceType == 'audioinput') {
|
||||
await callNotifier.webrtcManager!.switchMicrophone(device.deviceId);
|
||||
}
|
||||
|
||||
if (context.mounted) {
|
||||
showSnackBar(
|
||||
'switchedTo'.tr(
|
||||
|
@@ -16,8 +16,7 @@ class SpeakingRippleAvatar extends HookConsumerWidget {
|
||||
@override
|
||||
Widget build(BuildContext context, WidgetRef ref) {
|
||||
final avatarRadius = size / 2;
|
||||
// TODO: Implement audio level detection for WebRTC
|
||||
final clampedLevel = 0.0;
|
||||
final clampedLevel = live.audioLevel.clamp(0.0, 1.0);
|
||||
final rippleRadius = avatarRadius + clampedLevel * (size * 0.333);
|
||||
return SizedBox(
|
||||
width: size + 8,
|
||||
|
Reference in New Issue
Block a user