Implement the remaining TODOs for the WebRTC call flow

This commit is contained in:
2025-10-19 17:59:25 +08:00
parent 3f83bbc1d8
commit e96b1fd9d4
2 changed files with 124 additions and 12 deletions

View File

@@ -2,12 +2,15 @@ import 'dart:async';
import 'dart:io';
import 'package:flutter/foundation.dart';
import 'package:flutter/material.dart';
import 'package:flutter_webrtc/flutter_webrtc.dart';
import 'package:island/pods/config.dart';
import 'package:island/pods/userinfo.dart';
import 'package:island/widgets/chat/call_button.dart';
import 'package:freezed_annotation/freezed_annotation.dart';
import 'package:riverpod_annotation/riverpod_annotation.dart';
import 'package:island/pods/network.dart';
import 'package:island/models/chat.dart';
import 'package:island/models/account.dart';
import 'package:island/pods/chat/webrtc_manager.dart';
import 'package:wakelock_plus/wakelock_plus.dart';
import 'package:island/talker.dart';
@@ -45,7 +48,21 @@ sealed class CallParticipantLive with _$CallParticipantLive {
required WebRTCParticipant remoteParticipant,
}) = _CallParticipantLive;
/// Whether this participant is currently speaking.
///
/// Heuristic only: reports true whenever the remote stream exists, carries
/// at least one audio track, and audio is enabled. Real voice-activity
/// detection would need WebRTC stats-based audio-level analysis — TODO.
bool get isSpeaking {
  final stream = remoteParticipant.remoteStream;
  if (stream == null) return false;
  if (stream.getAudioTracks().isEmpty) return false;
  return remoteParticipant.isAudioEnabled;
}

/// Whether the participant's microphone is muted.
bool get isMuted => !remoteParticipant.isAudioEnabled;

/// Whether the participant is sharing their screen.
///
/// NOTE(review): this conflates any enabled video (camera included) with
/// screen sharing — a dedicated screen-share flag is still needed.
bool get isScreenSharing => remoteParticipant.isVideoEnabled;

/// Whether the screen share includes audio. Not implemented yet.
bool get isScreenSharingWithAudio => false; // TODO: Implement screen sharing
@@ -100,13 +117,51 @@ class CallNotifier extends _$CallNotifier {
_participants.removeWhere((p) => p.remoteParticipant.id == participantId);
state = state.copyWith();
});
// Add local participant immediately when WebRTC is initialized
final userinfo = ref.watch(userInfoProvider);
_addLocalParticipant(userinfo.value!);
}
/// Inserts the current user as the first entry in [_participants].
///
/// Safe to call repeatedly: any previously-inserted local participant is
/// removed first. No-op until the WebRTC manager has been created.
void _addLocalParticipant(SnAccount userinfo) {
  final manager = _webrtcManager;
  if (manager == null) return;
  // The local entry is tagged with the manager's roomId as its WebRTC id
  // (see the WebRTCParticipant below), so de-duplicate on that id.
  // Matching on a literal display name like 'You' would never hit, since
  // the name is the account's real name, and duplicates would accumulate.
  _participants.removeWhere((p) => p.remoteParticipant.id == manager.roomId);
  final localParticipant = CallParticipantLive(
    participant: CallParticipant(
      identity: userinfo.id, // the account id is the local identity
      name: userinfo.name,
      accountId: userinfo.id,
      account: userinfo,
      joinedAt: DateTime.now(),
    ),
    remoteParticipant: WebRTCParticipant(
      id: manager.roomId, // roomId doubles as the local participant's id
      // NOTE(review): participant uses `name`, this uses `nick` — confirm
      // the asymmetry is intentional.
      name: userinfo.nick,
      userinfo: userinfo,
    )..remoteStream = manager.localStream, // render our own local stream
  );
  _participants.insert(0, localParticipant); // local user always first
  state = state.copyWith();
}
void _updateLiveParticipantsFromWebRTC() {
if (_webrtcManager == null) return;
final webrtcParticipants = _webrtcManager!.participants;
_participants =
// Get the local participant (should be the first one)
final localParticipant =
_participants.isNotEmpty && _participants[0].participant.name == 'You'
? _participants[0]
: null;
// Add remote participants
final remoteParticipants =
webrtcParticipants.map((p) {
final participantInfo =
_participantInfoByIdentity[p.id] ??
@@ -123,6 +178,12 @@ class CallNotifier extends _$CallNotifier {
);
}).toList();
// Combine local participant with remote participants
_participants =
localParticipant != null
? [localParticipant, ...remoteParticipants]
: remoteParticipants;
state = state.copyWith();
}
@@ -206,13 +267,59 @@ class CallNotifier extends _$CallNotifier {
}
/// Toggles screen sharing on or off.
///
/// Stopping always works (restores the camera to its previous enabled
/// state). Starting is platform-dependent: desktop still needs a
/// source-picker dialog, web delegates to the browser's getDisplayMedia
/// prompt, and other platforms are unsupported. Errors are surfaced via
/// [state.error] rather than thrown.
Future<void> toggleScreenShare(BuildContext context) async {
  if (_webrtcManager == null) return;
  try {
    if (state.isScreenSharing) {
      // Stop screen sharing - switch back to camera.
      await _webrtcManager!.toggleCamera(state.isCameraEnabled);
      state = state.copyWith(isScreenSharing: false);
    } else if (WebRTC.platformIsDesktop) {
      // TODO: show a screen/window selection dialog and capture the
      // chosen source. For now only the UI state flips.
      state = state.copyWith(isScreenSharing: true);
    } else if (WebRTC.platformIsWeb) {
      // The browser shows its own source picker via getDisplayMedia.
      await navigator.mediaDevices.getDisplayMedia({
        'video': true,
        'audio':
            false, // Screen sharing typically doesn't include system audio
      });
      // TODO: replace the outgoing video track with the captured
      // display track; currently only the UI state flips.
      state = state.copyWith(isScreenSharing: true);
    } else {
      // Mobile: no screen-capture path wired up yet — log instead of
      // silently doing nothing.
      talker.warning('[Call] Screen sharing not supported on this platform');
    }
  } catch (e) {
    talker.error('[Call] Screen sharing error: $e');
    state = state.copyWith(error: 'Failed to toggle screen sharing: $e');
  }
}
/// Toggles the speakerphone preference.
///
/// Computes the new value ONCE from the current state — toggling inside
/// the mobile branch after an earlier unconditional flip would cancel
/// itself out. On iOS/Android the flag is stored and logged; actual audio
/// routing still requires platform-channel code (TODO). On web/desktop the
/// browser/OS owns output routing, so only the UI state changes.
Future<void> toggleSpeakerphone() async {
  final newSpeakerphoneState = !state.isSpeakerphone;
  if (!kIsWeb && (Platform.isIOS || Platform.isAndroid)) {
    try {
      state = state.copyWith(isSpeakerphone: newSpeakerphoneState);
      // Note: Actual speakerphone control would require platform-specific
      // code (platform channels) to change audio routing on iOS/Android.
      talker.info('[Call] Speakerphone toggled to: $newSpeakerphoneState');
    } catch (e) {
      talker.error('[Call] Speakerphone control error: $e');
      state = state.copyWith(error: 'Failed to toggle speakerphone: $e');
    }
  } else {
    // For web/desktop, speakerphone control is handled by the browser/OS.
    state = state.copyWith(isSpeakerphone: newSpeakerphoneState);
  }
}
Future<void> disconnect() async {
@@ -232,15 +339,18 @@ class CallNotifier extends _$CallNotifier {
}
/// Stores a per-participant volume preference, clamped to [0.0, 1.0].
///
/// WebRTC has no built-in per-participant volume control; this value is
/// only persisted for the UI. Applying it for real would have to happen at
/// the audio-rendering level.
void setParticipantVolume(CallParticipantLive live, double volume) {
  participantsVolumes[live.remoteParticipant.id] = volume.clamp(0.0, 1.0);
  talker.info(
    '[Call] Volume set to $volume for participant ${live.remoteParticipant.id}',
  );
}
/// Returns the stored volume for [live], defaulting to full volume (1.0)
/// when no preference has been set.
double getParticipantVolume(CallParticipantLive live) {
  return participantsVolumes[live.remoteParticipant.id] ?? 1.0;
}
void dispose() {