import 'dart:async';
import 'dart:io';

import 'package:flutter/foundation.dart';
import 'package:flutter/material.dart';
import 'package:flutter_webrtc/flutter_webrtc.dart';
import 'package:freezed_annotation/freezed_annotation.dart';
import 'package:island/models/account.dart';
import 'package:island/models/chat.dart';
import 'package:island/pods/chat/webrtc_manager.dart';
import 'package:island/pods/config.dart';
import 'package:island/pods/network.dart';
import 'package:island/pods/userinfo.dart';
import 'package:island/talker.dart';
import 'package:island/widgets/chat/call_button.dart';
import 'package:riverpod_annotation/riverpod_annotation.dart';
import 'package:wakelock_plus/wakelock_plus.dart';

part 'call.g.dart';
part 'call.freezed.dart';

String formatDuration(Duration duration) {
  String negativeSign = duration.isNegative ? '-' : '';
  String twoDigits(int n) => n.toString().padLeft(2, "0");
  String twoDigitMinutes = twoDigits(duration.inMinutes.remainder(60).abs());
  String twoDigitSeconds = twoDigits(duration.inSeconds.remainder(60).abs());
  return "$negativeSign${twoDigits(duration.inHours)}:$twoDigitMinutes:$twoDigitSeconds";
}

@freezed
sealed class CallState with _$CallState {
  const factory CallState({
    required bool isConnected,
    required bool isMicrophoneEnabled,
    required bool isCameraEnabled,
    required bool isScreenSharing,
    required bool isSpeakerphone,
    @Default(Duration(seconds: 0)) Duration duration,
    String? error,
  }) = _CallState;
}

@freezed
sealed class CallParticipantLive with _$CallParticipantLive {
  const CallParticipantLive._();

  const factory CallParticipantLive({
    required CallParticipant participant,
    required WebRTCParticipant remoteParticipant,
  }) = _CallParticipantLive;

  bool get isSpeaking {
    // Simple speaking detection based on audio level analysis.
    // This is a basic implementation - in a real app you'd want more
    // sophisticated VAD.
    final stream = remoteParticipant.remoteStream;
    if (stream == null) return false;

    // Check if any audio tracks are enabled and have recent activity
    final audioTracks = stream.getAudioTracks();
    if (audioTracks.isEmpty) return false;

    // For now, just return true if audio is enabled (simplified).
    // In a real implementation, you'd analyze audio levels using WebRTC stats.
    return remoteParticipant.isAudioEnabled;
  }

  bool get isMuted => !remoteParticipant.isAudioEnabled;
  bool get isScreenSharing => remoteParticipant.isVideoEnabled; // Simplified
  bool get isScreenSharingWithAudio => false; // TODO: Implement screen sharing
  bool get hasVideo => remoteParticipant.isVideoEnabled;
  bool get hasAudio => remoteParticipant.isAudioEnabled;
}
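// A minimal sketch of audio-level based speaking detection for the
// `isSpeaking` getter above, assuming the caller can provide the
// RTCPeerConnection that receives the participant's audio (WebRTCManager does
// not currently expose one, so that plumbing is an assumption). Standard
// WebRTC stats report a 0.0-1.0 'audioLevel' on 'inbound-rtp' entries; the
// exact keys and scales vary by platform, so treat this as an illustration
// rather than a drop-in implementation.
Future<bool> isSpeakingFromStats(
  RTCPeerConnection peerConnection, {
  double threshold = 0.05,
}) async {
  final reports = await peerConnection.getStats();
  for (final report in reports) {
    if (report.type != 'inbound-rtp') continue;
    final level = report.values['audioLevel'];
    // Treat anything above the threshold as speech.
    if (level is num && level > threshold) return true;
  }
  return false;
}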
@Riverpod(keepAlive: true)
class CallNotifier extends _$CallNotifier {
  WebRTCManager? _webrtcManager;
  List<CallParticipantLive> _participants = [];
  final Map<String, CallParticipant> _participantInfoByIdentity = {};
  StreamSubscription? _participantJoinedSubscription;
  StreamSubscription? _participantLeftSubscription;

  List<CallParticipantLive> get participants =>
      List.unmodifiable(_participants);

  Map participantsVolumes = {};
  Timer? _durationTimer;

  String? _roomId;
  String? get roomId => _roomId;

  @override
  CallState build() {
    // Subscribe to websocket updates
    return const CallState(
      isConnected: false,
      isMicrophoneEnabled: true, // Audio enabled by default (matches WebRTC init)
      isCameraEnabled: true, // Video enabled by default (matches WebRTC init)
      isScreenSharing: false,
      isSpeakerphone: true,
    );
  }

  void _initWebRTCListeners() {
    _participantJoinedSubscription?.cancel();
    _participantLeftSubscription?.cancel();

    _participantJoinedSubscription = _webrtcManager?.onParticipantJoined.listen(
      (participant) {
        _updateLiveParticipantsFromWebRTC();
      },
    );
    _participantLeftSubscription = _webrtcManager?.onParticipantLeft.listen((
      participantId,
    ) {
      _participants.removeWhere(
        (p) => p.remoteParticipant.id == participantId,
      );
      state = state.copyWith();
    });

    // Add local participant immediately when WebRTC is initialized
    final userinfo = ref.watch(userInfoProvider);
    if (userinfo.value != null) {
      _addLocalParticipant(userinfo.value!);
    }
  }

  void _addLocalParticipant(SnAccount userinfo) {
    if (_webrtcManager == null) return;

    // Remove any existing local participant first
    _participants.removeWhere((p) => p.participant.identity == userinfo.id);

    // Add local participant (current user)
    final localParticipant = CallParticipantLive(
      participant: CallParticipant(
        identity: userinfo.id, // Use the account id as the local identity
        name: userinfo.name,
        accountId: userinfo.id,
        account: userinfo,
        joinedAt: DateTime.now(),
      ),
      remoteParticipant: WebRTCParticipant(
        id: _webrtcManager!.roomId,
        name: userinfo.nick,
        userinfo: userinfo,
      )..remoteStream = _webrtcManager!.localStream, // Access local stream
    );
    _participants.insert(0, localParticipant); // Add at the beginning
    state = state.copyWith();
  }
  void _updateLiveParticipantsFromWebRTC() {
    if (_webrtcManager == null) return;
    final webrtcParticipants = _webrtcManager!.participants;

    // Always ensure local participant exists
    final existingLocalParticipant =
        _participants.isNotEmpty &&
            _participants[0].remoteParticipant.id == _webrtcManager!.roomId
        ? _participants[0]
        : null;
    final localParticipant =
        existingLocalParticipant ?? _createLocalParticipant();

    // Add remote participants
    final remoteParticipants = webrtcParticipants.map((p) {
      final participantInfo =
          _participantInfoByIdentity[p.id] ??
          CallParticipant(
            identity: p.id,
            name: p.name,
            accountId: p.userinfo.id,
            account: p.userinfo,
            joinedAt: DateTime.now(),
          );
      return CallParticipantLive(
        participant: participantInfo,
        remoteParticipant: p,
      );
    }).toList();

    // Combine local participant with remote participants
    _participants = [localParticipant, ...remoteParticipants];
    state = state.copyWith();
  }

  CallParticipantLive _createLocalParticipant() {
    return CallParticipantLive(
      participant: CallParticipant(
        identity: _webrtcManager!.roomId, // Use roomId as local identity
        name: 'You',
        accountId: '',
        account: null,
        joinedAt: DateTime.now(),
      ),
      remoteParticipant: WebRTCParticipant(
        id: _webrtcManager!.roomId,
        name: 'You',
        userinfo: SnAccount(
          id: '',
          name: '',
          nick: '',
          language: '',
          isSuperuser: false,
          automatedId: null,
          profile: SnAccountProfile(
            id: '',
            firstName: '',
            middleName: '',
            lastName: '',
            bio: '',
            gender: '',
            pronouns: '',
            location: '',
            timeZone: '',
            links: [],
            experience: 0,
            level: 0,
            socialCredits: 0,
            socialCreditsLevel: 0,
            levelingProgress: 0,
            picture: null,
            background: null,
            verification: null,
            usernameColor: null,
            createdAt: DateTime.now(),
            updatedAt: DateTime.now(),
            deletedAt: null,
          ),
          perkSubscription: null,
          createdAt: DateTime.now(),
          updatedAt: DateTime.now(),
          deletedAt: null,
        ),
      )..remoteStream = _webrtcManager!.localStream, // Access local stream
    );
  }

  Future<void> joinRoom(String roomId) async {
    if (_roomId == roomId && _webrtcManager != null) {
      talker.info('[Call] Call skipped. Already connected to this room');
      // Ensure state is connected even if we skip the join process
      if (!state.isConnected) {
        state = state.copyWith(isConnected: true);
      }
      return;
    }

    _roomId = roomId;

    // Clean up existing connection
    await disconnect();

    try {
      final apiClient = ref.read(apiClientProvider);
      final ongoingCall = await ref.read(ongoingCallProvider(roomId).future);
      final response = await apiClient.get(
        '/sphere/chat/realtime/$roomId/join',
      );
      if (response.statusCode == 200 && response.data != null) {
        final data = response.data;

        // Parse join response
        final joinResponse = ChatRealtimeJoinResponse.fromJson(data);
        final participants = joinResponse.participants;

        // Update participant info map
        for (final p in participants) {
          _participantInfoByIdentity[p.identity] = p;
        }

        // Setup duration timer
        _durationTimer?.cancel();
        _durationTimer = Timer.periodic(const Duration(seconds: 1), (timer) {
          state = state.copyWith(
            duration: Duration(
              milliseconds:
                  (DateTime.now().millisecondsSinceEpoch -
                      (ongoingCall?.createdAt.millisecondsSinceEpoch ??
                          DateTime.now().millisecondsSinceEpoch)),
            ),
          );
        });

        // Initialize WebRTC manager
        final serverUrl = ref.watch(serverUrlProvider);
        _webrtcManager = WebRTCManager(roomId: roomId, serverUrl: serverUrl);
        await _webrtcManager!.initialize(ref);
        _initWebRTCListeners();

        if (!kIsWeb && (Platform.isIOS || Platform.isAndroid)) {
          // TODO: Implement speakerphone control for WebRTC
        }

        state = state.copyWith(isConnected: true);

        // Enable wakelock when call connects
        WakelockPlus.enable();
      } else {
        state = state.copyWith(error: 'Failed to join room');
      }
    } catch (e) {
      state = state.copyWith(error: e.toString());
    }
  }
  Future<void> toggleMicrophone() async {
    final target = !state.isMicrophoneEnabled;
    state = state.copyWith(isMicrophoneEnabled: target);
    await _webrtcManager?.toggleMicrophone(target);

    // Update local participant's audio state
    if (_participants.isNotEmpty) {
      _participants[0].remoteParticipant.isAudioEnabled = target;
      state = state.copyWith(); // Trigger UI update
    }
  }

  Future<void> toggleCamera() async {
    final target = !state.isCameraEnabled;
    state = state.copyWith(isCameraEnabled: target);
    await _webrtcManager?.toggleCamera(target);

    // Update local participant's video state
    if (_participants.isNotEmpty) {
      _participants[0].remoteParticipant.isVideoEnabled = target;
      state = state.copyWith(); // Trigger UI update
    }
  }

  Future<void> toggleScreenShare(BuildContext context) async {
    if (_webrtcManager == null) return;

    try {
      if (state.isScreenSharing) {
        // Stop screen sharing - switch back to camera
        await _webrtcManager!.toggleCamera(state.isCameraEnabled);
        state = state.copyWith(isScreenSharing: false);
      } else {
        // Start screen sharing
        if (WebRTC.platformIsDesktop) {
          // For desktop, we need to get a screen capture source.
          // This would require implementing a screen selection dialog.
          // For now, just toggle the state.
          state = state.copyWith(isScreenSharing: true);
        } else if (WebRTC.platformIsWeb) {
          // For web, get display media directly
          await navigator.mediaDevices.getDisplayMedia({
            'video': true,
            'audio': false, // Screen sharing typically doesn't include system audio
          });
          // Replace video track with screen sharing track.
          // This is a simplified implementation; see the
          // _replaceCameraWithScreenShare sketch below for the missing
          // track replacement.
          state = state.copyWith(isScreenSharing: true);
        }
      }
    } catch (e) {
      talker.error('[Call] Screen sharing error: $e');
      state = state.copyWith(error: 'Failed to toggle screen sharing: $e');
    }
  }

  Future<void> toggleSpeakerphone() async {
    if (!kIsWeb && (Platform.isIOS || Platform.isAndroid)) {
      try {
        // For mobile platforms, we can control audio routing.
        // This is a simplified implementation.
        final newSpeakerphoneState = !state.isSpeakerphone;
        state = state.copyWith(isSpeakerphone: newSpeakerphoneState);

        // Note: Actual speakerphone control would require platform-specific
        // code. For a full implementation, you'd need to use platform channels
        // to control audio routing on iOS/Android.
        talker.info('[Call] Speakerphone toggled to: $newSpeakerphoneState');
      } catch (e) {
        talker.error('[Call] Speakerphone control error: $e');
        state = state.copyWith(error: 'Failed to toggle speakerphone: $e');
      }
    } else {
      // For web/desktop, speakerphone control is handled by the browser/OS
      state = state.copyWith(isSpeakerphone: !state.isSpeakerphone);
    }
  }
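  // A sketch of the track replacement that `toggleScreenShare` above leaves
  // out: capture the screen with getDisplayMedia and swap it into every video
  // sender. The peer connections are passed in explicitly because
  // WebRTCManager does not currently expose them, so that plumbing is an
  // assumption; this method is not wired into the toggle yet.
  // ignore: unused_element
  Future<void> _replaceCameraWithScreenShare(
    List<RTCPeerConnection> peerConnections,
  ) async {
    final displayStream = await navigator.mediaDevices.getDisplayMedia({
      'video': true,
      'audio': false, // System audio capture is platform dependent
    });
    final screenTrack = displayStream.getVideoTracks().first;
    for (final pc in peerConnections) {
      for (final sender in await pc.getSenders()) {
        // Only swap the outgoing video track; audio senders stay untouched.
        if (sender.track?.kind == 'video') {
          await sender.replaceTrack(screenTrack);
        }
      }
    }
  }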
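  // A sketch of actual audio routing for `toggleSpeakerphone` above. Instead
  // of hand-rolled platform channels, flutter_webrtc's Helper can switch
  // between the earpiece and the loudspeaker on mobile. This assumes
  // Helper.setSpeakerphoneOn is available in the pinned flutter_webrtc
  // version; it is not wired into the toggle yet.
  // ignore: unused_element
  Future<void> _applySpeakerphone(bool enabled) async {
    if (kIsWeb || !(Platform.isIOS || Platform.isAndroid)) return;
    await Helper.setSpeakerphoneOn(enabled);
  }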
  Future<void> disconnect() async {
    _webrtcManager?.dispose();
    _webrtcManager = null;
    _participantJoinedSubscription?.cancel();
    _participantLeftSubscription?.cancel();
    _participants.clear();
    // Stop the duration timer so it no longer updates state after the call ends
    _durationTimer?.cancel();
    state = state.copyWith(
      isConnected: false,
      isMicrophoneEnabled: false,
      isCameraEnabled: false,
      isScreenSharing: false,
    );

    // Disable wakelock when call disconnects
    WakelockPlus.disable();
  }

  void setParticipantVolume(CallParticipantLive live, double volume) {
    // Store the volume setting for this participant.
    // Note: WebRTC doesn't have built-in per-participant volume control, so
    // this only stores the preference for UI purposes. Actual volume control
    // would need to be implemented at the audio rendering level (see the
    // applyParticipantVolume sketch at the end of this file).
    participantsVolumes[live.remoteParticipant.id] = volume.clamp(0.0, 1.0);
    talker.info(
      '[Call] Volume set to $volume for participant ${live.remoteParticipant.id}',
    );
  }

  double getParticipantVolume(CallParticipantLive live) {
    return participantsVolumes[live.remoteParticipant.id] ?? 1.0;
  }

  void dispose() {
    state = state.copyWith(
      error: null,
      isConnected: false,
      isMicrophoneEnabled: false,
      isCameraEnabled: false,
      isScreenSharing: false,
    );
    _participantJoinedSubscription?.cancel();
    _participantLeftSubscription?.cancel();
    _webrtcManager?.dispose();
    _webrtcManager = null;
    _durationTimer?.cancel();
    _roomId = null;
    participantsVolumes = {};

    // Disable wakelock when disposing
    WakelockPlus.disable();
  }
}
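// A sketch of applying the stored per-participant volume at the audio
// rendering level, as the note in setParticipantVolume suggests. It assumes
// Helper.setVolume is available in the pinned flutter_webrtc version and that
// the participant's remote stream carries the audio tracks to adjust; callers
// would pass the value returned by getParticipantVolume.
Future<void> applyParticipantVolume(
  CallParticipantLive live,
  double volume,
) async {
  final stream = live.remoteParticipant.remoteStream;
  if (stream == null) return;
  for (final track in stream.getAudioTracks()) {
    await Helper.setVolume(volume.clamp(0.0, 1.0).toDouble(), track);
  }
}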