import 'dart:io';
import 'package:audio_session/audio_session.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter_sound/flutter_sound.dart';
import 'package:flutter_sound_platform_interface/flutter_sound_recorder_platform_interface.dart';
import 'package:path_provider/path_provider.dart';
import 'package:permission_handler/permission_handler.dart';
import 'package:xtechpark/ws_utils/ws_file_util.dart';
import 'package:xtechpark/ws_utils/ws_log/ws_logger.dart';
import 'package:xtechpark/ws_utils/ws_toast_util.dart';
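// Dependencies assumed in pubspec.yaml (versions illustrative only; the
// original source does not pin them). The xtechpark ws_utils imports are
// internal project helpers, not pub.dev packages.
//
//   dependencies:
//     flutter_sound: ^9.2.13
//     audio_session: ^0.1.16
//     permission_handler: ^11.0.0
//     path_provider: ^2.1.0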
/// Singleton wrapper around [FlutterSoundRecorder] for AAC voice recording.
class WSAudioRecorder {
  factory WSAudioRecorder() => _getInstance();

  static WSAudioRecorder get instance => _getInstance();
  static WSAudioRecorder? _instance;

  WSAudioRecorder._internal();

  static WSAudioRecorder _getInstance() {
    _instance ??= WSAudioRecorder._internal();
    return _instance!;
  }
  FlutterSoundRecorder? _recorder;
  final Codec _codec = Codec.aacADTS;
  String _mPath = 'temp.aac';

  /// Duration of the recording in progress, updated by the onProgress stream.
  Duration? duration;
  Future<void> init() async {
    _recorder = FlutterSoundRecorder();
    await openTheRecorder();
  }
  void dispose() {
    _recorder?.closeRecorder();
    _recorder = null;
  }
  Future<void> openTheRecorder() async {
    final status = await Permission.microphone.request();
    if (status != PermissionStatus.granted) {
      WSToastUtil.show('Microphone permission not granted');
      return;
    }
    await _recorder?.openRecorder();

    // Configure the platform audio session for voice recording.
    final session = await AudioSession.instance;
    await session.configure(AudioSessionConfiguration(
      avAudioSessionCategory: AVAudioSessionCategory.playAndRecord,
      avAudioSessionCategoryOptions:
          AVAudioSessionCategoryOptions.allowBluetooth |
              AVAudioSessionCategoryOptions.defaultToSpeaker,
      avAudioSessionMode: AVAudioSessionMode.spokenAudio,
      avAudioSessionRouteSharingPolicy:
          AVAudioSessionRouteSharingPolicy.defaultPolicy,
      avAudioSessionSetActiveOptions: AVAudioSessionSetActiveOptions.none,
      androidAudioAttributes: const AndroidAudioAttributes(
        contentType: AndroidAudioContentType.speech,
        flags: AndroidAudioFlags.none,
        usage: AndroidAudioUsage.voiceCommunication,
      ),
      androidAudioFocusGainType: AndroidAudioFocusGainType.gain,
      androidWillPauseWhenDucked: true,
    ));

    // Emit progress events every 100 ms and track the latest duration so
    // stopRecord() can reject recordings shorter than one second. onProgress
    // is the recorder's disposition stream, so one listener is sufficient.
    await _recorder?.setSubscriptionDuration(const Duration(milliseconds: 100));
    _recorder?.onProgress?.listen((e) {
      WSLogger.debug('debug onProgress: ${e.decibels} / ${e.duration}');
      duration = e.duration;
    });
  }
  Future<void> startRecord() async {
    if (_recorder == null) {
      await init();
    }
    // Starting while already recording stops the current session instead.
    if (_recorder?.recorderState == RecorderState.isRecording) {
      await _recorder?.stopRecorder();
      return;
    }
    WSLogger.debug('debug startRecord');
    final status = await Permission.microphone.request();
    if (status != PermissionStatus.granted) {
      WSToastUtil.show('Microphone permission not granted');
      return;
    }
    // Record to a unique temp file so successive recordings don't collide.
    final Directory tempDir = await getTemporaryDirectory();
    _mPath = '${tempDir.path}/${DateTime.now().millisecondsSinceEpoch}.aac';
    await _recorder?.startRecorder(
      toFile: _mPath,
      codec: _codec,
      audioSource: AudioSource.microphone,
    );
    WSLogger.debug('debug recording');
  }
  Future<void> stopRecord(Function(String path, int duration) finished) async {
    String? path = await _recorder?.stopRecorder();
    if (path == null) {
      WSToastUtil.show('record failed');
      WSLogger.error('record failed ${_recorder?.recorderState}');
      return;
    }
    if (!await WSFileUtil.isFileExists(path)) {
      WSToastUtil.show('record failed');
      return;
    }
    if (duration != null && duration!.inSeconds < 1) {
      WSToastUtil.show("recording can't be less than 1 second");
      return;
    }
    WSLogger.debug(
        'Stop recording: path = $path, duration = ${duration?.inSeconds}');
    // Android callers expect a file:// URI; iOS returns a plain path.
    if (defaultTargetPlatform == TargetPlatform.android) {
      path = 'file://$path';
    }
    finished(path, duration?.inSeconds ?? 0);
  }
}
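// ---------------------------------------------------------------------------
// Usage sketch (not part of the original class): one way a caller might drive
// the recorder, e.g. from a push-to-talk button. The onSendVoiceMessage
// callback is a hypothetical placeholder for the app's own send/upload logic.
// ---------------------------------------------------------------------------
Future<void> exampleUsage() async {
  final recorder = WSAudioRecorder.instance;
  await recorder.init();

  // Press: start capturing to a timestamped .aac file in the temp directory.
  await recorder.startRecord();

  // ... user speaks; stand-in delay for illustration only ...
  await Future<void>.delayed(const Duration(seconds: 3));

  // Release: stop and receive the file path and duration via callback.
  await recorder.stopRecord((path, duration) {
    // onSendVoiceMessage(path, duration); // hypothetical app callback
    debugPrint('voice message ready: $path (${duration}s)');
  });
}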