@@ -209,51 +209,71 @@ public class UILocalPlugin: CAPPlugin, CAPBridgedPlugin {
        CAPPluginMethod(name: "transcribeAudio2Text", returnType: CAPPluginReturnPromise)
    ]
-    // TODO: switch to use https://developer.apple.com/documentation/speech/speechanalyzer for iOS 26+
-    // Speech recognition method
-    private func recognizeSpeech(from url: URL, completion: @escaping (String?, Error?) -> Void) {
-        SFSpeechRecognizer.requestAuthorization { authStatus in
-            guard authStatus == .authorized else {
-                completion(nil, NSError(domain: "", code: -1, userInfo: [NSLocalizedDescriptionKey: "Speech recognition permission not granted"]))
-                return
-            }
-
-            let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))
-            let request = SFSpeechURLRecognitionRequest(url: url)
-
-            // Set up offline speech recognition
-            recognizer?.supportsOnDeviceRecognition = true
-            request.shouldReportPartialResults = false
-            request.requiresOnDeviceRecognition = true
-            request.taskHint = .dictation
-            if #available(iOS 16, *) {
-                request.addsPunctuation = true
-            }
+    func recognizeSpeech(from file: URL, locale: String, completion: @escaping (String?, Error?) -> Void) {
+        if #available(iOS 26.0, *) {
+            // Modern API: SpeechTranscriber + SpeechAnalyzer
+            Task {
+                do {
+                    print("debug locale \(locale)")
+
+                    // Step 1: pick a supported locale
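+                    // supportedLocale(equivalentTo:) resolves the requested identifier
+                    // to a locale the transcriber supports; nil means no on-device
+                    // model is available for that language.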
+                    guard let supportedLocale = await SpeechTranscriber.supportedLocale(equivalentTo: Locale(identifier: locale)) else {
+                        throw NSError(domain: "Speech", code: -1,
+                                      userInfo: [NSLocalizedDescriptionKey: "Unsupported locale"])
+                    }
+
+                    // Step 2: transcriber with the transcription preset
+                    let transcriber = SpeechTranscriber(locale: supportedLocale, preset: .transcription)
+
+                    // Ensure assets (downloads model if needed)
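+                    // The first request for a new locale may block while the model
+                    // downloads; once installed, assetInstallationRequest returns nil.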
+                    if let installRequest = try await AssetInventory.assetInstallationRequest(supporting: [transcriber]) {
+                        try await installRequest.downloadAndInstall()
+                    }
+
+                    // Step 3: collect transcription results asynchronously
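+                    // `async let` starts consuming the results stream in a child task
+                    // before analysis begins, so no result is dropped.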
+                    async let transcriptionFuture: String = try transcriber.results.reduce(into: "") { partial, result in
+                        partial += String(result.text.characters) + " "
+                    }
+
+                    // Step 4: analyzer driven by the transcriber module
+                    let analyzer = SpeechAnalyzer(modules: [transcriber])
+
+                    // Step 5: run the analysis over the audio file
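+                    // finalizeAndFinish(through:) flushes results up to the last audio
+                    // sample; if no audio was consumed, cancel the session instead.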
+                    let audioFile = try AVAudioFile(forReading: file)
+                    if let lastSample = try await analyzer.analyzeSequence(from: audioFile) {
+                        try await analyzer.finalizeAndFinish(through: lastSample)
+                    } else {
+                        try await analyzer.cancelAndFinishNow()
+                    }
+
+                    // Step 6: wait for the collected transcription
+                    let finalText = try await transcriptionFuture.trimmingCharacters(in: .whitespacesAndNewlines)
+                    completion(finalText, nil)
+
+                } catch {
+                    completion(nil, error)
+                }
+            }
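+        } else {
+            // Hypothetical fallback, assuming a deployment target below iOS 26:
+            // without it the completion handler would never fire on older systems.
+            completion(nil, NSError(domain: "Speech", code: -2,
+                                    userInfo: [NSLocalizedDescriptionKey: "On-device transcription requires iOS 26 or later"]))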
-            recognizer?.recognitionTask(with: request) { result, error in
-                if let result = result {
-                    let transcription = result.bestTranscription.formattedString
-                    completion(transcription, nil)
-                } else if let error = error {
-                    completion(nil, error)
-                }
-            }
-        }
-    }
+        }
+    }

    @objc func transcribeAudio2Text(_ call: CAPPluginCall) {
        self.call = call

-        // Receive the audio data arrayBuffer
+        // audio arrayBuffer
        guard let audioArray = call.getArray("audioData", NSNumber.self) as? [UInt8] else {
-            call.reject("Invalid audio data")
+            call.reject("invalid audioData")
            return
        }

-        // Convert the array to Data
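+        // locale is assumed to be a BCP-47 identifier such as "en-US",
+        // matching what Locale(identifier:) expects.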
+        guard let locale = call.getString("locale") else {
+            call.reject("invalid locale")
+            return
+        }
+
        let audioData = Data(audioArray)

-        // Save to a local file
        let fileURL = FileManager.default.temporaryDirectory.appendingPathComponent("recordedAudio.m4a")
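+        // Note: the fixed filename means concurrent calls would overwrite each
+        // other's audio; acceptable for a single-recording flow.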

        do {

@@ -261,23 +281,21 @@ public class UILocalPlugin: CAPPlugin, CAPBridgedPlugin {

            let fileExists = FileManager.default.fileExists(atPath: fileURL.path)

-            print("File exists: \(fileExists), path: \(fileURL.path)")
+            print("file exists: \(fileExists), path: \(fileURL.path)")
            if !fileExists {
-                call.reject("Failed to save file: file does not exist")
+                call.reject("file save failed: file doesn't exist")
                return
            }

-
-            // Run speech recognition
-            self.recognizeSpeech(from: fileURL) { result, error in
+            self.recognizeSpeech(from: fileURL, locale: locale) { result, error in
                if let result = result {
                    call.resolve(["transcription": result])
                } else if let error = error {
-                    call.reject("Speech recognition failed: \(error.localizedDescription)")
+                    call.reject("failed to transcribe: \(error.localizedDescription)")
                }
            }
        } catch {
-            call.reject("Failed to save file: \(error.localizedDescription)")
+            call.reject("failed to save file: \(error.localizedDescription)")
        }
    }