Sample JXA script for running speech recognition on audio. Shows how you can supply code blocks to ObjC methods in JXA (lines 33-37).

1 min read · Original article ↗

ObjC.import("AVFoundation");
ObjC.import('Speech');
ObjC.import("objc");
// Records audio from the default input device for `duration` seconds,
// writing it to `destination` (an NSURL). Blocks the caller by pumping
// the current run loop until the recorder reports it has finished.
function recordForDuration(duration, destination) {
  // Minimal recorder settings: only the codec (Apple IMA4 ADPCM).
  const recorderSettings = $.NSMutableDictionary.alloc.init;
  recorderSettings.setValueForKey($.kAudioFormatAppleIMA4, $.AVFormatIDKey);
  // Some macOS versions fail to link $.AVAudioFormat, so look the AV
  // classes up by name through the ObjC runtime instead.
  const audioFormat = $.objc_getClass("AVAudioFormat").alloc.initWithSettings(recorderSettings);
  const audioRecorder = $.objc_getClass("AVAudioRecorder").alloc.initWithURLFormatError(destination, audioFormat, null);
  // Start the timed recording, then spin the run loop in 100 ms slices
  // until AVAudioRecorder flips isRecording back to false.
  audioRecorder.recordForDuration(duration);
  while (audioRecorder.isRecording) {
    $.NSRunLoop.currentRunLoop.runUntilDate($.NSDate.dateWithTimeIntervalSinceNow(0.1));
  }
  // JXA invokes zero-argument selectors via property access, so this
  // line calls -[AVAudioRecorder stop] (a no-op here, as a belt-and-braces).
  audioRecorder.stop;
}
// Entry point: record 5 s of audio to a temp file, transcribe it with
// SFSpeechRecognizer, delete the temp file, and return the transcript.
(() => {
  // Destination for the temporary recording. NSTemporaryDirectory()
  // returns a path ending in "/", so plain string appending yields a
  // valid file path here.
  const outputURL = $.NSURL.alloc.initFileURLWithPath($.NSTemporaryDirectory().stringByAppendingString("temp.mp4"));
  // Record audio to the temp file.
  recordForDuration(5, outputURL);
  // Set up speech recognition against the recorded file.
  const recognizer = $.SFSpeechRecognizer.alloc.init;
  const request = $.SFSpeechURLRecognitionRequest.alloc.initWithURL(outputURL);
  // The handler fires repeatedly with progressively better partial
  // transcriptions; each call overwrites finalResult, so the last (final)
  // callback's best transcription wins. `result.js` is undefined when the
  // bridge passes nil (e.g. an error-only callback), which skips the body.
  let finalResult = "";
  const task = recognizer.recognitionTaskWithRequestResultHandler(request, (result, error) => {
    if (result.js) {
      finalResult = result.bestTranscription.formattedString.js;
    }
  });
  // Pump the run loop until the task reaches a terminal state. Use strict
  // inequality — both sides are plain numbers from the bridge, and !==
  // avoids accidental coercion. NOTE(review): this assumes a failed task
  // also ends in the "completed" state — confirm against SFSpeechRecognitionTask docs.
  while (task.state !== $.SFSpeechRecognitionTaskStateCompleted && task.state !== $.SFSpeechRecognitionTaskStateCanceling) {
    $.NSRunLoop.currentRunLoop.runUntilDate($.NSDate.dateWithTimeIntervalSinceNow(0.1));
  }
  // Remove the temp recording; $.nil fills the NSError out-parameter.
  $.NSFileManager.defaultManager.removeItemAtURLError(outputURL, $.nil);
  return finalResult;
})()