2 * Copyright (c) 2010-2022 Contributors to the openHAB project
4 * See the NOTICE file(s) distributed with this work for additional
7 * This program and the accompanying materials are made available under the
8 * terms of the Eclipse Public License 2.0 which is available at
9 * http://www.eclipse.org/legal/epl-2.0
11 * SPDX-License-Identifier: EPL-2.0
13 package org.openhab.voice.watsonstt.internal;
15 import static org.openhab.voice.watsonstt.internal.WatsonSTTConstants.*;
17 import java.util.List;
18 import java.util.Locale;
21 import java.util.concurrent.ScheduledExecutorService;
22 import java.util.concurrent.atomic.AtomicBoolean;
23 import java.util.concurrent.atomic.AtomicReference;
24 import java.util.stream.Collectors;
26 import org.eclipse.jdt.annotation.NonNullByDefault;
27 import org.eclipse.jdt.annotation.Nullable;
28 import org.openhab.core.audio.AudioFormat;
29 import org.openhab.core.audio.AudioStream;
30 import org.openhab.core.common.ThreadPoolManager;
31 import org.openhab.core.config.core.ConfigurableService;
32 import org.openhab.core.config.core.Configuration;
33 import org.openhab.core.voice.RecognitionStartEvent;
34 import org.openhab.core.voice.RecognitionStopEvent;
35 import org.openhab.core.voice.STTException;
36 import org.openhab.core.voice.STTListener;
37 import org.openhab.core.voice.STTService;
38 import org.openhab.core.voice.STTServiceHandle;
39 import org.openhab.core.voice.SpeechRecognitionErrorEvent;
40 import org.openhab.core.voice.SpeechRecognitionEvent;
41 import org.osgi.framework.Constants;
42 import org.osgi.service.component.annotations.Activate;
43 import org.osgi.service.component.annotations.Component;
44 import org.osgi.service.component.annotations.Modified;
45 import org.slf4j.Logger;
46 import org.slf4j.LoggerFactory;
48 import com.google.gson.JsonObject;
49 import com.ibm.cloud.sdk.core.http.HttpMediaType;
50 import com.ibm.cloud.sdk.core.security.IamAuthenticator;
51 import com.ibm.watson.speech_to_text.v1.SpeechToText;
52 import com.ibm.watson.speech_to_text.v1.model.RecognizeWithWebsocketsOptions;
53 import com.ibm.watson.speech_to_text.v1.model.SpeechRecognitionAlternative;
54 import com.ibm.watson.speech_to_text.v1.model.SpeechRecognitionResult;
55 import com.ibm.watson.speech_to_text.v1.model.SpeechRecognitionResults;
56 import com.ibm.watson.speech_to_text.v1.websocket.RecognizeCallback;
58 import okhttp3.WebSocket;
 * The {@link WatsonSTTService} allows using IBM Watson as a Speech-to-Text engine.
63 * @author Miguel Álvarez - Initial contribution
66 @Component(configurationPid = SERVICE_PID, property = Constants.SERVICE_PID + "=" + SERVICE_PID)
67 @ConfigurableService(category = SERVICE_CATEGORY, label = SERVICE_NAME
68 + " Speech-to-Text", description_uri = SERVICE_CATEGORY + ":" + SERVICE_ID)
69 public class WatsonSTTService implements STTService {
    private final Logger logger = LoggerFactory.getLogger(WatsonSTTService.class);
    // Shared openHAB scheduler pool; the websocket session is opened from here so
    // recognize() returns without blocking the caller.
    private final ScheduledExecutorService executor = ThreadPoolManager.getScheduledPool("OH-voice-watsonstt");
    // Watson "BroadbandModel" identifiers known to this service; the language tag before
    // the '_' drives the supported-locales set below.
    private final List<String> models = List.of("ar-AR_BroadbandModel", "de-DE_BroadbandModel", "en-AU_BroadbandModel",
            "en-GB_BroadbandModel", "en-US_BroadbandModel", "es-AR_BroadbandModel", "es-CL_BroadbandModel",
            "es-CO_BroadbandModel", "es-ES_BroadbandModel", "es-MX_BroadbandModel", "es-PE_BroadbandModel",
            "fr-CA_BroadbandModel", "fr-FR_BroadbandModel", "it-IT_BroadbandModel", "ja-JP_BroadbandModel",
            "ko-KR_BroadbandModel", "nl-NL_BroadbandModel", "pt-BR_BroadbandModel", "zh-CN_BroadbandModel");
    // Locales derived from the model names (e.g. "en-US" from "en-US_BroadbandModel").
    private final Set<Locale> supportedLocales = models.stream().map(name -> name.split("_")[0])
            .map(Locale::forLanguageTag).collect(Collectors.toSet());
    // Current service configuration; replaced whenever the configuration changes.
    private WatsonSTTConfiguration config = new WatsonSTTConfiguration();
    // Watson client instance, or null while the service is not (fully) configured.
    private @Nullable SpeechToText speechToText = null;
    /**
     * OSGi activation callback receiving the initial service configuration.
     * NOTE(review): method body lies outside this chunk view — confirm it delegates to
     * {@code modified(config)} as is conventional for this pattern.
     */
    protected void activate(Map<String, Object> config) {
88 protected void modified(Map<String, Object> config) {
89 this.config = new Configuration(config).as(WatsonSTTConfiguration.class);
90 if (this.config.apiKey.isBlank() || this.config.instanceUrl.isBlank()) {
91 this.speechToText = null;
93 var speechToText = new SpeechToText(new IamAuthenticator.Builder().apikey(this.config.apiKey).build());
94 speechToText.setServiceUrl(this.config.instanceUrl);
95 if (this.config.optOutLogging) {
96 speechToText.setDefaultHeaders(Map.of("X-Watson-Learning-Opt-Out", "1"));
98 this.speechToText = speechToText;
    /**
     * Returns the unique id of this STT service.
     * NOTE(review): body not visible in this chunk — presumably returns {@code SERVICE_ID}
     * from {@code WatsonSTTConstants}; verify.
     */
    public String getId() {
    /**
     * Returns the human-readable service label; the locale parameter allows localization.
     * NOTE(review): body not visible in this chunk — presumably returns {@code SERVICE_NAME};
     * verify.
     */
    public String getLabel(@Nullable Locale locale) {
    /**
     * Returns the locales for which a bundled Watson broadband model exists.
     */
    public Set<Locale> getSupportedLocales() {
        return supportedLocales;
    /**
     * Returns the audio formats this service can stream to Watson (WAV, OGG vorbis/opus, ...).
     * NOTE(review): the format list is truncated in this chunk view.
     */
    public Set<AudioFormat> getSupportedFormats() {
        return Set.of(AudioFormat.WAV, AudioFormat.OGG, new AudioFormat("OGG", "OPUS", null, null, null, null),
124 public STTServiceHandle recognize(STTListener sttListener, AudioStream audioStream, Locale locale, Set<String> set)
125 throws STTException {
126 var stt = this.speechToText;
128 throw new STTException("service is not correctly configured");
130 String contentType = getContentType(audioStream);
131 if (contentType == null) {
132 throw new STTException("Unsupported format, unable to resolve audio content type");
134 logger.debug("Content-Type: {}", contentType);
135 RecognizeWithWebsocketsOptions wsOptions = new RecognizeWithWebsocketsOptions.Builder().audio(audioStream)
136 .contentType(contentType).redaction(config.redaction).smartFormatting(config.smartFormatting)
137 .model(locale.toLanguageTag() + "_BroadbandModel").interimResults(true)
138 .backgroundAudioSuppression(config.backgroundAudioSuppression)
139 .speechDetectorSensitivity(config.speechDetectorSensitivity).inactivityTimeout(config.maxSilenceSeconds)
141 final AtomicReference<@Nullable WebSocket> socketRef = new AtomicReference<>();
142 final AtomicBoolean aborted = new AtomicBoolean(false);
143 executor.submit(() -> {
144 socketRef.set(stt.recognizeUsingWebSocket(wsOptions,
145 new TranscriptionListener(socketRef, sttListener, config, aborted)));
147 return new STTServiceHandle() {
149 public void abort() {
150 if (!aborted.getAndSet(true)) {
151 var socket = socketRef.get();
152 if (socket != null) {
153 sendStopMessage(socket);
160 private @Nullable String getContentType(AudioStream audioStream) throws STTException {
161 AudioFormat format = audioStream.getFormat();
162 String container = format.getContainer();
163 String codec = format.getCodec();
164 if (container == null || codec == null) {
165 throw new STTException("Missing audio stream info");
167 Long frequency = format.getFrequency();
168 Integer bitDepth = format.getBitDepth();
170 case AudioFormat.CONTAINER_WAVE:
171 if (AudioFormat.CODEC_PCM_SIGNED.equals(codec)) {
172 if (bitDepth == null || bitDepth != 16) {
175 // rate is a required parameter for this type
176 if (frequency == null) {
179 StringBuilder contentTypeL16 = new StringBuilder(HttpMediaType.AUDIO_PCM).append(";rate=")
181 // // those are optional
182 Integer channels = format.getChannels();
183 if (channels != null) {
184 contentTypeL16.append(";channels=").append(channels);
186 Boolean bigEndian = format.isBigEndian();
187 if (bigEndian != null) {
188 contentTypeL16.append(";")
189 .append(bigEndian ? "endianness=big-endian" : "endianness=little-endian");
191 return contentTypeL16.toString();
193 case AudioFormat.CONTAINER_OGG:
195 case AudioFormat.CODEC_VORBIS:
196 return "audio/ogg;codecs=vorbis";
198 return "audio/ogg;codecs=opus";
201 case AudioFormat.CONTAINER_NONE:
202 if (AudioFormat.CODEC_MP3.equals(codec)) {
210 private static void sendStopMessage(WebSocket ws) {
211 JsonObject stopMessage = new JsonObject();
212 stopMessage.addProperty("action", "stop");
213 ws.send(stopMessage.toString());
216 private static class TranscriptionListener implements RecognizeCallback {
        private final Logger logger = LoggerFactory.getLogger(TranscriptionListener.class);
        // Accumulates the transcripts of all final results of the session.
        private final StringBuilder transcriptBuilder = new StringBuilder();
        private final STTListener sttListener;
        private final WatsonSTTConfiguration config;
        // Shared with the service handle: true once the session was aborted or finished.
        private final AtomicBoolean aborted;
        // Shared websocket holder, needed to send the "stop" control message.
        private final AtomicReference<@Nullable WebSocket> socketRef;
        // Running sum and count of final-result confidences; averaged on disconnect.
        private float confidenceSum = 0f;
        private int responseCount = 0;
        // Set on disconnect so a trailing "Socket closed" error can be ignored.
        private boolean disconnected = false;
227 public TranscriptionListener(AtomicReference<@Nullable WebSocket> socketRef, STTListener sttListener,
228 WatsonSTTConfiguration config, AtomicBoolean aborted) {
229 this.socketRef = socketRef;
230 this.sttListener = sttListener;
231 this.config = config;
232 this.aborted = aborted;
236 public void onTranscription(@Nullable SpeechRecognitionResults speechRecognitionResults) {
237 logger.debug("onTranscription");
238 if (speechRecognitionResults == null) {
241 speechRecognitionResults.getResults().stream().filter(SpeechRecognitionResult::isXFinal).forEach(result -> {
242 SpeechRecognitionAlternative alternative = result.getAlternatives().stream().findFirst().orElse(null);
243 if (alternative == null) {
246 logger.debug("onTranscription Final");
247 Double confidence = alternative.getConfidence();
248 transcriptBuilder.append(alternative.getTranscript());
249 confidenceSum += confidence != null ? confidence.floatValue() : 0f;
251 if (config.singleUtteranceMode) {
252 var socket = socketRef.get();
253 if (socket != null) {
254 sendStopMessage(socket);
        /** Websocket connection established; nothing to do besides tracing. */
        public void onConnected() {
            logger.debug("onConnected");
266 public void onError(@Nullable Exception e) {
267 var errorMessage = e != null ? e.getMessage() : null;
268 if (errorMessage != null && disconnected && errorMessage.contains("Socket closed")) {
269 logger.debug("Error ignored: {}", errorMessage);
272 logger.warn("TranscriptionError: {}", errorMessage);
273 if (!aborted.getAndSet(true)) {
274 sttListener.sttEventReceived(
275 new SpeechRecognitionErrorEvent(errorMessage != null ? errorMessage : "Unknown error"));
280 public void onDisconnected() {
281 logger.debug("onDisconnected");
283 if (!aborted.getAndSet(true)) {
284 sttListener.sttEventReceived(new RecognitionStopEvent());
285 float averageConfidence = confidenceSum / (float) responseCount;
286 String transcript = transcriptBuilder.toString().trim();
287 if (!transcript.isBlank()) {
288 sttListener.sttEventReceived(new SpeechRecognitionEvent(transcript, averageConfidence));
290 if (!config.noResultsMessage.isBlank()) {
291 sttListener.sttEventReceived(new SpeechRecognitionErrorEvent(config.noResultsMessage));
293 sttListener.sttEventReceived(new SpeechRecognitionErrorEvent("No results"));
300 public void onInactivityTimeout(@Nullable RuntimeException e) {
302 logger.debug("InactivityTimeout: {}", e.getMessage());
        /**
         * Watson is ready to receive audio; notify openHAB that recognition has started.
         */
        public void onListening() {
            logger.debug("onListening");
            sttListener.sttEventReceived(new RecognitionStartEvent());
        /**
         * Transcription finished; trace only — the final result event is emitted from
         * onDisconnected() when the socket closes.
         */
        public void onTranscriptionComplete() {
            logger.debug("onTranscriptionComplete");