The `account` thing requires one mandatory configuration parameter, the API key that allows accessing the account.
API keys can be created and managed under <https://platform.openai.com/account/api-keys>.
-| Name | Type | Description | Default | Required | Advanced |
-|-----------------|---------|-----------------------------------------|---------|----------|----------|
-| apiKey | text | The API key to be used for the requests | N/A | yes | no |
+| Name | Type | Description | Default | Required | Advanced |
+|-----------------|---------|-----------------------------------------------------------|--------------------------------------------|----------|----------|
+| apiKey | text | The API key to be used for the requests | N/A | yes | no |
+| apiUrl          | text    | The server URL where the AI service can be reached        | https://api.openai.com/v1/chat/completions | no       | yes      |
+| modelUrl        | text    | The URL from which the available models are retrieved     | https://api.openai.com/v1/models           | no       | yes      |
+
+The advanced parameters `apiUrl` and `modelUrl` can be set if another ChatGPT-compatible service should be used, e.g. a local installation of [LocalAI](https://github.com/go-skynet/LocalAI).
## Channels
// List of all Channel ids
public static final String CHANNEL_CHAT = "chat";
-
- public static final String OPENAI_API_URL = "https://api.openai.com/v1/chat/completions";
- public static final String OPENAI_MODELS_URL = "https://api.openai.com/v1/models";
}
public class ChatGPTConfiguration {
public String apiKey = "";
+ public String apiUrl = "https://api.openai.com/v1/chat/completions";
+ public String modelUrl = "https://api.openai.com/v1/models";
}
*/
package org.openhab.binding.chatgpt.internal;
-import static org.openhab.binding.chatgpt.internal.ChatGPTBindingConstants.*;
-
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
private Gson gson = new Gson();
private String apiKey = "";
+ private String apiUrl = "";
+ private String modelUrl = "";
+
private String lastPrompt = "";
private List<String> models = List.of();
root.add("messages", messages);
String queryJson = gson.toJson(root);
- Request request = httpClient.newRequest(OPENAI_API_URL).method(HttpMethod.POST)
+ Request request = httpClient.newRequest(apiUrl).method(HttpMethod.POST)
.header("Content-Type", "application/json").header("Authorization", "Bearer " + apiKey)
.content(new StringContentProvider(queryJson));
logger.trace("Query '{}'", queryJson);
}
this.apiKey = apiKey;
+ this.apiUrl = config.apiUrl;
+ this.modelUrl = config.modelUrl;
+
updateStatus(ThingStatus.UNKNOWN);
scheduler.execute(() -> {
try {
- Request request = httpClient.newRequest(OPENAI_MODELS_URL).method(HttpMethod.GET)
- .header("Authorization", "Bearer " + apiKey);
+ Request request = httpClient.newRequest(modelUrl).method(HttpMethod.GET).header("Authorization",
+ "Bearer " + apiKey);
ContentResponse response = request.send();
if (response.getStatus() == 200) {
updateStatus(ThingStatus.ONLINE);
thing-type.config.chatgpt.account.apiKey.label = API Key
thing-type.config.chatgpt.account.apiKey.description = API key to access the account
+thing-type.config.chatgpt.account.apiUrl.label = API URL
+thing-type.config.chatgpt.account.apiUrl.description = The server URL where the AI service can be reached.
+thing-type.config.chatgpt.account.modelUrl.label = Model URL
+thing-type.config.chatgpt.account.modelUrl.description = The URL from which the available models are retrieved.
# channel types
channel-type.config.chatgpt.chat.temperature.label = Temperature
channel-type.config.chatgpt.chat.temperature.description = Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
-# Status messages
+# status messages
-offline.configuration-error=No API key configured
-offline.communication-error=Could not connect to OpenAI API
+offline.configuration-error = No API key configured
+offline.communication-error = Could not connect to OpenAI API
<label>API Key</label>
<description>API key to access the account</description>
</parameter>
+ <parameter name="apiUrl" type="text" required="false">
+ <label>API URL</label>
+ <description>The server URL where the AI service can be reached.</description>
+ <default>https://api.openai.com/v1/chat/completions</default>
+ <advanced>true</advanced>
+ <options>
+ <option value="https://api.openai.com/v1/chat/completions">https://api.openai.com/v1/chat/completions</option>
+ </options>
+ <limitToOptions>false</limitToOptions>
+ </parameter>
+ <parameter name="modelUrl" type="text" required="false">
+ <label>Model URL</label>
+ <description>The URL from which the available models are retrieved.</description>
+ <default>https://api.openai.com/v1/models</default>
+ <advanced>true</advanced>
+ <options>
+ <option value="https://api.openai.com/v1/models">https://api.openai.com/v1/models</option>
+ </options>
+ <limitToOptions>false</limitToOptions>
+ </parameter>
</config-description>
</thing-type>