task_type: The type of inference task that the model will perform. Values are chat_completion, completion, or text_embedding. NOTE: The chat_completion task type only supports streaming and only through the _stream API.
inference_id: The unique identifier of the inference endpoint.
timeout: Specifies the amount of time to wait for the inference endpoint to be created.
chunking_settings: The chunking configuration object.
service: The type of service supported for the specified task type. In this case, openai.
service_settings: Settings used to install the inference model. These settings are specific to the openai service.
task_settings: Settings to configure the inference task. These settings are specific to the task type you specified.
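Of these fields, only service and service_settings are required in the request body; chunking_settings and task_settings are optional. The following Python sketch shows where the optional objects sit in the request. The connection details, the chunking values, and the user task setting are illustrative assumptions, not recommended values.

from elasticsearch import Elasticsearch

# Connection details are assumptions; adjust for your cluster.
client = Elasticsearch("http://localhost:9200", api_key="ELASTIC_API_KEY")

# Create the endpoint with the optional chunking_settings and task_settings
# objects alongside the required service and service_settings. The chunking
# values and the "user" task setting below are illustrative assumptions.
resp = client.inference.put(
    task_type="text_embedding",
    inference_id="openai-embeddings",
    inference_config={
        "service": "openai",
        "service_settings": {
            "api_key": "OpenAI-API-Key",
            "model_id": "text-embedding-3-small",
            "dimensions": 128,
        },
        "chunking_settings": {
            "strategy": "sentence",    # assumed chunking strategy
            "max_chunk_size": 250,
            "sentence_overlap": 1,
        },
        "task_settings": {
            "user": "my-application"   # optional identifier passed through to OpenAI
        },
    },
)
print(resp)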
PUT _inference/text_embedding/openai-embeddings
{
  "service": "openai",
  "service_settings": {
    "api_key": "OpenAI-API-Key",
    "model_id": "text-embedding-3-small",
    "dimensions": 128
  }
}
resp = client.inference.put(
    task_type="text_embedding",
    inference_id="openai-embeddings",
    inference_config={
        "service": "openai",
        "service_settings": {
            "api_key": "OpenAI-API-Key",
            "model_id": "text-embedding-3-small",
            "dimensions": 128
        }
    },
)
const response = await client.inference.put({
  task_type: "text_embedding",
  inference_id: "openai-embeddings",
  inference_config: {
    service: "openai",
    service_settings: {
      api_key: "OpenAI-API-Key",
      model_id: "text-embedding-3-small",
      dimensions: 128,
    },
  },
});
response = client.inference.put(
  task_type: "text_embedding",
  inference_id: "openai-embeddings",
  body: {
    "service": "openai",
    "service_settings": {
      "api_key": "OpenAI-API-Key",
      "model_id": "text-embedding-3-small",
      "dimensions": 128
    }
  }
)
$resp = $client->inference()->put([
    "task_type" => "text_embedding",
    "inference_id" => "openai-embeddings",
    "body" => [
        "service" => "openai",
        "service_settings" => [
            "api_key" => "OpenAI-API-Key",
            "model_id" => "text-embedding-3-small",
            "dimensions" => 128,
        ],
    ],
]);
curl -X PUT -H "Authorization: ApiKey $ELASTIC_API_KEY" -H "Content-Type: application/json" -d '{"service":"openai","service_settings":{"api_key":"OpenAI-API-Key","model_id":"text-embedding-3-small","dimensions":128}}' "$ELASTICSEARCH_URL/_inference/text_embedding/openai-embeddings"
client.inference().put(p -> p
    .inferenceId("openai-embeddings")
    .taskType(TaskType.TextEmbedding)
    .inferenceConfig(i -> i
        .service("openai")
        .serviceSettings(JsonData.fromJson("{\"api_key\":\"OpenAI-API-Key\",\"model_id\":\"text-embedding-3-small\",\"dimensions\":128}"))
    )
);
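Once the endpoint exists, it can be exercised through the inference API to confirm that the returned embeddings have the configured 128 dimensions. The following Python sketch assumes the openai-embeddings endpoint created above and an illustrative cluster URL and API key.

from elasticsearch import Elasticsearch

# Connection details are assumptions; adjust for your cluster.
client = Elasticsearch("http://localhost:9200", api_key="ELASTIC_API_KEY")

# Generate an embedding with the endpoint created above.
resp = client.inference.inference(
    task_type="text_embedding",
    inference_id="openai-embeddings",
    input=["The quick brown fox jumps over the lazy dog"],
)

embedding = resp["text_embedding"][0]["embedding"]
print(len(embedding))  # expected: 128, matching the "dimensions" setting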
The following request body creates an inference endpoint for a completion task instead, using the gpt-3.5-turbo model:

{
  "service": "openai",
  "service_settings": {
    "api_key": "OpenAI-API-Key",
    "model_id": "gpt-3.5-turbo"
  }
}
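The same completion endpoint can be created through the Python client. This sketch mirrors the earlier text_embedding example; the openai-completion endpoint name and the connection details are assumptions.

from elasticsearch import Elasticsearch

# Connection details and the endpoint name are assumptions.
client = Elasticsearch("http://localhost:9200", api_key="ELASTIC_API_KEY")

# Create an endpoint that performs a completion task with gpt-3.5-turbo.
resp = client.inference.put(
    task_type="completion",
    inference_id="openai-completion",
    inference_config={
        "service": "openai",
        "service_settings": {
            "api_key": "OpenAI-API-Key",
            "model_id": "gpt-3.5-turbo"
        }
    },
)
print(resp)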