task_type
The type of the inference task that the model will perform. Valid values are text_embedding and rerank.

inference_id
The unique identifier of the inference endpoint.
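Both values are supplied in the request path, which takes the following general form (task_type and inference_id are the path parameters described above):

PUT /_inference/{task_type}/{inference_id}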
PUT _inference/text_embedding/voyageai-embeddings
{
  "service": "voyageai",
  "service_settings": {
    "model_id": "voyage-3-large",
    "dimensions": 512
  }
}
resp = client.inference.put(
    task_type="text_embedding",
    inference_id="voyageai-embeddings",
    inference_config={
        "service": "voyageai",
        "service_settings": {
            "model_id": "voyage-3-large",
            "dimensions": 512
        }
    },
)
const response = await client.inference.put({
  task_type: "text_embedding",
  inference_id: "voyageai-embeddings",
  inference_config: {
    service: "voyageai",
    service_settings: {
      model_id: "voyage-3-large",
      dimensions: 512,
    },
  },
});
response = client.inference.put(
  task_type: "text_embedding",
  inference_id: "voyageai-embeddings",
  body: {
    "service": "voyageai",
    "service_settings": {
      "model_id": "voyage-3-large",
      "dimensions": 512
    }
  }
)
$resp = $client->inference()->put([
    "task_type" => "text_embedding",
    "inference_id" => "voyageai-embeddings",
    "body" => [
        "service" => "voyageai",
        "service_settings" => [
            "model_id" => "voyage-3-large",
            "dimensions" => 512,
        ],
    ],
]);
curl -X PUT -H "Authorization: ApiKey $ELASTIC_API_KEY" -H "Content-Type: application/json" -d '{"service":"voyageai","service_settings":{"model_id":"voyage-3-large","dimensions":512}}' "$ELASTICSEARCH_URL/_inference/text_embedding/voyageai-embeddings"
client.inference().put(p -> p
    .inferenceId("voyageai-embeddings")
    .taskType(TaskType.TextEmbedding)
    .inferenceConfig(i -> i
        .service("voyageai")
        .serviceSettings(JsonData.fromJson("{\"model_id\":\"voyage-3-large\",\"dimensions\":512}"))
    )
);
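After the endpoint is created, you can send text to it with the perform inference API. The following is a minimal sketch, assuming the voyageai-embeddings endpoint defined above; the input string is only an illustration:

POST _inference/text_embedding/voyageai-embeddings
{
  "input": "The quick brown fox jumped over the lazy dog"
}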
An example request body for a text_embedding endpoint:

{
  "service": "voyageai",
  "service_settings": {
    "model_id": "voyage-3-large",
    "dimensions": 512
  }
}
An example request body for a rerank endpoint:

{
  "service": "voyageai",
  "service_settings": {
    "model_id": "rerank-2"
  }
}
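For the rerank task type, the create request might look like the following sketch; the endpoint ID voyageai-rerank is illustrative:

PUT _inference/rerank/voyageai-rerank
{
  "service": "voyageai",
  "service_settings": {
    "model_id": "rerank-2"
  }
}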