Create an OpenShift AI inference endpoint
Generally available
Path parameters
-
The type of the inference task that the model will perform. NOTE: The
`chat_completion` task type only supports streaming and only through the _stream API. Values are
`text_embedding`, `completion`, `chat_completion`, or `rerank`. -
The unique identifier of the inference endpoint.
Query parameters
-
Specifies the amount of time to wait for the inference endpoint to be created.
External documentation
Body
Required
-
The chunking configuration object. Applies only to the
`text_embedding` task type. Not applicable to the `rerank`, `completion`, or `chat_completion` task types. External documentation -
The type of service supported for the specified task type. In this case,
`openshift_ai`. Value is
`openshift_ai`. -
Settings used to install the inference model. These settings are specific to the
`openshift_ai` service. -
Settings to configure the inference task. Applies only to the
`rerank` task type. Not applicable to the `text_embedding`, `completion`, or `chat_completion` task types. These settings are specific to the task type you specified.
PUT _inference/text_embedding/openshift-ai-text-embedding
{
"service": "openshift_ai",
"service_settings": {
"url": "openshift-ai-embeddings-url",
"api_key": "openshift-ai-embeddings-token",
"model_id": "gritlm-7b"
}
}
resp = client.inference.put(
task_type="text_embedding",
inference_id="openshift-ai-text-embedding",
inference_config={
"service": "openshift_ai",
"service_settings": {
"url": "openshift-ai-embeddings-url",
"api_key": "openshift-ai-embeddings-token",
"model_id": "gritlm-7b"
}
},
)
const response = await client.inference.put({
task_type: "text_embedding",
inference_id: "openshift-ai-text-embedding",
inference_config: {
service: "openshift_ai",
service_settings: {
url: "openshift-ai-embeddings-url",
api_key: "openshift-ai-embeddings-token",
model_id: "gritlm-7b",
},
},
});
response = client.inference.put(
task_type: "text_embedding",
inference_id: "openshift-ai-text-embedding",
body: {
"service": "openshift_ai",
"service_settings": {
"url": "openshift-ai-embeddings-url",
"api_key": "openshift-ai-embeddings-token",
"model_id": "gritlm-7b"
}
}
)
$resp = $client->inference()->put([
"task_type" => "text_embedding",
"inference_id" => "openshift-ai-text-embedding",
"body" => [
"service" => "openshift_ai",
"service_settings" => [
"url" => "openshift-ai-embeddings-url",
"api_key" => "openshift-ai-embeddings-token",
"model_id" => "gritlm-7b",
],
],
]);
curl -X PUT -H "Authorization: ApiKey $ELASTIC_API_KEY" -H "Content-Type: application/json" -d '{"service":"openshift_ai","service_settings":{"url":"openshift-ai-embeddings-url","api_key":"openshift-ai-embeddings-token","model_id":"gritlm-7b"}}' "$ELASTICSEARCH_URL/_inference/text_embedding/openshift-ai-text-embedding"
{
"service": "openshift_ai",
"service_settings": {
"url": "openshift-ai-embeddings-url",
"api_key": "openshift-ai-embeddings-token",
"model_id": "gritlm-7b"
}
}
{
"service": "openshift_ai",
"service_settings": {
"url": "openshift-ai-completion-url",
"api_key": "openshift-ai-completion-token",
"model_id": "llama-31-8b-instruct"
}
}
{
"service": "openshift_ai",
"service_settings": {
"url": "openshift-ai-chat-completion-url",
"api_key": "openshift-ai-chat-completion-token",
"model_id": "llama-31-8b-instruct"
}
}
{
"service": "openshift_ai",
"service_settings": {
"url": "openshift-ai-rerank-url",
"api_key": "openshift-ai-rerank-token",
"model_id": "bge-reranker-v2-m3"
}
}
{
"service": "openshift_ai",
"service_settings": {
"url": "openshift-ai-rerank-url",
"api_key": "openshift-ai-rerank-token"
},
"task_settings": {
"return_documents": true,
"top_n": 2
}
}
{
"inference_id": "openshift-ai-text-embedding",
"task_type": "text_embedding",
"service": "openshift_ai",
"service_settings": {
"model_id": "gritlm-7b",
"url": "openshift-ai-embeddings-url",
"rate_limit": {
"requests_per_minute": 3000
},
"dimensions": 4096,
"similarity": "dot_product",
"dimensions_set_by_user": false
},
"chunking_settings": {
"strategy": "sentence",
"max_chunk_size": 250,
"sentence_overlap": 1
}
}
{
"inference_id": "openshift-ai-completion",
"task_type": "completion",
"service": "openshift_ai",
"service_settings": {
"model_id": "llama-31-8b-instruct",
"url": "openshift-ai-completion-url",
"rate_limit": {
"requests_per_minute": 3000
}
}
}
{
"inference_id": "openshift-ai-chat-completion",
"task_type": "chat_completion",
"service": "openshift_ai",
"service_settings": {
"model_id": "llama-31-8b-instruct",
"url": "openshift-ai-chat-completion-url",
"rate_limit": {
"requests_per_minute": 3000
}
}
}
{
"inference_id": "openshift-ai-rerank",
"task_type": "rerank",
"service": "openshift_ai",
"service_settings": {
"model_id": "bge-reranker-v2-m3",
"url": "openshift-ai-rerank-url",
"rate_limit": {
"requests_per_minute": 3000
}
}
}
{
"inference_id": "openshift-ai-rerank",
"task_type": "rerank",
"service": "openshift_ai",
"service_settings": {
"url": "openshift-ai-rerank-url",
"rate_limit": {
"requests_per_minute": 3000
}
},
"task_settings": {
"return_documents": true,
"top_n": 2
}
}