Create an inference endpoint to perform an inference task with the amazonbedrock service.
You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
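For example, to rotate the credentials of an existing endpoint named amazon_bedrock_embeddings (an illustrative name, also used in the examples below), delete the endpoint and recreate it with the new key pair. A minimal sketch; the key values are placeholders:

DELETE _inference/text_embedding/amazon_bedrock_embeddings

PUT _inference/text_embedding/amazon_bedrock_embeddings
{
  "service": "amazonbedrock",
  "service_settings": {
    "access_key": "new-AWS-access-key",
    "secret_key": "new-AWS-secret-key",
    "region": "us-east-1",
    "provider": "amazontitan",
    "model": "amazon.titan-embed-text-v2:0"
  }
}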
Requires the manage_inference cluster privilege.

Path parameters:

task_type
The type of the inference task that the model will perform. Values are completion or text_embedding.

inference_id
The unique identifier of the inference endpoint.
PUT _inference/text_embedding/amazon_bedrock_embeddings
{
  "service": "amazonbedrock",
  "service_settings": {
    "access_key": "AWS-access-key",
    "secret_key": "AWS-secret-key",
    "region": "us-east-1",
    "provider": "amazontitan",
    "model": "amazon.titan-embed-text-v2:0"
  }
}
resp = client.inference.put(
    task_type="text_embedding",
    inference_id="amazon_bedrock_embeddings",
    inference_config={
        "service": "amazonbedrock",
        "service_settings": {
            "access_key": "AWS-access-key",
            "secret_key": "AWS-secret-key",
            "region": "us-east-1",
            "provider": "amazontitan",
            "model": "amazon.titan-embed-text-v2:0"
        }
    },
)
const response = await client.inference.put({
  task_type: "text_embedding",
  inference_id: "amazon_bedrock_embeddings",
  inference_config: {
    service: "amazonbedrock",
    service_settings: {
      access_key: "AWS-access-key",
      secret_key: "AWS-secret-key",
      region: "us-east-1",
      provider: "amazontitan",
      model: "amazon.titan-embed-text-v2:0",
    },
  },
});
response = client.inference.put(
  task_type: "text_embedding",
  inference_id: "amazon_bedrock_embeddings",
  body: {
    "service": "amazonbedrock",
    "service_settings": {
      "access_key": "AWS-access-key",
      "secret_key": "AWS-secret-key",
      "region": "us-east-1",
      "provider": "amazontitan",
      "model": "amazon.titan-embed-text-v2:0"
    }
  }
)
$resp = $client->inference()->put([
    "task_type" => "text_embedding",
    "inference_id" => "amazon_bedrock_embeddings",
    "body" => [
        "service" => "amazonbedrock",
        "service_settings" => [
            "access_key" => "AWS-access-key",
            "secret_key" => "AWS-secret-key",
            "region" => "us-east-1",
            "provider" => "amazontitan",
            "model" => "amazon.titan-embed-text-v2:0",
        ],
    ],
]);
curl -X PUT -H "Authorization: ApiKey $ELASTIC_API_KEY" -H "Content-Type: application/json" -d '{"service":"amazonbedrock","service_settings":{"access_key":"AWS-access-key","secret_key":"AWS-secret-key","region":"us-east-1","provider":"amazontitan","model":"amazon.titan-embed-text-v2:0"}}' "$ELASTICSEARCH_URL/_inference/text_embedding/amazon_bedrock_embeddings"
client.inference().put(p -> p
    .inferenceId("amazon_bedrock_embeddings")
    .taskType(TaskType.TextEmbedding)
    .inferenceConfig(i -> i
        .service("amazonbedrock")
        .serviceSettings(JsonData.fromJson("{\"access_key\":\"AWS-access-key\",\"secret_key\":\"AWS-secret-key\",\"region\":\"us-east-1\",\"provider\":\"amazontitan\",\"model\":\"amazon.titan-embed-text-v2:0\"}"))
    )
);
Request body for a text embedding task:

{
  "service": "amazonbedrock",
  "service_settings": {
    "access_key": "AWS-access-key",
    "secret_key": "AWS-secret-key",
    "region": "us-east-1",
    "provider": "amazontitan",
    "model": "amazon.titan-embed-text-v2:0"
  }
}
Request body for a completion task with the same service (the model shown is an illustrative Bedrock text-generation model ID):

{
  "service": "amazonbedrock",
  "service_settings": {
    "access_key": "AWS-access-key",
    "secret_key": "AWS-secret-key",
    "region": "us-east-1",
    "provider": "amazontitan",
    "model": "amazon.titan-text-premier-v1:0"
  }
}
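After the endpoint is created, you can call it through the perform inference API by referencing its inference ID. A minimal sketch for the text embedding endpoint defined above; the input string is illustrative:

POST _inference/text_embedding/amazon_bedrock_embeddings
{
  "input": "The sun rises in the east and sets in the west."
}

The response contains one embedding per input string.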