chore: initial snapshot for gitea/github upload

This commit is contained in:
Your Name
2026-03-26 16:04:46 +08:00
commit a699a1ac98
3497 changed files with 1586237 additions and 0 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,198 @@
from litellm._uuid import uuid
from typing import Any, Coroutine, Optional, Union
from openai import AsyncAzureOpenAI, AzureOpenAI
from pydantic import BaseModel
from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name
from litellm.types.utils import FileTypes
from litellm.utils import (
TranscriptionResponse,
convert_to_model_response_object,
extract_duration_from_srt_or_vtt,
)
from .azure import AzureChatCompletion
from .common_utils import AzureOpenAIError
class AzureAudioTranscription(AzureChatCompletion):
    """Azure OpenAI audio-transcription (Whisper) handler.

    Wraps the OpenAI SDK's Azure clients to run sync and async
    ``audio.transcriptions.create`` calls and convert the raw result into a
    litellm ``TranscriptionResponse``.
    """

    def audio_transcriptions(
        self,
        model: str,
        audio_file: FileTypes,
        optional_params: dict,
        logging_obj: Any,
        model_response: TranscriptionResponse,
        timeout: float,
        max_retries: int,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        api_version: Optional[str] = None,
        client=None,
        azure_ad_token: Optional[str] = None,
        atranscription: bool = False,
        litellm_params: Optional[dict] = None,
    ) -> Union[TranscriptionResponse, Coroutine[Any, Any, TranscriptionResponse]]:
        """Run an Azure audio transcription call.

        Returns the populated ``model_response`` for sync calls, or a
        coroutine resolving to it when ``atranscription=True``.

        Raises:
            AzureOpenAIError: if the resolved client is not a sync
                ``AzureOpenAI`` instance.
        """
        data = {"model": model, "file": audio_file, **optional_params}

        if atranscription is True:
            return self.async_audio_transcriptions(
                audio_file=audio_file,
                data=data,
                model_response=model_response,
                timeout=timeout,
                api_key=api_key,
                api_base=api_base,
                # FIX: forward api_version — previously dropped, so the async
                # client was silently built with the default API version
                # instead of the caller-supplied one.
                api_version=api_version,
                client=client,
                max_retries=max_retries,
                logging_obj=logging_obj,
                model=model,
                litellm_params=litellm_params,
            )

        azure_client = self.get_azure_openai_client(
            api_version=api_version,
            api_base=api_base,
            api_key=api_key,
            model=model,
            _is_async=False,
            client=client,
            litellm_params=litellm_params,
        )
        if not isinstance(azure_client, AzureOpenAI):
            raise AzureOpenAIError(
                status_code=500,
                message="azure_client is not an instance of AzureOpenAI",
            )

        ## LOGGING
        logging_obj.pre_call(
            input=f"audio_file_{uuid.uuid4()}",
            api_key=azure_client.api_key,
            additional_args={
                "headers": {"Authorization": f"Bearer {azure_client.api_key}"},
                "api_base": azure_client._base_url._uri_reference,
                "atranscription": True,
                "complete_input_dict": data,
            },
        )

        response = azure_client.audio.transcriptions.create(
            **data, timeout=timeout  # type: ignore
        )

        if isinstance(response, BaseModel):
            stringified_response = response.model_dump()
        else:
            # Plain-text responses (e.g. response_format="text") are wrapped
            # so downstream conversion always receives a dict.
            stringified_response = TranscriptionResponse(text=response).model_dump()

        ## LOGGING
        logging_obj.post_call(
            input=get_audio_file_name(audio_file),
            api_key=api_key,
            additional_args={"complete_input_dict": data},
            original_response=stringified_response,
        )
        hidden_params = {"model": model, "custom_llm_provider": "azure"}
        final_response: TranscriptionResponse = convert_to_model_response_object(response_object=stringified_response, model_response_object=model_response, hidden_params=hidden_params, response_type="audio_transcription")  # type: ignore
        return final_response

    async def async_audio_transcriptions(
        self,
        audio_file: FileTypes,
        model: str,
        data: dict,
        model_response: TranscriptionResponse,
        timeout: float,
        logging_obj: Any,
        api_version: Optional[str] = None,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        client=None,
        max_retries=None,
        litellm_params: Optional[dict] = None,
    ) -> TranscriptionResponse:
        """Async variant of ``audio_transcriptions``.

        Uses ``with_raw_response`` so response headers can be propagated into
        the returned ``TranscriptionResponse``.

        Raises:
            AzureOpenAIError: if the resolved client is not async, or the
                converted response is not a ``TranscriptionResponse``.
        """
        response = None
        try:
            async_azure_client = self.get_azure_openai_client(
                api_version=api_version,
                api_base=api_base,
                api_key=api_key,
                model=model,
                _is_async=True,
                client=client,
                litellm_params=litellm_params,
            )
            if not isinstance(async_azure_client, AsyncAzureOpenAI):
                raise AzureOpenAIError(
                    status_code=500,
                    message="async_azure_client is not an instance of AsyncAzureOpenAI",
                )

            ## LOGGING
            logging_obj.pre_call(
                input=f"audio_file_{uuid.uuid4()}",
                api_key=async_azure_client.api_key,
                additional_args={
                    "headers": {
                        "Authorization": f"Bearer {async_azure_client.api_key}"
                    },
                    "api_base": async_azure_client._base_url._uri_reference,
                    "atranscription": True,
                    "complete_input_dict": data,
                },
            )

            raw_response = (
                await async_azure_client.audio.transcriptions.with_raw_response.create(
                    **data, timeout=timeout
                )
            )  # type: ignore

            headers = dict(raw_response.headers)
            response = raw_response.parse()

            if isinstance(response, BaseModel):
                stringified_response = response.model_dump()
            else:
                stringified_response = TranscriptionResponse(text=response).model_dump()
                # srt/vtt output is plain text; recover the audio duration
                # from its timestamps so cost tracking still works.
                duration = extract_duration_from_srt_or_vtt(response)
                stringified_response["_audio_transcription_duration"] = duration

            ## LOGGING
            logging_obj.post_call(
                input=get_audio_file_name(audio_file),
                api_key=api_key,
                additional_args={
                    "headers": {
                        "Authorization": f"Bearer {async_azure_client.api_key}"
                    },
                    "api_base": async_azure_client._base_url._uri_reference,
                    "atranscription": True,
                    "complete_input_dict": data,
                },
                original_response=stringified_response,
            )
            hidden_params = {"model": model, "custom_llm_provider": "azure"}
            response = convert_to_model_response_object(
                _response_headers=headers,
                response_object=stringified_response,
                model_response_object=model_response,
                hidden_params=hidden_params,
                response_type="audio_transcription",
            )
            if not isinstance(response, TranscriptionResponse):
                raise AzureOpenAIError(
                    status_code=500,
                    message="response is not an instance of TranscriptionResponse",
                )
            return response
        except Exception as e:
            ## LOGGING
            logging_obj.post_call(
                # FIX: previously logged `input=input` (the Python builtin);
                # log the audio file name like the success path does.
                input=get_audio_file_name(audio_file),
                api_key=api_key,
                original_response=str(e),
            )
            raise e

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,233 @@
"""
Azure Batches API Handler
"""
from typing import Any, Coroutine, Optional, Union, cast
import httpx
from openai import AsyncOpenAI, OpenAI
from litellm.llms.azure.azure import AsyncAzureOpenAI, AzureOpenAI
from litellm.types.llms.openai import (
CancelBatchRequest,
CreateBatchRequest,
RetrieveBatchRequest,
)
from litellm.types.utils import LiteLLMBatch
from ..common_utils import BaseAzureLLM
class AzureBatchesAPI(BaseAzureLLM):
    """
    Azure methods to support for batches
    - create_batch()
    - retrieve_batch()
    - cancel_batch()
    - list_batch()
    """

    def __init__(self) -> None:
        super().__init__()

    def _resolve_azure_client(
        self,
        _is_async: bool,
        api_key: Optional[str],
        api_base: Optional[str],
        api_version: Optional[str],
        client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]],
        litellm_params: Optional[dict],
    ) -> Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]:
        """Resolve a (sync or async) client and fail fast when it is missing.

        Shared by all public batch methods; previously each one duplicated
        this resolution + None check.
        """
        azure_client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = self.get_azure_openai_client(
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            _is_async=_is_async,
            litellm_params=litellm_params or {},
        )
        if azure_client is None:
            raise ValueError(
                "OpenAI client is not initialized. Make sure api_key is passed or OPENAI_API_KEY is set in the environment."
            )
        return azure_client

    async def acreate_batch(
        self,
        create_batch_data: CreateBatchRequest,
        azure_client: Union[AsyncAzureOpenAI, AsyncOpenAI],
    ) -> LiteLLMBatch:
        """Create a batch with an async client."""
        response = await azure_client.batches.create(**create_batch_data)  # type: ignore[arg-type]
        return LiteLLMBatch(**response.model_dump())

    def create_batch(
        self,
        _is_async: bool,
        create_batch_data: CreateBatchRequest,
        api_key: Optional[str],
        api_base: Optional[str],
        api_version: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
    ) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]:
        """Create a batch; returns a coroutine when ``_is_async`` is True.

        ``timeout`` / ``max_retries`` are accepted for interface parity but
        unused here — client construction is handled upstream.
        """
        azure_client = self._resolve_azure_client(
            _is_async=_is_async,
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            litellm_params=litellm_params,
        )
        if _is_async is True:
            if not isinstance(azure_client, (AsyncAzureOpenAI, AsyncOpenAI)):
                raise ValueError(
                    "OpenAI client is not an instance of AsyncOpenAI. Make sure you passed an AsyncOpenAI client."
                )
            return self.acreate_batch(  # type: ignore
                create_batch_data=create_batch_data, azure_client=azure_client
            )
        # Fail fast with a clear error instead of the previous unchecked cast
        # (consistent with cancel_batch).
        if not isinstance(azure_client, (AzureOpenAI, OpenAI)):
            raise ValueError(
                "Azure client is not an instance of AzureOpenAI or OpenAI. Make sure you passed a sync client."
            )
        response = azure_client.batches.create(**create_batch_data)  # type: ignore[arg-type]
        return LiteLLMBatch(**response.model_dump())

    async def aretrieve_batch(
        self,
        retrieve_batch_data: RetrieveBatchRequest,
        client: Union[AsyncAzureOpenAI, AsyncOpenAI],
    ) -> LiteLLMBatch:
        """Retrieve a batch with an async client."""
        response = await client.batches.retrieve(**retrieve_batch_data)  # type: ignore[arg-type]
        return LiteLLMBatch(**response.model_dump())

    def retrieve_batch(
        self,
        _is_async: bool,
        retrieve_batch_data: RetrieveBatchRequest,
        api_key: Optional[str],
        api_base: Optional[str],
        api_version: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
    ):
        """Retrieve a batch; returns a coroutine when ``_is_async`` is True."""
        azure_client = self._resolve_azure_client(
            _is_async=_is_async,
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            litellm_params=litellm_params,
        )
        if _is_async is True:
            if not isinstance(azure_client, (AsyncAzureOpenAI, AsyncOpenAI)):
                raise ValueError(
                    "OpenAI client is not an instance of AsyncOpenAI. Make sure you passed an AsyncOpenAI client."
                )
            return self.aretrieve_batch(  # type: ignore
                retrieve_batch_data=retrieve_batch_data, client=azure_client
            )
        # Fail fast with a clear error instead of the previous unchecked cast
        # (consistent with cancel_batch).
        if not isinstance(azure_client, (AzureOpenAI, OpenAI)):
            raise ValueError(
                "Azure client is not an instance of AzureOpenAI or OpenAI. Make sure you passed a sync client."
            )
        response = azure_client.batches.retrieve(**retrieve_batch_data)
        return LiteLLMBatch(**response.model_dump())

    async def acancel_batch(
        self,
        cancel_batch_data: CancelBatchRequest,
        client: Union[AsyncAzureOpenAI, AsyncOpenAI],
    ) -> LiteLLMBatch:
        """Cancel a batch with an async client."""
        response = await client.batches.cancel(**cancel_batch_data)
        return LiteLLMBatch(**response.model_dump())

    def cancel_batch(
        self,
        _is_async: bool,
        cancel_batch_data: CancelBatchRequest,
        api_key: Optional[str],
        api_base: Optional[str],
        api_version: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
    ):
        """Cancel a batch; returns a coroutine when ``_is_async`` is True."""
        azure_client = self._resolve_azure_client(
            _is_async=_is_async,
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            litellm_params=litellm_params,
        )
        if _is_async is True:
            if not isinstance(azure_client, (AsyncAzureOpenAI, AsyncOpenAI)):
                raise ValueError(
                    "Azure client is not an instance of AsyncAzureOpenAI or AsyncOpenAI. Make sure you passed an async client."
                )
            return self.acancel_batch(  # type: ignore
                cancel_batch_data=cancel_batch_data, client=azure_client
            )
        # At this point, azure_client is guaranteed to be a sync client
        if not isinstance(azure_client, (AzureOpenAI, OpenAI)):
            raise ValueError(
                "Azure client is not an instance of AzureOpenAI or OpenAI. Make sure you passed a sync client."
            )
        response = azure_client.batches.cancel(**cancel_batch_data)
        return LiteLLMBatch(**response.model_dump())

    async def alist_batches(
        self,
        client: Union[AsyncAzureOpenAI, AsyncOpenAI],
        after: Optional[str] = None,
        limit: Optional[int] = None,
    ):
        """List batches with an async client (raw SDK page object)."""
        response = await client.batches.list(after=after, limit=limit)  # type: ignore
        return response

    def list_batches(
        self,
        _is_async: bool,
        api_key: Optional[str],
        api_base: Optional[str],
        api_version: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        after: Optional[str] = None,
        limit: Optional[int] = None,
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
    ):
        """List batches; returns a coroutine when ``_is_async`` is True."""
        azure_client = self._resolve_azure_client(
            _is_async=_is_async,
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            litellm_params=litellm_params,
        )
        if _is_async is True:
            if not isinstance(azure_client, (AsyncAzureOpenAI, AsyncOpenAI)):
                raise ValueError(
                    "OpenAI client is not an instance of AsyncOpenAI. Make sure you passed an AsyncOpenAI client."
                )
            return self.alist_batches(  # type: ignore
                client=azure_client, after=after, limit=limit
            )
        # Fail fast with a clear error on the sync path
        # (consistent with cancel_batch; previously unchecked).
        if not isinstance(azure_client, (AzureOpenAI, OpenAI)):
            raise ValueError(
                "Azure client is not an instance of AzureOpenAI or OpenAI. Make sure you passed a sync client."
            )
        response = azure_client.batches.list(after=after, limit=limit)  # type: ignore
        return response

View File

@@ -0,0 +1,160 @@
"""Support for Azure OpenAI gpt-5 model family."""
from typing import List
import litellm
from litellm.exceptions import UnsupportedParamsError
from litellm.llms.openai.chat.gpt_5_transformation import (
OpenAIGPT5Config,
_get_effort_level,
)
from litellm.types.llms.openai import AllMessageValues
from .gpt_transformation import AzureOpenAIConfig
class AzureOpenAIGPT5Config(AzureOpenAIConfig, OpenAIGPT5Config):
    """Azure specific handling for gpt-5 models."""

    # Route prefix that lets users force gpt-5 handling for Azure deployments
    # whose name does not contain "gpt-5" (e.g. "gpt5_series/my-deployment").
    GPT5_SERIES_ROUTE = "gpt5_series/"

    @classmethod
    def _supports_reasoning_effort_level(cls, model: str, level: str) -> bool:
        """Override to handle gpt5_series/ prefix used for Azure routing.

        The parent class calls ``_supports_factory(model, custom_llm_provider=None)``
        which fails to resolve ``gpt5_series/gpt-5.1`` to the correct Azure model
        entry. Strip the prefix and prepend ``azure/`` so the lookup finds
        ``azure/gpt-5.1`` in model_prices_and_context_window.json.
        """
        if model.startswith(cls.GPT5_SERIES_ROUTE):
            # "gpt5_series/gpt-5.1" -> "azure/gpt-5.1"
            model = "azure/" + model[len(cls.GPT5_SERIES_ROUTE) :]
        elif not model.startswith("azure/"):
            # Bare deployment name -> qualify with the azure/ provider prefix.
            model = "azure/" + model
        return super()._supports_reasoning_effort_level(model, level)

    @classmethod
    def is_model_gpt_5_model(cls, model: str) -> bool:
        """Check if the Azure model string refers to a gpt-5 variant.

        Accepts both explicit gpt-5 model names and the ``gpt5_series/`` prefix
        used for manual routing.
        """
        # gpt-5-chat* is a chat model and shouldn't go through GPT-5 reasoning restrictions.
        return (
            "gpt-5" in model and "gpt-5-chat" not in model
        ) or "gpt5_series" in model

    def get_supported_openai_params(self, model: str) -> List[str]:
        """Get supported parameters for Azure OpenAI GPT-5 models.

        Azure OpenAI GPT-5.2/5.4 models support logprobs, unlike OpenAI's GPT-5.
        This overrides the parent class to add logprobs support back for gpt-5.2+.

        Reference:
        - Tested with Azure OpenAI GPT-5.2 (api-version: 2025-01-01-preview)
        - Azure returns logprobs successfully despite Microsoft's general
          documentation stating reasoning models don't support it.
        """
        # Explicit parent-class call (not super()) to pick the OpenAI GPT-5
        # param list rather than the generic AzureOpenAIConfig one.
        params = OpenAIGPT5Config.get_supported_openai_params(self, model=model)
        # Azure supports tool_choice for GPT-5 deployments, but the base GPT-5 config
        # can drop it when the deployment name isn't in the OpenAI model registry.
        if "tool_choice" not in params:
            params.append("tool_choice")
        # Only gpt-5.2+ has been verified to support logprobs on Azure.
        # The base OpenAI class includes logprobs for gpt-5.1+, but Azure
        # hasn't verified support for gpt-5.1, so remove them unless gpt-5.2/5.4+.
        # NOTE: supporting reasoning_effort='none' is used here as a proxy for
        # "gpt-5.1 or newer" — presumably matches the cost-map entries; verify.
        if self._supports_reasoning_effort_level(
            model, "none"
        ) and not self.is_model_gpt_5_2_model(model):
            params = [p for p in params if p not in ["logprobs", "top_logprobs"]]
        elif self.is_model_gpt_5_2_model(model):
            # NOTE(review): extend() may append duplicates if the base list
            # already contains logprobs/top_logprobs — harmless for membership
            # checks, but worth confirming.
            azure_supported_params = ["logprobs", "top_logprobs"]
            params.extend(azure_supported_params)
        return params

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
        api_version: str = "",
    ) -> dict:
        """Map OpenAI params to Azure GPT-5, handling reasoning_effort='none'.

        Drops (or rejects) reasoning_effort='none' for models that do not
        support it, then defers to the OpenAI GPT-5 mapping.
        """
        # reasoning_effort may arrive via either dict; non_default_params wins.
        reasoning_effort_value = non_default_params.get(
            "reasoning_effort"
        ) or optional_params.get("reasoning_effort")
        effective_effort = _get_effort_level(reasoning_effort_value)

        # gpt-5.1/5.2/5.4 support reasoning_effort='none', but other gpt-5 models don't
        # See: https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/reasoning
        supports_none = self._supports_reasoning_effort_level(model, "none")
        if effective_effort == "none" and not supports_none:
            if litellm.drop_params is True or (
                drop_params is not None and drop_params is True
            ):
                # Copy before popping so the caller's dicts are not mutated.
                non_default_params = non_default_params.copy()
                optional_params = optional_params.copy()
                if (
                    _get_effort_level(non_default_params.get("reasoning_effort"))
                    == "none"
                ):
                    non_default_params.pop("reasoning_effort")
                if _get_effort_level(optional_params.get("reasoning_effort")) == "none":
                    optional_params.pop("reasoning_effort")
            else:
                raise UnsupportedParamsError(
                    status_code=400,
                    message=(
                        "Azure OpenAI does not support reasoning_effort='none' for this model. "
                        "Supported values are: 'low', 'medium', and 'high'. "
                        "To drop this parameter, set `litellm.drop_params=True` or for proxy:\n\n"
                        "`litellm_settings:\n  drop_params: true`\n"
                        "Issue: https://github.com/BerriAI/litellm/issues/16704"
                    ),
                )
        result = OpenAIGPT5Config.map_openai_params(
            self,
            non_default_params=non_default_params,
            optional_params=optional_params,
            model=model,
            drop_params=drop_params,
        )
        # Only drop reasoning_effort='none' for models that don't support it
        result_effort = _get_effort_level(result.get("reasoning_effort"))
        if result_effort == "none" and not supports_none:
            result.pop("reasoning_effort")
        # Azure Chat Completions: gpt-5.4+ does not support tools + reasoning together.
        # Drop reasoning_effort when both are present (OpenAI routes to Responses API; Azure does not).
        if self.is_model_gpt_5_4_plus_model(model):
            has_tools = bool(
                non_default_params.get("tools") or optional_params.get("tools")
            )
            if has_tools and result_effort not in (None, "none"):
                result.pop("reasoning_effort", None)
        return result

    def transform_request(
        self,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        """Strip the gpt5_series/ routing prefix before delegating upstream."""
        model = model.replace(self.GPT5_SERIES_ROUTE, "")
        return super().transform_request(
            model=model,
            messages=messages,
            optional_params=optional_params,
            litellm_params=litellm_params,
            headers=headers,
        )

View File

@@ -0,0 +1,334 @@
from typing import TYPE_CHECKING, Any, List, Optional, Union
from httpx._models import Headers, Response
import litellm
from litellm.litellm_core_utils.prompt_templates.factory import (
convert_to_azure_openai_messages,
)
from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.types.llms.azure import (
API_VERSION_MONTH_SUPPORTED_RESPONSE_FORMAT,
API_VERSION_YEAR_SUPPORTED_RESPONSE_FORMAT,
)
from litellm.types.utils import ModelResponse
from ....exceptions import UnsupportedParamsError
from ....types.llms.openai import AllMessageValues
from ...base_llm.chat.transformation import BaseConfig
from ..common_utils import AzureOpenAIError
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
LoggingClass = LiteLLMLoggingObj
else:
LoggingClass = Any
class AzureOpenAIConfig(BaseConfig):
    """
    Reference: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions

    The class `AzureOpenAIConfig` provides configuration for the OpenAI's Chat API interface, for use with Azure. Below are the parameters::

    - `frequency_penalty` (number or null): Defaults to 0. Allows a value between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, thereby minimizing repetition.

    - `function_call` (string or object): This optional parameter controls how the model calls functions.

    - `functions` (array): An optional parameter. It is a list of functions for which the model may generate JSON inputs.

    - `logit_bias` (map): This optional parameter modifies the likelihood of specified tokens appearing in the completion.

    - `max_tokens` (integer or null): This optional parameter helps to set the maximum number of tokens to generate in the chat completion.

    - `n` (integer or null): This optional parameter helps to set how many chat completion choices to generate for each input message.

    - `presence_penalty` (number or null): Defaults to 0. It penalizes new tokens based on if they appear in the text so far, hence increasing the model's likelihood to talk about new topics.

    - `stop` (string / array / null): Specifies up to 4 sequences where the API will stop generating further tokens.

    - `temperature` (number or null): Defines the sampling temperature to use, varying between 0 and 2.

    - `top_p` (number or null): An alternative to sampling with temperature, used for nucleus sampling.
    """

    def __init__(
        self,
        frequency_penalty: Optional[int] = None,
        function_call: Optional[Union[str, dict]] = None,
        functions: Optional[list] = None,
        logit_bias: Optional[dict] = None,
        max_tokens: Optional[int] = None,
        n: Optional[int] = None,
        presence_penalty: Optional[int] = None,
        stop: Optional[Union[str, list]] = None,
        temperature: Optional[int] = None,
        top_p: Optional[int] = None,
    ) -> None:
        # litellm config convention: promote provided values to class-level
        # attributes (shared across instances, read back via get_config()).
        locals_ = locals().copy()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        return super().get_config()

    def get_supported_openai_params(self, model: str) -> List[str]:
        """Return the OpenAI params Azure chat completions accept.

        FIX: 'tools' and 'tool_choice' previously appeared twice in this list;
        duplicates are harmless for membership checks but misleading.
        """
        return [
            "temperature",
            "n",
            "stream",
            "stream_options",
            "stop",
            "max_tokens",
            "max_completion_tokens",
            "tools",
            "tool_choice",
            "presence_penalty",
            "frequency_penalty",
            "logit_bias",
            "user",
            "function_call",
            "functions",
            "top_p",
            "logprobs",
            "top_logprobs",
            "response_format",
            "seed",
            "extra_headers",
            "parallel_tool_calls",
            "prediction",
            "modalities",
            "audio",
            "web_search_options",
            "prompt_cache_key",
            "store",
        ]

    def _is_response_format_supported_model(self, model: str) -> bool:
        """
        Determines if the model supports response_format.

        Normalizes deployment-style names (e.g. gpt-3-5-turbo -> gpt-3.5-turbo)
        and returns False only for the gpt-3.5 / gpt-35 family; every other
        model is assumed to support response_format.
        """
        import re

        # Normalize model name: e.g., gpt-3-5-turbo -> gpt-3.5-turbo
        normalized_model = re.sub(r"(\d)-(\d)", r"\1.\2", model)
        if "gpt-3.5" in normalized_model or "gpt-35" in model:
            return False
        return True

    def _is_response_format_supported_api_version(
        self, api_version_year: str, api_version_month: str
    ) -> bool:
        """
        - check if api_version is supported for response_format
        - returns True if the API version is equal to or newer than the supported version
        """
        api_year = int(api_version_year)
        api_month = int(api_version_month)
        supported_year = int(API_VERSION_YEAR_SUPPORTED_RESPONSE_FORMAT)
        supported_month = int(API_VERSION_MONTH_SUPPORTED_RESPONSE_FORMAT)

        # If the year is greater than supported year, it's definitely supported
        if api_year > supported_year:
            return True
        # If the year is less than supported year, it's not supported
        elif api_year < supported_year:
            return False
        # If same year, check if month is >= supported month
        else:
            return api_month >= supported_month

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
        api_version: str = "",
    ) -> dict:
        """Map OpenAI params into `optional_params` for an Azure call.

        Applies api-version gating for `tool_choice` and `response_format`;
        all other supported params are passed through unchanged.
        """
        supported_openai_params = self.get_supported_openai_params(model)

        # Parse "YYYY-MM-DD[-preview]" into components; unparseable versions
        # skip the version gating below.
        api_version_times = api_version.split("-")
        if len(api_version_times) >= 3:
            api_version_year = api_version_times[0]
            api_version_month = api_version_times[1]
            api_version_day = api_version_times[2]
        else:
            api_version_year = None
            api_version_month = None
            api_version_day = None
        for param, value in non_default_params.items():
            if param == "tool_choice":
                """
                This parameter requires API version 2023-12-01-preview or later

                tool_choice='required' is not supported as of 2024-05-01-preview
                """
                ## check if api version supports this param ##
                if (
                    api_version_year is None
                    or api_version_month is None
                    or api_version_day is None
                ):
                    optional_params["tool_choice"] = value
                else:
                    # Lexicographic string comparison is safe here: the
                    # components are fixed-width, zero-padded digits.
                    if (
                        api_version_year < "2023"
                        or (api_version_year == "2023" and api_version_month < "12")
                        or (
                            api_version_year == "2023"
                            and api_version_month == "12"
                            and api_version_day < "01"
                        )
                    ):
                        if litellm.drop_params is True or (
                            drop_params is not None and drop_params is True
                        ):
                            pass
                        else:
                            raise UnsupportedParamsError(
                                status_code=400,
                                message=f"""Azure does not support 'tool_choice', for api_version={api_version}. Bump your API version to '2023-12-01-preview' or later. This parameter requires 'api_version="2023-12-01-preview"' or later. Azure API Reference: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions""",
                            )
                    elif value == "required" and (
                        api_version_year == "2024" and api_version_month <= "05"
                    ):  ## check if tool_choice value is supported ##
                        if litellm.drop_params is True or (
                            drop_params is not None and drop_params is True
                        ):
                            pass
                        else:
                            raise UnsupportedParamsError(
                                status_code=400,
                                message=f"Azure does not support '{value}' as a {param} param, for api_version={api_version}. To drop 'tool_choice=required' for calls with this Azure API version, set `litellm.drop_params=True` or for proxy:\n\n`litellm_settings:\n drop_params: true`\nAzure API Reference: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions",
                            )
                    else:
                        optional_params["tool_choice"] = value
            elif param == "response_format" and isinstance(value, dict):
                _is_response_format_supported_model = (
                    self._is_response_format_supported_model(model)
                )

                # Unknown api_version -> optimistically assume support.
                if api_version_year is None or api_version_month is None:
                    is_response_format_supported_api_version = True
                else:
                    is_response_format_supported_api_version = (
                        self._is_response_format_supported_api_version(
                            api_version_year, api_version_month
                        )
                    )
                is_response_format_supported = (
                    is_response_format_supported_api_version
                    and _is_response_format_supported_model
                )
                # When unsupported, the base-class helper converts
                # response_format into a tool call instead.
                optional_params = self._add_response_format_to_tools(
                    optional_params=optional_params,
                    value=value,
                    is_response_format_supported=is_response_format_supported,
                )
            elif param == "tools" and isinstance(value, list):
                # Merge with tools possibly added by response_format handling.
                optional_params.setdefault("tools", [])
                optional_params["tools"].extend(value)
            elif param in supported_openai_params:
                optional_params[param] = value

        return optional_params

    def transform_request(
        self,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        """Build the Azure request body (messages are Azure-normalized first)."""
        messages = convert_to_azure_openai_messages(messages)
        return {
            "model": model,
            "messages": messages,
            **optional_params,
        }

    def transform_response(
        self,
        model: str,
        raw_response: Response,
        model_response: ModelResponse,
        logging_obj: LoggingClass,
        request_data: dict,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        encoding: Any,
        api_key: Optional[str] = None,
        json_mode: Optional[bool] = None,
    ) -> ModelResponse:
        raise NotImplementedError(
            "Azure OpenAI handler.py has custom logic for transforming response, as it uses the OpenAI SDK."
        )

    def get_mapped_special_auth_params(self) -> dict:
        """Map litellm auth param names to their Azure equivalents."""
        return {"token": "azure_ad_token"}

    def map_special_auth_params(self, non_default_params: dict, optional_params: dict):
        """Copy Azure-specific auth params (token -> azure_ad_token)."""
        for param, value in non_default_params.items():
            if param == "token":
                optional_params["azure_ad_token"] = value
        return optional_params

    def get_eu_regions(self) -> List[str]:
        """
        Source: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-model-availability
        """
        return ["europe", "sweden", "switzerland", "france", "uk"]

    def get_us_regions(self) -> List[str]:
        """
        Source: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-model-availability
        """
        return [
            "us",
            "eastus",
            "eastus2",
            "eastus2euap",
            "eastus3",
            "southcentralus",
            "westus",
            "westus2",
            "westus3",
            "westus4",
        ]

    def get_error_class(
        self, error_message: str, status_code: int, headers: Union[dict, Headers]
    ) -> BaseLLMException:
        """Wrap provider errors in the Azure-specific exception type."""
        return AzureOpenAIError(
            message=error_message, status_code=status_code, headers=headers
        )

    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        raise NotImplementedError(
            "Azure OpenAI has custom logic for validating environment, as it uses the OpenAI SDK."
        )

View File

@@ -0,0 +1,77 @@
"""
Handler file for calls to Azure OpenAI's o1/o3 family of models
Written separately to handle faking streaming for o1 and o3 models.
"""
from typing import TYPE_CHECKING, Any, Callable, Optional, Union
import httpx
from litellm.types.utils import ModelResponse
from ...openai.openai import OpenAIChatCompletion
from ..common_utils import BaseAzureLLM
if TYPE_CHECKING:
from aiohttp import ClientSession
class AzureOpenAIO1ChatCompletion(BaseAzureLLM, OpenAIChatCompletion):
    """Azure chat-completion handler for the o1/o3 model family.

    Resolves an Azure OpenAI client (async when ``acompletion`` is set) and
    then defers entirely to the generic OpenAI completion path.
    """

    def completion(
        self,
        model_response: ModelResponse,
        timeout: Union[float, httpx.Timeout],
        optional_params: dict,
        litellm_params: dict,
        logging_obj: Any,
        model: Optional[str] = None,
        messages: Optional[list] = None,
        print_verbose: Optional[Callable] = None,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        api_version: Optional[str] = None,
        dynamic_params: Optional[bool] = None,
        azure_ad_token: Optional[str] = None,
        acompletion: bool = False,
        logger_fn=None,
        headers: Optional[dict] = None,
        custom_prompt_dict: dict = {},
        client=None,
        organization: Optional[str] = None,
        custom_llm_provider: Optional[str] = None,
        drop_params: Optional[bool] = None,
        shared_session: Optional["ClientSession"] = None,
    ):
        """Swap in an Azure-specific client, then run the shared flow."""
        azure_client = self.get_azure_openai_client(
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            _is_async=acompletion,
            litellm_params=litellm_params,
        )
        return super().completion(
            model=model,
            messages=messages,
            model_response=model_response,
            timeout=timeout,
            optional_params=optional_params,
            litellm_params=litellm_params,
            logging_obj=logging_obj,
            print_verbose=print_verbose,
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            dynamic_params=dynamic_params,
            azure_ad_token=azure_ad_token,
            acompletion=acompletion,
            logger_fn=logger_fn,
            headers=headers,
            custom_prompt_dict=custom_prompt_dict,
            client=azure_client,
            organization=organization,
            custom_llm_provider=custom_llm_provider,
            drop_params=drop_params,
            shared_session=shared_session,
        )

View File

@@ -0,0 +1,123 @@
"""
Support for o1 and o3 model families
https://platform.openai.com/docs/guides/reasoning
Translations handled by LiteLLM:
- modalities: image => drop param (if user opts in to dropping param)
- role: system ==> translate to role 'user'
- streaming => faked by LiteLLM
- Tools, response_format => drop param (if user opts in to dropping param)
- Logprobs => drop param (if user opts in to dropping param)
- Temperature => drop param (if user opts in to dropping param)
"""
from typing import List, Optional
import litellm
from litellm import verbose_logger
from litellm.types.llms.openai import AllMessageValues
from litellm.utils import get_model_info, supports_reasoning
from ...openai.chat.o_series_transformation import OpenAIOSeriesConfig
class AzureOpenAIO1Config(OpenAIOSeriesConfig):
    """Azure-specific parameter/streaming configuration for the o-series
    (o1/o3/o4) reasoning model family."""

    def get_supported_openai_params(self, model: str) -> list:
        """
        Get the supported OpenAI params for the Azure O-Series models
        """
        base_params = litellm.OpenAIGPTConfig().get_supported_openai_params(
            model=model
        )
        unsupported = [
            "logprobs",
            "top_p",
            "presence_penalty",
            "frequency_penalty",
            "top_logprobs",
        ]
        base_params.extend(self._get_o_series_only_params(model))
        return [p for p in base_params if p not in unsupported]

    def _get_o_series_only_params(self, model: str) -> list:
        """
        Helper function to get the o-series only params for the model

        - reasoning_effort
        """
        # Unknown deployment names are assumed to support reasoning: Azure
        # users commonly deploy o-series models under custom names.
        if model not in litellm.model_list_set:
            return ["reasoning_effort"]
        # Known model: defer to the cost-map's reasoning capability flag.
        return ["reasoning_effort"] if supports_reasoning(model) else []

    def should_fake_stream(
        self,
        model: Optional[str],
        stream: Optional[bool],
        custom_llm_provider: Optional[str] = None,
    ) -> bool:
        """
        Currently no Azure O Series models support native streaming.
        """
        if stream is not True:
            return False
        # o3 models support streaming - https://github.com/BerriAI/litellm/issues/8274
        if model and "o3" in model:
            return False
        if model is None:
            return True
        try:
            # allow user to override default with model_info={"supports_native_streaming": true}
            info = get_model_info(
                model=model, custom_llm_provider=custom_llm_provider
            )
        except Exception as e:
            verbose_logger.debug(
                f"Error getting model info in AzureOpenAIO1Config: {e}"
            )
            return True
        return info.get("supports_native_streaming") is not True

    def is_o_series_model(self, model: str) -> bool:
        """True for o1/o3/o4 names or the manual o_series/ routing prefix."""
        return any(tag in model for tag in ("o1", "o3", "o4", "o_series/"))

    def transform_request(
        self,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        """Strip the manual routing prefix before delegating to the parent."""
        # handle o_series/my-random-deployment-name
        cleaned_model = model.replace("o_series/", "")
        return super().transform_request(
            cleaned_model, messages, optional_params, litellm_params, headers
        )

View File

@@ -0,0 +1,844 @@
import json
import os
from typing import Any, Callable, Dict, Literal, NamedTuple, Optional, Union, cast
import httpx
from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
import litellm
from litellm._logging import verbose_logger
from litellm.caching.caching import DualCache
from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.llms.openai.common_utils import BaseOpenAILLM
from litellm.secret_managers.get_azure_ad_token_provider import (
get_azure_ad_token_provider,
)
from litellm.secret_managers.main import get_secret_str
from litellm.types.router import GenericLiteLLMParams
from litellm.utils import _add_path_to_api_base
# Process-wide cache for exchanged Azure AD access tokens, keyed by the auth
# inputs and expired via the token's own `expires_in` TTL.
azure_ad_cache = DualCache()
class AzureOpenAIError(BaseLLMException):
    """Provider-specific exception raised for Azure OpenAI failures."""

    def __init__(
        self,
        status_code,
        message,
        request: Optional[httpx.Request] = None,
        response: Optional[httpx.Response] = None,
        headers: Optional[Union[httpx.Headers, dict]] = None,
        body: Optional[dict] = None,
    ):
        # Forward everything unchanged to the shared LiteLLM base exception.
        base_kwargs = dict(
            status_code=status_code,
            message=message,
            request=request,
            response=response,
            headers=headers,
            body=body,
        )
        super().__init__(**base_kwargs)
def process_azure_headers(headers: Union[httpx.Headers, dict]) -> dict:
    """
    Normalize Azure response headers for LiteLLM consumers.

    Every header is re-emitted with an `llm_provider-` prefix; the four
    OpenAI-style rate-limit headers are additionally copied through under
    their original names (taking precedence in the merged dict).

    Args:
        headers: raw response headers (httpx.Headers or plain dict).
    Returns:
        dict with `llm_provider-*` copies of all headers plus any
        rate-limit headers under their unprefixed names.
    """
    # Data-driven instead of four copy-pasted if-blocks.
    rate_limit_headers = (
        "x-ratelimit-limit-requests",
        "x-ratelimit-remaining-requests",
        "x-ratelimit-limit-tokens",
        "x-ratelimit-remaining-tokens",
    )
    openai_headers = {
        name: headers[name] for name in rate_limit_headers if name in headers
    }
    llm_response_headers = {f"llm_provider-{k}": v for k, v in headers.items()}
    # openai_headers last so the unprefixed rate-limit keys win on collision.
    return {**llm_response_headers, **openai_headers}
def get_azure_ad_token_from_entra_id(
    tenant_id: str,
    client_id: str,
    client_secret: str,
    scope: str = "https://cognitiveservices.azure.com/.default",
) -> Callable[[], str]:
    """
    Get an Azure AD bearer-token provider via the Entra ID
    client-credentials flow.

    Each credential may be passed directly or as an "os.environ/<VAR>"
    reference, which is resolved through the secret manager.

    Args:
        tenant_id: Azure tenant id (or "os.environ/..." reference).
        client_id: Azure application (client) id (or "os.environ/..." reference).
        client_secret: client secret (or "os.environ/..." reference).
        scope: token scope; defaults to the Cognitive Services scope.
    Returns:
        Callable that returns a bearer token.
    Raises:
        ValueError: if any of the three credentials resolves to None.
    """
    from azure.identity import ClientSecretCredential, get_bearer_token_provider
    verbose_logger.debug("Getting Azure AD Token from Entra ID")
    def _resolve(value: str) -> Optional[str]:
        # Allow "os.environ/VAR" style indirection via the secret manager.
        return get_secret_str(value) if value.startswith("os.environ/") else value
    _tenant_id = _resolve(tenant_id)
    _client_id = _resolve(client_id)
    _client_secret = _resolve(client_secret)
    # SECURITY fix: never log the client secret itself — only whether it is set.
    verbose_logger.debug(
        "tenant_id %s, client_id %s, client_secret set: %s",
        _tenant_id,
        _client_id,
        _client_secret is not None,
    )
    if _tenant_id is None or _client_id is None or _client_secret is None:
        raise ValueError("tenant_id, client_id, and client_secret must be provided")
    credential = ClientSecretCredential(_tenant_id, _client_id, _client_secret)
    verbose_logger.debug("credential %s", credential)
    token_provider = get_bearer_token_provider(credential, scope)
    verbose_logger.debug("token_provider %s", token_provider)
    return token_provider
def get_azure_ad_token_from_username_password(
    client_id: str,
    azure_username: str,
    azure_password: str,
    scope: str = "https://cognitiveservices.azure.com/.default",
) -> Callable[[], str]:
    """
    Get an Azure AD bearer-token provider via the username/password flow.

    Args:
        client_id: Azure application (client) id.
        azure_username: account username.
        azure_password: account password.
        scope: token scope; defaults to the Cognitive Services scope.
    Returns:
        Callable that returns a bearer token.
    """
    from azure.identity import UsernamePasswordCredential, get_bearer_token_provider
    # SECURITY fix: never log the password itself — only whether it is set.
    verbose_logger.debug(
        "client_id %s, azure_username %s, azure_password set: %s",
        client_id,
        azure_username,
        azure_password is not None,
    )
    credential = UsernamePasswordCredential(
        client_id=client_id,
        username=azure_username,
        password=azure_password,
    )
    verbose_logger.debug("credential %s", credential)
    token_provider = get_bearer_token_provider(credential, scope)
    verbose_logger.debug("token_provider %s", token_provider)
    return token_provider
def get_azure_ad_token_from_oidc(
    azure_ad_token: str,
    azure_client_id: Optional[str] = None,
    azure_tenant_id: Optional[str] = None,
    scope: Optional[str] = None,
) -> str:
    """
    Exchange an OIDC token (federated credential) for an Azure AD access token.

    Args:
        azure_ad_token: "oidc/..." reference, resolved via the secret manager.
        azure_client_id: Azure client id; falls back to AZURE_CLIENT_ID env var.
        azure_tenant_id: Azure tenant id; falls back to AZURE_TENANT_ID env var.
        scope: token scope; defaults to the Cognitive Services scope.
    Returns:
        The Azure AD access token string.
    Raises:
        AzureOpenAIError: if required ids are missing, the OIDC token cannot
            be resolved, or the token-exchange request fails or returns an
            incomplete payload.
    """
    if scope is None:
        scope = "https://cognitiveservices.azure.com/.default"
    azure_authority_host = os.getenv(
        "AZURE_AUTHORITY_HOST", "https://login.microsoftonline.com"
    )
    azure_client_id = azure_client_id or os.getenv("AZURE_CLIENT_ID")
    azure_tenant_id = azure_tenant_id or os.getenv("AZURE_TENANT_ID")
    if azure_client_id is None or azure_tenant_id is None:
        raise AzureOpenAIError(
            status_code=422,
            message="AZURE_CLIENT_ID and AZURE_TENANT_ID must be set",
        )
    oidc_token = get_secret_str(azure_ad_token)
    if oidc_token is None:
        raise AzureOpenAIError(
            status_code=401,
            message="OIDC token could not be retrieved from secret manager.",
        )
    # Cache key covers every input that can change the resulting access token.
    azure_ad_token_cache_key = json.dumps(
        {
            "azure_client_id": azure_client_id,
            "azure_tenant_id": azure_tenant_id,
            "azure_authority_host": azure_authority_host,
            "oidc_token": oidc_token,
        }
    )
    azure_ad_token_access_token = azure_ad_cache.get_cache(azure_ad_token_cache_key)
    if azure_ad_token_access_token is not None:
        return azure_ad_token_access_token
    client = litellm.module_level_client
    # Client-credentials grant with a federated JWT client assertion.
    req_token = client.post(
        f"{azure_authority_host}/{azure_tenant_id}/oauth2/v2.0/token",
        data={
            "client_id": azure_client_id,
            "grant_type": "client_credentials",
            "scope": scope,
            "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
            "client_assertion": oidc_token,
        },
    )
    if req_token.status_code != 200:
        raise AzureOpenAIError(
            status_code=req_token.status_code,
            message=req_token.text,
        )
    azure_ad_token_json = req_token.json()
    azure_ad_token_access_token = azure_ad_token_json.get("access_token", None)
    azure_ad_token_expires_in = azure_ad_token_json.get("expires_in", None)
    if azure_ad_token_access_token is None:
        raise AzureOpenAIError(
            status_code=422, message="Azure AD Token access_token not returned"
        )
    if azure_ad_token_expires_in is None:
        raise AzureOpenAIError(
            status_code=422, message="Azure AD Token expires_in not returned"
        )
    # Cache until expiry so repeated calls avoid re-hitting the token endpoint.
    azure_ad_cache.set_cache(
        key=azure_ad_token_cache_key,
        value=azure_ad_token_access_token,
        ttl=azure_ad_token_expires_in,
    )
    return azure_ad_token_access_token
def select_azure_base_url_or_endpoint(azure_client_params: dict):
    """
    Decide whether the configured endpoint is an `azure_endpoint` or a full
    `base_url` for the Azure OpenAI SDK client.

    If the endpoint already contains "/openai/deployments" it is a complete
    base URL, so it is moved from the "azure_endpoint" key to "base_url".
    The dict is modified in place and returned.
    """
    endpoint = azure_client_params.get("azure_endpoint", None)
    # see : https://github.com/openai/openai-python/blob/3d61ed42aba652b547029095a7eb269ad4e1e957/src/openai/lib/azure.py#L192
    if endpoint is not None and "/openai/deployments" in endpoint:
        # this is base_url, not an azure_endpoint
        azure_client_params["base_url"] = azure_client_params.pop("azure_endpoint")
    return azure_client_params
def get_azure_ad_token(
    litellm_params: GenericLiteLLMParams,
) -> Optional[str]:
    """
    Get an Azure AD token via whichever authentication method is configured.

    Tried in order:
    1. An existing token provider / token from `litellm_params`
    2. Entra ID (tenant_id + client_id + client_secret)
    3. Username and password
    4. OIDC token exchange
    5. Service principal with secret workflow (if token refresh enabled)
    6. DefaultAzureCredential as a last resort

    Args:
        litellm_params: authentication parameters
            - azure_ad_token_provider: Optional callable that returns a token
            - azure_ad_token: Optional existing token
            - tenant_id / client_id / client_secret
            - azure_username / azure_password
    Returns:
        Azure AD token as string if successful, None otherwise.
    Raises:
        TypeError: if a token provider returns a non-string value.
        RuntimeError: if a token provider raises any other error.
    """
    # Extract parameters
    # Use `or` instead of default parameter to handle cases where key exists but value is None
    azure_ad_token_provider = litellm_params.get("azure_ad_token_provider")
    azure_ad_token = litellm_params.get("azure_ad_token") or get_secret_str(
        "AZURE_AD_TOKEN"
    )
    tenant_id = litellm_params.get("tenant_id") or os.getenv("AZURE_TENANT_ID")
    client_id = litellm_params.get("client_id") or os.getenv("AZURE_CLIENT_ID")
    client_secret = litellm_params.get("client_secret") or os.getenv(
        "AZURE_CLIENT_SECRET"
    )
    azure_username = litellm_params.get("azure_username") or os.getenv("AZURE_USERNAME")
    azure_password = litellm_params.get("azure_password") or os.getenv("AZURE_PASSWORD")
    scope = litellm_params.get("azure_scope") or os.getenv(
        "AZURE_SCOPE", "https://cognitiveservices.azure.com/.default"
    )
    if scope is None:
        scope = "https://cognitiveservices.azure.com/.default"
    # Try to get token provider from Entra ID
    if azure_ad_token_provider is None and tenant_id and client_id and client_secret:
        verbose_logger.debug(
            "Using Azure AD Token Provider from Entra ID for Azure Auth"
        )
        azure_ad_token_provider = get_azure_ad_token_from_entra_id(
            tenant_id=tenant_id,
            client_id=client_id,
            client_secret=client_secret,
            scope=scope,
        )
    # Try to get token provider from username and password
    if (
        azure_ad_token_provider is None
        and azure_username
        and azure_password
        and client_id
    ):
        verbose_logger.debug("Using Azure Username and Password for Azure Auth")
        azure_ad_token_provider = get_azure_ad_token_from_username_password(
            azure_username=azure_username,
            azure_password=azure_password,
            client_id=client_id,
            scope=scope,
        )
    # Try to get token from OIDC
    if (
        client_id
        and tenant_id
        and azure_ad_token
        and azure_ad_token.startswith("oidc/")
    ):
        verbose_logger.debug("Using Azure OIDC Token for Azure Auth")
        azure_ad_token = get_azure_ad_token_from_oidc(
            azure_ad_token=azure_ad_token,
            azure_client_id=client_id,
            azure_tenant_id=tenant_id,
            scope=scope,
        )
    # Try to get token provider from service principal or DefaultAzureCredential
    # NOTE(review): this `elif` chains to the OIDC `if` above, so the
    # service-principal refresh path is skipped whenever the OIDC branch ran.
    elif (
        azure_ad_token_provider is None
        and litellm.enable_azure_ad_token_refresh is True
    ):
        verbose_logger.debug(
            "Using Azure AD token provider based on Service Principal with Secret workflow or DefaultAzureCredential for Azure Auth"
        )
        try:
            azure_ad_token_provider = get_azure_ad_token_provider(azure_scope=scope)
        except ValueError:
            verbose_logger.debug("Azure AD Token Provider could not be used.")
        except Exception as e:
            verbose_logger.error(
                f"Error calling Azure AD token provider: {str(e)}. Follow docs - https://docs.litellm.ai/docs/providers/azure/#azure-ad-token-refresh---defaultazurecredential"
            )
            raise e
    #########################################################
    # If litellm.enable_azure_ad_token_refresh is True and no other token provider is available,
    # try to get DefaultAzureCredential provider
    #########################################################
    if azure_ad_token_provider is None and azure_ad_token is None:
        azure_ad_token_provider = (
            BaseAzureLLM._try_get_default_azure_credential_provider(
                scope=scope,
            )
        )
    # Execute the token provider to get the token if available
    if azure_ad_token_provider and callable(azure_ad_token_provider):
        try:
            token = azure_ad_token_provider()
            if not isinstance(token, str):
                verbose_logger.error(
                    f"Azure AD token provider returned non-string value: {type(token)}"
                )
                raise TypeError(f"Azure AD token must be a string, got {type(token)}")
            else:
                azure_ad_token = token
        except TypeError:
            # Re-raise TypeError directly
            raise
        except Exception as e:
            verbose_logger.error(f"Error calling Azure AD token provider: {str(e)}")
            raise RuntimeError(f"Failed to get Azure AD token: {str(e)}") from e
    return azure_ad_token
class BaseAzureLLM(BaseOpenAILLM):
    """Shared Azure OpenAI client-construction and auth helpers for LiteLLM providers."""

    @staticmethod
    def _try_get_default_azure_credential_provider(
        scope: str,
    ) -> Optional[Callable[[], str]]:
        """
        Try to get DefaultAzureCredential provider
        Args:
            scope: Azure scope for the token
        Returns:
            Token provider callable if DefaultAzureCredential is enabled and available, None otherwise
        """
        from litellm.types.secret_managers.get_azure_ad_token_provider import (
            AzureCredentialType,
        )
        verbose_logger.debug("Attempting to use DefaultAzureCredential for Azure Auth")
        try:
            azure_ad_token_provider = get_azure_ad_token_provider(
                azure_scope=scope,
                azure_credential=AzureCredentialType.DefaultAzureCredential,
            )
            verbose_logger.debug(
                "Successfully obtained Azure AD token provider using DefaultAzureCredential"
            )
            return azure_ad_token_provider
        except Exception as e:
            # Best-effort: fall back silently; caller decides what happens without a provider.
            verbose_logger.debug(f"DefaultAzureCredential failed: {str(e)}")
            return None
    def get_azure_openai_client(
        self,
        api_key: Optional[str],
        api_base: Optional[str],
        api_version: Optional[str] = None,
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
        _is_async: bool = False,
        model: Optional[str] = None,
    ) -> Optional[Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]]:
        """
        Return an (Async)AzureOpenAI / (Async)OpenAI client for the given settings.

        Reuses `client` if supplied, otherwise serves from the in-memory client
        cache or constructs a new SDK client. For v1-style api_versions
        ("v1" / "latest" / "preview") a plain OpenAI client pointed at
        `{api_base}/openai/v1/` is used instead of AzureOpenAI.
        """
        openai_client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None
        # Snapshot of all call args — used as the client-cache key below.
        client_initialization_params: dict = locals()
        client_initialization_params["is_async"] = _is_async
        if client is None:
            cached_client = self.get_cached_openai_client(
                client_initialization_params=client_initialization_params,
                client_type="azure",
            )
            if cached_client:
                if isinstance(
                    cached_client, (AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI)
                ):
                    return cached_client
            azure_client_params = self.initialize_azure_sdk_client(
                litellm_params=litellm_params or {},
                api_key=api_key,
                api_base=api_base,
                model_name=model,
                api_version=api_version,
                is_async=_is_async,
            )
            # For Azure v1 API, use standard OpenAI client instead of AzureOpenAI
            # See: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
            if self._is_azure_v1_api_version(api_version):
                # Extract only params that OpenAI client accepts
                # Always use /openai/v1/ regardless of whether user passed "v1", "latest", or "preview"
                v1_params = {
                    "api_key": azure_client_params.get("api_key"),
                    "base_url": f"{api_base}/openai/v1/",
                }
                if "timeout" in azure_client_params:
                    v1_params["timeout"] = azure_client_params["timeout"]
                if "max_retries" in azure_client_params:
                    v1_params["max_retries"] = azure_client_params["max_retries"]
                if "http_client" in azure_client_params:
                    v1_params["http_client"] = azure_client_params["http_client"]
                verbose_logger.debug(
                    f"Using Azure v1 API with base_url: {v1_params['base_url']}"
                )
                if _is_async is True:
                    openai_client = AsyncOpenAI(**v1_params)  # type: ignore
                else:
                    openai_client = OpenAI(**v1_params)  # type: ignore
            else:
                # Traditional Azure API uses AzureOpenAI client
                if _is_async is True:
                    openai_client = AsyncAzureOpenAI(**azure_client_params)
                else:
                    openai_client = AzureOpenAI(**azure_client_params)  # type: ignore
        else:
            openai_client = client
            if (
                api_version is not None
                and isinstance(openai_client, (AzureOpenAI, AsyncAzureOpenAI))
                and isinstance(openai_client._custom_query, dict)
            ):
                # set api_version to version passed by user
                openai_client._custom_query.setdefault("api-version", api_version)
        # save client in-memory cache
        self.set_cached_openai_client(
            openai_client=openai_client,
            client_initialization_params=client_initialization_params,
            client_type="azure",
        )
        return openai_client
    def initialize_azure_sdk_client(
        self,
        litellm_params: dict,
        api_key: Optional[str],
        api_base: Optional[str],
        model_name: Optional[str],
        api_version: Optional[str],
        is_async: bool,
    ) -> dict:
        """
        Resolve auth + connection settings into kwargs for the AzureOpenAI client.

        Auth precedence: explicit api_key > Entra ID client-secret >
        username/password > OIDC token exchange > service-principal token
        refresh (when `litellm.enable_azure_ad_token_refresh` is set).
        """
        azure_ad_token_provider = litellm_params.get("azure_ad_token_provider")
        # If we have api_key, then we have higher priority
        azure_ad_token = litellm_params.get("azure_ad_token")
        # litellm_params sometimes contains the key, but the value is None
        # We should respect environment variables in this case
        tenant_id = self._resolve_env_var(
            litellm_params, "tenant_id", "AZURE_TENANT_ID"
        )
        client_id = self._resolve_env_var(
            litellm_params, "client_id", "AZURE_CLIENT_ID"
        )
        client_secret = self._resolve_env_var(
            litellm_params, "client_secret", "AZURE_CLIENT_SECRET"
        )
        azure_username = self._resolve_env_var(
            litellm_params, "azure_username", "AZURE_USERNAME"
        )
        azure_password = self._resolve_env_var(
            litellm_params, "azure_password", "AZURE_PASSWORD"
        )
        scope = self._resolve_env_var(litellm_params, "azure_scope", "AZURE_SCOPE")
        if scope is None:
            scope = "https://cognitiveservices.azure.com/.default"
        max_retries = litellm_params.get("max_retries")
        timeout = litellm_params.get("timeout")
        if (
            not api_key
            and azure_ad_token_provider is None
            and tenant_id
            and client_id
            and client_secret
        ):
            verbose_logger.debug(
                "Using Azure AD Token Provider from Entra ID for Azure Auth"
            )
            azure_ad_token_provider = get_azure_ad_token_from_entra_id(
                tenant_id=tenant_id,
                client_id=client_id,
                client_secret=client_secret,
                scope=scope,
            )
        if (
            azure_ad_token_provider is None
            and azure_username
            and azure_password
            and client_id
        ):
            verbose_logger.debug("Using Azure Username and Password for Azure Auth")
            azure_ad_token_provider = get_azure_ad_token_from_username_password(
                azure_username=azure_username,
                azure_password=azure_password,
                client_id=client_id,
                scope=scope,
            )
        if azure_ad_token is not None and azure_ad_token.startswith("oidc/"):
            verbose_logger.debug("Using Azure OIDC Token for Azure Auth")
            azure_ad_token = get_azure_ad_token_from_oidc(
                azure_ad_token=azure_ad_token,
                azure_client_id=client_id,
                azure_tenant_id=tenant_id,
                scope=scope,
            )
        # NOTE(review): this `elif` chains to the OIDC `if` above — the
        # service-principal refresh path is skipped whenever the OIDC branch ran.
        elif (
            not api_key
            and azure_ad_token_provider is None
            and litellm.enable_azure_ad_token_refresh is True
        ):
            verbose_logger.debug(
                "Using Azure AD token provider based on Service Principal with Secret workflow for Azure Auth"
            )
            try:
                azure_ad_token_provider = get_azure_ad_token_provider(
                    azure_scope=scope,
                )
            except ValueError:
                verbose_logger.debug("Azure AD Token Provider could not be used.")
        if api_version is None:
            api_version = os.getenv(
                "AZURE_API_VERSION", litellm.AZURE_DEFAULT_API_VERSION
            )
        _api_key = api_key
        if _api_key is not None and isinstance(_api_key, str):
            # only show first 8 chars of api_key in the debug log
            _api_key = _api_key[:8] + "*" * 15
        verbose_logger.debug(
            f"Initializing Azure OpenAI Client for {model_name}, Api Base: {str(api_base)}, Api Key:{_api_key}"
        )
        azure_client_params = {
            "api_key": api_key,
            "azure_endpoint": api_base,
            "api_version": api_version,
            "azure_ad_token": azure_ad_token,
            "azure_ad_token_provider": azure_ad_token_provider,
        }
        # init http client + SSL Verification settings
        if is_async is True:
            azure_client_params["http_client"] = self._get_async_http_client()
        else:
            azure_client_params["http_client"] = self._get_sync_http_client()
        if max_retries is not None:
            azure_client_params["max_retries"] = max_retries
        if timeout is not None:
            azure_client_params["timeout"] = timeout
        if azure_ad_token_provider is not None:
            azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider
        # this decides if we should set azure_endpoint or base_url on Azure OpenAI Client
        # required to support GPT-4 vision enhancements, since base_url needs to be set on Azure OpenAI Client
        azure_client_params = select_azure_base_url_or_endpoint(
            azure_client_params=azure_client_params
        )
        return azure_client_params
    def _init_azure_client_for_cloudflare_ai_gateway(
        self,
        api_base: str,
        model: str,
        api_version: str,
        max_retries: int,
        timeout: Union[float, httpx.Timeout],
        litellm_params: dict,
        api_key: Optional[str],
        azure_ad_token: Optional[str],
        azure_ad_token_provider: Optional[Callable[[], str]],
        acompletion: bool,
        client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None,
    ) -> Union[AzureOpenAI, AsyncAzureOpenAI]:
        """
        Build an (Async)AzureOpenAI client routed through Cloudflare's AI Gateway.

        The model/deployment name is appended to the gateway URL and baked into
        `base_url`, so callers pass the gateway base rather than per-request model paths.
        """
        ## build base url - assume api base includes resource name
        tenant_id = litellm_params.get("tenant_id", os.getenv("AZURE_TENANT_ID"))
        client_id = litellm_params.get("client_id", os.getenv("AZURE_CLIENT_ID"))
        scope = litellm_params.get(
            "azure_scope",
            os.getenv("AZURE_SCOPE", "https://cognitiveservices.azure.com/.default"),
        )
        if client is None:
            if not api_base.endswith("/"):
                api_base += "/"
            api_base += f"{model}"
            azure_client_params: Dict[str, Any] = {
                "api_version": api_version,
                "base_url": f"{api_base}",
                "http_client": litellm.client_session,
                "max_retries": max_retries,
                "timeout": timeout,
            }
            if api_key is not None:
                azure_client_params["api_key"] = api_key
            elif azure_ad_token is not None:
                if azure_ad_token.startswith("oidc/"):
                    azure_ad_token = get_azure_ad_token_from_oidc(
                        azure_ad_token=azure_ad_token,
                        azure_client_id=client_id,
                        azure_tenant_id=tenant_id,
                        scope=scope,
                    )
                azure_client_params["azure_ad_token"] = azure_ad_token
            if azure_ad_token_provider is not None:
                azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider
            if acompletion is True:
                client = AsyncAzureOpenAI(**azure_client_params)  # type: ignore
            else:
                client = AzureOpenAI(**azure_client_params)  # type: ignore
        return client
    @staticmethod
    def _base_validate_azure_environment(
        headers: dict, litellm_params: Optional[GenericLiteLLMParams]
    ) -> dict:
        """
        Ensure request headers carry Azure auth: keep an existing `api-key`,
        else resolve one from params/globals/env, else fall back to an
        Azure AD bearer token. Mutates and returns `headers`.
        """
        litellm_params = litellm_params or GenericLiteLLMParams()
        # Check if api-key is already in headers; if so, use it
        if "api-key" in headers:
            return headers
        api_key = (
            litellm_params.api_key
            or litellm.api_key
            or litellm.azure_key
            or get_secret_str("AZURE_OPENAI_API_KEY")
            or get_secret_str("AZURE_API_KEY")
        )
        if api_key:
            headers["api-key"] = api_key
            return headers
        ### Fallback to Azure AD token-based authentication if no API key is available
        ### Retrieves Azure AD token and adds it to the Authorization header
        azure_ad_token = get_azure_ad_token(litellm_params)
        if azure_ad_token:
            headers["Authorization"] = f"Bearer {azure_ad_token}"
        return headers
    @staticmethod
    def _get_base_azure_url(
        api_base: Optional[str],
        litellm_params: Optional[Union[GenericLiteLLMParams, Dict[str, Any]]],
        route: Union[Literal["/openai/responses", "/openai/vector_stores"], str],
        default_api_version: Optional[Union[str, Literal["latest", "preview"]]] = None,
    ) -> str:
        """
        Get the base Azure URL for the given route and API version.
        Args:
            api_base: The base URL of the Azure API.
            litellm_params: The litellm parameters.
            route: The route to the API.
            default_api_version: The default API version to use if no api_version is provided. If 'latest', it will use `openai/v1/...` route.
        Raises:
            ValueError: if no api_base can be resolved from args/globals/env.
        """
        api_base = api_base or litellm.api_base or get_secret_str("AZURE_API_BASE")
        if api_base is None:
            raise ValueError(
                f"api_base is required for Azure AI Studio. Please set the api_base parameter. Passed `api_base={api_base}`"
            )
        original_url = httpx.URL(api_base)
        # Extract api_version or use default
        litellm_params = litellm_params or {}
        api_version = (
            cast(Optional[str], litellm_params.get("api_version"))
            or default_api_version
        )
        # Create a new dictionary with existing params
        query_params = dict(original_url.params)
        # Add api_version if needed
        if "api-version" not in query_params and api_version:
            query_params["api-version"] = api_version
        # Add the path to the base URL
        if route not in api_base:
            new_url = _add_path_to_api_base(api_base=api_base, ending_path=route)
        else:
            new_url = api_base
        if BaseAzureLLM._is_azure_v1_api_version(api_version):
            # ensure the request go to /openai/v1 and not just /openai
            if "/openai/v1" not in new_url:
                parsed_url = httpx.URL(new_url)
                new_url = str(
                    parsed_url.copy_with(
                        path=parsed_url.path.replace("/openai", "/openai/v1")
                    )
                )
        # Use the new query_params dictionary
        final_url = httpx.URL(new_url).copy_with(params=query_params)
        return str(final_url)
    @staticmethod
    def _is_azure_v1_api_version(api_version: Optional[str]) -> bool:
        """True for the v1-style api_version aliases: "preview", "latest", "v1"."""
        if api_version is None:
            return False
        return api_version in {"preview", "latest", "v1"}
    def _resolve_env_var(
        self, litellm_params: Dict[str, Any], param_key: str, env_var_key: str
    ) -> Optional[str]:
        """Resolve the environment variable for a given parameter key.
        The logic here is different from `params.get(key, os.getenv(env_var))` because
        litellm_params may contain the key with a None value, in which case we want
        to fallback to the environment variable.
        """
        param_value = litellm_params.get(param_key)
        if param_value is not None:
            return param_value
        return os.getenv(env_var_key)
class AzureCredentials(NamedTuple):
    """Resolved Azure OpenAI connection settings; any field may be None if unresolved."""

    # Azure endpoint / base URL
    api_base: Optional[str]
    # Static API key, if one was resolved
    api_key: Optional[str]
    # Azure OpenAI api-version string
    api_version: Optional[str]
def get_azure_credentials(
    api_base: Optional[str] = None,
    api_key: Optional[str] = None,
    api_version: Optional[str] = None,
) -> AzureCredentials:
    """
    Resolve Azure credentials.

    Each field falls back from the explicit argument to the litellm module
    globals and finally to environment / secret-manager values.
    """
    base = api_base or litellm.api_base or get_secret_str("AZURE_API_BASE")
    version = (
        api_version or litellm.api_version or get_secret_str("AZURE_API_VERSION")
    )
    key = (
        api_key
        or litellm.api_key
        or litellm.azure_key
        or get_secret_str("AZURE_OPENAI_API_KEY")
        or get_secret_str("AZURE_API_KEY")
    )
    return AzureCredentials(api_base=base, api_key=key, api_version=version)

View File

@@ -0,0 +1,379 @@
from typing import Any, Callable, Optional
from openai import AsyncAzureOpenAI, AzureOpenAI
from litellm.litellm_core_utils.prompt_templates.factory import prompt_factory
from litellm.utils import CustomStreamWrapper, ModelResponse, TextCompletionResponse
from ...openai.completion.transformation import OpenAITextCompletionConfig
from ..common_utils import AzureOpenAIError, BaseAzureLLM
# Module-level transformation config shared by all AzureTextCompletion code paths.
openai_text_completion_config = OpenAITextCompletionConfig()
class AzureTextCompletion(BaseAzureLLM):
    def __init__(self) -> None:
        """Initialize shared Azure client/caching helpers from BaseAzureLLM."""
        super().__init__()
def validate_environment(self, api_key, azure_ad_token):
headers = {
"content-type": "application/json",
}
if api_key is not None:
headers["api-key"] = api_key
elif azure_ad_token is not None:
headers["Authorization"] = f"Bearer {azure_ad_token}"
return headers
    def completion(  # noqa: PLR0915
        self,
        model: str,
        messages: list,
        model_response: ModelResponse,
        api_key: Optional[str],
        api_base: str,
        api_version: str,
        api_type: str,
        azure_ad_token: Optional[str],
        azure_ad_token_provider: Optional[Callable],
        print_verbose: Callable,
        timeout,
        logging_obj,
        optional_params,
        litellm_params,
        logger_fn,
        acompletion: bool = False,
        headers: Optional[dict] = None,
        client=None,
    ):
        """
        Azure text-completions entrypoint.

        Converts chat-style `messages` into a single prompt, then dispatches to
        the sync, async, or streaming path based on `acompletion` and
        `optional_params["stream"]`. All failures are normalized into
        AzureOpenAIError.
        """
        try:
            if model is None or messages is None:
                raise AzureOpenAIError(
                    status_code=422, message="Missing model or messages"
                )
            max_retries = optional_params.pop("max_retries", 2)
            # Flatten chat messages into one text prompt for the legacy
            # /completions endpoint.
            prompt = prompt_factory(
                messages=messages, model=model, custom_llm_provider="azure_text"
            )
            ### CHECK IF CLOUDFLARE AI GATEWAY ###
            ### if so - set the model as part of the base url
            if api_base is not None and "gateway.ai.cloudflare.com" in api_base:
                ## build base url - assume api base includes resource name
                client = self._init_azure_client_for_cloudflare_ai_gateway(
                    api_key=api_key,
                    api_version=api_version,
                    api_base=api_base,
                    model=model,
                    client=client,
                    max_retries=max_retries,
                    timeout=timeout,
                    azure_ad_token=azure_ad_token,
                    azure_ad_token_provider=azure_ad_token_provider,
                    acompletion=acompletion,
                    litellm_params=litellm_params,
                )
                # model is already baked into the gateway base_url above
                data = {"model": None, "prompt": prompt, **optional_params}
            else:
                data = {
                    "model": model,  # type: ignore
                    "prompt": prompt,
                    **optional_params,
                }
            if acompletion is True:
                if optional_params.get("stream", False):
                    return self.async_streaming(
                        logging_obj=logging_obj,
                        api_base=api_base,
                        data=data,
                        model=model,
                        api_key=api_key,
                        api_version=api_version,
                        azure_ad_token=azure_ad_token,
                        timeout=timeout,
                        client=client,
                        litellm_params=litellm_params,
                    )
                else:
                    return self.acompletion(
                        api_base=api_base,
                        data=data,
                        model_response=model_response,
                        api_key=api_key,
                        api_version=api_version,
                        model=model,
                        azure_ad_token=azure_ad_token,
                        timeout=timeout,
                        client=client,
                        logging_obj=logging_obj,
                        max_retries=max_retries,
                        litellm_params=litellm_params,
                    )
            elif "stream" in optional_params and optional_params["stream"] is True:
                return self.streaming(
                    logging_obj=logging_obj,
                    api_base=api_base,
                    data=data,
                    model=model,
                    api_key=api_key,
                    api_version=api_version,
                    azure_ad_token=azure_ad_token,
                    timeout=timeout,
                    client=client,
                )
            else:
                ## LOGGING
                logging_obj.pre_call(
                    input=prompt,
                    api_key=api_key,
                    additional_args={
                        "headers": {
                            "api_key": api_key,
                            "azure_ad_token": azure_ad_token,
                        },
                        "api_version": api_version,
                        "api_base": api_base,
                        "complete_input_dict": data,
                    },
                )
                if not isinstance(max_retries, int):
                    raise AzureOpenAIError(
                        status_code=422, message="max retries must be an int"
                    )
                # init AzureOpenAI Client
                azure_client = self.get_azure_openai_client(
                    api_key=api_key,
                    api_base=api_base,
                    api_version=api_version,
                    client=client,
                    litellm_params=litellm_params,
                    _is_async=False,
                    model=model,
                )
                if not isinstance(azure_client, AzureOpenAI):
                    raise AzureOpenAIError(
                        status_code=500,
                        message="azure_client is not an instance of AzureOpenAI",
                    )
                raw_response = azure_client.completions.with_raw_response.create(
                    **data, timeout=timeout
                )
                response = raw_response.parse()
                stringified_response = response.model_dump()
                ## LOGGING
                logging_obj.post_call(
                    input=prompt,
                    api_key=api_key,
                    original_response=stringified_response,
                    additional_args={
                        "headers": headers,
                        "api_version": api_version,
                        "api_base": api_base,
                    },
                )
                return (
                    openai_text_completion_config.convert_to_chat_model_response_object(
                        response_object=TextCompletionResponse(**stringified_response),
                        model_response_object=model_response,
                    )
                )
        except AzureOpenAIError as e:
            raise e
        except Exception as e:
            # Normalize any SDK/network error into AzureOpenAIError, preserving
            # status code and response headers when available.
            status_code = getattr(e, "status_code", 500)
            error_headers = getattr(e, "headers", None)
            error_response = getattr(e, "response", None)
            if error_headers is None and error_response:
                error_headers = getattr(error_response, "headers", None)
            raise AzureOpenAIError(
                status_code=status_code, message=str(e), headers=error_headers
            )
    async def acompletion(
        self,
        api_key: Optional[str],
        api_version: str,
        model: str,
        api_base: str,
        data: dict,
        timeout: Any,
        model_response: ModelResponse,
        logging_obj: Any,
        max_retries: int,
        azure_ad_token: Optional[str] = None,
        client=None,  # this is the AsyncAzureOpenAI
        litellm_params: dict = {},
    ):
        """
        Async (non-streaming) Azure text completion.

        Mirrors the sync path in `completion`: obtains an async client, calls
        /completions, and converts the response into `model_response`.
        """
        response = None
        try:
            # init AzureOpenAI Client
            # setting Azure client
            azure_client = self.get_azure_openai_client(
                api_version=api_version,
                api_base=api_base,
                api_key=api_key,
                model=model,
                _is_async=True,
                client=client,
                litellm_params=litellm_params,
            )
            if not isinstance(azure_client, AsyncAzureOpenAI):
                raise AzureOpenAIError(
                    status_code=500,
                    message="azure_client is not an instance of AsyncAzureOpenAI",
                )
            ## LOGGING
            logging_obj.pre_call(
                input=data["prompt"],
                api_key=azure_client.api_key,
                additional_args={
                    "headers": {"Authorization": f"Bearer {azure_client.api_key}"},
                    "api_base": azure_client._base_url._uri_reference,
                    "acompletion": True,
                    "complete_input_dict": data,
                },
            )
            raw_response = await azure_client.completions.with_raw_response.create(
                **data, timeout=timeout
            )
            response = raw_response.parse()
            return openai_text_completion_config.convert_to_chat_model_response_object(
                response_object=response.model_dump(),
                model_response_object=model_response,
            )
        except AzureOpenAIError as e:
            raise e
        except Exception as e:
            # Normalize SDK/network errors, preserving status code and headers.
            status_code = getattr(e, "status_code", 500)
            error_headers = getattr(e, "headers", None)
            error_response = getattr(e, "response", None)
            if error_headers is None and error_response:
                error_headers = getattr(error_response, "headers", None)
            raise AzureOpenAIError(
                status_code=status_code, message=str(e), headers=error_headers
            )
def streaming(
    self,
    logging_obj,
    api_base: str,
    api_key: Optional[str],
    api_version: str,
    data: dict,
    model: str,
    timeout: Any,
    azure_ad_token: Optional[str] = None,
    client=None,
    litellm_params: Optional[dict] = None,
):
    """
    Sync streaming Azure OpenAI text-completion call.

    Pops "max_retries" out of `data` (so it is not sent upstream), builds a
    sync AzureOpenAI client, and returns a CustomStreamWrapper over the raw
    streaming response.

    Raises:
        AzureOpenAIError: if max_retries is not an int or the resolved client
            is not a sync AzureOpenAI instance.
    """
    # BUGFIX: the default was a shared mutable dict ({}); use None + local
    # init so call sites can never observe cross-call mutation.
    litellm_params = litellm_params if litellm_params is not None else {}
    max_retries = data.pop("max_retries", 2)  # must not be forwarded upstream
    if not isinstance(max_retries, int):
        raise AzureOpenAIError(
            status_code=422, message="max retries must be an int"
        )
    # init AzureOpenAI Client
    azure_client = self.get_azure_openai_client(
        api_version=api_version,
        api_base=api_base,
        api_key=api_key,
        model=model,
        _is_async=False,
        client=client,
        litellm_params=litellm_params,
    )
    if not isinstance(azure_client, AzureOpenAI):
        raise AzureOpenAIError(
            status_code=500,
            message="azure_client is not an instance of AzureOpenAI",
        )
    ## LOGGING
    logging_obj.pre_call(
        input=data["prompt"],
        api_key=azure_client.api_key,
        additional_args={
            "headers": {"Authorization": f"Bearer {azure_client.api_key}"},
            "api_base": azure_client._base_url._uri_reference,
            # BUGFIX: this is the sync path; it previously logged
            # "acompletion": True (copy-paste from the async variant).
            "acompletion": False,
            "complete_input_dict": data,
        },
    )
    raw_response = azure_client.completions.with_raw_response.create(
        **data, timeout=timeout
    )
    response = raw_response.parse()
    streamwrapper = CustomStreamWrapper(
        completion_stream=response,
        model=model,
        custom_llm_provider="azure_text",
        logging_obj=logging_obj,
    )
    return streamwrapper
async def async_streaming(
    self,
    logging_obj,
    api_base: str,
    api_key: Optional[str],
    api_version: str,
    data: dict,
    model: str,
    timeout: Any,
    azure_ad_token: Optional[str] = None,
    client=None,
    litellm_params: Optional[dict] = None,
):
    """
    Async streaming Azure OpenAI text-completion call.

    Builds an AsyncAzureOpenAI client and returns a CustomStreamWrapper over
    the raw streaming response. Any failure is re-wrapped as
    AzureOpenAIError, preserving upstream status code and response headers
    where available.

    Raises:
        AzureOpenAIError: on client-type mismatch or any upstream failure.
    """
    # BUGFIX: the default was a shared mutable dict ({}); use None + local
    # init so call sites can never observe cross-call mutation.
    litellm_params = litellm_params if litellm_params is not None else {}
    try:
        # init AzureOpenAI Client
        azure_client = self.get_azure_openai_client(
            api_version=api_version,
            api_base=api_base,
            api_key=api_key,
            model=model,
            _is_async=True,
            client=client,
            litellm_params=litellm_params,
        )
        if not isinstance(azure_client, AsyncAzureOpenAI):
            raise AzureOpenAIError(
                status_code=500,
                message="azure_client is not an instance of AsyncAzureOpenAI",
            )
        ## LOGGING
        logging_obj.pre_call(
            input=data["prompt"],
            api_key=azure_client.api_key,
            additional_args={
                "headers": {"Authorization": f"Bearer {azure_client.api_key}"},
                "api_base": azure_client._base_url._uri_reference,
                "acompletion": True,
                "complete_input_dict": data,
            },
        )
        raw_response = await azure_client.completions.with_raw_response.create(
            **data, timeout=timeout
        )
        response = raw_response.parse()
        streamwrapper = CustomStreamWrapper(
            completion_stream=response,
            model=model,
            custom_llm_provider="azure_text",
            logging_obj=logging_obj,
        )
        return streamwrapper  ## DO NOT make this into an async for ... loop, it will yield an async generator, which won't raise errors if the response fails
    except Exception as e:
        # Preserve the upstream status code and response headers (rate-limit
        # info etc.) when re-wrapping as AzureOpenAIError.
        status_code = getattr(e, "status_code", 500)
        error_headers = getattr(e, "headers", None)
        error_response = getattr(e, "response", None)
        if error_headers is None and error_response:
            error_headers = getattr(error_response, "headers", None)
        raise AzureOpenAIError(
            status_code=status_code, message=str(e), headers=error_headers
        )

View File

@@ -0,0 +1,53 @@
from typing import Optional, Union
from ...openai.completion.transformation import OpenAITextCompletionConfig
class AzureOpenAITextConfig(OpenAITextCompletionConfig):
    """
    Azure flavour of the OpenAI text-completion configuration.

    Reference: https://platform.openai.com/docs/api-reference/chat/create

    All parameters carry the same semantics as the parent
    `OpenAITextCompletionConfig` and are forwarded to it unchanged:

    - `frequency_penalty`: -2.0..2.0; positive values penalize tokens by
      their existing frequency, reducing repetition (default 0).
    - `logit_bias`: map modifying the likelihood of specified tokens.
    - `max_tokens`: cap on tokens generated for the completion.
    - `n`: number of completion choices to generate per input.
    - `presence_penalty`: -2.0..2.0; penalizes tokens already present,
      encouraging new topics (default 0).
    - `stop`: up to 4 sequences at which generation stops.
    - `temperature`: sampling temperature, 0..2.
    - `top_p`: nucleus-sampling alternative to temperature.
    """

    def __init__(
        self,
        frequency_penalty: Optional[int] = None,
        logit_bias: Optional[dict] = None,
        max_tokens: Optional[int] = None,
        n: Optional[int] = None,
        presence_penalty: Optional[int] = None,
        stop: Optional[Union[str, list]] = None,
        temperature: Optional[int] = None,
        top_p: Optional[int] = None,
    ) -> None:
        # Collect the accepted parameters and hand them straight to the
        # OpenAI text-completion config -- this class adds no Azure-specific
        # behavior of its own.
        forwarded = {
            "frequency_penalty": frequency_penalty,
            "logit_bias": logit_bias,
            "max_tokens": max_tokens,
            "n": n,
            "presence_penalty": presence_penalty,
            "stop": stop,
            "temperature": temperature,
            "top_p": top_p,
        }
        super().__init__(**forwarded)

View File

@@ -0,0 +1,50 @@
"""
Helper util for handling azure openai-specific cost calculation
- e.g.: prompt caching, audio tokens
"""
from typing import Optional, Tuple
from litellm._logging import verbose_logger
from litellm.litellm_core_utils.llm_cost_calc.utils import generic_cost_per_token
from litellm.types.utils import Usage
from litellm.utils import get_model_info
def cost_per_token(
    model: str, usage: Usage, response_time_ms: Optional[float] = 0.0
) -> Tuple[float, float]:
    """
    Compute the USD cost of an Azure model call.

    Args:
        model: model name without the provider prefix.
        usage: LiteLLM Usage block (carries caching/audio token counts).
        response_time_ms: wall-clock response time; only used for models
            priced per second of output (e.g. TTS).

    Returns:
        Tuple[float, float]: (prompt_cost_in_usd, completion_cost_in_usd)
    """
    ## GET MODEL INFO
    model_info = get_model_info(model=model, custom_llm_provider="azure")

    ## Speech / Audio cost calculation (cost per second for TTS models)
    per_second_rate = model_info.get("output_cost_per_second")
    if per_second_rate is not None and response_time_ms is not None:
        verbose_logger.debug(
            f"For model={model} - output_cost_per_second: {model_info.get('output_cost_per_second')}; response time: {response_time_ms}"
        )
        ## COST PER SECOND ##
        # All cost is attributed to the completion side for per-second pricing.
        return 0.0, per_second_rate * response_time_ms / 1000

    ## Use generic cost calculator for all other cases
    ## This properly handles: text tokens, audio tokens, cached tokens, reasoning tokens, etc.
    return generic_cost_per_token(
        model=model,
        usage=usage,
        custom_llm_provider="azure",
    )

View File

@@ -0,0 +1,91 @@
from typing import Any, Dict, Optional, Tuple
from litellm.exceptions import ContentPolicyViolationError
class AzureOpenAIExceptionMapping:
    """
    Builds Azure-OpenAI-specific exceptions from raw SDK/provider errors.
    """

    @staticmethod
    def create_content_policy_violation_error(
        message: str,
        model: str,
        extra_information: str,
        original_exception: Exception,
    ) -> ContentPolicyViolationError:
        """
        Build and raise a ContentPolicyViolationError from an Azure error.

        NOTE(review): despite the return annotation, this method *raises*
        the error rather than returning it -- callers must not rely on a
        return value. Confirm whether `raise` vs `return` is intended.

        Args:
            message: fallback message if Azure's payload has none.
            model: model/deployment name for the error context.
            extra_information: debug info attached as litellm_debug_info.
            original_exception: the raw exception from the Azure SDK call.
        """
        azure_error, inner_error = AzureOpenAIExceptionMapping._extract_azure_error(
            original_exception
        )
        # Prefer the provider message/type/code when present.
        provider_message = (
            azure_error.get("message") if isinstance(azure_error, dict) else None
        ) or message
        provider_type = (
            azure_error.get("type") if isinstance(azure_error, dict) else None
        )
        provider_code = (
            azure_error.get("code") if isinstance(azure_error, dict) else None
        )
        # Keep the OpenAI-style body fields populated so downstream (proxy + SDK)
        # can surface `type` / `code` correctly.
        openai_style_body: Dict[str, Any] = {
            "message": provider_message,
            "type": provider_type or "invalid_request_error",
            "code": provider_code or "content_policy_violation",
            "param": None,
        }
        raise ContentPolicyViolationError(
            message=provider_message,
            llm_provider="azure",
            model=model,
            litellm_debug_info=extra_information,
            response=getattr(original_exception, "response", None),
            provider_specific_fields={
                # Preserve legacy key for backward compatibility.
                "innererror": inner_error,
                # Prefer Azure's current naming.
                "inner_error": inner_error,
                # Include the full Azure error object for clients that want it.
                "azure_error": azure_error or None,
            },
            body=openai_style_body,
        )

    @staticmethod
    def _extract_azure_error(
        original_exception: Exception,
    ) -> Tuple[Dict[str, Any], Optional[dict]]:
        """Extract Azure OpenAI error payload and inner error details.

        Azure error formats can vary by endpoint/version. Common shapes:
        - {"innererror": {...}} (legacy)
        - {"error": {"code": "...", "message": "...", "type": "...", "inner_error": {...}}}
        - {"code": "...", "message": "...", "type": "..."} (already flattened)

        Returns:
            (azure_error, inner_error): the (possibly nested) error dict, and
            the inner-error dict if any naming variant of it is present.
        """
        # Non-dict / missing bodies yield an empty result rather than raising.
        body_dict = getattr(original_exception, "body", None) or {}
        if not isinstance(body_dict, dict):
            return {}, None
        # Some SDKs place the payload under "error".
        azure_error: Dict[str, Any]
        if isinstance(body_dict.get("error"), dict):
            azure_error = body_dict.get("error", {})  # type: ignore[assignment]
        else:
            azure_error = body_dict
        # Check every known key spelling, nested first, then top-level.
        inner_error = (
            azure_error.get("inner_error")
            or azure_error.get("innererror")
            or body_dict.get("innererror")
            or body_dict.get("inner_error")
        )
        return azure_error, inner_error

View File

@@ -0,0 +1,308 @@
from typing import Any, Coroutine, Optional, Union, cast
import httpx
from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
from openai.types.file_deleted import FileDeleted
from litellm._logging import verbose_logger
from litellm.types.llms.openai import *
from ..common_utils import BaseAzureLLM
class AzureOpenAIFilesAPI(BaseAzureLLM):
    """
    Azure OpenAI file operations (OpenAI-compatible Files API):
    - create_file()
    - retrieve_file()
    - list_files()
    - delete_file()
    - file_content()
    - update_file()

    Each public method is a sync/async dispatcher: it resolves a client via
    get_azure_openai_client, and when `_is_async` is True returns the
    corresponding `a<method>` coroutine instead of calling the API directly.
    """

    def __init__(self) -> None:
        super().__init__()

    @staticmethod
    def _prepare_create_file_data(
        create_file_data: CreateFileRequest,
    ) -> dict[str, Any]:
        """
        Prepare create_file_data for OpenAI SDK.
        Removes expires_after if None to match SDK's Omit pattern.
        SDK expects file_create_params.ExpiresAfter | Omit, but FileExpiresAfter works at runtime.
        """
        data = dict(create_file_data)
        if data.get("expires_after") is None:
            data.pop("expires_after", None)
        return data

    async def acreate_file(
        self,
        create_file_data: CreateFileRequest,
        openai_client: Union[AsyncAzureOpenAI, AsyncOpenAI],
    ) -> OpenAIFileObject:
        """Async upload of a file; returns the normalized OpenAIFileObject."""
        verbose_logger.debug("create_file_data=%s", create_file_data)
        response = await openai_client.files.create(**self._prepare_create_file_data(create_file_data))  # type: ignore[arg-type]
        verbose_logger.debug("create_file_response=%s", response)
        return OpenAIFileObject(**response.model_dump())

    def create_file(
        self,
        _is_async: bool,
        create_file_data: CreateFileRequest,
        api_base: Optional[str],
        api_key: Optional[str],
        api_version: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
    ) -> Union[OpenAIFileObject, Coroutine[Any, Any, OpenAIFileObject]]:
        """Upload a file; returns a coroutine when `_is_async` is True."""
        openai_client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = self.get_azure_openai_client(
            litellm_params=litellm_params or {},
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            _is_async=_is_async,
        )
        if openai_client is None:
            raise ValueError(
                "AzureOpenAI client is not initialized. Make sure api_key is passed or OPENAI_API_KEY is set in the environment."
            )
        if _is_async is True:
            # Async path requires an async client; return the coroutine.
            if not isinstance(openai_client, (AsyncAzureOpenAI, AsyncOpenAI)):
                raise ValueError(
                    "AzureOpenAI client is not an instance of AsyncAzureOpenAI. Make sure you passed an AsyncAzureOpenAI client."
                )
            return self.acreate_file(
                create_file_data=create_file_data, openai_client=openai_client
            )
        response = cast(Union[AzureOpenAI, OpenAI], openai_client).files.create(**self._prepare_create_file_data(create_file_data))  # type: ignore[arg-type]
        return OpenAIFileObject(**response.model_dump())

    async def afile_content(
        self,
        file_content_request: FileContentRequest,
        openai_client: Union[AsyncAzureOpenAI, AsyncOpenAI],
    ) -> HttpxBinaryResponseContent:
        """Async download of a file's raw content."""
        response = await openai_client.files.content(**file_content_request)
        return HttpxBinaryResponseContent(response=response.response)

    def file_content(
        self,
        _is_async: bool,
        file_content_request: FileContentRequest,
        api_base: Optional[str],
        api_key: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        api_version: Optional[str] = None,
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
    ) -> Union[
        HttpxBinaryResponseContent, Coroutine[Any, Any, HttpxBinaryResponseContent]
    ]:
        """Download file content; returns a coroutine when `_is_async` is True."""
        openai_client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = self.get_azure_openai_client(
            litellm_params=litellm_params or {},
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            _is_async=_is_async,
        )
        if openai_client is None:
            raise ValueError(
                "AzureOpenAI client is not initialized. Make sure api_key is passed or OPENAI_API_KEY is set in the environment."
            )
        if _is_async is True:
            if not isinstance(openai_client, (AsyncAzureOpenAI, AsyncOpenAI)):
                raise ValueError(
                    "AzureOpenAI client is not an instance of AsyncAzureOpenAI. Make sure you passed an AsyncAzureOpenAI client."
                )
            return self.afile_content(  # type: ignore
                file_content_request=file_content_request,
                openai_client=openai_client,
            )
        response = cast(Union[AzureOpenAI, OpenAI], openai_client).files.content(
            **file_content_request
        )
        return HttpxBinaryResponseContent(response=response.response)

    async def aretrieve_file(
        self,
        file_id: str,
        openai_client: Union[AsyncAzureOpenAI, AsyncOpenAI],
    ) -> FileObject:
        """Async fetch of a single file's metadata."""
        response = await openai_client.files.retrieve(file_id=file_id)
        return response

    def retrieve_file(
        self,
        _is_async: bool,
        file_id: str,
        api_base: Optional[str],
        api_key: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        api_version: Optional[str] = None,
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
    ):
        """Fetch file metadata; returns a coroutine when `_is_async` is True."""
        openai_client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = self.get_azure_openai_client(
            litellm_params=litellm_params or {},
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            _is_async=_is_async,
        )
        if openai_client is None:
            raise ValueError(
                "AzureOpenAI client is not initialized. Make sure api_key is passed or OPENAI_API_KEY is set in the environment."
            )
        if _is_async is True:
            if not isinstance(openai_client, (AsyncAzureOpenAI, AsyncOpenAI)):
                raise ValueError(
                    "AzureOpenAI client is not an instance of AsyncAzureOpenAI. Make sure you passed an AsyncAzureOpenAI client."
                )
            return self.aretrieve_file(  # type: ignore
                file_id=file_id,
                openai_client=openai_client,
            )
        response = openai_client.files.retrieve(file_id=file_id)
        return response

    async def adelete_file(
        self,
        file_id: str,
        openai_client: Union[AsyncAzureOpenAI, AsyncOpenAI],
    ) -> FileDeleted:
        """Async delete; normalizes Azure's empty response to FileDeleted."""
        response = await openai_client.files.delete(file_id=file_id)
        if not isinstance(response, FileDeleted):  # azure returns an empty string
            return FileDeleted(id=file_id, deleted=True, object="file")
        return response

    def delete_file(
        self,
        _is_async: bool,
        file_id: str,
        api_base: Optional[str],
        api_key: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        organization: Optional[str] = None,
        api_version: Optional[str] = None,
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
    ):
        """Delete a file; returns a coroutine when `_is_async` is True."""
        openai_client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = self.get_azure_openai_client(
            litellm_params=litellm_params or {},
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            _is_async=_is_async,
        )
        if openai_client is None:
            raise ValueError(
                "AzureOpenAI client is not initialized. Make sure api_key is passed or OPENAI_API_KEY is set in the environment."
            )
        if _is_async is True:
            if not isinstance(openai_client, (AsyncAzureOpenAI, AsyncOpenAI)):
                raise ValueError(
                    "AzureOpenAI client is not an instance of AsyncAzureOpenAI. Make sure you passed an AsyncAzureOpenAI client."
                )
            return self.adelete_file(  # type: ignore
                file_id=file_id,
                openai_client=openai_client,
            )
        response = openai_client.files.delete(file_id=file_id)
        if not isinstance(response, FileDeleted):  # azure returns an empty string
            return FileDeleted(id=file_id, deleted=True, object="file")
        return response

    async def alist_files(
        self,
        openai_client: Union[AsyncAzureOpenAI, AsyncOpenAI],
        purpose: Optional[str] = None,
    ):
        """Async list of files, optionally filtered by purpose."""
        if isinstance(purpose, str):
            response = await openai_client.files.list(purpose=purpose)
        else:
            response = await openai_client.files.list()
        return response

    def list_files(
        self,
        _is_async: bool,
        api_base: Optional[str],
        api_key: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        purpose: Optional[str] = None,
        api_version: Optional[str] = None,
        client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = None,
        litellm_params: Optional[dict] = None,
    ):
        """List files; returns a coroutine when `_is_async` is True."""
        openai_client: Optional[
            Union[AzureOpenAI, AsyncAzureOpenAI, OpenAI, AsyncOpenAI]
        ] = self.get_azure_openai_client(
            litellm_params=litellm_params or {},
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            _is_async=_is_async,
        )
        if openai_client is None:
            raise ValueError(
                "AzureOpenAI client is not initialized. Make sure api_key is passed or OPENAI_API_KEY is set in the environment."
            )
        if _is_async is True:
            if not isinstance(openai_client, (AsyncAzureOpenAI, AsyncOpenAI)):
                raise ValueError(
                    "AzureOpenAI client is not an instance of AsyncAzureOpenAI. Make sure you passed an AsyncAzureOpenAI client."
                )
            return self.alist_files(  # type: ignore
                purpose=purpose,
                openai_client=openai_client,
            )
        # The SDK's `purpose` param rejects an explicit None; only pass it
        # when set.
        if isinstance(purpose, str):
            response = openai_client.files.list(purpose=purpose)
        else:
            response = openai_client.files.list()
        return response

View File

@@ -0,0 +1,40 @@
from typing import Optional, Union
import httpx
from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
from litellm.llms.azure.common_utils import BaseAzureLLM
from litellm.llms.openai.fine_tuning.handler import OpenAIFineTuningAPI
class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI, BaseAzureLLM):
    """
    AzureOpenAI methods to support fine tuning, inherits from OpenAIFineTuningAPI.
    """

    def get_openai_client(
        self,
        api_key: Optional[str],
        api_base: Optional[str],
        timeout: Union[float, httpx.Timeout],
        max_retries: Optional[int],
        organization: Optional[str],
        client: Optional[
            Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI]
        ] = None,
        _is_async: bool = False,
        api_version: Optional[str] = None,
        litellm_params: Optional[dict] = None,
    ) -> Optional[Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI,]]:
        """
        Return a client for fine-tuning calls, built via Azure-specific
        initialization (api_version, AD auth, etc.).

        Args:
            api_key/api_base/api_version: Azure connection settings.
            timeout/max_retries/organization: accepted for interface parity
                with the OpenAI base class; resolution happens inside
                get_azure_openai_client.
            client: optional pre-built client to reuse (Azure clients only).
            _is_async: whether to build an async client.
            litellm_params: extra LiteLLM params for client initialization.
        """
        # Override to use Azure-specific client initialization.
        # BUGFIX: in the openai SDK, AzureOpenAI subclasses OpenAI (and
        # AsyncAzureOpenAI subclasses AsyncOpenAI), so a plain
        # isinstance(client, OpenAI) check also matched — and discarded —
        # valid Azure clients. Only discard genuinely non-Azure clients.
        if isinstance(client, (OpenAI, AsyncOpenAI)) and not isinstance(
            client, (AzureOpenAI, AsyncAzureOpenAI)
        ):
            client = None
        return self.get_azure_openai_client(
            litellm_params=litellm_params or {},
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            client=client,
            _is_async=_is_async,
        )

View File

@@ -0,0 +1,83 @@
from typing import Optional, cast
import httpx
import litellm
from litellm.llms.openai.image_edit.transformation import OpenAIImageEditConfig
from litellm.secret_managers.main import get_secret_str
from litellm.utils import _add_path_to_api_base
class AzureImageEditConfig(OpenAIImageEditConfig):
    """Azure-specific transformation config for the image-edit endpoint."""

    def validate_environment(
        self,
        headers: dict,
        model: str,
        api_key: Optional[str] = None,
    ) -> dict:
        """Resolve the Azure API key and attach it as a Bearer header."""
        # Resolution order: explicit arg -> litellm globals -> env secrets.
        resolved_key = (
            api_key
            or litellm.api_key
            or litellm.azure_key
            or get_secret_str("AZURE_OPENAI_API_KEY")
            or get_secret_str("AZURE_API_KEY")
        )
        headers["Authorization"] = f"Bearer {resolved_key}"
        return headers

    def get_complete_url(
        self,
        model: str,
        api_base: Optional[str],
        litellm_params: dict,
    ) -> str:
        """
        Build the full Azure image-edit endpoint URL.

        Accepts either a bare resource base, e.g.
        "https://litellm8397336933.openai.azure.com", or an already-complete
        deployment URL containing "/openai/deployments/...". `model` is used
        as the deployment name, and "api-version" is taken from
        litellm_params unless already present in the URL's query string.

        Returns:
            Complete URL string, e.g.
            "https://litellm8397336933.openai.azure.com/openai/deployments/<deployment_name>/images/edits?api-version=2024-05-01-preview"
        """
        resolved_base = api_base or litellm.api_base or get_secret_str("AZURE_API_BASE")
        if resolved_base is None:
            raise ValueError(
                f"api_base is required for Azure AI Studio. Please set the api_base parameter. Passed `api_base={resolved_base}`"
            )

        # Preserve any query params already on the base URL.
        query_params = dict(httpx.URL(resolved_base).params)
        api_version = cast(Optional[str], litellm_params.get("api_version"))
        if api_version and "api-version" not in query_params:
            query_params["api-version"] = api_version

        # Append the deployment path unless the caller already supplied one.
        if "/openai/deployments/" in resolved_base:
            target_url = resolved_base
        else:
            target_url = _add_path_to_api_base(
                api_base=resolved_base,
                ending_path=f"/openai/deployments/{model}/images/edits",
            )

        return str(httpx.URL(target_url).copy_with(params=query_params))

View File

@@ -0,0 +1,29 @@
from litellm._logging import verbose_logger
from litellm.llms.base_llm.image_generation.transformation import (
BaseImageGenerationConfig,
)
from .dall_e_2_transformation import AzureDallE2ImageGenerationConfig
from .dall_e_3_transformation import AzureDallE3ImageGenerationConfig
from .gpt_transformation import AzureGPTImageGenerationConfig
__all__ = [
"AzureDallE2ImageGenerationConfig",
"AzureDallE3ImageGenerationConfig",
"AzureGPTImageGenerationConfig",
]
def get_azure_image_generation_config(model: str) -> BaseImageGenerationConfig:
    """Select the Azure image-generation config matching *model*'s family."""
    # Normalize: case-insensitive, ignoring '-' and '_' separators.
    normalized = model.lower().replace("-", "").replace("_", "")
    if normalized == "" or "dalle2" in normalized:
        # An empty model name historically maps to dall-e-2.
        return AzureDallE2ImageGenerationConfig()
    if "dalle3" in normalized:
        return AzureDallE3ImageGenerationConfig()
    verbose_logger.debug(
        f"Using AzureGPTImageGenerationConfig for model: {normalized}. This follows the gpt-image-1 model format."
    )
    return AzureGPTImageGenerationConfig()

View File

@@ -0,0 +1,9 @@
from litellm.llms.openai.image_generation import DallE2ImageGenerationConfig
class AzureDallE2ImageGenerationConfig(DallE2ImageGenerationConfig):
    """Image-generation config for dall-e-2 deployments on Azure.

    Inherits all behavior from the OpenAI dall-e-2 config unchanged.
    """

View File

@@ -0,0 +1,9 @@
from litellm.llms.openai.image_generation import DallE3ImageGenerationConfig
class AzureDallE3ImageGenerationConfig(DallE3ImageGenerationConfig):
    """Image-generation config for dall-e-3 deployments on Azure.

    Inherits all behavior from the OpenAI dall-e-3 config unchanged.
    """

View File

@@ -0,0 +1,9 @@
from litellm.llms.openai.image_generation import GPTImageGenerationConfig
class AzureGPTImageGenerationConfig(GPTImageGenerationConfig):
    """Image-generation config for gpt-image-1 deployments on Azure.

    Inherits all behavior from the OpenAI gpt-image-1 config unchanged.
    """

View File

@@ -0,0 +1,85 @@
from typing import TYPE_CHECKING, List, Optional, Tuple
import httpx
from litellm.llms.azure.common_utils import BaseAzureLLM
from litellm.llms.base_llm.passthrough.transformation import BasePassthroughConfig
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import AllMessageValues
from litellm.types.router import GenericLiteLLMParams
if TYPE_CHECKING:
from httpx import URL
class AzurePassthroughConfig(BasePassthroughConfig):
    """Passthrough routing config for forwarding raw requests to Azure OpenAI."""

    def is_streaming_request(self, endpoint: str, request_data: dict) -> bool:
        # NOTE(review): this keys off the *presence* of "stream", so a body
        # containing {"stream": False} is still treated as streaming —
        # confirm this is intended.
        return "stream" in request_data

    def get_complete_url(
        self,
        api_base: Optional[str],
        api_key: Optional[str],
        model: str,
        endpoint: str,
        request_query_params: Optional[dict],
        litellm_params: dict,
    ) -> Tuple["URL", str]:
        """
        Build the fully-qualified Azure URL for a passthrough request.

        Returns:
            (complete httpx.URL, base target URL) — the base is returned
            separately so callers can log/route against it.

        Raises:
            Exception: if no Azure api base can be resolved.
        """
        base_target_url = self.get_api_base(api_base)
        if base_target_url is None:
            raise Exception("Azure api base not found")
        litellm_metadata = litellm_params.get("litellm_metadata") or {}
        model_group = litellm_metadata.get("model_group")
        # Swap the router's model-group alias in the endpoint path for the
        # actual Azure deployment name.
        if model_group and model_group in endpoint:
            endpoint = endpoint.replace(model_group, model)
        complete_url = BaseAzureLLM._get_base_azure_url(
            api_base=base_target_url,
            litellm_params=litellm_params,
            route=endpoint,
            default_api_version=litellm_params.get("api_version"),
        )
        return (
            httpx.URL(complete_url),
            base_target_url,
        )

    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
    ) -> dict:
        """Attach Azure auth headers, letting an explicit api_key override
        any key already present in litellm_params."""
        return BaseAzureLLM._base_validate_azure_environment(
            headers=headers,
            litellm_params=GenericLiteLLMParams(
                **{**litellm_params, "api_key": api_key}
            ),
        )

    @staticmethod
    def get_api_base(
        api_base: Optional[str] = None,
    ) -> Optional[str]:
        # Explicit arg wins over the AZURE_API_BASE env/secret.
        return api_base or get_secret_str("AZURE_API_BASE")

    @staticmethod
    def get_api_key(
        api_key: Optional[str] = None,
    ) -> Optional[str]:
        # Explicit arg wins over the AZURE_API_KEY env/secret.
        return api_key or get_secret_str("AZURE_API_KEY")

    @staticmethod
    def get_base_model(model: str) -> Optional[str]:
        # Azure passthrough uses the deployment name as-is.
        return model

    def get_models(
        self, api_key: Optional[str] = None, api_base: Optional[str] = None
    ) -> List[str]:
        # Delegates unchanged to the base implementation; kept for interface
        # visibility.
        return super().get_models(api_key, api_base)

View File

@@ -0,0 +1,126 @@
"""
This file contains the calling Azure OpenAI's `/openai/realtime` endpoint.
This requires websockets, and is currently only supported on LiteLLM Proxy.
"""
from typing import Any, Optional, cast
from litellm._logging import verbose_proxy_logger
from litellm.constants import REALTIME_WEBSOCKET_MAX_MESSAGE_SIZE_BYTES
from ....litellm_core_utils.litellm_logging import Logging as LiteLLMLogging
from ....litellm_core_utils.realtime_streaming import RealTimeStreaming
from ....llms.custom_httpx.http_handler import get_shared_realtime_ssl_context
from ..azure import AzureChatCompletion
# BACKEND_WS_URL = "ws://localhost:8080/v1/realtime?model=gpt-4o-realtime-preview-2024-10-01"
async def forward_messages(client_ws: Any, backend_ws: Any):
    """Relay messages from the backend realtime socket to the client socket.

    Loops until the backend connection closes, at which point the relay
    exits silently.
    """
    import websockets

    try:
        while True:
            payload = await backend_ws.recv()
            await client_ws.send_text(payload)
    except websockets.exceptions.ConnectionClosed:  # type: ignore
        # Backend closed the socket — normal termination of the relay.
        pass
class AzureOpenAIRealtime(AzureChatCompletion):
    """Bidirectional websocket proxy to Azure OpenAI's `/openai/realtime` endpoint."""

    def _construct_url(
        self,
        api_base: str,
        model: str,
        api_version: Optional[str],
        realtime_protocol: Optional[str] = None,
    ) -> str:
        """
        Construct Azure realtime WebSocket URL.

        Args:
            api_base: Azure API base URL (will be converted from https:// to wss://)
            model: Model deployment name
            api_version: Azure API version
            realtime_protocol: Protocol version to use:
                - "GA" or "v1": Uses /openai/v1/realtime (GA path)
                - "beta" or None: Uses /openai/realtime (beta path, default)

        Returns:
            WebSocket URL string

        Examples:
            beta/default: "wss://.../openai/realtime?api-version=2024-10-01-preview&deployment=gpt-4o-realtime-preview"
            GA/v1: "wss://.../openai/v1/realtime?model=gpt-realtime-deployment"
        """
        api_base = api_base.replace("https://", "wss://")
        # Determine path based on realtime_protocol (case-insensitive)
        _is_ga = realtime_protocol is not None and realtime_protocol.upper() in (
            "GA",
            "V1",
        )
        if _is_ga:
            path = "/openai/v1/realtime"
            return f"{api_base}{path}?model={model}"
        else:
            # Default to beta path for backwards compatibility
            path = "/openai/realtime"
            return f"{api_base}{path}?api-version={api_version}&deployment={model}"

    async def async_realtime(
        self,
        model: str,
        websocket: Any,
        logging_obj: LiteLLMLogging,
        api_base: Optional[str] = None,
        api_key: Optional[str] = None,
        api_version: Optional[str] = None,
        azure_ad_token: Optional[str] = None,
        client: Optional[Any] = None,
        timeout: Optional[float] = None,
        realtime_protocol: Optional[str] = None,
        user_api_key_dict: Optional[Any] = None,
        litellm_metadata: Optional[dict] = None,
    ):
        """
        Open a websocket to Azure's realtime endpoint and forward traffic
        both ways between `websocket` (the client) and the backend until
        either side closes.

        Raises:
            ValueError: if api_base is missing, or api_version is missing on
                the beta protocol path.
        """
        import websockets
        from websockets.asyncio.client import ClientConnection

        if api_base is None:
            raise ValueError("api_base is required for Azure OpenAI calls")
        # api_version is only mandatory on the beta path; the GA/v1 path
        # addresses the deployment via ?model= instead.
        if api_version is None and (
            realtime_protocol is None or realtime_protocol.upper() not in ("GA", "V1")
        ):
            raise ValueError("api_version is required for Azure OpenAI calls")

        url = self._construct_url(
            api_base, model, api_version, realtime_protocol=realtime_protocol
        )

        try:
            # Shared SSL context avoids re-building TLS config per connection.
            ssl_context = get_shared_realtime_ssl_context()
            async with websockets.connect(  # type: ignore
                url,
                additional_headers={
                    "api-key": api_key,  # type: ignore
                },
                max_size=REALTIME_WEBSOCKET_MAX_MESSAGE_SIZE_BYTES,
                ssl=ssl_context,
            ) as backend_ws:
                realtime_streaming = RealTimeStreaming(
                    websocket,
                    cast(ClientConnection, backend_ws),
                    logging_obj,
                    user_api_key_dict=user_api_key_dict,
                    request_data={"litellm_metadata": litellm_metadata or {}},
                )
                await realtime_streaming.bidirectional_forward()
        except websockets.exceptions.InvalidStatusCode as e:  # type: ignore
            # Propagate the backend's HTTP status to the client on handshake
            # failure. NOTE(review): InvalidStatusCode is the legacy websockets
            # (<13) name — confirm the pinned websockets version still exposes it.
            await websocket.close(code=e.status_code, reason=str(e))
        except Exception:
            # Swallow after logging: the proxy must not crash on a single
            # failed realtime session.
            verbose_proxy_logger.exception(
                "Error in AzureOpenAIRealtime.async_realtime"
            )
            pass

View File

@@ -0,0 +1,46 @@
"""Azure OpenAI realtime HTTP transformation config (client_secrets + realtime_calls)."""
from typing import Optional
import litellm
from litellm.llms.base_llm.realtime.http_transformation import BaseRealtimeHTTPConfig
from litellm.secret_managers.main import get_secret_str
class AzureRealtimeHTTPConfig(BaseRealtimeHTTPConfig):
    """Azure OpenAI realtime HTTP config (client_secrets + realtime_calls endpoints)."""

    def _resolve_api_version(self, api_version: Optional[str]) -> str:
        # Explicit arg -> AZURE_API_VERSION env -> last known-good default.
        return api_version or get_secret_str("AZURE_API_VERSION") or "2024-12-17"

    def get_api_base(self, api_base: Optional[str], **kwargs) -> str:
        """Resolve the Azure base URL from arg, litellm globals, or env."""
        resolved = api_base or litellm.api_base or get_secret_str("AZURE_API_BASE")
        return resolved or ""

    def get_api_key(self, api_key: Optional[str], **kwargs) -> str:
        """Resolve the Azure API key from arg, litellm globals, or env."""
        resolved = api_key or litellm.api_key or get_secret_str("AZURE_API_KEY")
        return resolved or ""

    def get_complete_url(
        self, api_base: Optional[str], model: str, api_version: Optional[str] = None
    ) -> str:
        """URL for minting ephemeral realtime client secrets."""
        root = self.get_api_base(api_base).rstrip("/")
        version = self._resolve_api_version(api_version)
        return f"{root}/openai/realtime/client_secrets?api-version={version}"

    def validate_environment(
        self,
        headers: dict,
        model: str,
        api_key: Optional[str] = None,
    ) -> dict:
        """Return a new header dict with Azure auth + JSON content type."""
        merged = dict(headers)
        merged["api-key"] = api_key or ""
        merged["Content-Type"] = "application/json"
        return merged

    def get_realtime_calls_url(
        self, api_base: Optional[str], model: str, api_version: Optional[str] = None
    ) -> str:
        """URL for the realtime calls endpoint."""
        root = self.get_api_base(api_base).rstrip("/")
        version = self._resolve_api_version(api_version)
        return f"{root}/openai/realtime/calls?api-version={version}"

    def get_realtime_calls_headers(self, ephemeral_key: str) -> dict:
        """Headers for realtime calls, authed with the ephemeral secret."""
        return {
            "api-key": ephemeral_key,
        }

View File

@@ -0,0 +1,94 @@
"""
Support for Azure OpenAI O-series models (o1, o3, etc.) in Responses API
https://platform.openai.com/docs/guides/reasoning
Translations handled by LiteLLM:
- temperature => drop param (if user opts in to dropping param)
- Other parameters follow base Azure OpenAI Responses API behavior
"""
from typing import TYPE_CHECKING, Any, Dict
from litellm._logging import verbose_logger
from litellm.types.llms.openai import ResponsesAPIOptionalRequestParams
from litellm.utils import supports_reasoning
from .transformation import AzureOpenAIResponsesAPIConfig
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
LiteLLMLoggingObj = Any
class AzureOpenAIOSeriesResponsesAPIConfig(AzureOpenAIResponsesAPIConfig):
    """
    Responses-API config for Azure OpenAI O-series models (o1, o3, ...).

    O-series models reject the `temperature` parameter on the responses API,
    so it is removed from the supported-param list and dropped from requests
    when drop_params is enabled.
    """

    def get_supported_openai_params(self, model: str) -> list:
        """
        Supported params for O-series = base Azure params minus `temperature`.
        """
        return [
            param
            for param in super().get_supported_openai_params(model)
            if param != "temperature"
        ]

    def map_openai_params(
        self,
        response_api_optional_params: ResponsesAPIOptionalRequestParams,
        model: str,
        drop_params: bool,
    ) -> Dict:
        """
        Copy the optional params, dropping `temperature` when drop_params is
        True (O-series models don't accept it on the responses API).
        """
        mapped: Dict = dict(response_api_optional_params)
        if drop_params and "temperature" in mapped:
            verbose_logger.debug(
                f"Dropping unsupported parameter 'temperature' for Azure OpenAI O-series responses API model {model}"
            )
            del mapped["temperature"]
        return mapped

    def is_o_series_model(self, model: str) -> bool:
        """
        Heuristically decide whether *model* is an O-series model.

        Args:
            model: the model name to check.

        Returns:
            True for names containing "o_series", or any model that LiteLLM
            reports as supporting reasoning (an O-series marker here).
        """
        if "o_series" in model.lower():
            return True
        return supports_reasoning(model)

View File

@@ -0,0 +1,359 @@
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union
import httpx
from openai.types.responses import ResponseReasoningItem
from litellm._logging import verbose_logger
from litellm.llms.azure.common_utils import BaseAzureLLM
from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
from litellm.types.llms.openai import *
from litellm.types.responses.main import *
from litellm.types.router import GenericLiteLLMParams
from litellm.types.utils import LlmProviders
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
LiteLLMLoggingObj = Any
class AzureOpenAIResponsesAPIConfig(OpenAIResponsesAPIConfig):
    """Azure OpenAI Responses API configuration.

    Extends the OpenAI Responses API config with Azure specifics: URL
    construction (api-version query param), api-key auth headers, model-name
    prefix stripping, flattened tool definitions, and removal of fields
    Azure rejects (e.g. ``status`` on reasoning/message input items).
    """

    # Parameters not supported by Azure Responses API
    AZURE_UNSUPPORTED_PARAMS = ["context_management"]

    @property
    def custom_llm_provider(self) -> LlmProviders:
        # Identifies this config as the Azure provider for routing/logging.
        return LlmProviders.AZURE

    def get_supported_openai_params(self, model: str) -> list:
        """
        Azure Responses API does not support context_management (compaction).
        """
        base_supported_params = super().get_supported_openai_params(model)
        return [
            param
            for param in base_supported_params
            if param not in self.AZURE_UNSUPPORTED_PARAMS
        ]

    def validate_environment(
        self, headers: dict, model: str, litellm_params: Optional[GenericLiteLLMParams]
    ) -> dict:
        """Resolve Azure auth headers from the request's litellm params."""
        return BaseAzureLLM._base_validate_azure_environment(
            headers=headers, litellm_params=litellm_params
        )

    def get_stripped_model_name(self, model: str) -> str:
        """Strip litellm routing prefixes from *model* before sending to Azure."""
        # if "responses/" is in the model name, remove it
        if "responses/" in model:
            model = model.replace("responses/", "")
        # NOTE(review): the condition checks for "o_series" but only the
        # "o_series/" prefix is replaced — a bare "o_series" substring
        # passes through unchanged. Confirm this asymmetry is intended.
        if "o_series" in model:
            model = model.replace("o_series/", "")
        return model

    def _handle_reasoning_item(self, item: Dict[str, Any]) -> Dict[str, Any]:
        """
        Handle reasoning items to filter out the status field.

        Issue: https://github.com/BerriAI/litellm/issues/13484
        Azure OpenAI API does not accept 'status' field in reasoning input items.

        Non-reasoning items are returned unchanged.
        """
        if item.get("type") == "reasoning":
            try:
                # Ensure required fields are present for ResponseReasoningItem
                item_data = dict(item)
                if "summary" not in item_data:
                    # Synthesize a summary from reasoning_content, truncated
                    # to 100 characters with an ellipsis when longer.
                    item_data["summary"] = (
                        item_data.get("reasoning_content", "")[:100] + "..."
                        if len(item_data.get("reasoning_content", "")) > 100
                        else item_data.get("reasoning_content", "")
                    )
                # Create ResponseReasoningItem object from the item data
                reasoning_item = ResponseReasoningItem(**item_data)
                # Convert back to dict with exclude_none=True to exclude None fields
                dict_reasoning_item = reasoning_item.model_dump(exclude_none=True)
                dict_reasoning_item.pop("status", None)
                return dict_reasoning_item
            except Exception as e:
                verbose_logger.debug(
                    f"Failed to create ResponseReasoningItem, falling back to manual filtering: {e}"
                )
                # Fallback: manually filter out known None fields
                filtered_item = {
                    k: v
                    for k, v in item.items()
                    if v is not None
                    or k not in {"status", "content", "encrypted_content"}
                }
                return filtered_item
        return item

    def _validate_input_param(
        self, input: Union[str, ResponseInputParam]
    ) -> Union[str, ResponseInputParam]:
        """
        Override parent method to also filter out 'status' field from message items.
        Azure OpenAI API does not accept 'status' field in input messages.
        """
        from typing import cast

        # First call parent's validation
        validated_input = super()._validate_input_param(input)
        # Then filter out status from message items
        if isinstance(validated_input, list):
            filtered_input: List[Any] = []
            for item in validated_input:
                if isinstance(item, dict) and item.get("type") == "message":
                    # Filter out status field from message items
                    filtered_item = {k: v for k, v in item.items() if k != "status"}
                    filtered_input.append(filtered_item)
                else:
                    filtered_input.append(item)
            return cast(ResponseInputParam, filtered_input)
        return validated_input

    def transform_responses_api_request(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        response_api_optional_request_params: Dict,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
    ) -> Dict:
        """Transform the request for Azure.

        Strips routing prefixes from the model name and flattens tool
        definitions, then defers to the OpenAI-spec transform.
        """
        stripped_model_name = self.get_stripped_model_name(model)
        # Azure Responses API requires flattened tools (params at top level, not nested in 'function')
        # NOTE: mutates response_api_optional_request_params["tools"] in place.
        if "tools" in response_api_optional_request_params and isinstance(
            response_api_optional_request_params["tools"], list
        ):
            new_tools: List[Dict[str, Any]] = []
            for tool in response_api_optional_request_params["tools"]:
                if isinstance(tool, dict) and "function" in tool:
                    # Hoist the nested "function" fields up to the top level.
                    new_tool: Dict[str, Any] = deepcopy(tool)
                    function_data = new_tool.pop("function")
                    new_tool.update(function_data)
                    new_tools.append(new_tool)
                else:
                    new_tools.append(tool)
            response_api_optional_request_params["tools"] = new_tools
        return super().transform_responses_api_request(
            model=stripped_model_name,
            input=input,
            response_api_optional_request_params=response_api_optional_request_params,
            litellm_params=litellm_params,
            headers=headers,
        )

    def get_complete_url(
        self,
        api_base: Optional[str],
        litellm_params: dict,
    ) -> str:
        """
        Constructs a complete URL for the API request.

        Args:
        - api_base: Base URL, e.g.,
            "https://litellm8397336933.openai.azure.com"
            OR
            "https://litellm8397336933.openai.azure.com/openai/responses?api-version=2024-05-01-preview"
        - model: Model name.
        - optional_params: Additional query parameters, including "api_version".
        - stream: If streaming is required (optional).

        Returns:
        - A complete URL string, e.g.,
        "https://litellm8397336933.openai.azure.com/openai/responses?api-version=2024-05-01-preview"
        """
        from litellm.constants import AZURE_DEFAULT_RESPONSES_API_VERSION

        return BaseAzureLLM._get_base_azure_url(
            api_base=api_base,
            litellm_params=litellm_params,
            route="/openai/responses",
            default_api_version=AZURE_DEFAULT_RESPONSES_API_VERSION,
        )

    #########################################################
    ########## DELETE RESPONSE API TRANSFORMATION ##############
    #########################################################
    def _construct_url_for_response_id_in_path(
        self, api_base: str, response_id: str
    ) -> str:
        """
        Constructs a URL for the API request with the response_id in the path.

        Query parameters (e.g. api-version) in *api_base* are preserved; the
        response_id is appended to the path component only.
        """
        from urllib.parse import urlparse, urlunparse

        # Parse the URL to separate its components
        parsed_url = urlparse(api_base)
        # Insert the response_id at the end of the path component
        # Remove trailing slash if present to avoid double slashes
        path = parsed_url.path.rstrip("/")
        new_path = f"{path}/{response_id}"
        # Reconstruct the URL with all original components but with the modified path
        constructed_url = urlunparse(
            (
                parsed_url.scheme,  # http, https
                parsed_url.netloc,  # domain name, port
                new_path,  # path with response_id added
                parsed_url.params,  # parameters
                parsed_url.query,  # query string
                parsed_url.fragment,  # fragment
            )
        )
        return constructed_url

    def transform_delete_response_api_request(
        self,
        response_id: str,
        api_base: str,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
    ) -> Tuple[str, Dict]:
        """
        Transform the delete response API request into a URL and data

        Azure OpenAI API expects the following request:
        - DELETE /openai/responses/{response_id}?api-version=xxx

        This function handles URLs with query parameters by inserting the response_id
        at the correct location (before any query parameters).
        """
        delete_url = self._construct_url_for_response_id_in_path(
            api_base=api_base, response_id=response_id
        )
        data: Dict = {}  # DELETE carries no body
        verbose_logger.debug(f"delete response url={delete_url}")
        return delete_url, data

    #########################################################
    ########## GET RESPONSE API TRANSFORMATION ###############
    #########################################################
    def transform_get_response_api_request(
        self,
        response_id: str,
        api_base: str,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
    ) -> Tuple[str, Dict]:
        """
        Transform the get response API request into a URL and data

        OpenAI API expects the following request
        - GET /v1/responses/{response_id}
        """
        get_url = self._construct_url_for_response_id_in_path(
            api_base=api_base, response_id=response_id
        )
        data: Dict = {}  # GET carries no body
        verbose_logger.debug(f"get response url={get_url}")
        return get_url, data

    def transform_list_input_items_request(
        self,
        response_id: str,
        api_base: str,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
        after: Optional[str] = None,
        before: Optional[str] = None,
        include: Optional[List[str]] = None,
        limit: int = 20,
        order: Literal["asc", "desc"] = "desc",
    ) -> Tuple[str, Dict]:
        """Build the GET /responses/{id}/input_items URL and its query params.

        Only parameters that were actually supplied are sent; ``include`` is
        serialized as a comma-separated list.
        """
        url = (
            self._construct_url_for_response_id_in_path(
                api_base=api_base, response_id=response_id
            )
            + "/input_items"
        )
        params: Dict[str, Any] = {}
        if after is not None:
            params["after"] = after
        if before is not None:
            params["before"] = before
        if include:
            params["include"] = ",".join(include)
        if limit is not None:
            params["limit"] = limit
        if order is not None:
            params["order"] = order
        verbose_logger.debug(f"list input items url={url}")
        return url, params

    #########################################################
    ########## CANCEL RESPONSE API TRANSFORMATION ##########
    #########################################################
    def transform_cancel_response_api_request(
        self,
        response_id: str,
        api_base: str,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
    ) -> Tuple[str, Dict]:
        """
        Transform the cancel response API request into a URL and data

        Azure OpenAI API expects the following request:
        - POST /openai/responses/{response_id}/cancel?api-version=xxx

        This function handles URLs with query parameters by inserting the response_id
        at the correct location (before any query parameters).
        """
        from urllib.parse import urlparse, urlunparse

        # Parse the URL to separate its components
        parsed_url = urlparse(api_base)
        # Insert the response_id and /cancel at the end of the path component
        # Remove trailing slash if present to avoid double slashes
        path = parsed_url.path.rstrip("/")
        new_path = f"{path}/{response_id}/cancel"
        # Reconstruct the URL with all original components but with the modified path
        cancel_url = urlunparse(
            (
                parsed_url.scheme,  # http, https
                parsed_url.netloc,  # domain name, port
                new_path,  # path with response_id and /cancel added
                parsed_url.params,  # parameters
                parsed_url.query,  # query string
                parsed_url.fragment,  # fragment
            )
        )
        data: Dict = {}  # cancel POST carries no body
        verbose_logger.debug(f"cancel response url={cancel_url}")
        return cancel_url, data

    def transform_cancel_response_api_response(
        self,
        raw_response: httpx.Response,
        logging_obj: LiteLLMLoggingObj,
    ) -> ResponsesAPIResponse:
        """
        Transform the cancel response API response into a ResponsesAPIResponse

        Raises:
            AzureOpenAIError: when the raw response body is not valid JSON.
        """
        try:
            raw_response_json = raw_response.json()
        except Exception:
            from litellm.llms.azure.chat.gpt_transformation import AzureOpenAIError

            raise AzureOpenAIError(
                message=raw_response.text, status_code=raw_response.status_code
            )
        return ResponsesAPIResponse(**raw_response_json)

View File

@@ -0,0 +1,7 @@
"""Azure Text-to-Speech module"""

from .transformation import AzureAVATextToSpeechConfig

# Public API of this package.
__all__ = [
    "AzureAVATextToSpeechConfig",
]

View File

@@ -0,0 +1,504 @@
"""
Azure AVA (Cognitive Services) Text-to-Speech transformation
Maps OpenAI TTS spec to Azure Cognitive Services TTS API
"""
from typing import TYPE_CHECKING, Any, Coroutine, Dict, Optional, Tuple, Union
from urllib.parse import urlparse
import httpx
import litellm
from litellm.llms.base_llm.text_to_speech.transformation import (
BaseTextToSpeechConfig,
TextToSpeechRequestData,
)
from litellm.secret_managers.main import get_secret_str
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.types.llms.openai import HttpxBinaryResponseContent
else:
LiteLLMLoggingObj = Any
HttpxBinaryResponseContent = Any
class AzureAVATextToSpeechConfig(BaseTextToSpeechConfig):
    """
    Configuration for Azure AVA (Cognitive Services) Text-to-Speech

    Maps the OpenAI TTS request spec (voice / response_format / speed) onto the
    Azure Cognitive Services SSML-based REST API.

    Reference: https://learn.microsoft.com/en-us/azure/ai-services/speech-service/rest-text-to-speech
    """

    # Azure endpoint domains
    DEFAULT_VOICE = "en-US-AriaNeural"
    COGNITIVE_SERVICES_DOMAIN = "api.cognitive.microsoft.com"
    TTS_SPEECH_DOMAIN = "tts.speech.microsoft.com"
    TTS_ENDPOINT_PATH = "/cognitiveservices/v1"

    # Voice name mappings from OpenAI voices to Azure voices
    VOICE_MAPPINGS = {
        "alloy": "en-US-JennyNeural",
        "echo": "en-US-GuyNeural",
        "fable": "en-GB-RyanNeural",
        "onyx": "en-US-DavisNeural",
        "nova": "en-US-AmberNeural",
        "shimmer": "en-US-AriaNeural",
    }

    # Response format mappings from OpenAI to Azure
    FORMAT_MAPPINGS = {
        "mp3": "audio-24khz-48kbitrate-mono-mp3",
        "opus": "ogg-48khz-16bit-mono-opus",
        "aac": "audio-24khz-48kbitrate-mono-mp3",  # Azure doesn't have AAC, use MP3
        "flac": "audio-24khz-48kbitrate-mono-mp3",  # Azure doesn't have FLAC, use MP3
        "wav": "riff-24khz-16bit-mono-pcm",
        "pcm": "raw-24khz-16bit-mono-pcm",
    }

    def dispatch_text_to_speech(
        self,
        model: str,
        input: str,
        voice: Optional[Union[str, Dict]],
        optional_params: Dict,
        litellm_params_dict: Dict,
        logging_obj: "LiteLLMLoggingObj",
        timeout: Union[float, httpx.Timeout],
        extra_headers: Optional[Dict[str, Any]],
        base_llm_http_handler: Any,
        aspeech: bool,
        api_base: Optional[str],
        api_key: Optional[str],
        **kwargs: Any,
    ) -> Union[
        "HttpxBinaryResponseContent",
        Coroutine[Any, Any, "HttpxBinaryResponseContent"],
    ]:
        """
        Dispatch method to handle Azure AVA TTS requests

        This method encapsulates Azure-specific credential resolution and parameter handling

        Args:
            base_llm_http_handler: The BaseLLMHTTPHandler instance from main.py

        Returns the handler's result directly (a coroutine when aspeech=True).
        """
        # Resolve api_base from multiple sources (explicit arg wins, then
        # per-request params, then global config, then env secret)
        api_base = (
            api_base
            or litellm_params_dict.get("api_base")
            or litellm.api_base
            or get_secret_str("AZURE_API_BASE")
        )
        # Resolve api_key from multiple sources (Azure-specific)
        api_key = (
            api_key
            or litellm_params_dict.get("api_key")
            or litellm.api_key
            or litellm.azure_key
            or get_secret_str("AZURE_OPENAI_API_KEY")
            or get_secret_str("AZURE_API_KEY")
        )
        # Convert voice to string if it's a dict (for Azure AVA, voice must be a string)
        voice_str: Optional[str] = None
        if isinstance(voice, str):
            voice_str = voice
        elif isinstance(voice, dict):
            # Extract voice name from dict if needed
            voice_str = voice.get("name") if voice else None
        # NOTE: mutates the caller's litellm_params_dict in place.
        litellm_params_dict.update(
            {
                "api_key": api_key,
                "api_base": api_base,
            }
        )
        # Call the text_to_speech_handler
        response = base_llm_http_handler.text_to_speech_handler(
            model=model,
            input=input,
            voice=voice_str,
            text_to_speech_provider_config=self,
            text_to_speech_optional_params=optional_params,
            custom_llm_provider="azure",
            litellm_params=litellm_params_dict,
            logging_obj=logging_obj,
            timeout=timeout,
            extra_headers=extra_headers,
            client=None,
            _is_async=aspeech,
        )
        return response

    def get_supported_openai_params(self, model: str) -> list:
        """
        Azure AVA TTS supports these OpenAI parameters

        Note: Azure also supports additional SSML-specific parameters (style, styledegree, role)
        which can be passed but are not part of the OpenAI spec
        """
        return ["voice", "response_format", "speed"]

    def _convert_speed_to_azure_rate(self, speed: float) -> str:
        """
        Convert OpenAI speed value to Azure SSML prosody rate percentage

        Args:
            speed: OpenAI speed value (0.25-4.0, default 1.0)

        Returns:
            Azure rate string with percentage (e.g., "+50%", "-50%", "+0%")

        Examples:
            speed=1.0 -> "+0%" (default)
            speed=2.0 -> "+100%"
            speed=0.5 -> "-50%"
        """
        # Offset from normal speed, expressed as a signed integer percentage.
        rate_percentage = int((speed - 1.0) * 100)
        return f"{rate_percentage:+d}%"

    def _build_express_as_element(
        self,
        content: str,
        style: Optional[str] = None,
        styledegree: Optional[str] = None,
        role: Optional[str] = None,
    ) -> str:
        """
        Build mstts:express-as element with optional style, styledegree, and role attributes

        Args:
            content: The inner content to wrap
            style: Speaking style (e.g., "cheerful", "sad", "angry")
            styledegree: Style intensity (0.01 to 2)
            role: Voice role (e.g., "Girl", "Boy", "SeniorFemale", "SeniorMale")

        Returns:
            Content wrapped in mstts:express-as if any attributes provided, otherwise raw content
        """
        if not (style or styledegree or role):
            return content
        express_as_attrs = []
        if style:
            express_as_attrs.append(f"style='{style}'")
        if styledegree:
            express_as_attrs.append(f"styledegree='{styledegree}'")
        if role:
            express_as_attrs.append(f"role='{role}'")
        express_as_attrs_str = " ".join(express_as_attrs)
        return f"<mstts:express-as {express_as_attrs_str}>{content}</mstts:express-as>"

    def _get_voice_language(
        self,
        voice_name: Optional[str],
        explicit_lang: Optional[str] = None,
    ) -> Optional[str]:
        """
        Get the language for the voice element's xml:lang attribute

        Args:
            voice_name: The Azure voice name (e.g., "en-US-AriaNeural")
            explicit_lang: Explicitly provided language code (takes precedence)

        Returns:
            Language code if available (e.g., "es-ES"), or None

        Examples:
            - explicit_lang="es-ES" -> "es-ES" (explicit takes precedence)
            - voice_name="en-US-AriaNeural", explicit_lang=None -> None (use default from voice)
            - voice_name="en-US-AvaMultilingualNeural", explicit_lang="fr-FR" -> "fr-FR"
        """
        # If explicit language is provided, use it (for multilingual voices)
        if explicit_lang:
            return explicit_lang
        # For non-multilingual voices, we don't need to set xml:lang on the voice element
        # The voice name already encodes the language (e.g., en-US-AriaNeural)
        # Only return a language if explicitly set
        return None

    def map_openai_params(
        self,
        model: str,
        optional_params: Dict,
        voice: Optional[Union[str, Dict]] = None,
        drop_params: bool = False,
        kwargs: Dict = {},  # NOTE(review): mutable default — read-only here, but fragile
    ) -> Tuple[Optional[str], Dict]:
        """
        Map OpenAI parameters to Azure AVA TTS parameters

        Returns a (mapped_voice, mapped_params) tuple; mapped_voice is the
        Azure voice name (or the input unchanged if already an Azure name).
        """
        mapped_params = {}
        ##########################################################
        # Map voice
        # OpenAI uses voice as a required param, hence not in optional_params
        ##########################################################
        # If it's already an Azure voice, use it directly
        mapped_voice: Optional[str] = None
        if isinstance(voice, str):
            if voice in self.VOICE_MAPPINGS:
                mapped_voice = self.VOICE_MAPPINGS[voice]
            else:
                # Assume it's already an Azure voice name
                mapped_voice = voice
        # Map response format
        if "response_format" in optional_params:
            format_name = optional_params["response_format"]
            if format_name in self.FORMAT_MAPPINGS:
                mapped_params["output_format"] = self.FORMAT_MAPPINGS[format_name]
            else:
                # Try to use it directly as Azure format
                mapped_params["output_format"] = format_name
        else:
            # Default to MP3
            mapped_params["output_format"] = "audio-24khz-48kbitrate-mono-mp3"
        # Map speed (OpenAI: 0.25-4.0, Azure: prosody rate)
        if "speed" in optional_params:
            speed = optional_params["speed"]
            if speed is not None:
                mapped_params["rate"] = self._convert_speed_to_azure_rate(speed=speed)
        # Pass through Azure-specific SSML parameters
        if "style" in kwargs:
            mapped_params["style"] = kwargs["style"]
        if "styledegree" in kwargs:
            mapped_params["styledegree"] = kwargs["styledegree"]
        if "role" in kwargs:
            mapped_params["role"] = kwargs["role"]
        if "lang" in kwargs:
            mapped_params["lang"] = kwargs["lang"]
        return mapped_voice, mapped_params

    def validate_environment(
        self,
        headers: dict,
        model: str,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        """
        Validate Azure environment and set up authentication headers
        """
        validated_headers = headers.copy()
        # Azure AVA TTS requires either:
        # 1. Ocp-Apim-Subscription-Key header, or
        # 2. Authorization: Bearer <token> header
        # We'll use the token-based auth via our token handler
        # The token will be added later in the handler
        if api_key:
            # If subscription key is provided, use it directly
            validated_headers["Ocp-Apim-Subscription-Key"] = api_key
        # Content-Type for SSML
        validated_headers["Content-Type"] = "application/ssml+xml"
        # User-Agent
        validated_headers["User-Agent"] = "litellm"
        return validated_headers

    def get_complete_url(
        self,
        model: str,
        api_base: Optional[str],
        litellm_params: dict,
    ) -> str:
        """
        Get the complete URL for Azure AVA TTS request

        Azure TTS endpoint format:
        https://{region}.tts.speech.microsoft.com/cognitiveservices/v1

        Raises:
            ValueError: if api_base is None.
        """
        if api_base is None:
            raise ValueError(
                f"api_base is required for Azure AVA TTS. "
                f"Format: https://{{region}}.{self.COGNITIVE_SERVICES_DOMAIN} or "
                f"https://{{region}}.{self.TTS_SPEECH_DOMAIN}"
            )
        # Remove trailing slash and parse URL
        api_base = api_base.rstrip("/")
        parsed_url = urlparse(api_base)
        hostname = parsed_url.hostname or ""
        # Check if it's a Cognitive Services endpoint (convert to TTS endpoint)
        if self._is_cognitive_services_endpoint(hostname=hostname):
            region = self._extract_region_from_hostname(
                hostname=hostname, domain=self.COGNITIVE_SERVICES_DOMAIN
            )
            return self._build_tts_url(region=region)
        # Check if it's already a TTS endpoint
        if self._is_tts_endpoint(hostname=hostname):
            if not api_base.endswith(self.TTS_ENDPOINT_PATH):
                return f"{api_base}{self.TTS_ENDPOINT_PATH}"
            return api_base
        # Assume it's a custom endpoint, append the path
        return f"{api_base}{self.TTS_ENDPOINT_PATH}"

    def _is_cognitive_services_endpoint(self, hostname: str) -> bool:
        """Check if hostname is a Cognitive Services endpoint"""
        return hostname == self.COGNITIVE_SERVICES_DOMAIN or hostname.endswith(
            f".{self.COGNITIVE_SERVICES_DOMAIN}"
        )

    def _is_tts_endpoint(self, hostname: str) -> bool:
        """Check if hostname is a TTS endpoint"""
        return hostname == self.TTS_SPEECH_DOMAIN or hostname.endswith(
            f".{self.TTS_SPEECH_DOMAIN}"
        )

    def _extract_region_from_hostname(self, hostname: str, domain: str) -> str:
        """
        Extract region from hostname

        Examples:
            eastus.api.cognitive.microsoft.com -> eastus
            api.cognitive.microsoft.com -> ""
        """
        if hostname.endswith(f".{domain}"):
            return hostname[: -len(f".{domain}")]
        return ""

    def _build_tts_url(self, region: str) -> str:
        """Build the complete TTS URL with region"""
        if region:
            return f"https://{region}.{self.TTS_SPEECH_DOMAIN}{self.TTS_ENDPOINT_PATH}"
        return f"https://{self.TTS_SPEECH_DOMAIN}{self.TTS_ENDPOINT_PATH}"

    def is_ssml_input(self, input: str) -> bool:
        """
        Returns True if input is SSML, False otherwise

        Based on https://www.w3.org/TR/speech-synthesis/ all SSML must start with <speak>
        """
        return "<speak>" in input or "<speak " in input

    def transform_text_to_speech_request(
        self,
        model: str,
        input: str,
        voice: Optional[str],
        optional_params: Dict,
        litellm_params: Dict,
        headers: dict,
    ) -> TextToSpeechRequestData:
        """
        Transform OpenAI TTS request to Azure AVA TTS SSML format

        Note: optional_params should already be mapped via map_openai_params in main.py

        Supports Azure-specific SSML features:
        - style: Speaking style (e.g., "cheerful", "sad", "angry")
        - styledegree: Style intensity (0.01 to 2)
        - role: Voice role (e.g., "Girl", "Boy", "SeniorFemale", "SeniorMale")
        - lang: Language code for multilingual voices (e.g., "es-ES", "fr-FR")

        Auto-detects SSML:
        - If input contains <speak>, it's passed through as-is without transformation

        Returns:
            TextToSpeechRequestData: Contains SSML body and Azure-specific headers
        """
        # Get voice (already mapped in main.py, or use default)
        azure_voice = voice or self.DEFAULT_VOICE
        # Get output format (already mapped in main.py)
        output_format = optional_params.get(
            "output_format", "audio-24khz-48kbitrate-mono-mp3"
        )
        # NOTE: mutates the caller's headers dict in place.
        headers["X-Microsoft-OutputFormat"] = output_format
        # Auto-detect SSML: if input contains <speak>, pass it through as-is
        # Similar to Vertex AI behavior - check if input looks like SSML
        if self.is_ssml_input(input=input):
            return TextToSpeechRequestData(
                ssml_body=input,
                headers=headers,
            )
        # Build SSML from plain text
        rate = optional_params.get("rate", "0%")
        style = optional_params.get("style")
        styledegree = optional_params.get("styledegree")
        role = optional_params.get("role")
        lang = optional_params.get("lang")
        # Escape XML special characters in input text
        escaped_input = (
            input.replace("&", "&amp;")
            .replace("<", "&lt;")
            .replace(">", "&gt;")
            .replace('"', "&quot;")
            .replace("'", "&apos;")
        )
        # Determine if we need mstts namespace (for express-as element)
        use_mstts = style or role or styledegree
        # Build the xmlns attributes
        if use_mstts:
            xmlns = "xmlns='http://www.w3.org/2001/10/synthesis' xmlns:mstts='https://www.w3.org/2001/mstts'"
        else:
            xmlns = "xmlns='http://www.w3.org/2001/10/synthesis'"
        # Build the inner content with prosody
        prosody_content = f"<prosody rate='{rate}'>{escaped_input}</prosody>"
        # Wrap in mstts:express-as if style or role is specified
        voice_content = self._build_express_as_element(
            content=prosody_content,
            style=style,
            styledegree=styledegree,
            role=role,
        )
        # Build voice element with optional xml:lang attribute
        voice_lang = self._get_voice_language(
            voice_name=azure_voice,
            explicit_lang=lang,
        )
        voice_lang_attr = f" xml:lang='{voice_lang}'" if voice_lang else ""
        ssml_body = f"""<speak version='1.0' {xmlns} xml:lang='en-US'>
<voice name='{azure_voice}'{voice_lang_attr}>
{voice_content}
</voice>
</speak>"""
        # NOTE(review): returns a plain dict here vs the TextToSpeechRequestData(...)
        # constructor call above — assumes TextToSpeechRequestData is a TypedDict;
        # confirm both forms are equivalent for callers.
        return {
            "ssml_body": ssml_body,
            "headers": headers,
        }

    def transform_text_to_speech_response(
        self,
        model: str,
        raw_response: httpx.Response,
        logging_obj: "LiteLLMLoggingObj",
    ) -> "HttpxBinaryResponseContent":
        """
        Transform Azure AVA TTS response to standard format

        Azure returns the audio data directly in the response body
        """
        from litellm.types.llms.openai import HttpxBinaryResponseContent

        # Azure returns audio data directly in the response body
        # Wrap it in HttpxBinaryResponseContent for consistent return type
        return HttpxBinaryResponseContent(raw_response)

View File

@@ -0,0 +1,25 @@
from typing import Optional
from litellm.llms.azure.common_utils import BaseAzureLLM
from litellm.llms.openai.vector_stores.transformation import OpenAIVectorStoreConfig
from litellm.types.router import GenericLiteLLMParams
class AzureOpenAIVectorStoreConfig(OpenAIVectorStoreConfig):
    """Azure variant of the OpenAI vector-store config.

    Overrides only URL construction (Azure ``/openai/vector_stores`` route)
    and environment validation (Azure api-key headers); all request/response
    transformations come from the OpenAI base class.
    """

    def get_complete_url(
        self,
        api_base: Optional[str],
        litellm_params: dict,
    ) -> str:
        """Resolve the full Azure vector-stores endpoint URL."""
        url = BaseAzureLLM._get_base_azure_url(
            api_base=api_base,
            litellm_params=litellm_params,
            route="/openai/vector_stores",
        )
        return url

    def validate_environment(
        self, headers: dict, litellm_params: Optional[GenericLiteLLMParams]
    ) -> dict:
        """Attach Azure auth headers resolved from the request's litellm params."""
        validated = BaseAzureLLM._base_validate_azure_environment(
            headers=headers, litellm_params=litellm_params
        )
        return validated

View File

@@ -0,0 +1,93 @@
from typing import TYPE_CHECKING, Any, Dict, Optional
from litellm.types.videos.main import VideoCreateOptionalRequestParams
from litellm.types.router import GenericLiteLLMParams
from litellm.llms.azure.common_utils import BaseAzureLLM
from litellm.llms.openai.videos.transformation import OpenAIVideoConfig
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
from ...base_llm.videos.transformation import BaseVideoConfig as _BaseVideoConfig
from ...base_llm.chat.transformation import BaseLLMException as _BaseLLMException
LiteLLMLoggingObj = _LiteLLMLoggingObj
BaseVideoConfig = _BaseVideoConfig
BaseLLMException = _BaseLLMException
else:
LiteLLMLoggingObj = Any
BaseVideoConfig = Any
BaseLLMException = Any
class AzureVideoConfig(OpenAIVideoConfig):
    """
    Configuration class for OpenAI video generation.

    Azure variant: reuses the OpenAI video transforms while overriding
    authentication (api-key header) and URL construction.
    """

    def __init__(self):
        super().__init__()

    def get_supported_openai_params(self, model: str) -> list:
        """
        Get the list of supported OpenAI parameters for video generation.
        """
        return [
            "model",
            "prompt",
            "input_reference",
            "seconds",
            "size",
            "user",
            "extra_headers",
        ]

    def map_openai_params(
        self,
        video_create_optional_params: VideoCreateOptionalRequestParams,
        model: str,
        drop_params: bool,
    ) -> Dict:
        """No mapping applied since inputs are in OpenAI spec already"""
        return dict(video_create_optional_params)

    def validate_environment(
        self,
        headers: dict,
        model: str,
        api_key: Optional[str] = None,
        litellm_params: Optional[GenericLiteLLMParams] = None,
    ) -> dict:
        """
        Validate Azure environment and set up authentication headers.
        Uses _base_validate_azure_environment to properly handle credentials from litellm_credential_name.
        """
        # If litellm_params is provided, use it; otherwise create a new one
        if litellm_params is None:
            litellm_params = GenericLiteLLMParams()
        # NOTE(review): when a caller-supplied litellm_params lacks an api_key,
        # this assigns onto the caller's object — confirm that side effect is intended.
        if api_key and not litellm_params.api_key:
            litellm_params.api_key = api_key
        # Use the base Azure validation method which properly handles:
        # 1. Credentials from litellm_credential_name via litellm_params
        # 2. Sets the correct "api-key" header (not "Authorization: Bearer")
        return BaseAzureLLM._base_validate_azure_environment(
            headers=headers, litellm_params=litellm_params
        )

    def get_complete_url(
        self,
        model: str,
        api_base: Optional[str],
        litellm_params: dict,
    ) -> str:
        """
        Constructs a complete URL for the API request.

        Azure exposes video generation under /openai/v1/videos; the empty
        default api-version leaves version selection to litellm_params.
        """
        return BaseAzureLLM._get_base_azure_url(
            api_base=api_base,
            litellm_params=litellm_params,
            route="/openai/v1/videos",
            default_api_version="",
        )