chore: initial public snapshot for github upload

This commit is contained in:
Your Name
2026-03-26 20:06:14 +08:00
commit 0e5ecd930e
3497 changed files with 1586236 additions and 0 deletions

View File

@@ -0,0 +1,160 @@
"""
Support for OVHCloud AI Endpoints `/v1/audio/transcriptions` endpoint.
Our unified API follows the OpenAI standard.
More information on our website: https://endpoints.ai.cloud.ovh.net
"""
from typing import List, Optional, Union
import httpx
from litellm.litellm_core_utils.audio_utils.utils import process_audio_file
from litellm.llms.base_llm.audio_transcription.transformation import (
AudioTranscriptionRequestData,
BaseAudioTranscriptionConfig,
)
from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import (
AllMessageValues,
OpenAIAudioTranscriptionOptionalParams,
)
from litellm.types.utils import FileTypes, TranscriptionResponse
from ..utils import OVHCloudException
class OVHCloudAudioTranscriptionConfig(BaseAudioTranscriptionConfig):
def get_supported_openai_params(
self, model: str
) -> List[OpenAIAudioTranscriptionOptionalParams]:
# OVHCloud implements the OpenAI-compatible Whisper interface.
# We pass through the same optional params as the OpenAI Whisper API.
return [
"language",
"prompt",
"response_format",
"timestamp_granularities",
"temperature",
]
def map_openai_params(
self,
non_default_params: dict,
optional_params: dict,
model: str,
drop_params: bool,
) -> dict:
supported_params = self.get_supported_openai_params(model)
for k, v in non_default_params.items():
if k in supported_params:
optional_params[k] = v
return optional_params
def get_complete_url(
self,
api_base: Optional[str],
api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
stream: Optional[bool] = None,
) -> str:
api_base = (
"https://oai.endpoints.kepler.ai.cloud.ovh.net/v1"
if api_base is None
else api_base.rstrip("/")
)
complete_url = f"{api_base}/audio/transcriptions"
return complete_url
def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
return OVHCloudException(
message=error_message,
status_code=status_code,
headers=headers,
)
def validate_environment(
self,
headers: dict,
model: str,
messages: List[AllMessageValues],
optional_params: dict,
litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
if api_key is None:
api_key = get_secret_str("OVHCLOUD_API_KEY")
default_headers = {
"Authorization": f"Bearer {api_key}",
"accept": "application/json",
}
# Caller can override / extend headers if needed
default_headers.update(headers or {})
return default_headers
def transform_audio_transcription_request(
self,
model: str,
audio_file: FileTypes,
optional_params: dict,
litellm_params: dict,
) -> AudioTranscriptionRequestData:
"""
Transform the audio transcription request into OpenAI-compatible form-data.
OVHCloud follows OpenAI's `/audio/transcriptions` format, so we:
- Build a multipart form-data body with `file`, `model`, and optional params
- Let the shared HTTP handler set the proper content-type boundary
"""
processed_audio = process_audio_file(audio_file)
# Base form fields: model + OpenAI-compatible optional params
form_fields: dict = {
"model": model,
}
# Include OpenAI-compatible optional params
for key in self.get_supported_openai_params(model):
value = optional_params.get(key)
if value is not None:
form_fields[key] = value
files = {
"file": (
processed_audio.filename,
processed_audio.file_content,
processed_audio.content_type,
)
}
return AudioTranscriptionRequestData(data=form_fields, files=files)
def transform_audio_transcription_response(
self,
raw_response: httpx.Response,
) -> TranscriptionResponse:
"""
Transform OVHCloud audio transcription response to OpenAI-compatible TranscriptionResponse.
"""
try:
response_json = raw_response.json()
except Exception:
raise OVHCloudException(
message=raw_response.text,
status_code=raw_response.status_code,
headers=raw_response.headers,
)
text = response_json.get("text") or response_json.get("transcript") or ""
response = TranscriptionResponse(text=text)
response._hidden_params = response_json
return response
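

# Usage sketch (illustrative only): a transcription call routed through this
# config via litellm. The model name "ovhcloud/whisper-large-v3" is an assumed
# example; substitute any transcription model from the OVHCloud catalog.
# Requires OVHCLOUD_API_KEY in the environment (or an explicit api_key=).
if __name__ == "__main__":
    import litellm

    with open("sample.wav", "rb") as audio:
        result = litellm.transcription(
            model="ovhcloud/whisper-large-v3",  # assumed model name
            file=audio,
            language="en",  # optional param, forwarded as multipart form-data
        )
    print(result.text)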

View File

@@ -0,0 +1,148 @@
"""
Support for OVHCloud AI Endpoints `/v1/chat/completions` endpoint.
Our unified API follows the OpenAI standard.
More information on our website: https://endpoints.ai.cloud.ovh.net
"""
from typing import List, Optional, Union
import httpx
from litellm.utils import ModelResponseStream, get_model_info
from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig
from litellm._logging import verbose_logger
from litellm.llms.ovhcloud.utils import OVHCloudException
from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.types.llms.openai import AllMessageValues
class OVHCloudChatConfig(OpenAIGPTConfig):
@property
def custom_llm_provider(self) -> Optional[str]:
return "ovhcloud"
def get_supported_openai_params(self, model: str) -> list:
"""
Details about function calling support can be found here:
https://help.ovhcloud.com/csm/en-gb-public-cloud-ai-endpoints-function-calling?id=kb_article_view&sysparm_article=KB0071907
"""
supports_function_calling: Optional[bool] = None
try:
model_info = get_model_info(model, custom_llm_provider="ovhcloud")
supports_function_calling = model_info.get(
"supports_function_calling", False
)
        except Exception as e:
            verbose_logger.debug(f"Error getting model info for {model}: {e}")
        optional_params = super().get_supported_openai_params(model)
        if supports_function_calling is not True:
            verbose_logger.debug(
                "Model does not support function calling; models that do are listed in the OVHCloud catalog: https://endpoints.ai.cloud.ovh.net/catalog"
            )
            # Guard each removal so a changed base param list cannot raise ValueError
            for unsupported_param in (
                "tools",
                "tool_choice",
                "function_call",
                "response_format",
            ):
                if unsupported_param in optional_params:
                    optional_params.remove(unsupported_param)
        return optional_params
def get_complete_url(
self,
api_base: Optional[str],
api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
stream: Optional[bool] = None,
) -> str:
api_base = (
"https://oai.endpoints.kepler.ai.cloud.ovh.net/v1"
if api_base is None
else api_base.rstrip("/")
)
complete_url = f"{api_base}/chat/completions"
return complete_url
def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
return OVHCloudException(
message=error_message,
status_code=status_code,
headers=headers,
)
def map_openai_params(
self,
non_default_params: dict,
optional_params: dict,
model: str,
drop_params: bool,
) -> dict:
mapped_openai_params = super().map_openai_params(
non_default_params, optional_params, model, drop_params
)
return mapped_openai_params
def transform_request(
self,
model: str,
messages: List[AllMessageValues],
optional_params: dict,
litellm_params: dict,
headers: dict,
) -> dict:
extra_body = optional_params.pop("extra_body", {})
response = super().transform_request(
model, messages, optional_params, litellm_params, headers
)
response.update(extra_body)
return response
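

# Note on transform_request above: any `extra_body` passed to litellm.completion
# is merged into the top level of the JSON payload, so provider-specific
# parameters that litellm does not model can still reach the OVHCloud API.
# Illustrative sketch only; the parameter name "custom_ovh_param" is
# hypothetical:
#
#     litellm.completion(
#         model="ovhcloud/<model>",
#         messages=[{"role": "user", "content": "hi"}],
#         extra_body={"custom_ovh_param": True},
#     )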
class OVHCloudChatCompletionStreamingHandler(BaseModelResponseIterator):
"""
Handler for OVHCloud AI Endpoints streaming chat completion responses
"""
def chunk_parser(self, chunk: dict) -> ModelResponseStream:
"""
Parse individual chunks from streaming response
"""
try:
if "error" in chunk:
error_chunk = chunk["error"]
error_message = "OVHCloud Error: {}".format(
error_chunk.get("message", "Unknown error")
)
raise OVHCloudException(
message=error_message,
status_code=error_chunk.get("code", 400),
headers={"Content-Type": "application/json"},
)
            new_choices = []
            for choice in chunk["choices"]:
                # OVHCloud streams reasoning tokens under "reasoning"; litellm
                # expects them as "reasoning_content" on the delta.
                if "delta" in choice and "reasoning" in choice["delta"]:
                    choice["delta"]["reasoning_content"] = choice["delta"].get(
                        "reasoning"
                    )
                new_choices.append(choice)
return ModelResponseStream(
id=chunk["id"],
object="chat.completion.chunk",
created=chunk["created"],
usage=chunk.get("usage"),
model=chunk["model"],
choices=new_choices,
)
        except KeyError as e:
            raise OVHCloudException(
                message=f"KeyError: {e}, got unexpected response from OVHCloud: {chunk}",
                status_code=400,
                headers={"Content-Type": "application/json"},
            )
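

# Usage sketch (illustrative only): streaming a chat completion through the
# OVHCloud provider. The model name "ovhcloud/Meta-Llama-3_3-70B-Instruct" is
# an assumed example; substitute any chat model from the OVHCloud catalog.
# Reasoning tokens, when a model emits them, surface as
# `delta.reasoning_content` via chunk_parser above.
if __name__ == "__main__":
    import litellm

    stream = litellm.completion(
        model="ovhcloud/Meta-Llama-3_3-70B-Instruct",  # assumed model name
        messages=[{"role": "user", "content": "Say hello in one word."}],
        stream=True,
    )
    for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="", flush=True)
    print()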

View File

@@ -0,0 +1,126 @@
"""
This is OpenAI compatible - no transformation is applied
"""
from typing import List, Optional, Union
import httpx
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import AllEmbeddingInputValues, AllMessageValues
from litellm.types.utils import EmbeddingResponse, Usage
from ..utils import OVHCloudException
class OVHCloudEmbeddingConfig(BaseEmbeddingConfig):
def __init__(self) -> None:
pass
def get_complete_url(
self,
api_base: Optional[str],
api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
stream: Optional[bool] = None,
) -> str:
api_base = (
"https://oai.endpoints.kepler.ai.cloud.ovh.net/v1"
if api_base is None
else api_base.rstrip("/")
)
complete_url = f"{api_base}/embeddings"
return complete_url
def validate_environment(
self,
headers: dict,
model: str,
messages: List[AllMessageValues],
optional_params: dict,
litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
if api_key is None:
api_key = get_secret_str("OVHCLOUD_API_KEY")
default_headers = {
"Authorization": f"Bearer {api_key}",
"accept": "application/json",
"Content-Type": "application/json",
}
if "Authorization" in headers:
default_headers["Authorization"] = headers["Authorization"]
return {**default_headers, **headers}
def get_supported_openai_params(self, model: str):
return []
def map_openai_params(
self,
non_default_params: dict,
optional_params: dict,
model: str,
drop_params: bool,
):
supported_openai_params = self.get_supported_openai_params(model)
for param, value in non_default_params.items():
if param in supported_openai_params:
optional_params[param] = value
return optional_params
def transform_embedding_request(
self,
model: str,
input: AllEmbeddingInputValues,
optional_params: dict,
headers: dict,
) -> dict:
return {"input": input, "model": model, **optional_params}
def transform_embedding_response(
self,
model: str,
raw_response: httpx.Response,
model_response: EmbeddingResponse,
logging_obj: LiteLLMLoggingObj,
api_key: Optional[str],
request_data: dict,
optional_params: dict,
litellm_params: dict,
) -> EmbeddingResponse:
try:
raw_response_json = raw_response.json()
except Exception:
raise OVHCloudException(
message=raw_response.text,
status_code=raw_response.status_code,
headers=raw_response.headers,
)
model_response.model = raw_response_json.get("model")
model_response.data = raw_response_json.get("data")
model_response.object = raw_response_json.get("object")
usage = Usage(
prompt_tokens=raw_response_json.get("usage", {}).get("prompt_tokens", 0),
total_tokens=raw_response_json.get("usage", {}).get("total_tokens", 0),
)
model_response.usage = usage
return model_response
def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
return OVHCloudException(
message=error_message, status_code=status_code, headers=headers
)
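

# Usage sketch (illustrative only): an embeddings call through the OVHCloud
# provider. The model name "ovhcloud/bge-multilingual-gemma2" is an assumed
# example; substitute any embedding model from the OVHCloud catalog.
if __name__ == "__main__":
    import litellm

    response = litellm.embedding(
        model="ovhcloud/bge-multilingual-gemma2",  # assumed model name
        input=["LiteLLM routes this text to OVHCloud AI Endpoints."],
    )
    print(len(response.data[0]["embedding"]))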

View File

@@ -0,0 +1,7 @@
from litellm.llms.base_llm.chat.transformation import BaseLLMException
class OVHCloudException(BaseLLMException):
    """OVHCloud AI Endpoints exception handling class."""