chore: initial snapshot for gitea/github upload

Your Name
2026-03-26 16:04:46 +08:00
commit a699a1ac98
3497 changed files with 1586237 additions and 0 deletions

View File

@@ -0,0 +1,206 @@
"""
Support for CometAPI's `/v1/chat/completions` endpoint.
Based on the OpenAI-compatible API interface.
Documentation: [CometAPI Documentation Link]
"""
from typing import Any, AsyncIterator, Iterator, List, Optional, Tuple, Union
import httpx
from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.types.llms.openai import AllMessageValues, ChatCompletionToolParam
from litellm.types.utils import ModelResponse, ModelResponseStream
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
from ..common_utils import CometAPIException
class CometAPIConfig(OpenAIGPTConfig):
"""
    CometAPI configuration class; inherits from OpenAIGPTConfig.
    Since CometAPI exposes an OpenAI-compatible API, we inherit from
    OpenAIGPTConfig and only override the methods needed for
    CometAPI-specific behavior.
"""
def map_openai_params(
self,
non_default_params: dict,
optional_params: dict,
model: str,
drop_params: bool,
) -> dict:
"""
Map OpenAI format parameters to CometAPI format
"""
mapped_openai_params = super().map_openai_params(
non_default_params, optional_params, model, drop_params
)
# CometAPI-specific parameters (if any)
extra_body: dict[str, Any] = {}
# TODO: Add CometAPI-specific parameter handling here
# Example:
# custom_param = non_default_params.pop("custom_param", None)
# if custom_param is not None:
# extra_body["custom_param"] = custom_param
if extra_body:
mapped_openai_params["extra_body"] = extra_body
return mapped_openai_params
def remove_cache_control_flag_from_messages_and_tools(
self,
model: str,
messages: List[AllMessageValues],
tools: Optional[List["ChatCompletionToolParam"]] = None,
) -> Tuple[List[AllMessageValues], Optional[List["ChatCompletionToolParam"]]]:
"""
Remove cache control flags from messages and tools if not supported
"""
# For CometAPI, use default behavior (remove cache control)
return super().remove_cache_control_flag_from_messages_and_tools(
model, messages, tools
)
def transform_request(
self,
model: str,
messages: List[AllMessageValues],
optional_params: dict,
litellm_params: dict,
headers: dict,
) -> dict:
"""
Transform the overall request to be sent to the API.
Returns:
dict: The transformed request. Sent as the body of the API call.
"""
extra_body = optional_params.pop("extra_body", {})
response = super().transform_request(
model, messages, optional_params, litellm_params, headers
)
response.update(extra_body)
return response
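    # e.g. (illustrative values): optional_params={"temperature": 0.2,
    # "extra_body": {"custom_param": 1}} yields a request body containing
    # "temperature" plus "custom_param" flattened to the top level.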
def get_complete_url(
self,
api_base: Optional[str],
api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
stream: Optional[bool] = None,
) -> str:
"""
Get the complete URL for the CometAPI call.
Returns:
str: The complete URL for the API call.
"""
# Default base
if api_base is None:
api_base = "https://api.cometapi.com/v1"
endpoint = "chat/completions"
# Normalize
api_base = api_base.rstrip("/")
# If endpoint already present, return as-is
if endpoint in api_base:
return api_base
        # Ensure the /v1 prefix is included when missing; trailing slashes
        # were already stripped above, so only the bare "/v1" form can occur.
        if api_base.endswith("/v1"):
            return f"{api_base}/{endpoint}"
# If user provided https://api.cometapi.com, add /v1
if api_base == "https://api.cometapi.com":
return f"{api_base}/v1/{endpoint}"
# Generic fallback: if '/v1' not in path, add it
if "/v1" not in api_base.split("//", 1)[-1]:
return f"{api_base}/v1/{endpoint}"
return f"{api_base}/{endpoint}"
def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
"""
Return CometAPI-specific error class
"""
return CometAPIException(
message=error_message,
status_code=status_code,
headers=headers,
)
def get_model_response_iterator(
self,
streaming_response: Union[Iterator[str], AsyncIterator[str], ModelResponse],
sync_stream: bool,
json_mode: Optional[bool] = False,
) -> Any:
"""
Get model response iterator for streaming responses
"""
return CometAPIChatCompletionStreamingHandler(
streaming_response=streaming_response,
sync_stream=sync_stream,
json_mode=json_mode,
)
class CometAPIChatCompletionStreamingHandler(BaseModelResponseIterator):
"""
Handler for CometAPI streaming chat completion responses
"""
def chunk_parser(self, chunk: dict) -> ModelResponseStream:
"""
Parse individual chunks from streaming response
"""
try:
# Handle error in chunk
if "error" in chunk:
error_chunk = chunk["error"]
error_message = "CometAPI Error: {}".format(
error_chunk.get("message", "Unknown error")
)
raise CometAPIException(
message=error_message,
status_code=error_chunk.get("code", 400),
headers={"Content-Type": "application/json"},
)
# Process choices
new_choices = []
for choice in chunk["choices"]:
# Handle reasoning content if present
if "delta" in choice and "reasoning" in choice["delta"]:
choice["delta"]["reasoning_content"] = choice["delta"].get(
"reasoning"
)
new_choices.append(choice)
return ModelResponseStream(
id=chunk["id"],
object="chat.completion.chunk",
created=chunk["created"],
usage=chunk.get("usage"),
model=chunk["model"],
choices=new_choices,
)
except KeyError as e:
raise CometAPIException(
message=f"KeyError: {e}, Got unexpected response from CometAPI: {chunk}",
status_code=400,
headers={"Content-Type": "application/json"},
)
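
if __name__ == "__main__":
    # Minimal local sketch (no network): parse one hypothetical streaming
    # chunk. The payload shape and model id below are assumptions based on
    # the OpenAI-compatible format this handler expects.
    _handler = CometAPIChatCompletionStreamingHandler(
        streaming_response=iter([]), sync_stream=True
    )
    _chunk = {
        "id": "chatcmpl-123",
        "created": 1700000000,
        "model": "gpt-4o-mini",
        "choices": [
            {"index": 0, "delta": {"content": "Hi", "reasoning": "thinking..."}}
        ],
    }
    # "reasoning" is copied into "reasoning_content" by chunk_parser
    print(_handler.chunk_parser(_chunk))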

View File

@@ -0,0 +1,7 @@
from litellm.llms.base_llm.chat.transformation import BaseLLMException
class CometAPIException(BaseLLMException):
"""CometAPI exception handling class"""

View File

@@ -0,0 +1,3 @@
from .transformation import CometAPIEmbeddingConfig
__all__ = ["CometAPIEmbeddingConfig"]

View File

@@ -0,0 +1,157 @@
"""
CometAPI Embedding API support - OpenAI compatible
"""
from typing import List, Optional, Union
import httpx
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import AllEmbeddingInputValues, AllMessageValues
from litellm.types.utils import EmbeddingResponse, Usage
from ..common_utils import CometAPIException
class CometAPIEmbeddingConfig(BaseEmbeddingConfig):
"""
Configuration class for CometAPI Embedding API.
Since CometAPI is OpenAI-compatible, this class provides OpenAI-standard
embedding functionality with CometAPI-specific authentication and endpoints.
"""
def __init__(self) -> None:
pass
def get_complete_url(
self,
api_base: Optional[str],
api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
stream: Optional[bool] = None,
) -> str:
"""
Get the complete URL for the CometAPI embedding endpoint.
"""
api_base = (
"https://api.cometapi.com/v1" if api_base is None else api_base.rstrip("/")
)
complete_url = f"{api_base}/embeddings"
return complete_url
def validate_environment(
self,
headers: dict,
model: str,
messages: List[AllMessageValues],
optional_params: dict,
litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
"""
Validate and set up authentication headers for CometAPI.
"""
        if api_key is None:
            api_key = get_secret_str("COMETAPI_KEY")
        if api_key is None:
            raise ValueError("COMETAPI_KEY is not set")
        default_headers = {
            "Authorization": f"Bearer {api_key}",
            "accept": "application/json",
            "Content-Type": "application/json",
        }
        # Caller-supplied headers (including Authorization) take precedence
        # because they are spread last.
        return {**default_headers, **headers}
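    # e.g. (illustrative): passing headers={"Authorization": "Bearer sk-custom"}
    # overrides the key resolved from COMETAPI_KEY in the returned mapping.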
def get_supported_openai_params(self, model: str) -> List[str]:
"""
Get the supported OpenAI parameters for embedding requests.
CometAPI supports standard OpenAI embedding parameters.
"""
return [
"dimensions",
"encoding_format",
"user",
]
def map_openai_params(
self,
non_default_params: dict,
optional_params: dict,
model: str,
drop_params: bool,
) -> dict:
"""
Map OpenAI parameters to CometAPI format.
"""
supported_openai_params = self.get_supported_openai_params(model)
for param, value in non_default_params.items():
if param in supported_openai_params:
optional_params[param] = value
return optional_params
def transform_embedding_request(
self,
model: str,
input: AllEmbeddingInputValues,
optional_params: dict,
headers: dict,
) -> dict:
"""
Transform the embedding request into CometAPI format.
"""
return {"input": input, "model": model, **optional_params}
def transform_embedding_response(
self,
model: str,
raw_response: httpx.Response,
model_response: EmbeddingResponse,
logging_obj: LiteLLMLoggingObj,
api_key: Optional[str],
request_data: dict,
optional_params: dict,
litellm_params: dict,
) -> EmbeddingResponse:
"""
Transform CometAPI response into standard EmbeddingResponse format.
"""
try:
raw_response_json = raw_response.json()
except Exception:
raise CometAPIException(
message=raw_response.text,
status_code=raw_response.status_code,
headers=raw_response.headers,
)
model_response.model = raw_response_json.get("model")
model_response.data = raw_response_json.get("data")
model_response.object = raw_response_json.get("object")
usage = Usage(
prompt_tokens=raw_response_json.get("usage", {}).get("prompt_tokens", 0),
total_tokens=raw_response_json.get("usage", {}).get("total_tokens", 0),
)
model_response.usage = usage
return model_response
def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
"""
Get the appropriate error class for CometAPI exceptions.
"""
return CometAPIException(
message=error_message, status_code=status_code, headers=headers
)
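
if __name__ == "__main__":
    # Minimal request-shaping sketch (no network). The model id below is
    # illustrative; any OpenAI-compatible embedding model id works the same.
    _config = CometAPIEmbeddingConfig()
    _body = _config.transform_embedding_request(
        model="text-embedding-3-small",
        input=["hello world"],
        optional_params={"dimensions": 256},
        headers={},
    )
    print(_body)  # {'input': ['hello world'], 'model': 'text-embedding-3-small', 'dimensions': 256}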

View File

@@ -0,0 +1,13 @@
from litellm.llms.base_llm.image_generation.transformation import (
BaseImageGenerationConfig,
)
from .transformation import CometAPIImageGenerationConfig
__all__ = [
"CometAPIImageGenerationConfig",
]
def get_cometapi_image_generation_config(model: str) -> BaseImageGenerationConfig:
return CometAPIImageGenerationConfig()

View File

@@ -0,0 +1,27 @@
from typing import Any
import litellm
from litellm.types.utils import ImageResponse
def cost_calculator(
model: str,
image_response: Any,
) -> float:
"""
CometAPI image generation cost calculator
"""
_model_info = litellm.get_model_info(
model=model,
custom_llm_provider=litellm.LlmProviders.COMETAPI.value,
)
output_cost_per_image: float = _model_info.get("output_cost_per_image") or 0.0
    if not isinstance(image_response, ImageResponse):
        raise ValueError(
            f"image_response must be of type ImageResponse; got type={type(image_response)}"
        )
    num_images: int = len(image_response.data) if image_response.data else 0
    return output_cost_per_image * num_images
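
# Worked example (hypothetical pricing): with output_cost_per_image = 0.04 in
# the model-info entry and an ImageResponse holding 2 images, the cost is
# 0.04 * 2 = 0.08.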

View File

@@ -0,0 +1,170 @@
from typing import TYPE_CHECKING, Any, List, Optional
import httpx
from litellm.llms.base_llm.image_generation.transformation import (
BaseImageGenerationConfig,
)
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import (
AllMessageValues,
OpenAIImageGenerationOptionalParams,
)
from litellm.types.utils import ImageObject, ImageResponse
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
LiteLLMLoggingObj = Any
class CometAPIImageGenerationConfig(BaseImageGenerationConfig):
DEFAULT_BASE_URL: str = "https://api.cometapi.com"
IMAGE_GENERATION_ENDPOINT: str = "v1/images/generations"
def get_supported_openai_params(
self, model: str
) -> List[OpenAIImageGenerationOptionalParams]:
"""
https://api.cometapi.com/v1/images/generations
"""
return [
"n",
"quality",
"response_format",
"size",
"style",
]
def map_openai_params(
self,
non_default_params: dict,
optional_params: dict,
model: str,
drop_params: bool,
) -> dict:
supported_params = self.get_supported_openai_params(model)
for k in non_default_params.keys():
if k not in optional_params.keys():
if k in supported_params:
# CometAPI uses OpenAI-compatible parameters, so we can pass them directly
optional_params[k] = non_default_params[k]
elif drop_params:
pass
else:
raise ValueError(
f"Parameter {k} is not supported for model {model}. Supported parameters are {supported_params}. Set drop_params=True to drop unsupported parameters."
)
return optional_params
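    # e.g. (illustrative): non_default_params={"size": "1024x1024", "seed": 7}
    # keeps "size" and, with drop_params=True, silently drops the unsupported
    # "seed"; with drop_params=False it raises ValueError instead.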
def get_complete_url(
self,
api_base: Optional[str],
api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
stream: Optional[bool] = None,
) -> str:
"""
        Get the complete URL for the request.
"""
complete_url: str = (
api_base
or get_secret_str("COMETAPI_BASE_URL")
or get_secret_str("COMETAPI_API_BASE")
or self.DEFAULT_BASE_URL
)
complete_url = complete_url.rstrip("/")
complete_url = f"{complete_url}/{self.IMAGE_GENERATION_ENDPOINT}"
return complete_url
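    # Illustrative resolutions (the proxy host is hypothetical):
    #   api_base="https://proxy.example/" -> https://proxy.example/v1/images/generations
    #   api_base=None, no env overrides   -> https://api.cometapi.com/v1/images/generations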
def validate_environment(
self,
headers: dict,
model: str,
messages: List[AllMessageValues],
optional_params: dict,
litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
final_api_key: Optional[str] = (
api_key
or get_secret_str("COMETAPI_KEY")
or get_secret_str("COMETAPI_API_KEY")
)
if not final_api_key:
raise ValueError("COMETAPI_KEY or COMETAPI_API_KEY is not set")
headers["Authorization"] = f"Bearer {final_api_key}"
headers["Content-Type"] = "application/json"
return headers
def transform_image_generation_request(
self,
model: str,
prompt: str,
optional_params: dict,
litellm_params: dict,
headers: dict,
) -> dict:
"""
Transform the image generation request to the CometAPI image generation request body
https://api.cometapi.com/v1/images/generations
"""
# CometAPI uses OpenAI-compatible format
request_body = {
"prompt": prompt,
"model": model,
**optional_params,
}
return request_body
def transform_image_generation_response(
self,
model: str,
raw_response: httpx.Response,
model_response: ImageResponse,
logging_obj: LiteLLMLoggingObj,
request_data: dict,
optional_params: dict,
litellm_params: dict,
encoding: Any,
api_key: Optional[str] = None,
json_mode: Optional[bool] = None,
) -> ImageResponse:
"""
Transform the image generation response to the litellm image response
https://api.cometapi.com/v1/images/generations
"""
try:
response_data = raw_response.json()
except Exception as e:
raise self.get_error_class(
error_message=f"Error transforming image generation response: {e}",
status_code=raw_response.status_code,
headers=raw_response.headers,
)
if not model_response.data:
model_response.data = []
# CometAPI returns OpenAI-compatible format
# Expected format: {"created": timestamp, "data": [{"url": "...", "b64_json": "..."}]}
if "data" in response_data:
for image_data in response_data["data"]:
image_obj = ImageObject(
b64_json=image_data.get("b64_json"),
url=image_data.get("url"),
)
model_response.data.append(image_obj)
return model_response
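
if __name__ == "__main__":
    # Minimal response-shaping sketch (no network call). The payload and model
    # id below are hypothetical OpenAI-style values, not real CometAPI output.
    _config = CometAPIImageGenerationConfig()
    _raw = httpx.Response(
        200,
        json={"created": 1700000000, "data": [{"url": "https://example.com/img.png"}]},
    )
    _result = _config.transform_image_generation_response(
        model="hypothetical-image-model",
        raw_response=_raw,
        model_response=ImageResponse(),
        logging_obj=None,  # not referenced by this transform
        request_data={},
        optional_params={},
        litellm_params={},
        encoding=None,
    )
    print(_result.data)  # [ImageObject(url='https://example.com/img.png', ...)]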