chore: initial public snapshot for github upload
This commit is contained in:
@@ -0,0 +1,241 @@
|
||||
# Container Files API
|
||||
|
||||
This module provides a unified interface for container file operations across multiple LLM providers (OpenAI, Azure OpenAI, etc.).
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
endpoints.json # Declarative endpoint definitions
|
||||
↓
|
||||
endpoint_factory.py # Auto-generates SDK functions
|
||||
↓
|
||||
container_handler.py # Generic HTTP handler
|
||||
↓
|
||||
BaseContainerConfig # Provider-specific transformations
|
||||
├── OpenAIContainerConfig
|
||||
└── AzureContainerConfig (example)
|
||||
```
|
||||
|
||||
## Files Overview
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `endpoints.json` | **Single source of truth** - Defines all container file endpoints |
|
||||
| `endpoint_factory.py` | Auto-generates SDK functions (`list_container_files`, etc.) |
|
||||
| `main.py` | Core container operations (create, list, retrieve, delete containers) |
|
||||
| `utils.py` | Request parameter utilities |
|
||||
|
||||
## Adding a New Endpoint
|
||||
|
||||
To add a new container file endpoint (e.g., `get_container_file_content`):
|
||||
|
||||
### Step 1: Add to `endpoints.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "get_container_file_content",
|
||||
"async_name": "aget_container_file_content",
|
||||
"path": "/containers/{container_id}/files/{file_id}/content",
|
||||
"method": "GET",
|
||||
"path_params": ["container_id", "file_id"],
|
||||
"query_params": [],
|
||||
"response_type": "ContainerFileContentResponse"
|
||||
}
|
||||
```
|
||||
|
||||
### Step 2: Add Response Type (if new)
|
||||
|
||||
In `litellm/types/containers/main.py`:
|
||||
|
||||
```python
|
||||
class ContainerFileContentResponse(BaseModel):
|
||||
"""Response for file content download."""
|
||||
content: bytes
|
||||
# ... other fields
|
||||
```
|
||||
|
||||
### Step 3: Register Response Type
|
||||
|
||||
In `litellm/llms/custom_httpx/container_handler.py`, add to `RESPONSE_TYPES`:
|
||||
|
||||
```python
|
||||
RESPONSE_TYPES = {
|
||||
# ... existing types
|
||||
"ContainerFileContentResponse": ContainerFileContentResponse,
|
||||
}
|
||||
```
|
||||
|
||||
### Step 4: Update Router (one-time setup)
|
||||
|
||||
In `litellm/router.py`, add the call_type to the factory_function Literal and `_init_containers_api_endpoints` condition.
|
||||
|
||||
In `litellm/proxy/route_llm_request.py`, add to the route mappings and skip-model-routing lists.
|
||||
|
||||
### Step 5: Update Proxy Handler Factory (if new path params)
|
||||
|
||||
If your endpoint has a new combination of path parameters, add a handler in `litellm/proxy/container_endpoints/handler_factory.py`:
|
||||
|
||||
```python
|
||||
elif path_params == ["container_id", "file_id", "new_param"]:
|
||||
async def handler(...):
|
||||
# handler implementation
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Adding a New Provider (e.g., Azure OpenAI)
|
||||
|
||||
### Step 1: Create Provider Config
|
||||
|
||||
Create `litellm/llms/azure/containers/transformation.py`:
|
||||
|
||||
```python
|
||||
from typing import Dict, Optional, Tuple, Any
|
||||
import httpx
|
||||
|
||||
from litellm.llms.base_llm.containers.transformation import BaseContainerConfig
|
||||
from litellm.types.containers.main import (
|
||||
ContainerFileListResponse,
|
||||
ContainerFileObject,
|
||||
DeleteContainerFileResponse,
|
||||
)
|
||||
from litellm.types.router import GenericLiteLLMParams
|
||||
from litellm.secret_managers.main import get_secret_str
|
||||
|
||||
|
||||
class AzureContainerConfig(BaseContainerConfig):
|
||||
"""Configuration class for Azure OpenAI container API."""
|
||||
|
||||
def get_supported_openai_params(self) -> list:
|
||||
return ["name", "expires_after", "file_ids", "extra_headers"]
|
||||
|
||||
def map_openai_params(
|
||||
self,
|
||||
container_create_optional_params,
|
||||
drop_params: bool,
|
||||
) -> Dict:
|
||||
return dict(container_create_optional_params)
|
||||
|
||||
def validate_environment(
|
||||
self,
|
||||
headers: dict,
|
||||
api_key: Optional[str] = None,
|
||||
) -> dict:
|
||||
"""Azure uses api-key header instead of Bearer token."""
|
||||
import litellm
|
||||
|
||||
api_key = (
|
||||
api_key
|
||||
or litellm.azure_key
|
||||
or get_secret_str("AZURE_API_KEY")
|
||||
)
|
||||
headers["api-key"] = api_key
|
||||
return headers
|
||||
|
||||
def get_complete_url(
|
||||
self,
|
||||
api_base: Optional[str],
|
||||
litellm_params: dict,
|
||||
) -> str:
|
||||
"""
|
||||
Azure format:
|
||||
https://{resource}.openai.azure.com/openai/containers?api-version=2024-xx
|
||||
"""
|
||||
if api_base is None:
|
||||
raise ValueError("api_base is required for Azure")
|
||||
|
||||
api_version = litellm_params.get("api_version", "2024-02-15-preview")
|
||||
return f"{api_base.rstrip('/')}/openai/containers?api-version={api_version}"
|
||||
|
||||
# Implement remaining abstract methods from BaseContainerConfig:
|
||||
# - transform_container_create_request
|
||||
# - transform_container_create_response
|
||||
# - transform_container_list_request
|
||||
# - transform_container_list_response
|
||||
# - transform_container_retrieve_request
|
||||
# - transform_container_retrieve_response
|
||||
# - transform_container_delete_request
|
||||
# - transform_container_delete_response
|
||||
# - transform_container_file_list_request
|
||||
# - transform_container_file_list_response
|
||||
```
|
||||
|
||||
### Step 2: Register Provider Config
|
||||
|
||||
In `litellm/utils.py`, find `ProviderConfigManager.get_provider_container_config()` and add:
|
||||
|
||||
```python
|
||||
@staticmethod
|
||||
def get_provider_container_config(
|
||||
provider: LlmProviders,
|
||||
) -> Optional[BaseContainerConfig]:
|
||||
if provider == LlmProviders.OPENAI:
|
||||
from litellm.llms.openai.containers.transformation import OpenAIContainerConfig
|
||||
return OpenAIContainerConfig()
|
||||
elif provider == LlmProviders.AZURE:
|
||||
from litellm.llms.azure.containers.transformation import AzureContainerConfig
|
||||
return AzureContainerConfig()
|
||||
return None
|
||||
```
|
||||
|
||||
### Step 3: Test the New Provider
|
||||
|
||||
```bash
|
||||
# Create container via Azure
|
||||
curl -X POST "http://localhost:4000/v1/containers" \
|
||||
-H "Authorization: Bearer sk-1234" \
|
||||
-H "custom-llm-provider: azure" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"name": "My Azure Container"}'
|
||||
|
||||
# List container files via Azure
|
||||
curl -X GET "http://localhost:4000/v1/containers/cntr_123/files" \
|
||||
-H "Authorization: Bearer sk-1234" \
|
||||
-H "custom-llm-provider: azure"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## How Provider Selection Works
|
||||
|
||||
1. **Proxy receives request** with `custom-llm-provider` header/query/body
|
||||
2. **Router calls** `ProviderConfigManager.get_provider_container_config(provider)`
|
||||
3. **Generic handler** uses the provider config for:
|
||||
- URL construction (`get_complete_url`)
|
||||
- Authentication (`validate_environment`)
|
||||
- Request/response transformation
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
Run the container API tests:
|
||||
|
||||
```bash
|
||||
cd /path/to/litellm
|
||||
python -m pytest tests/test_litellm/containers/ -v
|
||||
```
|
||||
|
||||
Test via proxy:
|
||||
|
||||
```bash
|
||||
# Start proxy
|
||||
cd litellm/proxy && python proxy_cli.py --config proxy_config.yaml --port 4000
|
||||
|
||||
# Test endpoints
|
||||
curl -X GET "http://localhost:4000/v1/containers/cntr_123/files" \
|
||||
-H "Authorization: Bearer sk-1234"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Endpoint Reference
|
||||
|
||||
| Endpoint | Method | Path |
|
||||
|----------|--------|------|
|
||||
| List container files | GET | `/v1/containers/{container_id}/files` |
|
||||
| Retrieve container file | GET | `/v1/containers/{container_id}/files/{file_id}` |
|
||||
| Delete container file | DELETE | `/v1/containers/{container_id}/files/{file_id}` |
|
||||
|
||||
See `endpoints.json` for the complete list.
|
||||
|
||||
@@ -0,0 +1,44 @@
|
||||
"""Container management functions for LiteLLM."""
|
||||
|
||||
# Auto-generated container file functions from endpoints.json
|
||||
from .endpoint_factory import (
|
||||
adelete_container_file,
|
||||
alist_container_files,
|
||||
aretrieve_container_file,
|
||||
aretrieve_container_file_content,
|
||||
delete_container_file,
|
||||
list_container_files,
|
||||
retrieve_container_file,
|
||||
retrieve_container_file_content,
|
||||
)
|
||||
from .main import (
|
||||
acreate_container,
|
||||
adelete_container,
|
||||
alist_containers,
|
||||
aretrieve_container,
|
||||
create_container,
|
||||
delete_container,
|
||||
list_containers,
|
||||
retrieve_container,
|
||||
)
|
||||
|
||||
# Explicit public API for the containers package; star-imports and doc
# tooling rely on this list.
__all__ = [
    # Container CRUD operations (implemented in main.py)
    "acreate_container",
    "adelete_container",
    "alist_containers",
    "aretrieve_container",
    "create_container",
    "delete_container",
    "list_containers",
    "retrieve_container",
    # Container-file operations (generated by endpoint_factory from endpoints.json)
    "adelete_container_file",
    "alist_container_files",
    "aretrieve_container_file",
    "aretrieve_container_file_content",
    "delete_container_file",
    "list_container_files",
    "retrieve_container_file",
    "retrieve_container_file_content",
]
|
||||
@@ -0,0 +1,232 @@
|
||||
"""
|
||||
Factory for generating container SDK functions from JSON config.
|
||||
|
||||
This module reads endpoints.json and dynamically generates SDK functions
|
||||
that use the generic container handler.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import contextvars
|
||||
import json
|
||||
from functools import partial
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, List, Literal, Optional, Type
|
||||
|
||||
import litellm
|
||||
from litellm.constants import request_timeout as DEFAULT_REQUEST_TIMEOUT
|
||||
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
|
||||
from litellm.llms.base_llm.containers.transformation import BaseContainerConfig
|
||||
from litellm.llms.custom_httpx.container_handler import generic_container_handler
|
||||
from litellm.types.containers.main import (
|
||||
ContainerFileListResponse,
|
||||
ContainerFileObject,
|
||||
DeleteContainerFileResponse,
|
||||
)
|
||||
from litellm.types.router import GenericLiteLLMParams
|
||||
from litellm.utils import ProviderConfigManager, client
|
||||
|
||||
# Response-type registry used by the endpoint factory.
#
# Maps the "response_type" string found in endpoints.json to the concrete
# response model class that generated SDK functions construct (e.g. for
# mock responses via ``response_type(**mock_response)``).
# When adding an endpoint with a new response model, register it here.
# Entries such as "raw" (binary file content) are deliberately absent:
# lookups go through ``RESPONSE_TYPES.get(...)``, which yields ``None``
# for unregistered names.
RESPONSE_TYPES: Dict[str, Type] = {
    "ContainerFileListResponse": ContainerFileListResponse,
    "ContainerFileObject": ContainerFileObject,
    "DeleteContainerFileResponse": DeleteContainerFileResponse,
}
|
||||
|
||||
|
||||
def _load_endpoints_config() -> Dict:
|
||||
"""Load the endpoints configuration from JSON file."""
|
||||
config_path = Path(__file__).parent / "endpoints.json"
|
||||
with open(config_path) as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
def create_sync_endpoint_function(endpoint_config: Dict) -> Callable:
    """
    Create a sync SDK function from endpoint config.

    Uses the generic container handler instead of individual handler methods.

    Args:
        endpoint_config: One entry from ``endpoints.json`` (keys: ``name``,
            ``response_type``, ``path_params``, ...).

    Returns:
        Callable: A ``@client``-decorated function implementing the endpoint.
    """
    endpoint_name = endpoint_config["name"]
    # May be None for unregistered types (e.g. "raw"); see RESPONSE_TYPES.
    response_type = RESPONSE_TYPES.get(endpoint_config["response_type"])
    path_params = endpoint_config.get("path_params", [])

    @client
    def endpoint_func(
        timeout: int = 600,
        custom_llm_provider: Literal["openai"] = "openai",
        extra_headers: Optional[Dict[str, Any]] = None,
        extra_query: Optional[Dict[str, Any]] = None,
        # NOTE(review): extra_body is accepted but never forwarded to the
        # handler below — confirm whether that is intentional.
        extra_body: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Snapshot of the call arguments, reported on failure below.
        local_vars = locals()
        try:
            # Presumably injected into kwargs by the @client decorator —
            # TODO confirm; a missing key raises KeyError into the except arm.
            litellm_logging_obj: LiteLLMLoggingObj = kwargs.pop("litellm_logging_obj")
            litellm_call_id: Optional[str] = kwargs.get("litellm_call_id")
            # Set by the async wrapper (create_async_endpoint_function).
            _is_async = kwargs.pop("async_call", False) is True

            # Check for mock response: short-circuit before any provider work.
            mock_response = kwargs.get("mock_response")
            if mock_response is not None:
                if isinstance(mock_response, str):
                    mock_response = json.loads(mock_response)
                if response_type:
                    return response_type(**mock_response)
                return mock_response

            # Get provider config for the requested provider.
            litellm_params = GenericLiteLLMParams(**kwargs)
            container_provider_config: Optional[
                BaseContainerConfig
            ] = ProviderConfigManager.get_provider_container_config(
                provider=litellm.LlmProviders(custom_llm_provider),
            )

            if container_provider_config is None:
                raise ValueError(
                    f"Container provider config not found for: {custom_llm_provider}"
                )

            # Build optional params for logging (only the path params that
            # were actually supplied).
            optional_params = {k: kwargs.get(k) for k in path_params if k in kwargs}

            # Pre-call logging
            litellm_logging_obj.update_environment_variables(
                model="",
                optional_params=optional_params,
                litellm_params={"litellm_call_id": litellm_call_id},
                custom_llm_provider=custom_llm_provider,
            )

            # Use generic handler; a falsy timeout (0/None) falls back to the
            # library-wide default.
            return generic_container_handler.handle(
                endpoint_name=endpoint_name,
                container_provider_config=container_provider_config,
                litellm_params=litellm_params,
                logging_obj=litellm_logging_obj,
                extra_headers=extra_headers,
                extra_query=extra_query,
                timeout=timeout or DEFAULT_REQUEST_TIMEOUT,
                _is_async=_is_async,
                **kwargs,
            )

        except Exception as e:
            # Normalize any failure into a litellm exception type.
            raise litellm.exception_type(
                model="",
                custom_llm_provider=custom_llm_provider,
                original_exception=e,
                completion_kwargs=local_vars,
                extra_kwargs=kwargs,
            )

    return endpoint_func
|
||||
|
||||
|
||||
def create_async_endpoint_function(
    sync_func: Callable,
    endpoint_config: Dict,
) -> Callable:
    """Create an async SDK function that wraps the sync function.

    The wrapper runs ``sync_func`` in the default thread-pool executor
    (preserving the current ``contextvars`` context) and, if the sync path
    handed back a coroutine (it is called with ``async_call=True``), awaits
    it before returning.

    Args:
        sync_func: The sync endpoint function produced by
            ``create_sync_endpoint_function``.
        endpoint_config: The endpoint definition from ``endpoints.json``
            (unused here; kept for signature stability).

    Returns:
        Callable: An awaitable SDK function with the same keyword interface
        as the sync variant.
    """

    @client
    async def async_endpoint_func(
        timeout: int = 600,
        custom_llm_provider: Literal["openai"] = "openai",
        extra_headers: Optional[Dict[str, Any]] = None,
        extra_query: Optional[Dict[str, Any]] = None,
        extra_body: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Snapshot of the call arguments, reported on failure below.
        local_vars = locals()
        try:
            # get_running_loop() is the supported call inside a coroutine;
            # get_event_loop() is deprecated in this context since Python 3.10.
            loop = asyncio.get_running_loop()
            # Tell the sync path it is being driven asynchronously.
            kwargs["async_call"] = True

            func = partial(
                sync_func,
                timeout=timeout,
                custom_llm_provider=custom_llm_provider,
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                **kwargs,
            )

            # Run in the executor under a copy of the current context so
            # contextvars (e.g. request/logging state) survive the thread hop.
            ctx = contextvars.copy_context()
            func_with_context = partial(ctx.run, func)
            init_response = await loop.run_in_executor(None, func_with_context)

            if asyncio.iscoroutine(init_response):
                response = await init_response
            else:
                response = init_response

            return response
        except Exception as e:
            # Normalize any failure into a litellm exception type.
            raise litellm.exception_type(
                model="",
                custom_llm_provider=custom_llm_provider,
                original_exception=e,
                completion_kwargs=local_vars,
                extra_kwargs=kwargs,
            )

    return async_endpoint_func
|
||||
|
||||
|
||||
def generate_container_endpoints() -> Dict[str, Callable]:
    """Build every container SDK function declared in ``endpoints.json``.

    Returns:
        Dict[str, Callable]: Mapping of both sync and async function names
        to their generated implementations.
    """
    generated: Dict[str, Callable] = {}
    for entry in _load_endpoints_config()["endpoints"]:
        # Sync implementation first; the async variant wraps it.
        sync_impl = create_sync_endpoint_function(entry)
        async_impl = create_async_endpoint_function(sync_impl, entry)
        generated[entry["name"]] = sync_impl
        generated[entry["async_name"]] = async_impl
    return generated
|
||||
|
||||
|
||||
def get_all_endpoint_names() -> List[str]:
    """Get all endpoint names (sync and async) from config.

    Returns:
        List[str]: For each configured endpoint, its sync name followed by
        its async name, in config order.
    """
    config = _load_endpoints_config()
    # Flatten (name, async_name) pairs in declaration order.
    return [
        name
        for endpoint in config["endpoints"]
        for name in (endpoint["name"], endpoint["async_name"])
    ]
|
||||
|
||||
|
||||
def get_async_endpoint_names() -> List[str]:
    """Return the async variant of every configured endpoint name.

    Used for router registration.
    """
    endpoint_entries = _load_endpoints_config()["endpoints"]
    return [entry["async_name"] for entry in endpoint_entries]
|
||||
|
||||
|
||||
# Generate endpoints on module load: one sync and one async callable per
# entry in endpoints.json.
_generated_endpoints = generate_container_endpoints()

# Export generated functions dynamically.
# NOTE: dict.get() returns None for names absent from endpoints.json, so a
# missing or renamed config entry surfaces as a None export (failing at call
# time) rather than as an import-time error.
list_container_files = _generated_endpoints.get("list_container_files")
alist_container_files = _generated_endpoints.get("alist_container_files")
upload_container_file = _generated_endpoints.get("upload_container_file")
aupload_container_file = _generated_endpoints.get("aupload_container_file")
retrieve_container_file = _generated_endpoints.get("retrieve_container_file")
aretrieve_container_file = _generated_endpoints.get("aretrieve_container_file")
delete_container_file = _generated_endpoints.get("delete_container_file")
adelete_container_file = _generated_endpoints.get("adelete_container_file")
retrieve_container_file_content = _generated_endpoints.get(
    "retrieve_container_file_content"
)
aretrieve_container_file_content = _generated_endpoints.get(
    "aretrieve_container_file_content"
)
|
||||
@@ -0,0 +1,51 @@
|
||||
{
|
||||
"endpoints": [
|
||||
{
|
||||
"name": "list_container_files",
|
||||
"async_name": "alist_container_files",
|
||||
"path": "/containers/{container_id}/files",
|
||||
"method": "GET",
|
||||
"path_params": ["container_id"],
|
||||
"query_params": ["after", "limit", "order"],
|
||||
"response_type": "ContainerFileListResponse"
|
||||
},
|
||||
{
|
||||
"name": "upload_container_file",
|
||||
"async_name": "aupload_container_file",
|
||||
"path": "/containers/{container_id}/files",
|
||||
"method": "POST",
|
||||
"path_params": ["container_id"],
|
||||
"query_params": [],
|
||||
"response_type": "ContainerFileObject",
|
||||
"is_multipart": true
|
||||
},
|
||||
{
|
||||
"name": "retrieve_container_file",
|
||||
"async_name": "aretrieve_container_file",
|
||||
"path": "/containers/{container_id}/files/{file_id}",
|
||||
"method": "GET",
|
||||
"path_params": ["container_id", "file_id"],
|
||||
"query_params": [],
|
||||
"response_type": "ContainerFileObject"
|
||||
},
|
||||
{
|
||||
"name": "delete_container_file",
|
||||
"async_name": "adelete_container_file",
|
||||
"path": "/containers/{container_id}/files/{file_id}",
|
||||
"method": "DELETE",
|
||||
"path_params": ["container_id", "file_id"],
|
||||
"query_params": [],
|
||||
"response_type": "DeleteContainerFileResponse"
|
||||
},
|
||||
{
|
||||
"name": "retrieve_container_file_content",
|
||||
"async_name": "aretrieve_container_file_content",
|
||||
"path": "/containers/{container_id}/files/{file_id}/content",
|
||||
"method": "GET",
|
||||
"path_params": ["container_id", "file_id"],
|
||||
"query_params": [],
|
||||
"response_type": "raw",
|
||||
"returns_binary": true
|
||||
}
|
||||
]
|
||||
}
|
||||
1290
llm-gateway-competitors/litellm-wheel-src/litellm/containers/main.py
Normal file
1290
llm-gateway-competitors/litellm-wheel-src/litellm/containers/main.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,70 @@
|
||||
from typing import Dict
|
||||
|
||||
from litellm.llms.base_llm.containers.transformation import BaseContainerConfig
|
||||
from litellm.types.containers.main import (
|
||||
ContainerCreateOptionalRequestParams,
|
||||
ContainerListOptionalRequestParams,
|
||||
)
|
||||
|
||||
|
||||
class ContainerRequestUtils:
    """Utilities for extracting and mapping container request parameters."""

    @staticmethod
    def _copy_non_null_params(passed_params: dict, valid_params: list, target):
        """Copy each listed, non-None parameter from ``passed_params`` into ``target``.

        Shared helper for the ``get_requested_*`` extractors below; returns
        ``target`` for convenience.
        """
        for param in valid_params:
            if param in passed_params and passed_params[param] is not None:
                target[param] = passed_params[param]  # type: ignore
        return target

    @staticmethod
    def get_requested_container_create_optional_param(
        passed_params: dict,
    ) -> ContainerCreateOptionalRequestParams:
        """Extract only valid container creation parameters from the passed parameters."""
        return ContainerRequestUtils._copy_non_null_params(
            passed_params,
            [
                "expires_after",
                "file_ids",
                "extra_headers",
                "extra_body",
            ],
            ContainerCreateOptionalRequestParams(),
        )

    @staticmethod
    def get_optional_params_container_create(
        container_provider_config: BaseContainerConfig,
        container_create_optional_params: ContainerCreateOptionalRequestParams,
    ) -> Dict:
        """Get the optional parameters for container creation.

        Drops parameters the provider does not support, then lets the
        provider config map the remaining OpenAI-style params into its own
        request format.
        """
        supported_params = container_provider_config.get_supported_openai_params()

        # Filter out unsupported parameters
        filtered_params = {
            k: v
            for k, v in container_create_optional_params.items()
            if k in supported_params
        }

        return container_provider_config.map_openai_params(
            container_create_optional_params=filtered_params,  # type: ignore
            drop_params=False,
        )

    @staticmethod
    def get_requested_container_list_optional_param(
        passed_params: dict,
    ) -> ContainerListOptionalRequestParams:
        """Extract only valid container list parameters from the passed parameters."""
        return ContainerRequestUtils._copy_non_null_params(
            passed_params,
            [
                "after",
                "limit",
                "order",
                "extra_headers",
                "extra_query",
            ],
            ContainerListOptionalRequestParams(),
        )
||||
Reference in New Issue
Block a user