OpenAI#
- pydantic model llama_index.multi_modal_llms.openai.OpenAIMultiModal#
JSON schema:
{
  "title": "OpenAIMultiModal",
  "description": "Multi-Modal LLM interface.",
  "type": "object",
  "properties": {
    "model": {
      "title": "Model",
      "description": "The Multi-Modal model to use from OpenAI.",
      "type": "string"
    },
    "temperature": {
      "title": "Temperature",
      "description": "The temperature to use for sampling.",
      "type": "number"
    },
    "max_new_tokens": {
      "title": "Max New Tokens",
      "description": "The maximum number of tokens to generate, ignoring the number of tokens in the prompt.",
      "exclusiveMinimum": 0,
      "type": "integer"
    },
    "context_window": {
      "title": "Context Window",
      "description": "The maximum number of context tokens for the model.",
      "exclusiveMinimum": 0,
      "type": "integer"
    },
    "image_detail": {
      "title": "Image Detail",
      "description": "The level of detail for images in API calls. Can be low, high, or auto.",
      "type": "string"
    },
    "max_retries": {
      "title": "Max Retries",
      "description": "Maximum number of retries.",
      "default": 3,
      "gte": 0,
      "type": "integer"
    },
    "timeout": {
      "title": "Timeout",
      "description": "The timeout, in seconds, for API requests.",
      "default": 60.0,
      "gte": 0,
      "type": "number"
    },
    "api_key": {
      "title": "Api Key",
      "description": "The OpenAI API key.",
      "type": "string"
    },
    "api_base": {
      "title": "Api Base",
      "description": "The base URL for the OpenAI API.",
      "type": "string"
    },
    "api_version": {
      "title": "Api Version",
      "description": "The API version for the OpenAI API.",
      "type": "string"
    },
    "additional_kwargs": {
      "title": "Additional Kwargs",
      "description": "Additional kwargs for the OpenAI API.",
      "type": "object"
    },
    "default_headers": {
      "title": "Default Headers",
      "description": "The default headers for API requests.",
      "type": "object",
      "additionalProperties": {
        "type": "string"
      }
    },
    "class_name": {
      "title": "Class Name",
      "type": "string",
      "default": "openai_multi_modal_llm"
    }
  },
  "required": [
    "model",
    "temperature",
    "image_detail",
    "api_version"
  ]
}
- Config
arbitrary_types_allowed: bool = True
- Fields
- field additional_kwargs: Dict[str, Any] [Optional]#
Additional kwargs for the OpenAI API.
- field api_base: str = None#
The base URL for the OpenAI API.
- field api_key: str = None#
The OpenAI API key.
- field api_version: str [Required]#
The API version for the OpenAI API.
- field context_window: Optional[int] = None#
The maximum number of context tokens for the model.
- Constraints
exclusiveMinimum = 0
- field default_headers: Dict[str, str] = None#
The default headers for API requests.
- field image_detail: str [Required]#
The level of detail for images in API calls. Can be low, high, or auto.
- field max_new_tokens: Optional[int] = None#
The maximum number of tokens to generate, ignoring the number of tokens in the prompt.
- Constraints
exclusiveMinimum = 0
- field max_retries: int = 3#
Maximum number of retries.
- field model: str [Required]#
The Multi-Modal model to use from OpenAI.
- field temperature: float [Required]#
The temperature to use for sampling.
- field timeout: float = 60.0#
The timeout, in seconds, for API requests.
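The fields above map one-to-one onto constructor keyword arguments. A minimal construction sketch; the model name is illustrative, and when api_key is omitted it typically falls back to the OPENAI_API_KEY environment variable:

```python
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

# All values below are examples, not library defaults.
mm_llm = OpenAIMultiModal(
    model="gpt-4-vision-preview",  # any OpenAI multi-modal model name
    temperature=0.0,
    max_new_tokens=300,
    image_detail="low",            # "low", "high", or "auto"
    max_retries=3,
    timeout=60.0,
)
```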
- async achat(messages: Sequence[ChatMessage], **kwargs: Any) ChatResponse #
Async chat endpoint for Multi-Modal LLM.
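A minimal async sketch, assuming OPENAI_API_KEY is set; the model name is illustrative, and pre-0.10 releases expose ChatMessage under llama_index.llms rather than llama_index.core.llms:

```python
import asyncio

from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

async def main() -> None:
    mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
    messages = [
        ChatMessage(role=MessageRole.USER, content="Describe a sunset in one sentence."),
    ]
    response = await mm_llm.achat(messages)  # returns a ChatResponse
    print(response.message.content)

asyncio.run(main())
```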
- async acomplete(prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any) CompletionResponse #
Async completion endpoint for Multi-Modal LLM.
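A sketch of async completion with an image; photo.jpg is a hypothetical local file, and older releases expose ImageDocument under llama_index.schema instead of llama_index.core.schema:

```python
import asyncio

from llama_index.core.schema import ImageDocument
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

async def main() -> None:
    mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
    images = [ImageDocument(image_path="photo.jpg")]  # hypothetical file
    response = await mm_llm.acomplete(
        prompt="What is shown in this image?",
        image_documents=images,
    )
    print(response.text)  # CompletionResponse.text

asyncio.run(main())
```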
- async astream_chat(messages: Sequence[ChatMessage], **kwargs: Any) AsyncGenerator[ChatResponse, None] #
Async streaming chat endpoint for Multi-Modal LLM.
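Because the method is itself a coroutine that returns an async generator, it is awaited first and then iterated; a sketch under the same assumptions as above:

```python
import asyncio

from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

async def main() -> None:
    mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
    messages = [ChatMessage(role=MessageRole.USER, content="Tell a two-line story.")]
    stream = await mm_llm.astream_chat(messages)
    async for chunk in stream:  # each chunk is a ChatResponse
        print(chunk.delta, end="", flush=True)
    print()

asyncio.run(main())
```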
- async astream_complete(prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any) AsyncGenerator[CompletionResponse, None] #
Async streaming completion endpoint for Multi-Modal LLM.
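The same await-then-iterate pattern applies to completions; the image file is hypothetical:

```python
import asyncio

from llama_index.core.schema import ImageDocument
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

async def main() -> None:
    mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
    images = [ImageDocument(image_path="photo.jpg")]  # hypothetical file
    stream = await mm_llm.astream_complete(
        prompt="List the objects visible in this image.",
        image_documents=images,
    )
    async for chunk in stream:  # each chunk is a CompletionResponse
        print(chunk.delta, end="", flush=True)
    print()

asyncio.run(main())
```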
- chat(messages: Sequence[ChatMessage], **kwargs: Any) ChatResponse #
Chat endpoint for Multi-Modal LLM.
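The synchronous counterpart of achat; a short sketch with text-only messages (image content can also be attached via OpenAI-style content blocks, whose exact shape varies by release):

```python
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="You are a concise assistant."),
    ChatMessage(role=MessageRole.USER, content="What does a vision model do?"),
]
response = mm_llm.chat(messages)  # blocking call, returns ChatResponse
print(response.message.content)
```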
- classmethod class_name() str #
Get the class name, used as a unique ID in serialization.
This provides a key that makes serialization robust against actual class name changes.
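For illustration, the returned key matches the class_name default shown in the JSON schema above:

```python
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

# The serialization key stays stable even if the Python class is renamed.
assert OpenAIMultiModal.class_name() == "openai_multi_modal_llm"
```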
- complete(prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any) CompletionResponse #
Completion endpoint for Multi-Modal LLM.
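A minimal sketch of the primary text-plus-image call; chart.png is a hypothetical local file, and recent releases also accept ImageDocument(image_url=...) for remote images:

```python
from llama_index.core.schema import ImageDocument
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=300)
images = [ImageDocument(image_path="chart.png")]  # hypothetical file
response = mm_llm.complete(
    prompt="Describe the trend shown in this chart.",
    image_documents=images,
)
print(response.text)
```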
- stream_chat(messages: Sequence[ChatMessage], **kwargs: Any) Generator[ChatResponse, None, None] #
Stream chat endpoint for Multi-Modal LLM.
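The synchronous stream is a plain generator, so it can be consumed with a for loop; chunk.delta holds the incremental text:

```python
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
messages = [ChatMessage(role=MessageRole.USER, content="Explain image detail levels.")]
for chunk in mm_llm.stream_chat(messages):  # each chunk is a ChatResponse
    print(chunk.delta, end="", flush=True)
print()
```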
- stream_complete(prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any) Generator[CompletionResponse, None, None] #
Streaming completion endpoint for Multi-Modal LLM.
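The same generator pattern applied to completions with images; the file path is hypothetical:

```python
from llama_index.core.schema import ImageDocument
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
images = [ImageDocument(image_path="photo.jpg")]  # hypothetical file
for chunk in mm_llm.stream_complete(
    prompt="Describe this image.",
    image_documents=images,
):  # each chunk is a CompletionResponse
    print(chunk.delta, end="", flush=True)
print()
```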
- property metadata: MultiModalLLMMetadata#
Multi-Modal LLM metadata.
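A short sketch of reading the metadata; the attribute names shown (context_window, num_output, model_name) mirror the standard LLM metadata fields and are assumptions here:

```python
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview")
meta = mm_llm.metadata  # MultiModalLLMMetadata
print(meta.context_window, meta.num_output, meta.model_name)
```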