Ollama

OllamaMultiModal #

Bases: Ollama

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
@deprecated(
    reason="This package has been deprecated and will no longer be maintained. Please use llama-index-llms-ollama instead. See Multi Modal LLMs documentation for a complete guide on migration: https://docs.llamaindex.ai/en/stable/understanding/using_llms/using_llms/#multi-modal-llms",
    version="0.5.1",
)
class OllamaMultiModal(Ollama):
    @classmethod
    def class_name(cls) -> str:
        return "Ollama_multi_modal_llm"

    def _get_messages(
        self, prompt: str, image_documents: Sequence[Union[ImageNode, ImageBlock]]
    ) -> Sequence[ChatMessage]:
        # Convert legacy ImageNode inputs into ImageBlock content blocks;
        # inputs that are already ImageBlocks are passed through unchanged.
        if all(isinstance(doc, ImageNode) for doc in image_documents):
            image_blocks = [
                ImageBlock(
                    image=image_document.image,
                    path=image_document.image_path,
                    url=image_document.image_url,
                    image_mimetype=image_document.image_mimetype,
                )
                for image_document in image_documents
            ]
        else:
            image_blocks = image_documents

        return [
            ChatMessage(
                role=MessageRole.USER,
                blocks=[
                    TextBlock(text=prompt),
                    *image_blocks,
                ],
            )
        ]

    @llm_completion_callback()
    def complete(
        self,
        prompt: str,
        image_documents: Sequence[Union[ImageNode, ImageBlock]],
        formatted: bool = False,
        **kwargs: Any,
    ) -> CompletionResponse:
        """Complete."""
        messages = self._get_messages(prompt, image_documents)
        chat_response = self.chat(messages, **kwargs)
        return chat_response_to_completion_response(chat_response)

    @llm_completion_callback()
    def stream_complete(
        self,
        prompt: str,
        image_documents: Sequence[Union[ImageNode, ImageBlock]],
        formatted: bool = False,
        **kwargs: Any,
    ) -> CompletionResponseGen:
        """Stream complete."""
        messages = self._get_messages(prompt, image_documents)
        stream_chat_response = self.stream_chat(messages, **kwargs)
        return stream_chat_response_to_completion_response(stream_chat_response)

    @llm_completion_callback()
    async def acomplete(
        self,
        prompt: str,
        image_documents: Sequence[Union[ImageNode, ImageBlock]],
        **kwargs: Any,
    ) -> CompletionResponse:
        """Async complete."""
        messages = self._get_messages(prompt, image_documents)
        chat_response = await self.achat(messages, **kwargs)
        return chat_response_to_completion_response(chat_response)

    async def astream_complete(
        self,
        prompt: str,
        image_documents: Sequence[Union[ImageNode, ImageBlock]],
        **kwargs: Any,
    ) -> CompletionResponseAsyncGen:
        """Async stream complete."""
        messages = self._get_messages(prompt, image_documents)
        astream_chat_response = await self.astream_chat(messages, **kwargs)
        return astream_chat_response_to_completion_response(astream_chat_response)
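
This class is deprecated in favor of llama-index-llms-ollama (see the @deprecated notice above). For code that still uses it, a minimal construction sketch might look like the following; the "llava" model name is an assumption, and an Ollama server with a vision-capable model already pulled must be running:

from llama_index.multi_modal_llms.ollama import OllamaMultiModal

# "llava" is an assumed vision-capable model; pull it first with
# `ollama pull llava` and make sure the Ollama server is running.
mm_llm = OllamaMultiModal(model="llava")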

complete #

complete(prompt: str, image_documents: Sequence[Union[ImageNode, ImageBlock]], formatted: bool = False, **kwargs: Any) -> CompletionResponse

Complete the prompt with the given image documents and return the full response as a CompletionResponse.

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
@llm_completion_callback()
def complete(
    self,
    prompt: str,
    image_documents: Sequence[Union[ImageNode, ImageBlock]],
    formatted: bool = False,
    **kwargs: Any,
) -> CompletionResponse:
    """Complete."""
    messages = self._get_messages(prompt, image_documents)
    chat_response = self.chat(messages, **kwargs)
    return chat_response_to_completion_response(chat_response)
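
As a sketch, a blocking multi-modal completion could look like this; the model name "llava" and the file photo.jpg are illustrative assumptions:

from llama_index.core.llms import ImageBlock
from llama_index.multi_modal_llms.ollama import OllamaMultiModal

mm_llm = OllamaMultiModal(model="llava")  # assumed vision-capable model
response = mm_llm.complete(
    "Describe this image.",
    image_documents=[ImageBlock(path="photo.jpg")],  # hypothetical local file
)
print(response.text)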

stream_complete #

stream_complete(prompt: str, image_documents: Sequence[Union[ImageNode, ImageBlock]], formatted: bool = False, **kwargs: Any) -> CompletionResponseGen

Stream the completion for the prompt and image documents, yielding incremental CompletionResponse objects.

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
@llm_completion_callback()
def stream_complete(
    self,
    prompt: str,
    image_documents: Sequence[Union[ImageNode, ImageBlock]],
    formatted: bool = False,
    **kwargs: Any,
) -> CompletionResponseGen:
    """Stream complete."""
    messages = self._get_messages(prompt, image_documents)
    stream_chat_response = self.stream_chat(messages, **kwargs)
    return stream_chat_response_to_completion_response(stream_chat_response)
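
The streaming variant returns a generator of incremental responses; each item carries the newly generated text in its delta attribute. A sketch, with the same assumed model and file as above:

from llama_index.core.llms import ImageBlock
from llama_index.multi_modal_llms.ollama import OllamaMultiModal

mm_llm = OllamaMultiModal(model="llava")  # assumed vision-capable model
gen = mm_llm.stream_complete(
    "Describe this image.",
    image_documents=[ImageBlock(path="photo.jpg")],  # hypothetical local file
)
for partial in gen:
    # Print each newly generated chunk as it arrives.
    print(partial.delta, end="", flush=True)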

acomplete async #

acomplete(prompt: str, image_documents: Sequence[Union[ImageNode, ImageBlock]], **kwargs: Any) -> CompletionResponse

Asynchronously complete the prompt with the given image documents, awaiting the underlying achat call.

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
@llm_completion_callback()
async def acomplete(
    self,
    prompt: str,
    image_documents: Sequence[Union[ImageNode, ImageBlock]],
    **kwargs: Any,
) -> CompletionResponse:
    """Async complete."""
    messages = self._get_messages(prompt, image_documents)
    chat_response = await self.achat(messages, **kwargs)
    return chat_response_to_completion_response(chat_response)
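
A sketch of the async variant using asyncio, with the same assumed model and file:

import asyncio

from llama_index.core.llms import ImageBlock
from llama_index.multi_modal_llms.ollama import OllamaMultiModal

async def main() -> None:
    mm_llm = OllamaMultiModal(model="llava")  # assumed vision-capable model
    response = await mm_llm.acomplete(
        "Describe this image.",
        image_documents=[ImageBlock(path="photo.jpg")],  # hypothetical local file
    )
    print(response.text)

asyncio.run(main())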

astream_complete async #

astream_complete(prompt: str, image_documents: Sequence[Union[ImageNode, ImageBlock]], **kwargs: Any) -> CompletionResponseAsyncGen

Asynchronously stream the completion for the prompt and image documents, returning an async generator of incremental CompletionResponse objects.

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
async def astream_complete(
    self,
    prompt: str,
    image_documents: Sequence[Union[ImageNode, ImageBlock]],
    **kwargs: Any,
) -> CompletionResponseAsyncGen:
    """Async stream complete."""
    messages = self._get_messages(prompt, image_documents)
    astream_chat_response = await self.astream_chat(messages, **kwargs)
    return astream_chat_response_to_completion_response(astream_chat_response)
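
Note that the method itself must be awaited to obtain the async generator, which is then consumed with async for. A sketch, with the same assumed model and file:

import asyncio

from llama_index.core.llms import ImageBlock
from llama_index.multi_modal_llms.ollama import OllamaMultiModal

async def main() -> None:
    mm_llm = OllamaMultiModal(model="llava")  # assumed vision-capable model
    gen = await mm_llm.astream_complete(
        "Describe this image.",
        image_documents=[ImageBlock(path="photo.jpg")],  # hypothetical local file
    )
    async for partial in gen:
        # Print each newly generated chunk as it arrives.
        print(partial.delta, end="", flush=True)

asyncio.run(main())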