Ollama

OllamaMultiModal #

Bases: Ollama

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
class OllamaMultiModal(Ollama):
    @classmethod
    def class_name(cls) -> str:
        return "Ollama_multi_modal_llm"

    def _get_messages(
        self, prompt: str, image_documents: Sequence[ImageNode]
    ) -> Sequence[ChatMessage]:
        image_blocks = [
            ImageBlock(
                image=image_document.image,
                path=image_document.image_path,
                url=image_document.image_url,
                image_mimetype=image_document.image_mimetype,
            )
            for image_document in image_documents
        ]

        return [
            ChatMessage(
                role=MessageRole.USER,
                blocks=[
                    TextBlock(text=prompt),
                    *image_blocks,
                ],
            )
        ]

    @llm_completion_callback()
    def complete(
        self,
        prompt: str,
        image_documents: Sequence[ImageNode],
        formatted: bool = False,
        **kwargs: Any,
    ) -> CompletionResponse:
        """Complete."""
        messages = self._get_messages(prompt, image_documents)
        chat_response = self.chat(messages, **kwargs)
        return chat_response_to_completion_response(chat_response)

    @llm_completion_callback()
    def stream_complete(
        self,
        prompt: str,
        image_documents: Sequence[ImageNode],
        formatted: bool = False,
        **kwargs: Any,
    ) -> CompletionResponseGen:
        """Stream complete."""
        messages = self._get_messages(prompt, image_documents)
        stream_chat_response = self.stream_chat(messages, **kwargs)
        return stream_chat_response_to_completion_response(stream_chat_response)

    @llm_completion_callback()
    async def acomplete(
        self,
        prompt: str,
        image_documents: Sequence[ImageNode],
        **kwargs: Any,
    ) -> CompletionResponse:
        """Async complete."""
        messages = self._get_messages(prompt, image_documents)
        chat_response = await self.achat(messages, **kwargs)
        return chat_response_to_completion_response(chat_response)

    async def astream_complete(
        self,
        prompt: str,
        image_documents: Sequence[ImageNode],
        **kwargs: Any,
    ) -> CompletionResponseAsyncGen:
        """Async stream complete."""
        messages = self._get_messages(prompt, image_documents)
        astream_chat_response = await self.astream_chat(messages, **kwargs)
        return astream_chat_response_to_completion_response(astream_chat_response)
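
A minimal construction sketch (the model name, timeout, and image path below are illustrative assumptions; any vision-capable model pulled into a locally running Ollama server should work):

from llama_index.core.schema import ImageNode
from llama_index.multi_modal_llms.ollama import OllamaMultiModal

# Assumes `ollama serve` is running and a vision model has been pulled,
# e.g. `ollama pull llava`.
llm = OllamaMultiModal(model="llava", request_timeout=120.0)

# _get_messages packs the prompt and every image into a single USER chat
# message, so images can be supplied as raw bytes, a local path, or a URL
# via the corresponding ImageNode fields.
image_nodes = [ImageNode(image_path="./example_image.png")]

The llm and image_nodes defined here are reused in the method sketches below.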

complete #

complete(prompt: str, image_documents: Sequence[ImageNode], formatted: bool = False, **kwargs: Any) -> CompletionResponse

Complete.

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
@llm_completion_callback()
def complete(
    self,
    prompt: str,
    image_documents: Sequence[ImageNode],
    formatted: bool = False,
    **kwargs: Any,
) -> CompletionResponse:
    """Complete."""
    messages = self._get_messages(prompt, image_documents)
    chat_response = self.chat(messages, **kwargs)
    return chat_response_to_completion_response(chat_response)
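
A usage sketch reusing the llm and image_nodes from the construction example above; the prompt text is a placeholder:

response = llm.complete(
    prompt="Describe what is shown in this image.",
    image_documents=image_nodes,
)
print(response.text)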

stream_complete #

stream_complete(prompt: str, image_documents: Sequence[ImageNode], formatted: bool = False, **kwargs: Any) -> CompletionResponseGen

Stream complete.

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
@llm_completion_callback()
def stream_complete(
    self,
    prompt: str,
    image_documents: Sequence[ImageNode],
    formatted: bool = False,
    **kwargs: Any,
) -> CompletionResponseGen:
    """Stream complete."""
    messages = self._get_messages(prompt, image_documents)
    stream_chat_response = self.stream_chat(messages, **kwargs)
    return stream_chat_response_to_completion_response(stream_chat_response)
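
A streaming sketch under the same assumptions; each yielded CompletionResponse carries the incremental text in its delta field:

for chunk in llm.stream_complete(
    prompt="Describe what is shown in this image.",
    image_documents=image_nodes,
):
    print(chunk.delta, end="", flush=True)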

acomplete async #

acomplete(prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any) -> CompletionResponse

Async complete.

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
@llm_completion_callback()
async def acomplete(
    self,
    prompt: str,
    image_documents: Sequence[ImageNode],
    **kwargs: Any,
) -> CompletionResponse:
    """Async complete."""
    messages = self._get_messages(prompt, image_documents)
    chat_response = await self.achat(messages, **kwargs)
    return chat_response_to_completion_response(chat_response)
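
The async variant is awaited inside an event loop; a sketch with the same placeholder inputs:

import asyncio

async def describe() -> None:
    response = await llm.acomplete(
        prompt="Describe what is shown in this image.",
        image_documents=image_nodes,
    )
    print(response.text)

asyncio.run(describe())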

astream_complete async #

astream_complete(prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any) -> CompletionResponseAsyncGen

Async stream complete.

Source code in llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
async def astream_complete(
    self,
    prompt: str,
    image_documents: Sequence[ImageNode],
    **kwargs: Any,
) -> CompletionResponseAsyncGen:
    """Async stream complete."""
    messages = self._get_messages(prompt, image_documents)
    astream_chat_response = await self.astream_chat(messages, **kwargs)
    return astream_chat_response_to_completion_response(astream_chat_response)
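
Awaiting astream_complete returns an async generator, so the chunks are consumed with async for; a sketch with the same placeholder inputs:

import asyncio

async def stream_description() -> None:
    gen = await llm.astream_complete(
        prompt="Describe what is shown in this image.",
        image_documents=image_nodes,
    )
    async for chunk in gen:
        print(chunk.delta, end="", flush=True)

asyncio.run(stream_description())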