# ollama_client.py
"""Minimal HTTP client for a locally running Ollama server."""

from __future__ import annotations

import time
from dataclasses import asdict, dataclass
from typing import Any

import requests


class OllamaUnavailableError(RuntimeError):
    """Raised when the local Ollama service or model is unavailable."""

@dataclass(frozen=True, slots=True)
class OllamaGenerationConfig:
    temperature: float = 0.0
    top_p: float = 0.9
    num_predict: int = 128
    num_ctx: int = 2048
    repeat_penalty: float = 1.0
    seed: int = 0

    @classmethod
    def from_dict(cls, raw: dict[str, Any] | None) -> "OllamaGenerationConfig":
        raw = raw or {}
        return cls(
            temperature=float(raw.get("temperature", 0.0)),
            top_p=float(raw.get("top_p", 0.9)),
            num_predict=int(raw.get("num_predict", 128)),
            num_ctx=int(raw.get("num_ctx", 2048)),
            repeat_penalty=float(raw.get("repeat_penalty", 1.0)),
            seed=int(raw.get("seed", 0)),
        )

    def as_dict(self) -> dict[str, Any]:
        return asdict(self)

@dataclass(frozen=True, slots=True)
class OllamaConfig:
    base_url: str = "http://localhost:11434/api"
    model: str = "gemma3:1b"
    timeout_s: float = 90.0
    max_retries: int = 2
    keep_alive: str = "0m"
    generation: OllamaGenerationConfig = OllamaGenerationConfig()

    @classmethod
    def from_dict(cls, raw: dict[str, Any] | None) -> "OllamaConfig":
        raw = raw or {}
        return cls(
            base_url=str(raw.get("base_url", "http://localhost:11434/api")).rstrip("/"),
            model=str(raw.get("model", "gemma3:1b")),
            timeout_s=float(raw.get("timeout_s", 90.0)),
            max_retries=int(raw.get("max_retries", 2)),
            keep_alive=str(raw.get("keep_alive", "0m")),
            generation=OllamaGenerationConfig.from_dict(raw.get("generation")),
        )

    def as_dict(self) -> dict[str, Any]:
        payload = asdict(self)
        payload["generation"] = self.generation.as_dict()
        return payload

class OllamaClient:
    """Thin wrapper around the Ollama HTTP API for health checks and one-shot generation."""

    def __init__(self, config: OllamaConfig) -> None:
        self.config = config

    def _request(self, method: str, path: str, *, json_payload: dict[str, Any] | None = None) -> dict[str, Any]:
        """Send a request with simple retry/backoff; raise OllamaUnavailableError if all attempts fail."""
        # Normalize the base URL: paths already carry the "/api" prefix, so drop a trailing one.
        base_url = self.config.base_url.rstrip("/")
        if base_url.endswith("/api"):
            base_url = base_url[: -len("/api")]
        url = f"{base_url}{path}"
        last_error: Exception | None = None
        for attempt in range(self.config.max_retries + 1):
            try:
                response = requests.request(
                    method=method,
                    url=url,
                    json=json_payload,
                    timeout=self.config.timeout_s,
                )
                response.raise_for_status()
                return response.json()
            except requests.RequestException as exc:
                last_error = exc
                if attempt >= self.config.max_retries:
                    break
                time.sleep(0.3 * (attempt + 1))
        raise OllamaUnavailableError(
            f"Failed to reach local Ollama at {url} after {self.config.max_retries + 1} attempts."
        ) from last_error

    def check_connection(self) -> dict[str, Any]:
        """Verify the server is reachable and the configured model is installed."""
        data = self._request("GET", "/api/tags")
        models = {item["name"] for item in data.get("models", [])}
        if self.config.model not in models:
            raise OllamaUnavailableError(
                f"Model {self.config.model!r} not found in local Ollama at {self.config.base_url}. "
                f"Available models: {sorted(models)}"
            )
        return {
            "base_url": self.config.base_url,
            "model": self.config.model,
            "available_models": sorted(models),
        }

    def generate(self, *, prompt: str, system: str | None = None) -> dict[str, Any]:
        """Run a non-streaming generation and return the response text plus timing and usage metadata."""
        payload = {
            "model": self.config.model,
            "prompt": prompt,
            "stream": False,
            "keep_alive": self.config.keep_alive,
            "options": self.config.generation.as_dict(),
        }
        if system:
            payload["system"] = system
        started = time.perf_counter()
        response = self._request("POST", "/api/generate", json_payload=payload)
        latency_s = time.perf_counter() - started
        usage = {
            "prompt_eval_count": response.get("prompt_eval_count"),
            "eval_count": response.get("eval_count"),
            "total_duration": response.get("total_duration"),
            "load_duration": response.get("load_duration"),
            "prompt_eval_duration": response.get("prompt_eval_duration"),
            "eval_duration": response.get("eval_duration"),
        }
        return {
            "response_text": response.get("response", ""),
            "latency_s": latency_s,
            "model": response.get("model", self.config.model),
            "created_at": response.get("created_at"),
            "done_reason": response.get("done_reason"),
            "usage": usage,
            "request": {
                "prompt": prompt,
                "system": system,
                "options": self.config.generation.as_dict(),
            },
        }
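

# Illustrative usage sketch, not part of the module's original surface: it assumes an Ollama
# server is listening at the default http://localhost:11434 and that the "gemma3:1b" model has
# been pulled. Defaults come from OllamaConfig.from_dict({}); adjust the dict for other setups.
if __name__ == "__main__":
    config = OllamaConfig.from_dict({"model": "gemma3:1b", "generation": {"num_predict": 64}})
    client = OllamaClient(config)
    try:
        info = client.check_connection()
        print(f"Connected to {info['base_url']} (models: {info['available_models']})")
        result = client.generate(prompt="Reply with a single word: ping")
        print(result["response_text"], f"({result['latency_s']:.2f}s)")
    except OllamaUnavailableError as exc:
        print(f"Ollama unavailable: {exc}")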