Commit 0d71624

chore: cleanup
phil65 committed Feb 18, 2025
1 parent 6a79e17 commit 0d71624
Showing 3 changed files with 7 additions and 137 deletions.
pyproject.toml: 2 changes (1 addition, 1 deletion)
@@ -43,7 +43,7 @@ license = "MIT"
 [project.optional-dependencies]
 cli = ["rich", "typer"]
 extras = ["black", "pygments", "humanize", "natsort"]
-llm = ["litellm", "python-dotenv", "pillow"]
+llm = ["llmling-agent[litellm,pillow]"]
 icons = ["pyconify"]
 yaml = ["yamling"]
 httpx = ["httpx"]
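
Note on this change: the llm extra no longer lists litellm, python-dotenv, and pillow directly; litellm and pillow now arrive transitively through llmling-agent's extras. A minimal, hypothetical sanity check (not part of the commit) that the modules still resolve after pip install "jinjarope[llm]":

# Hypothetical check, not part of this commit: confirm the "llm" extra
# still makes the (now transitive) dependencies importable.
import importlib.util

for module in ("llmling_agent", "litellm", "PIL"):  # PIL is pillow's import name
    found = importlib.util.find_spec(module) is not None
    print(f"{module}: {'available' if found else 'missing'}")
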
src/jinjarope/llmfilters.py: 116 changes (3 additions, 113 deletions)
@@ -6,7 +6,7 @@
 from dotenv import load_dotenv

 # import litellm
-from jinjarope import htmlfilters, lazylitellm
+from jinjarope import lazylitellm


 load_dotenv()
@@ -15,52 +15,6 @@
 litellm = lazylitellm.LazyLiteLLM()


-def llm_complete(
-    prompt: str,
-    system_prompt: str | None = None,
-    context: str | None = None,
-    model: str | None = None,
-    token: str | None = None,
-    base_url: str | None = None,
-    **kwargs: Any,
-) -> str:
-    """Complete a prompt using the LLM API.
-
-    Args:
-        prompt: The prompt to complete.
-        system_prompt: The system prompt to set context for the model.
-        context: Additional context for the prompt.
-        model: The model to use.
-        token: The API token.
-        base_url: The base URL of the API.
-        kwargs: Additional keyword arguments passed to litellm.completion.
-
-    Returns:
-        The completed text from the LLM.
-
-    Raises:
-        ValueError: If the API response is invalid or missing expected data.
-    """
-    messages: list[dict[str, str]] = []
-    if system_prompt:
-        messages.append({"role": "system", "content": system_prompt})
-    if context:
-        messages.append({"role": "user", "content": context})
-    messages.append({"role": "user", "content": prompt})
-
-    response = litellm.completion(
-        model=model or os.getenv("OPENAI_MODEL", ""),
-        api_key=token or os.getenv("OPENAI_API_TOKEN"),
-        api_base=base_url or os.getenv("OPENAI_API_BASE"),
-        messages=messages,
-        **kwargs,
-    )
-    if not response.choices or not response.choices[0].message:
-        msg = "Invalid API response: missing choices or message"
-        raise ValueError(msg)
-    return response.choices[0].message.content or ""
-
-
 def llm_generate_image(
     prompt: str,
     model: str | None = None,
@@ -103,73 +57,9 @@ def llm_generate_image(
     return None


-def llm_analyze_image(
-    image_url: str,
-    prompt: str | None = None,
-    model: str = "gpt-4-vision-preview",
-    token: str | None = None,
-    base_url: str | None = None,
-    encode_b64: bool = False,
-    **kwargs: Any,
-) -> str:
-    """Analyze an image using an LLM vision model and return the analysis as a string.
-
-    Args:
-        image_url: The URL of the image to analyze.
-        prompt: A prompt to guide the image analysis. If None, use a default prompt.
-        model: The name of the model to use. Defaults to "gpt-4-vision-preview".
-        token: The API token (key) for authentication.
-            If None, it will use the OPENAI_API_KEY environment variable.
-        base_url: The base URL for the API endpoint.
-            If None, the default URL for the model will be used.
-        encode_b64: Whether to encode the image to base64 before sending it to the API.
-            (required for some models)
-        kwargs: Additional keyword arguments passed to litellm.completion.
-
-    Returns:
-        The analysis of the image as a string.
-
-    Raises:
-        ValueError: If the image_url is empty or invalid.
-        requests.RequestException: If there's an error downloading the image.
-        Exception: If there's an error in making the API call or processing the response.
-    """
-    if not image_url or not image_url.strip():
-        msg = "Image URL cannot be empty"
-        raise ValueError(msg)
-
-    prompt = prompt or "Analyze this image and describe what you see in detail."
-    image_str = htmlfilters.url_to_b64(image_url) if encode_b64 else image_url
-    completion_kwargs: dict[str, Any] = {
-        "model": model,
-        "messages": [
-            {
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": prompt},
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": image_str,
-                        },
-                    },
-                ],
-            },
-        ],
-        "max_tokens": 300,  # Default max tokens
-    }
-
-    if token:
-        completion_kwargs["api_key"] = token
-    if base_url:
-        completion_kwargs["api_base"] = base_url
-    response = litellm.completion(**completion_kwargs, **kwargs)
-    return response.choices[0].message.content.strip()
-
-
 if __name__ == "__main__":
-    response = llm_analyze_image(
-        image_url="https://picsum.photos/200/300",
+    response = llm_generate_image(
+        prompt="A dog!",
         model="ollama/llava",
     )
     print(response)
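
Two things happen in this file: llm_complete and llm_analyze_image are removed (which also lets the htmlfilters import go), while litellm keeps being loaded through lazylitellm.LazyLiteLLM, deferring the heavy import until first use. A rough sketch of that lazy-proxy pattern, assuming the real class works along these lines:

# Sketch of the lazy-import pattern (an assumption about LazyLiteLLM's
# shape, not the actual jinjarope.lazylitellm implementation).
import importlib
from typing import Any


class LazyModule:
    """Proxy that imports the wrapped module on first attribute access."""

    def __init__(self, name: str) -> None:
        self._name = name
        self._module: Any = None

    def __getattr__(self, attr: str) -> Any:
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)


litellm = LazyModule("litellm")  # import cost is paid only on first use
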
src/jinjarope/resources/llm_filters.toml: 26 changes (3 additions, 23 deletions)
@@ -1,9 +1,9 @@
 # This file contains definitions for functions of the "litellm" library.

 [filters.llm_complete]
-fn = "jinjarope.llmfilters.llm_complete"
+fn = "llmling_agent_functional.run_agent"
 group = "llm"
-required_packages = ["litellm"]
+required_packages = ["llmling-agent", "litellm"]

 [filters.llm_complete.examples.basic]
 template = """
@@ -23,24 +23,4 @@ template = """
 [filters.llm_generate_image]
 fn = "jinjarope.llmfilters.llm_generate_image"
 group = "llm"
-required_packages = ["litellm", "pillow"]
-
-# [filters.llm_generate_image.examples.basic]
-# template = """
-# {{ "A beautiful sunset over the ocean" | llm_generate_image(model="gemini/gemini-pro-vision") }}
-# """
-
-# [filters.llm_generate_image.examples.with_size]
-# template = """
-# {{ "A futuristic cityscape" | llm_generate_image(model="gemini/gemini-pro-vision", size="512x512") }}
-# """
-
-# [filters.llm_generate_image.examples.with_quality]
-# template = """
-# {{ "A detailed portrait of a cat" | llm_generate_image(model="gemini/gemini-pro-vision", quality="hd") }}
-# """
-
-# [filters.llm_generate_image.examples.as_b64_json]
-# template = """
-# {{ "A colorful abstract painting" | llm_generate_image(model="gemini/gemini-pro-vision", as_b64_json=true) }}
-# """
+required_packages = ["llmling-agent", "litellm", "pillow"]
