ExLLM (ex_llm v0.1.0)
ExLLM - Unified Elixir client library for Large Language Models.
ExLLM provides a consistent interface across multiple LLM providers including OpenAI, Anthropic Claude, Ollama, and others. It features configuration injection, standardized error handling, and streaming support.
Quick Start
# Using environment variables
messages = [%{role: "user", content: "Hello!"}]
{:ok, response} = ExLLM.chat(:anthropic, messages)
IO.puts(response.content)
# Using static configuration
config = %{anthropic: %{api_key: "your-key"}}
{:ok, provider} = ExLLM.ConfigProvider.Static.start_link(config)
{:ok, response} = ExLLM.chat(:anthropic, messages, config_provider: provider)
Supported Providers
- :anthropic - Anthropic Claude models
- :openai - OpenAI GPT models
- :ollama - Local Ollama models
- More providers coming soon!
Features
- Unified Interface: Same API across all providers
- Configuration Injection: Flexible config management
- Streaming Support: Real-time response streaming
- Error Standardization: Consistent error handling
- No Process Dependencies: Pure functional core
- Extensible: Easy to add new providers
Configuration
ExLLM supports multiple configuration methods:
Environment Variables
export OPENAI_API_KEY="sk-..."
export ANTHROPIC_API_KEY="api-..."
export OLLAMA_BASE_URL="http://localhost:11434"
Static Configuration
config = %{
openai: %{api_key: "sk-...", model: "gpt-4"},
anthropic: %{api_key: "api-...", model: "claude-3"},
ollama: %{base_url: "http://localhost:11434", model: "llama2"}
}
{:ok, provider} = ExLLM.ConfigProvider.Static.start_link(config)
Custom Configuration
defmodule MyConfigProvider do
@behaviour ExLLM.ConfigProvider
def get([:openai, :api_key]), do: MyApp.get_secret("openai_key")
def get(_), do: nil
def get_all(), do: %{}
end
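A module implementing the behaviour can then be passed on each call via :config_provider (a sketch; assumes MyApp.get_secret/1 from the module above returns a valid key):
# Pass the custom provider module directly as the :config_provider option
messages = [%{role: "user", content: "Hello!"}]
{:ok, response} = ExLLM.chat(:openai, messages, config_provider: MyConfigProvider)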
Examples
# Simple chat
{:ok, response} = ExLLM.chat(:openai, [
%{role: "user", content: "What is Elixir?"}
])
# With options
{:ok, response} = ExLLM.chat(:anthropic, messages,
model: "claude-3-haiku-20240307",
temperature: 0.7,
max_tokens: 1000
)
# Streaming
{:ok, stream} = ExLLM.stream_chat(:openai, messages)
for chunk <- stream do
if chunk.content, do: IO.write(chunk.content)
end
# Check if provider is configured
if ExLLM.configured?(:anthropic) do
{:ok, response} = ExLLM.chat(:anthropic, messages)
end
# List available models
{:ok, models} = ExLLM.list_models(:openai)
Enum.each(models, fn model ->
IO.puts(model.name)
end)
Summary
Functions
- add_session_message - Add a message to a session.
- calculate_cost - Calculate cost for token usage.
- chat - Send a chat completion request to the specified LLM provider.
- chat_with_session - Send a chat request using a session, automatically tracking messages and usage.
- clear_session - Clear messages from a session while preserving metadata.
- configured? - Check if the specified provider is properly configured.
- context_stats - Get statistics about message context usage.
- context_window_size - Get context window size for a model.
- default_model - Get the default model for the specified provider.
- estimate_tokens - Estimate token count for text.
- format_cost - Format cost for display.
- get_session_messages - Get messages from a session.
- list_models - List available models for the specified provider.
- load_session - Load a session from JSON.
- new_session - Create a new conversation session.
- prepare_messages - Prepare messages for sending to a provider with context management.
- save_session - Save a session to JSON.
- session_token_usage - Get total token usage for a session.
- stream_chat - Send a streaming chat completion request to the specified LLM provider.
- supported_providers - Get list of supported providers.
- validate_context - Validate that messages fit within a model's context window.
Types
@type messages() :: [ExLLM.Types.message()]
@type options() :: keyword()
@type provider() :: :anthropic | :openai | :ollama | :local
Functions
@spec add_session_message(ExLLM.Session.Types.Session.t(), String.t(), String.t(), keyword()) :: ExLLM.Session.Types.Session.t()
Add a message to a session.
Parameters
- session - The session to update
- role - Message role ("user", "assistant", etc.)
- content - Message content
- opts - Additional message metadata
Returns
Updated session.
Examples
session = ExLLM.add_session_message(session, "user", "What is Elixir?")
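Since opts is an open keyword list of metadata, arbitrary keys can be attached; the keys below are illustrative, not a fixed schema:
# Hypothetical metadata keys; add_session_message/4 accepts any keyword metadata
session = ExLLM.add_session_message(session, "assistant", "Elixir is a functional language.",
  source: :chat,
  inserted_at: DateTime.utc_now()
)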
@spec calculate_cost(provider(), String.t(), ExLLM.Types.token_usage()) :: ExLLM.Types.cost_result() | %{error: String.t()}
Calculate cost for token usage.
Parameters
- provider - LLM provider name
- model - Model name
- token_usage - Map with :input_tokens and :output_tokens
Returns
Cost calculation result or error map.
Examples
usage = %{input_tokens: 1000, output_tokens: 500}
cost = ExLLM.calculate_cost(:openai, "gpt-4", usage)
# => %{total_cost: 0.06, ...}
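A sketch that pairs this with format_cost/1 for display, assuming the success map carries :total_cost as in the example above:
usage = %{input_tokens: 1000, output_tokens: 500}
case ExLLM.calculate_cost(:openai, "gpt-4", usage) do
  %{error: reason} -> IO.puts("No pricing data: #{reason}")
  %{total_cost: total} -> IO.puts("Cost: " <> ExLLM.format_cost(total))
end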
@spec chat(provider(), messages(), options()) :: {:ok, ExLLM.Types.LLMResponse.t() | struct() | map()} | {:error, term()}
Send a chat completion request to the specified LLM provider.
Parameters
- provider - The LLM provider (:anthropic, :openai, :ollama)
- messages - List of conversation messages
- options - Options for the request (see module docs)
Options
- :model - Override the default model
- :temperature - Temperature setting (0.0 to 1.0)
- :max_tokens - Maximum tokens in response or context
- :config_provider - Configuration provider module or pid
- :track_cost - Whether to track costs (default: true)
- :strategy - Context truncation strategy (default: :sliding_window)
  - :sliding_window - Keep most recent messages
  - :smart - Preserve system messages and recent context
- :preserve_messages - Number of recent messages to always preserve (default: 5)
- :response_model - Ecto schema or type spec for structured output (requires instructor)
- :max_retries - Number of retries for structured output validation
Returns
{:ok, %ExLLM.Types.LLMResponse{}} on success, or {:ok, struct} when using :response_model. Returns {:error, reason} on failure.
Examples
# Simple usage
{:ok, response} = ExLLM.chat(:anthropic, [
%{role: "user", content: "Hello!"}
])
# With custom configuration
{:ok, provider} = ExLLM.ConfigProvider.Static.start_link(%{
anthropic: %{api_key: "your-key"}
})
{:ok, response} = ExLLM.chat(:anthropic, messages, config_provider: provider)
# With model override
{:ok, response} = ExLLM.chat(:openai, messages, model: "gpt-4-turbo")
# With context management
{:ok, response} = ExLLM.chat(:anthropic, messages,
max_tokens: 4000,
strategy: :smart
)
# With structured output (requires instructor)
{:ok, classification} = ExLLM.chat(:anthropic, messages,
response_model: EmailClassification,
max_retries: 3
)
@spec chat_with_session(ExLLM.Session.Types.Session.t(), String.t(), options()) :: {:ok, {ExLLM.Types.LLMResponse.t(), ExLLM.Session.Types.Session.t()}} | {:error, term()}
Send a chat request using a session, automatically tracking messages and usage.
Parameters
- session - The session to use
- content - The user message content
- options - Chat options (same as chat/3)
Returns
{:ok, {response, updated_session}} on success, {:error, reason} on failure.
Examples
session = ExLLM.new_session(:anthropic)
{:ok, {response, session}} = ExLLM.chat_with_session(session, "Hello!")
# Session now contains the conversation history
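Because each call returns the updated session, a multi-turn conversation is a simple reduce over user inputs (sketch; error handling omitted):
turns = ["What is Elixir?", "Who created it?", "When was it released?"]
session =
  Enum.reduce(turns, ExLLM.new_session(:anthropic), fn content, session ->
    {:ok, {response, session}} = ExLLM.chat_with_session(session, content)
    IO.puts(response.content)
    session
  end)
IO.puts("Total tokens: #{ExLLM.session_token_usage(session)}")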
@spec clear_session(ExLLM.Session.Types.Session.t()) :: ExLLM.Session.Types.Session.t()
Clear messages from a session while preserving metadata.
Parameters
- session - The session to clear
Returns
Updated session with no messages.
Examples
session = ExLLM.clear_session(session)
configured?(provider, options \\ [])
Check if the specified provider is properly configured.
Parameters
- provider - The LLM provider to check
- options - Options including configuration provider
Returns
true if configured, false otherwise.
Examples
if ExLLM.configured?(:anthropic) do
{:ok, response} = ExLLM.chat(:anthropic, messages)
else
IO.puts("Anthropic not configured")
end
context_stats(messages)
Get statistics about message context usage.
Parameters
- messages - List of conversation messages
Returns
Map with context statistics.
Examples
stats = ExLLM.context_stats(messages)
# => %{total_tokens: 1500, message_count: 10, ...}
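A sketch that uses the stats to decide whether truncation is needed before a request, assuming the :total_tokens key shown above:
stats = ExLLM.context_stats(messages)
window = ExLLM.context_window_size(:anthropic, "claude-3-5-sonnet-20241022")
messages =
  if window && stats.total_tokens > window do
    # Too large for the model: truncate with the :smart strategy
    ExLLM.prepare_messages(messages, max_tokens: window, strategy: :smart)
  else
    messages
  end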
@spec context_window_size(provider(), String.t()) :: non_neg_integer() | nil
Get context window size for a model.
Parameters
- provider - LLM provider name
- model - Model name
Returns
Context window size in tokens or nil if unknown.
Examples
tokens = ExLLM.context_window_size(:anthropic, "claude-3-5-sonnet-20241022")
# => 200000
default_model(provider)
Get the default model for the specified provider.
Parameters
- provider - The LLM provider
Returns
String model identifier.
Examples
model = ExLLM.default_model(:anthropic)
# => "claude-sonnet-4-20250514"
@spec estimate_tokens(String.t() | map() | [map()]) :: non_neg_integer()
Estimate token count for text.
Parameters
- text - Text to analyze (string, message map, or list)
Returns
Estimated token count.
Examples
tokens = ExLLM.estimate_tokens("Hello, world!")
# => 4
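Per the spec, a single message map or a whole conversation can be estimated as well:
ExLLM.estimate_tokens(%{role: "user", content: "Hello, world!"})
conversation = [
  %{role: "system", content: "You are a helpful assistant."},
  %{role: "user", content: "Summarize Elixir in one sentence."}
]
total = ExLLM.estimate_tokens(conversation)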
format_cost(cost)
Format cost for display.
Parameters
- cost - Cost in dollars
Returns
Formatted cost string.
Examples
ExLLM.format_cost(0.0035)
# => "$0.350¢"
@spec get_session_messages(ExLLM.Session.Types.Session.t(), non_neg_integer() | nil) :: [ExLLM.Session.Types.message()]
Get messages from a session.
Parameters
- session - The session to query
- limit - Optional message limit
Returns
List of messages.
Examples
messages = ExLLM.get_session_messages(session)
last_10 = ExLLM.get_session_messages(session, 10)
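A sketch that replays a session as a plain transcript, assuming each message exposes role and content as elsewhere in these docs:
session
|> ExLLM.get_session_messages()
|> Enum.each(fn message ->
  IO.puts("#{message.role}: #{message.content}")
end)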
@spec list_models(provider(), options()) :: {:ok, [ExLLM.Types.Model.t()]} | {:error, term()}
List available models for the specified provider.
Parameters
- provider - The LLM provider
- options - Options including configuration provider
Returns
{:ok, [%ExLLM.Types.Model{}]} on success, {:error, reason} on failure.
Examples
{:ok, models} = ExLLM.list_models(:anthropic)
Enum.each(models, fn model ->
IO.puts(model.name)
end)
@spec load_session(String.t()) :: {:ok, ExLLM.Session.Types.Session.t()} | {:error, term()}
Load a session from JSON.
Parameters
- json - JSON string containing session data
Returns
{:ok, session} on success, {:error, reason} on failure.
Examples
json = File.read!("session.json")
{:ok, session} = ExLLM.load_session(json)
@spec new_session(provider(), keyword()) :: ExLLM.Session.Types.Session.t()
Create a new conversation session.
Parameters
- provider - LLM provider to use for the session
- opts - Session options (:name for session name)
Returns
A new session struct.
Examples
session = ExLLM.new_session(:anthropic)
session = ExLLM.new_session(:openai, name: "Customer Support")
prepare_messages(messages, options)
Prepare messages for sending to a provider with context management.
Parameters
- messages - List of conversation messages
- options - Options for context management
Options
- :max_tokens - Maximum tokens for context (default: model-specific)
- :strategy - Context truncation strategy (default: :sliding_window)
- :preserve_messages - Number of recent messages to preserve (default: 5)
Returns
Prepared messages list that fits within context window.
Examples
messages = ExLLM.prepare_messages(long_conversation,
max_tokens: 4000,
strategy: :smart
)
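The prepared list can then be validated and sent as usual (sketch):
prepared = ExLLM.prepare_messages(long_conversation, max_tokens: 4000, strategy: :smart)
with {:ok, _tokens} <- ExLLM.validate_context(prepared, model: "claude-3-5-sonnet-20241022"),
     {:ok, response} <- ExLLM.chat(:anthropic, prepared) do
  IO.puts(response.content)
end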
@spec save_session(ExLLM.Session.Types.Session.t()) :: {:ok, String.t()} | {:error, term()}
Save a session to JSON.
Parameters
- session - The session to save
Returns
{:ok, json} on success, {:error, reason} on failure.
Examples
{:ok, json} = ExLLM.save_session(session)
File.write!("session.json", json)
@spec session_token_usage(ExLLM.Session.Types.Session.t()) :: non_neg_integer()
Get total token usage for a session.
Parameters
- session - The session to analyze
Returns
Total token count.
Examples
tokens = ExLLM.session_token_usage(session)
# => 2500
@spec stream_chat(provider(), messages(), options()) :: {:ok, ExLLM.Types.stream()} | {:error, term()}
Send a streaming chat completion request to the specified LLM provider.
Parameters
- provider - The LLM provider (:anthropic, :openai, :ollama)
- messages - List of conversation messages
- options - Options for the request (see module docs)
Options
Same as chat/3, plus:
- :on_chunk - Callback function for each chunk (see the sketch after the examples below)
Returns
{:ok, stream} on success, where the stream yields %ExLLM.Types.StreamChunk{} structs; {:error, reason} on failure.
Examples
{:ok, stream} = ExLLM.stream_chat(:anthropic, messages)
# Process the stream
for chunk <- stream do
case chunk do
%{content: content} when content != nil ->
IO.write(content)
%{finish_reason: "stop"} ->
IO.puts("\nDone!")
_ ->
:continue
end
end
# With context management
{:ok, stream} = ExLLM.stream_chat(:anthropic, messages,
max_tokens: 4000,
strategy: :smart
)
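A sketch using the :on_chunk callback; it assumes the callback receives each %ExLLM.Types.StreamChunk{} as its only argument and that the stream must still be consumed to drive it:
# With a per-chunk callback (assumed single-argument callback)
{:ok, stream} = ExLLM.stream_chat(:anthropic, messages,
  on_chunk: fn chunk ->
    if chunk.content, do: IO.write(chunk.content)
  end
)
# Drain the stream for its side effects
Stream.run(stream)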
@spec supported_providers() :: [provider()]
Get list of supported providers.
Returns
List of provider atoms.
Examples
providers = ExLLM.supported_providers()
# => [:anthropic, :openai, :ollama]
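Combined with configured?/1, this yields the providers that are actually usable in the current environment:
ready = Enum.filter(ExLLM.supported_providers(), &ExLLM.configured?/1)
# => [:anthropic] when only ANTHROPIC_API_KEY is set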
@spec validate_context(messages(), options()) :: {:ok, non_neg_integer()} | {:error, term()}
Validate that messages fit within a model's context window.
Parameters
- messages - List of conversation messages
- options - Options including model info
Returns
{:ok, token_count} if valid, {:error, reason} if too large.
Examples
{:ok, tokens} = ExLLM.validate_context(messages, model: "claude-3-5-sonnet-20241022")
# => {:ok, 3500}
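A sketch of the typical fallback when validation fails: truncate with prepare_messages/2 and proceed:
messages =
  case ExLLM.validate_context(messages, model: "claude-3-5-sonnet-20241022") do
    {:ok, _tokens} -> messages
    {:error, _too_large} -> ExLLM.prepare_messages(messages, strategy: :sliding_window)
  end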