got some tool calling running

This commit is contained in:
2026-03-05 22:06:58 -07:00
parent cacae19f74
commit b89d4e5a28
8 changed files with 495 additions and 151 deletions

View File

@@ -33,17 +33,14 @@
@apply text-sm text-cyan-100; @apply text-sm text-cyan-100;
} }
/* Paragraphs */
.markdown p { .markdown p {
@apply my-3; @apply my-3;
} }
/* Links */
.markdown a { .markdown a {
@apply text-cyan-400 underline underline-offset-2 transition-colors duration-150 hover:text-cyan-300; @apply text-cyan-400 underline underline-offset-2 transition-colors duration-150 hover:text-cyan-300;
} }
/* Strong / Em */
.markdown strong { .markdown strong {
@apply font-bold text-cyan-100; @apply font-bold text-cyan-100;
} }
@@ -51,12 +48,10 @@
@apply italic text-cyan-200; @apply italic text-cyan-200;
} }
/* Inline code */
.markdown code { .markdown code {
@apply font-mono text-sm bg-cyan-950 text-cyan-300 px-1 py-0.5 rounded border border-cyan-900; @apply font-mono text-sm bg-cyan-950 text-cyan-300 px-1 py-0.5 rounded border border-cyan-900;
} }
/* Code blocks */
.markdown pre { .markdown pre {
@apply bg-cyan-950 border border-cyan-900 rounded-lg px-5 py-4 overflow-x-auto my-4; @apply bg-cyan-950 border border-cyan-900 rounded-lg px-5 py-4 overflow-x-auto my-4;
} }
@@ -64,17 +59,13 @@
@apply bg-transparent border-0 p-0 text-sm text-cyan-100; @apply bg-transparent border-0 p-0 text-sm text-cyan-100;
} }
/* Blockquote */
.markdown blockquote { .markdown blockquote {
@apply border-l-2 border-cyan-700 my-4 px-4 py-2 bg-cyan-950 text-cyan-200 rounded-r italic; @apply border-l-2 border-cyan-700 my-4 px-4 py-2 bg-cyan-950 text-cyan-200 rounded-r italic;
} }
/* Horizontal rule */
.markdown hr { .markdown hr {
@apply border-0 border-t border-cyan-900 my-6; @apply border-0 border-t border-cyan-900 my-6;
} }
/* Lists */
.markdown ul, .markdown ul,
.markdown ol { .markdown ol {
@apply my-3 pl-6; @apply my-3 pl-6;
@@ -92,7 +83,6 @@
@apply text-cyan-700; @apply text-cyan-700;
} }
/* Nested lists */
.markdown ul ul, .markdown ul ul,
.markdown ol ul { .markdown ol ul {
list-style-type: circle; list-style-type: circle;
@@ -101,7 +91,6 @@
list-style-type: square; list-style-type: square;
} }
/* Tables */
.markdown table { .markdown table {
@apply w-full border-collapse my-4 text-sm; @apply w-full border-collapse my-4 text-sm;
} }
@@ -118,12 +107,10 @@
@apply bg-cyan-950; @apply bg-cyan-950;
} }
/* Images */
.markdown img { .markdown img {
@apply max-w-full rounded-md border border-cyan-900 my-2; @apply max-w-full rounded-md border border-cyan-900 my-2;
} }
/* Task list checkboxes (GitHub-flavored) */
.markdown input[type="checkbox"] { .markdown input[type="checkbox"] {
@apply accent-cyan-700 mr-1; @apply accent-cyan-700 mr-1;
} }

View File

@@ -0,0 +1,45 @@
defmodule ElixirAi.ChatUtils do
  @moduledoc """
  Issues streaming chat-completion requests to the configured AI endpoint.

  Each SSE line of the response is forwarded to `server` through
  `handle_stream_line/2` (imported from `ElixirAi.AiUtils.StreamLineUtils`),
  which turns it into plain messages sent to that process.
  """

  require Logger

  import ElixirAi.AiUtils.StreamLineUtils

  @doc """
  Starts an asynchronous streaming chat request.

  * `server` — pid (or registered name) that receives the parsed stream events.
  * `messages` — conversation history; each entry is a map with `:role` (atom)
    and `:content`.
  * `tools` — map of `%{name => definition}`; only the definition values are
    sent to the API.

  Returns the `Task.start/1` result immediately; the response itself arrives
  as messages delivered to `server`.
  """
  def request_ai_response(server, messages, tools) do
    Task.start(fn ->
      # Read config at runtime so endpoint/key/model can vary per environment.
      api_url = Application.fetch_env!(:elixir_ai, :ai_endpoint)
      api_key = Application.fetch_env!(:elixir_ai, :ai_token)
      model = Application.fetch_env!(:elixir_ai, :ai_model)

      tool_definitions = Enum.map(tools, fn {_name, definition} -> definition end)

      body = %{
        model: model,
        stream: true,
        messages: Enum.map(messages, &api_message/1),
        tools: tool_definitions
      }

      headers = [{"authorization", "Bearer #{api_key}"}]

      case Req.post(api_url,
             json: body,
             headers: headers,
             into: fn {:data, data}, acc ->
               # A single chunk may carry several "data: ..." lines at once.
               data
               |> String.split("\n")
               |> Enum.each(&handle_stream_line(server, &1))

               {:cont, acc}
             end
           ) do
        {:ok, _} ->
          :ok

        {:error, reason} ->
          # Logger.error (Logger is already required) instead of IO.warn, so
          # failures land in the application log with proper severity rather
          # than as a compiler-style warning on stderr.
          Logger.error("AI request failed: #{inspect(reason)}")
      end
    end)
  end

  @doc """
  Converts an internal message map into the wire format expected by the API
  (string role, content passed through). Extra keys such as
  `:reasoning_content` or `:tool_calls` are intentionally dropped.
  """
  def api_message(%{role: role, content: content}) do
    %{role: Atom.to_string(role), content: content}
  end
end

View File

@@ -0,0 +1,167 @@
defmodule ElixirAi.AiUtils.StreamLineUtils do
  @moduledoc """
  Decodes one line of a streaming chat-completion response at a time and
  forwards each recognized event to `server` as a plain message.

  Input arrives in two forms, handled by overlapping clause groups:

    * raw strings — SSE-style lines (`""`, `"data: [DONE]"`, `"data: {json}"`)
      which are decoded and re-dispatched, and
    * decoded maps — matched structurally against the chunk shapes of an
      OpenAI-compatible streaming API (assumed format — confirm against the
      configured provider).

  Clause order is significant: specific map patterns must precede the
  generic content clause and the trailing catch-alls.
  """

  require Logger

  # Blank keep-alive line between SSE events — nothing to forward.
  def handle_stream_line(_server, "") do
    :ok
  end

  # Terminal sentinel. Stream completion is signalled via the
  # "finish_reason" clauses below, so this is intentionally a no-op.
  def handle_stream_line(_server, "data: [DONE]") do
    # send(server, :ai_stream_done)
    :ok
  end

  # A data line carrying a JSON payload: decode and re-dispatch on the map.
  def handle_stream_line(server, "data: " <> json) do
    case Jason.decode(json) do
      {:ok, body} ->
        # Logger.debug("Received AI chunk: #{inspect(body)}")
        handle_stream_line(server, body)

      other ->
        Logger.error("Failed to decode AI response chunk: #{inspect(other)}")
        :ok
    end
  end

  # first streamed response: role announcement with no content yet.
  def handle_stream_line(server, %{
        "choices" => [%{"delta" => %{"content" => nil, "role" => "assistant"}}],
        "id" => id
      }) do
    send(
      server,
      {:start_new_ai_response, id}
    )
  end

  # last streamed response: normal (non-tool) completion.
  def handle_stream_line(server, %{
        "choices" => [%{"finish_reason" => "stop"}],
        "id" => id
      }) do
    send(
      server,
      {:ai_stream_finish, id}
    )
  end

  # streamed in reasoning ("reasoning_content" extension field); matched
  # before the plain-content clause below.
  def handle_stream_line(server, %{
        "choices" => [
          %{
            "delta" => %{"reasoning_content" => reasoning_content},
            "finish_reason" => nil
          }
        ],
        "id" => id
      }) do
    send(
      server,
      {:ai_reasoning_chunk, id, reasoning_content}
    )
  end

  # streamed in text: an incremental slice of the visible assistant reply.
  def handle_stream_line(server, %{
        "choices" => [
          %{
            "delta" => %{"content" => reasoning_content},
            "finish_reason" => nil
          }
        ],
        "id" => id
      }) do
    send(
      server,
      {:ai_text_chunk, id, reasoning_content}
    )
  end

  # start tool call: first tool_calls chunk carries the function name plus
  # the opening fragment of the (JSON-encoded) argument string.
  #
  # NOTE(review): `tool_index` here is bound from the *choice-level* "index",
  # not the per-entry "index" inside "tool_calls". For parallel tool calls
  # the per-entry index is usually the discriminator — confirm against the
  # provider's chunk format.
  def handle_stream_line(server, %{
        "choices" => [
          %{
            "delta" => %{
              "tool_calls" => [
                %{
                  "function" => %{
                    "name" => tool_name,
                    "arguments" => tool_args_start
                  }
                }
              ]
            },
            "finish_reason" => nil,
            "index" => tool_index
          }
        ],
        "id" => id
      }) do
    send(
      server,
      {:ai_tool_call_start, id, {tool_name, tool_args_start, tool_index}}
    )
  end

  # middle tool call: subsequent chunks append to the argument string and
  # omit the function name.
  def handle_stream_line(server, %{
        "choices" => [
          %{
            "delta" => %{
              "tool_calls" => [
                %{
                  "function" => %{
                    "arguments" => tool_args_diff
                  }
                }
              ]
            },
            "finish_reason" => nil,
            "index" => tool_index
          }
        ],
        "id" => id
      }) do
    send(
      server,
      {:ai_tool_call_middle, id, {tool_args_diff, tool_index}}
    )
  end

  # end tool call: empty delta with finish_reason "tool_calls" marks the
  # argument stream complete; the receiver may now decode and execute.
  def handle_stream_line(server, %{
        "choices" => [
          %{
            "delta" => %{},
            "finish_reason" => "tool_calls",
            "index" => tool_index
          }
        ],
        "id" => id
      }) do
    send(
      server,
      {:ai_tool_call_end, id, tool_index}
    )
  end

  # Error payload from the API: logged, not forwarded.
  def handle_stream_line(_server, %{"error" => error_info}) do
    Logger.error("Received error from AI stream: #{inspect(error_info)}")
    :ok
  end

  # Any other non-"data: " string (e.g. a bare JSON line): attempt a decode
  # and re-dispatch; otherwise log and drop.
  def handle_stream_line(server, json) when is_binary(json) do
    case Jason.decode(json) do
      {:ok, body} ->
        handle_stream_line(server, body)

      _ ->
        Logger.warning("Received unmatched stream line: #{inspect(json)}")
        :ok
    end
  end

  # Final catch-all for decoded maps with shapes not handled above.
  def handle_stream_line(_server, unmatched_message) do
    Logger.warning("Received unmatched stream line: #{inspect(unmatched_message)}")
    :ok
  end
end

View File

@@ -29,16 +29,35 @@ defmodule ElixirAi.ChatRunner do
{:ok, state} {:ok, state}
end end
# Registry of tools offered to the model: maps each tool name to its API
# definition and the 1-arity function invoked with the decoded argument map.
def tools do
  %{
    "store_thing" => %{
      definition: ElixirAi.ToolTesting.store_thing_definition("store_thing"),
      function: &ElixirAi.ToolTesting.hold_thing/1
    },
    "read_thing" => %{
      definition: ElixirAi.ToolTesting.read_thing_definition("read_thing"),
      # Tool functions are always invoked with the decoded argument map
      # (see the :ai_tool_call_end handler: `tool_function.(decoded_args)`).
      # get_thing/0 takes no arguments, so it must be wrapped — the bare
      # &get_thing/0 capture would raise BadArityError when called with args.
      function: fn _args -> ElixirAi.ToolTesting.get_thing() end
    }
  }
end
def handle_cast({:user_message, text_content}, state) do def handle_cast({:user_message, text_content}, state) do
new_message = %{role: :user, content: text_content} new_message = %{role: :user, content: text_content}
broadcast({:user_chat_message, new_message}) broadcast({:user_chat_message, new_message})
new_state = %{state | messages: state.messages ++ [new_message], turn: :assistant} new_state = %{state | messages: state.messages ++ [new_message], turn: :assistant}
request_ai_response(self(), new_state.messages)
tools =
tools()
|> Enum.map(fn {name, %{definition: definition}} -> {name, definition} end)
|> Enum.into(%{})
request_ai_response(self(), new_state.messages, tools)
{:noreply, new_state} {:noreply, new_state}
end end
def handle_info({:start_new_ai_response, id}, state) do def handle_info({:start_new_ai_response, id}, state) do
starting_response = %{id: id, reasoning_content: "", content: ""} starting_response = %{id: id, reasoning_content: "", content: "", tool_calls: []}
broadcast({:start_ai_response_stream, starting_response}) broadcast({:start_ai_response_stream, starting_response})
{:noreply, %{state | streaming_response: starting_response}} {:noreply, %{state | streaming_response: starting_response}}
@@ -88,7 +107,8 @@ defmodule ElixirAi.ChatRunner do
final_message = %{ final_message = %{
role: :assistant, role: :assistant,
content: state.streaming_response.content, content: state.streaming_response.content,
reasoning_content: state.streaming_response.reasoning_content reasoning_content: state.streaming_response.reasoning_content,
tool_calls: state.streaming_response.tool_calls
} }
{:noreply, {:noreply,
@@ -100,6 +120,111 @@ defmodule ElixirAi.ChatRunner do
}} }}
end end
# A tool call has started streaming: record its name, the opening argument
# fragment, and its index on the in-flight streaming response.
def handle_info({:ai_tool_call_start, _id, {tool_name, tool_args_start, tool_index}}, state) do
  Logger.info("AI started tool call #{tool_name}")

  new_call = %{name: tool_name, arguments: tool_args_start, index: tool_index}

  streaming =
    Map.update!(state.streaming_response, :tool_calls, fn calls -> calls ++ [new_call] end)

  {:noreply, %{state | streaming_response: streaming}}
end
# An argument fragment arrived for an in-flight tool call: append it to the
# call with the matching index, leaving all other calls untouched.
def handle_info({:ai_tool_call_middle, _id, {tool_args_diff, tool_index}}, state) do
  append_fragment = fn
    %{index: ^tool_index, arguments: args} = call ->
      %{call | arguments: args <> tool_args_diff}

    call ->
      call
  end

  tool_calls = Enum.map(state.streaming_response.tool_calls, append_fragment)
  streaming = %{state.streaming_response | tool_calls: tool_calls}

  {:noreply, %{state | streaming_response: streaming}}
end
# A tool call's argument stream completed: decode its arguments, execute the
# registered tool function, and record the :result (or :error on bad JSON).
# Once every pending tool call carries a result or error, the assistant
# message is finalized, appended to history, and broadcast.
def handle_info({:ai_tool_call_end, _id, tool_index}, state) do
  tool_calls =
    Enum.map(state.streaming_response.tool_calls, fn
      %{arguments: existing_args, index: ^tool_index} = tool_call ->
        case Jason.decode(existing_args) do
          {:ok, decoded_args} ->
            tool_function = tools()[tool_call.name].function
            Map.put(tool_call, :result, tool_function.(decoded_args))

          {:error, e} ->
            Map.put(tool_call, :error, "Failed to decode tool arguments: #{inspect(e)}")
        end

      other ->
        other
    end)

  all_tool_calls_finished =
    Enum.all?(tool_calls, fn call ->
      Map.has_key?(call, :result) or Map.has_key?(call, :error)
    end)

  state =
    if all_tool_calls_finished do
      Logger.info("All tool calls finished, broadcasting updated tool calls with results")

      new_message = %{
        role: :assistant,
        content: state.streaming_response.content,
        reasoning_content: state.streaming_response.reasoning_content,
        tool_calls: tool_calls
      }

      broadcast({:tool_calls_finished, new_message})

      # BUG FIX: previously broadcast/1 was the last expression of this
      # branch, so its return value — not the updated state — was bound to
      # `state`, corrupting the GenServer state after tool completion.
      %{state | messages: state.messages ++ [new_message], streaming_response: nil}
    else
      %{state | streaming_response: %{state.streaming_response | tool_calls: tool_calls}}
    end

  {:noreply, state}
end
def handle_call(:get_conversation, _from, state) do def handle_call(:get_conversation, _from, state) do
{:reply, state, state} {:reply, state, state}
end end

View File

@@ -1,120 +0,0 @@
defmodule ElixirAi.ChatUtils do
require Logger
def request_ai_response(server, messages) do
Task.start(fn ->
api_url = Application.fetch_env!(:elixir_ai, :ai_endpoint)
api_key = Application.fetch_env!(:elixir_ai, :ai_token)
model = Application.fetch_env!(:elixir_ai, :ai_model)
body = %{
model: model,
stream: true,
messages: messages |> Enum.map(&api_message/1)
}
headers = [{"authorization", "Bearer #{api_key}"}]
case Req.post(api_url,
json: body,
headers: headers,
into: fn {:data, data}, acc ->
data
|> String.split("\n")
|> Enum.each(&handle_stream_line(server, &1))
{:cont, acc}
end
) do
{:ok, _} ->
:ok
{:error, reason} ->
IO.warn("AI request failed: #{inspect(reason)}")
end
end)
end
def handle_stream_line(_server, "") do
:ok
end
def handle_stream_line(server, "data: [DONE]") do
# send(server, :ai_stream_done)
:ok
end
def handle_stream_line(server, "data: " <> json) do
case Jason.decode(json) do
{:ok, body} ->
# Logger.debug("Received AI chunk: #{inspect(body)}")
handle_stream_line(server, body)
other ->
Logger.error("Failed to decode AI response chunk: #{inspect(other)}")
:ok
end
end
# first streamed response
def handle_stream_line(server, %{
"choices" => [%{"delta" => %{"content" => nil, "role" => "assistant"}}],
"id" => id
}) do
send(
server,
{:start_new_ai_response, id}
)
end
# last streamed response
def handle_stream_line(server, %{
"choices" => [%{"finish_reason" => "stop"}],
"id" => id
}) do
send(
server,
{:ai_stream_finish, id}
)
end
# streamed in reasoning
def handle_stream_line(server, %{
"choices" => [
%{
"delta" => %{"reasoning_content" => reasoning_content},
"finish_reason" => nil
}
],
"id" => id
}) do
send(
server,
{:ai_reasoning_chunk, id, reasoning_content}
)
end
def handle_stream_line(server, %{
"choices" => [
%{
"delta" => %{"content" => reasoning_content},
"finish_reason" => nil
}
],
"id" => id
}) do
send(
server,
{:ai_text_chunk, id, reasoning_content}
)
end
def handle_stream_line(_server, unmatched_message) do
Logger.warning("Received unmatched stream line: #{inspect(unmatched_message)}")
:ok
end
def api_message(%{role: role, content: content}) do
%{role: Atom.to_string(role), content: content}
end
end

View File

@@ -0,0 +1,55 @@
defmodule ElixirAi.ToolTesting do
  @moduledoc """
  Minimal GenServer-backed scratch store used to exercise AI tool calling.

  `hold_thing/1` replaces the stored value wholesale (fire-and-forget);
  `get_thing/0` reads back whatever was stored last. The `*_definition/1`
  functions return the OpenAI-style function schemas advertised to the model.
  """

  use GenServer

  ## Client API

  def start_link(_) do
    GenServer.start_link(__MODULE__, %{}, name: __MODULE__)
  end

  # Asynchronously replace the stored value with `thing`.
  def hold_thing(thing) do
    GenServer.cast(__MODULE__, {:hold_thing, thing})
  end

  # Synchronously read the last stored value.
  def get_thing do
    GenServer.call(__MODULE__, :get_thing)
  end

  # Function schema for the store tool (expects "name" and "value" strings).
  def store_thing_definition(name) do
    %{
      "type" => "function",
      "function" => %{
        "name" => name,
        "description" => "store key value pair",
        "parameters" => %{
          "type" => "object",
          "properties" => %{
            "name" => %{"type" => "string"},
            "value" => %{"type" => "string"}
          },
          "required" => ["name", "value"]
        }
      }
    }
  end

  # Function schema for the read tool (takes no parameters).
  def read_thing_definition(name) do
    %{
      "type" => "function",
      "function" => %{
        "name" => name,
        "description" => "read key value pair that was previously stored with store_thing"
      }
    }
  end

  ## Server callbacks

  @impl true
  def init(initial_state) do
    {:ok, initial_state}
  end

  @impl true
  def handle_cast({:hold_thing, new_value}, _state) do
    {:noreply, new_value}
  end

  @impl true
  def handle_call(:get_thing, _from, state) do
    {:reply, state, state}
  end
end

View File

@@ -17,10 +17,53 @@ defmodule ElixirAiWeb.ChatMessage do
attr :content, :string, required: true attr :content, :string, required: true
attr :reasoning_content, :string, default: nil attr :reasoning_content, :string, default: nil
attr :tool_calls, :list, default: []
def assistant_message(assigns) do def assistant_message(assigns) do
assigns = assign(assigns, :_reasoning_id, "reasoning-#{:erlang.phash2(assigns.content)}") assigns =
assigns
|> assign(:_reasoning_id, "reasoning-#{:erlang.phash2(assigns.content)}")
|> assign(:_expanded, false)
~H"""
<.message_bubble
reasoning_id={@_reasoning_id}
content={@content}
reasoning_content={@reasoning_content}
tool_calls={@tool_calls}
expanded={@_expanded}
/>
"""
end
attr :content, :string, required: true
attr :reasoning_content, :string, default: nil
attr :tool_calls, :list, default: []

# Variant of the assistant bubble used while a response is still streaming:
# same markup via message_bubble, but with a fixed DOM id
# ("reasoning-stream" — only one stream renders at a time) and the
# reasoning panel starting expanded so tokens are visible as they arrive.
def streaming_assistant_message(assigns) do
  assigns =
    assigns
    |> assign(:_reasoning_id, "reasoning-stream")
    |> assign(:_expanded, true)

  ~H"""
  <.message_bubble
    reasoning_id={@_reasoning_id}
    content={@content}
    reasoning_content={@reasoning_content}
    tool_calls={@tool_calls}
    expanded={@_expanded}
  />
  """
end
attr :content, :string, required: true
attr :reasoning_content, :string, default: nil
attr :tool_calls, :list, default: []
attr :reasoning_id, :string, required: true
attr :expanded, :boolean, default: false
defp message_bubble(assigns) do
~H""" ~H"""
<div class="mb-2 text-sm text-left"> <div class="mb-2 text-sm text-left">
<%= if @reasoning_content && @reasoning_content != "" do %> <%= if @reasoning_content && @reasoning_content != "" do %>
@@ -28,19 +71,19 @@ defmodule ElixirAiWeb.ChatMessage do
type="button" type="button"
class="flex items-center text-cyan-500/60 hover:text-cyan-300 transition-colors duration-150 cursor-pointer" class="flex items-center text-cyan-500/60 hover:text-cyan-300 transition-colors duration-150 cursor-pointer"
phx-click={ phx-click={
JS.toggle_class("collapsed", to: "##{@_reasoning_id}") JS.toggle_class("collapsed", to: "##{@reasoning_id}")
|> JS.toggle_class("rotate-180", to: "##{@_reasoning_id}-chevron") |> JS.toggle_class("rotate-180", to: "##{@reasoning_id}-chevron")
} }
aria-label="Toggle reasoning" aria-label="Toggle reasoning"
> >
<div class="flex items-center gap-1 text-cyan-100/40 ps-2 mb-1"> <div class="flex items-center gap-1 text-cyan-100/40 ps-2 mb-1">
<span class="text-xs">reasoning</span> <span class="text-xs">reasoning</span>
<svg <svg
id={"#{@_reasoning_id}-chevron"} id={"#{@reasoning_id}-chevron"}
xmlns="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20" viewBox="0 0 20 20"
fill="currentColor" fill="currentColor"
class="w-3 h-3 transition-transform duration-300" class={["w-3 h-3 transition-transform duration-300", !@expanded && "rotate-180"]}
> >
<path <path
fill-rule="evenodd" fill-rule="evenodd"
@@ -51,15 +94,45 @@ defmodule ElixirAiWeb.ChatMessage do
</div> </div>
</button> </button>
<div <div
id={@_reasoning_id} id={@reasoning_id}
class="reasoning-content block px-3 py-2 rounded-lg bg-cyan-950/50 text-cyan-400 italic text-xs max-w-prose mb-1 markdown" class={[
"reasoning-content block px-3 py-2 rounded-lg bg-cyan-950/50 text-cyan-400 italic text-xs max-w-prose mb-1 markdown",
!@expanded && "collapsed"
]}
> >
{Markdown.render(@reasoning_content)} {Markdown.render(@reasoning_content)}
</div> </div>
<% end %> <% end %>
<div class="inline-block px-3 py-2 rounded-lg max-w-prose markdown bg-cyan-950/50"> <%= for tool_call <- @tool_calls do %>
{Markdown.render(@content)} <div class="mb-1 max-w-prose rounded-lg border border-cyan-900 bg-cyan-950/40 text-xs font-mono overflow-hidden">
</div> <div class="flex items-center gap-2 px-3 py-1 border-b border-cyan-900 bg-cyan-900/30 text-cyan-400">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" class="w-3 h-3">
<path fill-rule="evenodd" d="M6.28 5.22a.75.75 0 0 1 0 1.06L2.56 10l3.72 3.72a.75.75 0 0 1-1.06 1.06L.97 10.53a.75.75 0 0 1 0-1.06l4.25-4.25a.75.75 0 0 1 1.06 0Zm7.44 0a.75.75 0 0 1 1.06 0l4.25 4.25a.75.75 0 0 1 0 1.06l-4.25 4.25a.75.75 0 0 1-1.06-1.06L17.44 10l-3.72-3.72a.75.75 0 0 1 0-1.06Z" clip-rule="evenodd" />
</svg>
<span class="text-cyan-300 font-semibold">{tool_call.name}</span>
</div>
<%= if tool_call[:arguments] && tool_call[:arguments] != "" do %>
<div class="px-3 py-2 text-cyan-500 border-b border-cyan-900/50">
<span class="text-cyan-700 mr-1">args</span>{tool_call.arguments}
</div>
<% end %>
<%= if Map.has_key?(tool_call, :result) do %>
<div class="px-3 py-2 text-cyan-200">
<span class="text-cyan-700 mr-1">result</span>{inspect(tool_call.result)}
</div>
<% end %>
<%= if Map.has_key?(tool_call, :error) do %>
<div class="px-3 py-2 text-red-400">
<span class="text-red-600 mr-1">error</span>{tool_call.error}
</div>
<% end %>
</div>
<% end %>
<%= if @content && @content != "" do %>
<div class="inline-block px-3 py-2 rounded-lg max-w-prose markdown bg-cyan-950/50">
{Markdown.render(@content)}
</div>
<% end %>
</div> </div>
""" """
end end

View File

@@ -2,13 +2,13 @@ defmodule ElixirAiWeb.ChatLive do
use ElixirAiWeb, :live_view use ElixirAiWeb, :live_view
import ElixirAiWeb.Spinner import ElixirAiWeb.Spinner
import ElixirAiWeb.ChatMessage import ElixirAiWeb.ChatMessage
import ElixirAi.ChatRunner alias ElixirAi.ChatRunner
@topic "ai_chat" @topic "ai_chat"
def mount(_params, _session, socket) do def mount(_params, _session, socket) do
if connected?(socket), do: Phoenix.PubSub.subscribe(ElixirAi.PubSub, @topic) if connected?(socket), do: Phoenix.PubSub.subscribe(ElixirAi.PubSub, @topic)
conversation = get_conversation() conversation = ChatRunner.get_conversation()
{:ok, {:ok,
socket socket
@@ -31,13 +31,18 @@ defmodule ElixirAiWeb.ChatLive do
<%= if msg.role == :user do %> <%= if msg.role == :user do %>
<.user_message content={msg.content} /> <.user_message content={msg.content} />
<% else %> <% else %>
<.assistant_message content={msg.content} reasoning_content={msg.reasoning_content} /> <.assistant_message
content={msg.content}
reasoning_content={msg.reasoning_content}
tool_calls={Map.get(msg, :tool_calls, [])}
/>
<% end %> <% end %>
<% end %> <% end %>
<%= if @streaming_response do %> <%= if @streaming_response do %>
<.assistant_message <.streaming_assistant_message
content={@streaming_response.content} content={@streaming_response.content}
reasoning_content={@streaming_response.reasoning_content} reasoning_content={@streaming_response.reasoning_content}
tool_calls={@streaming_response.tool_calls}
/> />
<.spinner /> <.spinner />
<% end %> <% end %>
@@ -62,7 +67,7 @@ defmodule ElixirAiWeb.ChatLive do
end end
def handle_event("submit", %{"user_input" => user_input}, socket) when user_input != "" do def handle_event("submit", %{"user_input" => user_input}, socket) when user_input != "" do
ElixirAi.ChatRunner.new_user_message(user_input) ChatRunner.new_user_message(user_input)
{:noreply, assign(socket, user_input: "")} {:noreply, assign(socket, user_input: "")}
end end
@@ -97,6 +102,13 @@ defmodule ElixirAiWeb.ChatLive do
{:noreply, assign(socket, streaming_response: updated_response)} {:noreply, assign(socket, streaming_response: updated_response)}
end end
# PubSub: the ChatRunner finished executing all tool calls for the current
# turn — append the finalized assistant message and clear the stream view.
def handle_info({:tool_calls_finished, final_message}, socket) do
  updated_socket =
    socket
    |> update(:messages, fn messages -> messages ++ [final_message] end)
    |> assign(:streaming_response, nil)

  {:noreply, updated_socket}
end
def handle_info(:end_ai_response, socket) do def handle_info(:end_ai_response, socket) do
final_response = %{ final_response = %{
role: :assistant, role: :assistant,