Refactoring folders
Some checks failed
CI/CD Pipeline / build (push) Failing after 8s

This commit is contained in:
2026-03-25 12:05:56 -06:00
parent d857e91241
commit 0041c25f19
32 changed files with 139 additions and 78 deletions

View File

@@ -0,0 +1,128 @@
defmodule ElixirAi.ChatUtils do
  @moduledoc """
  Helpers for defining AI tools and issuing streaming chat-completion
  requests against an OpenAI-compatible provider.
  """

  require Logger

  import ElixirAi.AiUtils.StreamLineUtils

  @doc """
  Builds a tool descriptor for the chat loop.

  Accepts the required options `:name`, `:description`, `:function`,
  `:parameters` (a JSON-schema "object" map) and `:server` (the process
  that receives `{:stream, ...}` messages). The original head
  pattern-matched the keyword list positionally, so callers had to pass
  the options in exactly that order; `Keyword.fetch!/2` keeps the same
  required keys while accepting any order.

  Returns `%{name: name, definition: schema, run_function: fun}` where
  `fun.(current_message_id, tool_call_id, args)` runs the tool inside a
  linked task and sends the result back to `server` as
  `{:stream, {:tool_response, current_message_id, tool_call_id, result}}`.
  If the tool raises, `{:error, formatted_reason}` is sent instead.
  """
  def ai_tool(opts) do
    name = Keyword.fetch!(opts, :name)
    description = Keyword.fetch!(opts, :description)
    function = Keyword.fetch!(opts, :function)
    parameters = Keyword.fetch!(opts, :parameters)
    server = Keyword.fetch!(opts, :server)

    # OpenAI-style function-tool schema sent with every completion request.
    schema = %{
      "type" => "function",
      "function" => %{
        "name" => name,
        "description" => description,
        "parameters" => parameters
      }
    }

    run_function = fn current_message_id, tool_call_id, args ->
      Task.start_link(fn ->
        try do
          result = function.(args)
          send(server, {:stream, {:tool_response, current_message_id, tool_call_id, result}})
        rescue
          e ->
            # Report tool failures back to the server instead of letting
            # the linked task bring the caller down silently.
            reason = Exception.format(:error, e, __STACKTRACE__)
            Logger.error("Tool task crashed: #{reason}")

            send(
              server,
              {:stream, {:tool_response, current_message_id, tool_call_id, {:error, reason}}}
            )
        end
      end)
    end

    %{
      name: name,
      definition: schema,
      run_function: run_function
    }
  end

  @doc """
  Fires a streaming chat-completion request in a linked task.

  `provider` must expose `completions_url`, `api_token` and `model_name`;
  missing values are logged as warnings but the request is still
  attempted. Each SSE line of the response body is forwarded to `server`
  via `handle_stream_line/2`; transport errors are reported as
  `{:stream, {:ai_request_error, reason}}`.
  """
  def request_ai_response(server, messages, tools, provider, tool_choice \\ "auto") do
    Task.start_link(fn ->
      api_url = provider.completions_url
      api_key = provider.api_token
      model = provider.model_name

      # Warn (but don't abort) on missing provider config so the
      # downstream request error carries the real failure detail.
      if is_nil(api_url) or api_url == "" do
        Logger.warning("AI endpoint is empty or nil")
      end

      if is_nil(api_key) or api_key == "" do
        Logger.warning("AI token is empty or nil")
      end

      if is_nil(model) or model == "" do
        Logger.warning("AI model is empty or nil")
      end

      body = %{
        model: model,
        stream: true,
        messages: messages |> Enum.map(&api_message/1),
        tools: Enum.map(tools, & &1.definition),
        tool_choice: tool_choice
      }

      headers = [{"authorization", "Bearer #{api_key}"}]

      # NOTE(review): each chunk is split on "\n" independently; an SSE
      # event split across chunk boundaries would be mis-parsed. Confirm
      # whether the provider guarantees line-aligned chunks.
      case Req.post(api_url,
             json: body,
             headers: headers,
             into: fn {:data, data}, acc ->
               data
               |> String.split("\n")
               |> Enum.each(&handle_stream_line(server, &1))

               {:cont, acc}
             end
           ) do
        {:ok, _response} ->
          :ok

        {:error, reason} ->
          Logger.warning("AI request failed: #{inspect(reason)} for #{api_url}")
          send(server, {:stream, {:ai_request_error, reason}})
      end
    end)
  end

  @doc """
  Converts an internal message map into the provider's wire format.

  Three shapes are handled: an assistant message carrying tool calls, a
  tool-result message, and a plain role/content message.
  """
  def api_message(%{role: :assistant, tool_calls: [_ | _] = tool_calls} = msg) do
    %{
      role: "assistant",
      # Default only applies when :content is absent; an explicit nil is
      # passed through unchanged.
      content: Map.get(msg, :content, ""),
      tool_calls:
        Enum.map(tool_calls, fn call ->
          %{
            id: call.id,
            type: "function",
            function: %{
              name: call.name,
              arguments: call.arguments
            }
          }
        end)
    }
  end

  def api_message(%{role: :tool, tool_call_id: tool_call_id, content: content}) do
    %{role: "tool", tool_call_id: tool_call_id, content: content}
  end

  def api_message(%{role: role, content: content}) do
    %{role: Atom.to_string(role), content: content}
  end
end

View File

@@ -0,0 +1,158 @@
defmodule ElixirAi.AiUtils.StreamLineUtils do
  # Decodes individual SSE lines from an OpenAI-compatible streaming
  # chat-completions response and forwards them to `server` as
  # `{:stream, ...}` messages.
  #
  # Clause order matters: the string clauses peel off the "data: " prefix
  # and JSON-decode, then decoded maps re-enter handle_stream_line/2 and
  # dispatch on chunk shape.
  require Logger

  # Blank keep-alive line between SSE events: nothing to do.
  def handle_stream_line(_server, "") do
    :ok
  end

  # Terminal SSE sentinel. Intentionally a no-op: stream completion is
  # signaled via the finish_reason chunks below instead.
  def handle_stream_line(_server, "data: [DONE]") do
    # send(server, :ai_stream_done)
    :ok
  end

  # Normal SSE data line: decode the JSON payload and recurse so the map
  # clauses below can dispatch on its shape.
  def handle_stream_line(server, "data: " <> json) do
    case Jason.decode(json) do
      {:ok, body} ->
        # Logger.debug("Received AI chunk: #{inspect(body)}")
        handle_stream_line(server, body)

      other ->
        Logger.error("Failed to decode AI response chunk: #{inspect(other)}")
        :ok
    end
  end

  # First chunk of a new assistant response: delta carries the role and a
  # nil content. Notifies the server to open a new streaming response.
  def handle_stream_line(server, %{
        "choices" => [%{"delta" => %{"content" => nil, "role" => "assistant"}}],
        "id" => id
      }) do
    send(
      server,
      {:stream, {:start_new_ai_response, id}}
    )
  end

  # Final chunk of a plain-text response (finish_reason "stop").
  def handle_stream_line(
        server,
        %{
          "choices" => [%{"finish_reason" => "stop"}],
          "id" => id
        } = _msg
      ) do
    # Logger.info("Received end of AI response stream for id #{id} with message: #{inspect(msg)}")
    send(
      server,
      {:stream, {:ai_text_stream_finish, id}}
    )
  end

  # Reasoning-model "thinking" delta (reasoning_content), mid-stream.
  def handle_stream_line(server, %{
        "choices" => [
          %{
            "delta" => %{"reasoning_content" => reasoning_content},
            "finish_reason" => nil
          }
        ],
        "id" => id
      }) do
    send(
      server,
      {:stream, {:ai_reasoning_chunk, id, reasoning_content}}
    )
  end

  # Visible text delta, mid-stream. Must come after the reasoning clause:
  # a chunk with both keys would otherwise be treated as plain text.
  def handle_stream_line(server, %{
        "choices" => [
          %{
            "delta" => %{"content" => reasoning_content},
            "finish_reason" => nil
          }
        ],
        "id" => id
      }) do
    send(
      server,
      {:stream, {:ai_text_chunk, id, reasoning_content}}
    )
  end

  # Tool-call deltas. A chunk carrying id/type/name opens a call; a chunk
  # with only index + argument fragment continues one. Items are matched
  # per-element so a single chunk may mix both shapes.
  def handle_stream_line(server, %{
        "choices" => [
          %{
            "delta" => %{
              "tool_calls" => tool_calls
            },
            "finish_reason" => nil
          }
        ],
        "id" => id
      })
      when is_list(tool_calls) do
    Enum.each(tool_calls, fn
      %{
        "id" => tool_call_id,
        "index" => tool_index,
        "type" => "function",
        "function" => %{"name" => tool_name, "arguments" => tool_args_start}
      } ->
        # Logger.info("Received tool call start for tool #{tool_name}")
        send(
          server,
          {:stream,
           {:ai_tool_call_start, id, {tool_name, tool_args_start, tool_index, tool_call_id}}}
        )

      %{"index" => tool_index, "function" => %{"arguments" => tool_args_diff}} ->
        # Logger.info("Received tool call middle for index #{tool_index}")
        send(server, {:stream, {:ai_tool_call_middle, id, {tool_args_diff, tool_index}}})

      other ->
        Logger.warning("Unmatched tool call item: #{inspect(other)}")
    end)
  end

  # Final chunk of a tool-call turn (finish_reason "tool_calls").
  def handle_stream_line(
        server,
        %{
          "choices" => [%{"finish_reason" => "tool_calls"}],
          "id" => id
        }
      ) do
    # Logger.info("Received tool_calls_finished with message: #{inspect(message)}")
    send(server, {:stream, {:ai_tool_call_end, id}})
  end

  # Provider-reported error object inside the stream: log only, do not
  # notify the server (transport errors are reported elsewhere).
  def handle_stream_line(_server, %{"error" => error_info}) do
    Logger.error("Received error from AI stream: #{inspect(error_info)}")
    :ok
  end

  # Raw proxy error text (not SSE-framed): surface it as a request error.
  def handle_stream_line(server, "proxy error" <> _ = error) when is_binary(error) do
    Logger.error("Proxy error in AI stream: #{error}")
    send(server, {:stream, {:ai_request_error, error}})
  end

  # Any other bare string: try to decode it as JSON (some providers send
  # unprefixed JSON lines); otherwise log and drop it.
  def handle_stream_line(server, json) when is_binary(json) do
    case Jason.decode(json) do
      {:ok, body} ->
        handle_stream_line(server, body)

      _ ->
        Logger.warning("Received unmatched stream line: #{inspect(json)}")
        :ok
    end
  end

  # Catch-all for decoded maps with unrecognized shapes.
  def handle_stream_line(_server, unmatched_message) do
    Logger.warning("Received unmatched stream line: #{inspect(unmatched_message)}")
    :ok
  end
end

View File

@@ -68,6 +68,7 @@ defmodule ElixirAi.ChatRunner do
def init(name) do
Phoenix.PubSub.subscribe(ElixirAi.PubSub, conversation_message_topic(name))
:pg.join(ElixirAi.RunnerPG, {:runner, name}, self())
messages =
case Conversation.find_id(name) do

View File

@@ -0,0 +1,16 @@
defmodule ElixirAi.RunnerPG do
  @moduledoc """
  Named :pg scope used to track ChatRunner processes cluster-wide.

  Every ChatRunner joins `{:runner, name}` during init; :pg keeps the
  membership in sync and drops dead members on its own, so no extra
  cleanup is required.
  """

  # Child spec for a supervision tree: starts :pg with this module's
  # name as the scope.
  def child_spec(_opts) do
    scope = __MODULE__

    %{
      id: scope,
      type: :worker,
      restart: :permanent,
      start: {:pg, :start_link, [scope]}
    }
  end
end

View File

@@ -1,183 +0,0 @@
defmodule ElixirAi.ChatRunner.StreamHandler do
  # Handles {:stream, ...} events inside a ChatRunner GenServer: each
  # handle/2 clause takes a stream event plus the runner state and
  # returns a {:noreply, state} GenServer tuple.
  #
  # State fields used here: :name, :streaming_response (in-progress
  # accumulator or nil), :messages, :pending_tool_calls, plus tool lists
  # and provider config. broadcast_ui/2, store_message/2 and
  # messages_with_system_prompt/2 come from OutboundHelpers (not visible
  # in this file).
  require Logger
  import ElixirAi.ChatRunner.OutboundHelpers

  # New AI response began: seed an empty accumulator and tell the UI.
  def handle({:start_new_ai_response, id}, state) do
    starting_response = %{id: id, reasoning_content: "", content: "", tool_calls: []}
    broadcast_ui(state.name, {:start_ai_response_stream, starting_response})
    {:noreply, %{state | streaming_response: starting_response}}
  end

  # Reasoning delta: forward to the UI and append to the accumulator.
  def handle({:ai_reasoning_chunk, _id, reasoning_content}, state) do
    broadcast_ui(state.name, {:reasoning_chunk_content, reasoning_content})

    {:noreply,
     %{
       state
       | streaming_response: %{
           state.streaming_response
           | reasoning_content: state.streaming_response.reasoning_content <> reasoning_content
         }
     }}
  end

  # Visible-text delta: forward to the UI and append to the accumulator.
  def handle({:ai_text_chunk, _id, text_content}, state) do
    broadcast_ui(state.name, {:text_chunk_content, text_content})

    {:noreply,
     %{
       state
       | streaming_response: %{
           state.streaming_response
           | content: state.streaming_response.content <> text_content
         }
     }}
  end

  # Stream finished with plain text: freeze the accumulator into an
  # assistant message, persist it, append to history, clear the
  # accumulator.
  def handle({:ai_text_stream_finish, _id}, state) do
    Logger.info(
      "AI stream finished for id #{state.streaming_response.id}, broadcasting end of AI response"
    )

    final_message = %{
      role: :assistant,
      content: state.streaming_response.content,
      reasoning_content: state.streaming_response.reasoning_content,
      tool_calls: state.streaming_response.tool_calls
    }

    broadcast_ui(state.name, {:end_ai_response, final_message})
    store_message(state.name, final_message)

    {:noreply,
     %{
       state
       | streaming_response: nil,
         messages: state.messages ++ [final_message]
     }}
  end

  # Tool call opened: record name/initial args/index/id in the
  # accumulator. Index is used to match later argument fragments.
  def handle(
        {:ai_tool_call_start, _id, {tool_name, tool_args_start, tool_index, tool_call_id}},
        state
      ) do
    Logger.info("AI started tool call #{tool_name}")

    new_streaming_response = %{
      state.streaming_response
      | tool_calls:
          state.streaming_response.tool_calls ++
            [
              %{
                name: tool_name,
                arguments: tool_args_start,
                index: tool_index,
                id: tool_call_id
              }
            ]
    }

    {:noreply, %{state | streaming_response: new_streaming_response}}
  end

  # Argument fragment: append to the matching tool call (pinned index);
  # all other calls pass through unchanged.
  def handle({:ai_tool_call_middle, _id, {tool_args_diff, tool_index}}, state) do
    new_streaming_response = %{
      state.streaming_response
      | tool_calls:
          Enum.map(state.streaming_response.tool_calls, fn
            %{arguments: existing_args, index: ^tool_index} = tool_call ->
              %{tool_call | arguments: existing_args <> tool_args_diff}

            other ->
              other
          end)
    }

    {:noreply, %{state | streaming_response: new_streaming_response}}
  end

  # Tool-call turn complete: dispatch each accumulated call. Calls whose
  # JSON args decode and whose tool definition is found are started
  # asynchronously (their ids go into pending); failures become immediate
  # :tool error messages.
  def handle({:ai_tool_call_end, id}, state) do
    tool_request_message = %{
      role: :assistant,
      content: state.streaming_response.content,
      reasoning_content: state.streaming_response.reasoning_content,
      tool_calls: state.streaming_response.tool_calls
    }

    broadcast_ui(state.name, {:tool_request_message, tool_request_message})

    {failed_call_messages, pending_call_ids} =
      Enum.reduce(state.streaming_response.tool_calls, {[], []}, fn tool_call,
                                                                    {failed, pending} ->
        with {:ok, decoded_args} <- Jason.decode(tool_call.arguments),
             tool when not is_nil(tool) <-
               Enum.find(state.server_tools ++ state.liveview_tools ++ state.page_tools, fn t ->
                 t.name == tool_call.name
               end) do
          # run_function replies later via {:tool_response, ...}.
          tool.run_function.(id, tool_call.id, decoded_args)
          {failed, [tool_call.id | pending]}
        else
          {:error, e} ->
            error_msg = "Failed to decode tool arguments: #{inspect(e)}"
            Logger.error("Tool call #{tool_call.name} failed: #{error_msg}")
            {[%{role: :tool, content: error_msg, tool_call_id: tool_call.id} | failed], pending}

          nil ->
            error_msg = "No tool definition found for #{tool_call.name}"
            Logger.error(error_msg)
            {[%{role: :tool, content: error_msg, tool_call_id: tool_call.id} | failed], pending}
        end
      end)

    # NOTE(review): a list is passed here while other call sites pass a
    # single message — confirm store_message/2 accepts both shapes.
    store_message(state.name, [tool_request_message] ++ failed_call_messages)

    {:noreply,
     %{
       state
       | messages: state.messages ++ [tool_request_message] ++ failed_call_messages,
         pending_tool_calls: pending_call_ids
     }}
  end

  # One async tool finished: persist its result, drop it from pending;
  # when pending empties, re-request a completion with the updated
  # history so the model can use the tool output.
  def handle({:tool_response, _id, tool_call_id, result}, state) do
    new_message = %{role: :tool, content: inspect(result), tool_call_id: tool_call_id}
    broadcast_ui(state.name, {:one_tool_finished, new_message})
    store_message(state.name, new_message)

    new_pending_tool_calls =
      Enum.filter(state.pending_tool_calls, fn id -> id != tool_call_id end)

    # Keep the accumulator alive until the last pending call resolves.
    new_streaming_response =
      case new_pending_tool_calls do
        [] -> nil
        _ -> state.streaming_response
      end

    if new_pending_tool_calls == [] do
      broadcast_ui(state.name, :tool_calls_finished)

      ElixirAi.ChatUtils.request_ai_response(
        self(),
        messages_with_system_prompt(state.messages ++ [new_message], state.system_prompt),
        state.server_tools ++ state.liveview_tools ++ state.page_tools,
        state.provider,
        state.tool_choice
      )
    end

    {:noreply,
     %{
       state
       | pending_tool_calls: new_pending_tool_calls,
         streaming_response: new_streaming_response,
         messages: state.messages ++ [new_message]
     }}
  end

  # Request-level failure: notify the UI and reset streaming state so a
  # new request can start cleanly.
  def handle({:ai_request_error, reason}, state) do
    Logger.error("AI request error: #{inspect(reason)}")
    broadcast_ui(state.name, {:ai_request_error, reason})
    {:noreply, %{state | streaming_response: nil, pending_tool_calls: []}}
  end
end

View File

@@ -0,0 +1,14 @@
defmodule ElixirAi.SystemPrompts do
  @moduledoc """
  Per-category system prompts prepended to conversations.
  """

  # Known categories. A nil value means the category deliberately runs
  # without a system prompt.
  @prompts %{
    "voice" =>
      "You are responding to voice-transcribed input. Keep replies concise and conversational. The user spoke aloud and their message was transcribed, so minor transcription errors may be present.",
    "user-web" => nil
  }

  @doc """
  Returns a `%{role: :system, content: prompt}` message for `category`,
  or `nil` when the category is unknown or has no prompt configured.
  """
  def for_category(category) do
    case @prompts do
      %{^category => prompt} when is_binary(prompt) ->
        %{role: :system, content: prompt}

      _ ->
        nil
    end
  end
end