% Start accepting connections in a separate process
spawn_link(fun() -> accept_loop(ListenerPid, KeyPair, TransportPid) end),
% Start accepting connections in a separate process (not linked to avoid crashing transport)AcceptPid = spawn(fun() -> accept_loop(ListenerPid, KeyPair, TransportPid) end),io:format("[Transport] Accept loop started (pid: ~p)~n", [AcceptPid]),
% Monitor the transport process - exit if it dieserlang:monitor(process, TransportPid),accept_loop_internal(ListenerPid, KeyPair, TransportPid).accept_loop_internal(ListenerPid, KeyPair, TransportPid) ->% Check if transport is still alivereceive{'DOWN', _Ref, process, TransportPid, DownReason} ->io:format("[Transport] Accept loop exiting - transport process died: ~p~n", [DownReason]),exit(normal)after 0 ->okend,
% Set the controlling process to the transport gen_server% so it receives {cable_transport, ConnPid, Data} messagesok = enoise_cable:controlling_process(ConnPid, TransportPid),% Get the peer address from the connection{ok, {PeerIP, PeerPort}} = enoise_cable:peername(ConnPid),PeerAddr = {PeerIP, PeerPort},io:format("[Transport] Peer address: ~p~n", [PeerAddr]),TransportPid ! {new_connection, ConnPid, PeerAddr},accept_loop(ListenerPid, KeyPair, TransportPid);{error, Reason} ->io:format("[Transport] Accept error: ~p~n", [Reason]),
% Wrap the connection setup in a try/catch to prevent crashes from killing the accept loopResult = try% Set the controlling process to the transport gen_server% so it receives {cable_transport, ConnPid, Data} messagesok = enoise_cable:controlling_process(ConnPid, TransportPid),% Get the peer address from the connection{ok, {PeerIP, PeerPort}} = enoise_cable:peername(ConnPid),PeerAddr = {PeerIP, PeerPort},io:format("[Transport] Peer address: ~p~n", [PeerAddr]),TransportPid ! {new_connection, ConnPid, PeerAddr},okcatchError:ErrorReason:Stacktrace ->io:format("[Transport] Error setting up connection ~p: ~p:~p~n Stacktrace: ~p~n",[ConnPid, Error, ErrorReason, Stacktrace]),% Close the connection on errorcatch enoise_cable:close(ConnPid),errorend,case Result ofok -> ok;error -> io:format("[Transport] Connection setup failed, continuing to accept~n")end,accept_loop_internal(ListenerPid, KeyPair, TransportPid);{error, AcceptReason} ->io:format("[Transport] Accept error: ~p~n", [AcceptReason]),
case Pid ofDb ->io:format("[Peer] Database process died: ~p - shutting down peer~n", [Reason]),{stop, {database_died, Reason}, State};TPid ->io:format("[Peer] Transport process died: ~p - shutting down peer~n", [Reason]),{stop, {transport_died, Reason}, State};EL ->io:format("[Peer] Event loop process died: ~p - shutting down peer~n", [Reason]),{stop, {event_loop_died, Reason}, State};_ ->io:format("[Peer] Linked process ~p died: ~p~n", [Pid, Reason]),{noreply, State}end;
% SPDX-FileCopyrightText: 2023 Henry Bubert
%
% SPDX-License-Identifier: LGPL-2.1-or-later

-module(caberl_sup).

-behaviour(supervisor).

-export([start_link/1]).
-export([init/1]).

%% @doc Start the top-level supervisor. `PeerArgs' is handed through
%% unchanged to the `peer' worker's `start_link/1'.
start_link(PeerArgs) ->
    supervisor:start_link(?MODULE, PeerArgs).

%% @doc Supervisor callback: one permanent `peer' worker under a
%% one_for_one strategy, at most 3 restarts within 10 seconds.
%%
%% Note: one_for_one is used for now because the peer manages the db and
%% transport itself. A future refactoring should instead:
%%   1. switch to rest_for_one,
%%   2. start the database as the first child,
%%   3. start the peer second (receiving the db pid),
%%   4. let the peer's own internal supervisor own the transport.
%% That layout would make database I/O errors take down the whole peer
%% (the correct behavior), confine transport errors to the transport
%% layer, and keep accept-loop errors out of the transport gen_server
%% (the latter already fixed in transport.erl).
init(PeerArgs) ->
    Flags = #{
        strategy => one_for_one,
        intensity => 3,   % max 3 restarts
        period => 10      % within 10 seconds
    },
    PeerSpec = #{
        id => peer,
        start => {peer, start_link, [PeerArgs]},
        restart => permanent,
        shutdown => 5000,
        type => worker,
        modules => [peer]
    },
    {ok, {Flags, [PeerSpec]}}.