Giter Site home page Giter Site logo

ranch's People

Contributors

archaelus avatar elzor avatar essen avatar ferd avatar ferigis avatar fishcakez avatar g-andrade avatar getong avatar joaohf avatar joshrotenberg avatar juhlig avatar keynslug avatar kianmeng avatar kimshrier avatar klaustrainer avatar kuroneer avatar ljzn avatar lukebakken avatar maria-12648430 avatar matrixise avatar michaelklishin avatar mmzeeman avatar mworrell avatar niamtokik avatar olleolleolle avatar suexcxine avatar vdimir avatar vicbaz avatar yjh0502 avatar zuiderkwast avatar

Stargazers

 avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar

Watchers

 avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar  avatar

ranch's Issues

gen_server usage

gen_server usage should be added to The Guide:

To use ranch with gen_server, trigger a timeout from your gen_server's init callback...

%% ranch_protocol entry point: spawn a gen_server for the accepted connection.
%% NOTE: Opts here are the *protocol* options handed over by ranch. They must
%% not be passed as the gen_server start options (third argument of
%% gen_server:start_link/3), which only accepts [{timeout,..}|{debug,..}|...]
%% and would fail for typical protocol options. This example's init/1 does not
%% use them, so they are simply ignored.
start_link(ListenerPid, Socket, Transport, _Opts) ->
    gen_server:start_link(?MODULE, [ListenerPid, Socket, Transport], []).

...

%% gen_server init: store the connection details in the state record and
%% return a timeout of 0, so a 'timeout' message is delivered right after
%% init/1 returns. ranch:accept_ack/1 is then called from handle_info/2;
%% calling it inside init/1 would deadlock, because the listener waits for
%% start_link/4 to return before completing the handshake.
init([ListenerPid, Socket, Transport]) ->
    {ok, #state{socket = Socket, transport = Transport, listener_pid = ListenerPid, buffer = []}, 0}.

And match it in handle_info, where you can accept the connection:

%% Triggered immediately by the 0 timeout returned from init/1: this is the
%% first safe point to acknowledge the connection to ranch.
handle_info(timeout, State = #state{listener_pid = ListenerPid, socket = Socket, transport = Transport}) ->
    ok = ranch:accept_ack(ListenerPid), % Accept connection
    Transport:setopts(Socket, [{active, true}]), % Set the state to active to have further messages delivered to your gen_server's mailbox
    {noreply, State};
%% Remaining handle_info/2 clauses (tcp data, tcp_closed, ...) elided in the
%% original example.

Add option for child spec Shutdown property

Currently we basically do something similar to brutal_kill. We need to be more supervisor-like in this behavior, and make it configurable, so that we can have a timeout also. Basically a per-listener Shutdown property.

Improve documentation on usage of ranch with gen_server as protocol handler

Here is an improved example for the documentation in the file protocols.md:

%% Feel free to use, reuse and abuse the code in this file.

%% An echo protocol handler for ranch implemented as a gen_server.
%% Demonstrates two ways to combine ranch with gen_server:
%%   A) return a 0 timeout from init/1 and finish the handshake in
%%      handle_info(timeout, ...), or
%%   B) bypass gen_server:start_link/3 with proc_lib/enter_loop.
-module(my_echo_protocol).
-behaviour(ranch_protocol).
-behaviour(gen_server).

%% ranch_protocol callback.
-export([start_link/4]).

%% gen_server callbacks.
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
     terminate/2, code_change/3]).

-record(state, {ref, socket, transport, opts}).

-define(SERVER, ?MODULE).

%% Called by ranch for every accepted connection.
start_link(Ref, Socket, Transport, Opts) ->
    gen_server:start_link(?MODULE, [Ref, Socket, Transport, Opts], []).

%% Alternative A:
init([Ref, Socket, Transport, Opts]) ->
    %% The 0 in the tuple below sets the gen_server timeout to 0, so a
    %% 'timeout' message is delivered right after init/1 returns. The
    %% ranch handshake (accept_ack) is then completed in handle_info/2;
    %% doing it here would deadlock, because ranch waits for
    %% start_link/4 to return before handing over the socket.
    {ok, #state{ref=Ref, socket=Socket, transport=Transport, opts=Opts}, 0}.

%% Alternative B: Uncomment this and comment alternative A
%init([Ref, Socket, Transport, Opts=[]]) ->
%    ok = proc_lib:init_ack({ok, self()}),
%    %% Perform any required state initialization here.
%    ok = ranch:accept_ack(Ref),
%    ok = Transport:setopts(Socket, [{active, once}]),
%    gen_server:enter_loop(?MODULE, [], #state{ref=Ref, socket=Socket, transport=Transport, opts=Opts}).

handle_call(_Request, _From, State) ->
    {reply, ok, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.

%% The following timeout handling is only used in conjunction with alternative A.
handle_info(timeout, State=#state{ref=Ref, socket=Socket, transport=Transport}) ->
    ok = ranch:accept_ack(Ref),
    ok = Transport:setopts(Socket, [{active, once}]),
    {noreply, State};
handle_info({tcp, Socket, Data}, State=#state{socket=Socket, transport=Transport}) ->
    %% Re-arm {active, once} and echo the reversed line back.
    Transport:setopts(Socket, [{active, once}]),
    Transport:send(Socket, reverse_binary(Data)),
    {noreply, State};
handle_info({tcp_closed, _Socket}, State) ->
    {stop, normal, State};
%% BUGFIX: this clause previously ended the function with '.', which made
%% the catch-all clause below a second definition of handle_info/2 and a
%% compile error. It must end with ';'.
handle_info({tcp_error, _Socket, Reason}, State) ->
    {stop, Reason, State};
handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% Reverse the payload of a line, preserving the trailing "\r\n".
%% Assumes the input ends with CRLF; binaries shorter than 2 bytes are
%% returned unchanged instead of crashing with badarg in binary:part/2.
reverse_binary(B) when is_binary(B), byte_size(B) >= 2 ->
    [list_to_binary(lists:reverse(binary_to_list(
        binary:part(B, {0, byte_size(B)-2})
    ))), "\r\n"];
reverse_binary(B) when is_binary(B) ->
    B.

Problem when connection count reach max_conn

Hi, @essen
I'm using ranch in my web application. I found a problem when I test my web server.

When the connection count reaches max_conn, ranch will stop all acceptors. Normally, this works fine, but in some extreme cases where incoming connections arrive much faster than they can be processed, many connections will be queued in the kernel TCP connection queue. If the kernel 'somaxconn' param is given a large value, the server will crash with an out-of-memory error.

So, I think it may be a good idea to close the new incoming socket when the connection count reaches max_conn. This feature could be added as a configuration option.

In my case, my server will hold incoming connection a long time, so when connection count reaches max_conn, new connection will be queued in kernel.

The Ranch application with SSL listener fails after calling init:stop/0 in Erlang/OTP 17.1 and 17.3

$ git clone https://github.com/ninenines/cowboy
$ cd cowboy
$ git checkout 1.0.0
$ cd examples/ssl_hello_world
$ make
$ ./_rel/ssl_hello_world_example/bin/ssl_hello_world_example console

Erlang/OTP 17 [erts-6.2] [source-5c974be] [64-bit] [smp:4:4] [async-threads:10] [hipe] [kernel-poll:false]

Eshell V6.2  (abort with ^G)
([email protected])1> application:which_applications().
[{ssl_hello_world,"Cowboy Hello World example with SSL.",
                  "1"},
 {ssl,"Erlang/OTP SSL application","5.3.6"},
 {cowboy,"Small, fast, modular HTTP server.","1.0.0"},
 {cowlib,"Support library for manipulating Web protocols.",
         "1.0.0"},
 {public_key,"Public key infrastructure","0.22.1"},
 {crypto,"CRYPTO","3.4.1"},
 {ranch,"Socket acceptor pool for TCP protocols.","1.0.0"},
 {asn1,"The Erlang ASN1 compiler version 3.0.2","3.0.2"},
 {stdlib,"ERTS  CXC 138 10","2.2"},
 {kernel,"ERTS  CXC 138 10","3.0.3"}]
([email protected])2> init:stop().
ok
([email protected])3>
=ERROR REPORT==== 29-Sep-2014::16:49:15 ===
Error in process <0.69.0> on node '[email protected]' with exit value: {{case_clause,{error,closed}},[{ranch_acceptor,loop,3,[{file,"src/ranch_acceptor.erl"},{line,28}]}]}
...
=ERROR REPORT==== 29-Sep-2014::16:49:15 ===
Error in process <0.164.0> on node '[email protected]' with exit value: {{case_clause,{error,closed}},[{ranch_acceptor,loop,3,[{file,"src/ranch_acceptor.erl"},{line,28}]}]}

The listener socket has been closed. The ranch_acceptors_sup shuts down the ranch application because of the maximum number of restarts. This forces all other applications to be also terminated. But the entire Erlang node remains running.

What is the purpose of this guard? As mentioned in the erlang-questions mailing list, the listener socket can be closed before the ranch application has finished its shutdown. And this behaviour is considered normal.

TCP options

I may be asking a stupid question, but I read the guide and could not find the answer.

How do I specify these kind of options for the TCP listen stuff on ranch:

gen_tcp:listen(Port, [binary, {packet, 4}, {reuseaddr, true}, {backlog, 128}]),

The server always throws the same exception?

2014-09-11 00:00:27.304 [error] <0.1091.0> Ranch listener https had connection process started with cowboy_protocol:start_link/4 at <0.27049.9> exit with reason: {closed,[{ranch_ssl,accept_ack,2,[{file,"src/ranch_ssl.erl"},{line,115}]},{cowboy_protocol,init,4,[{file,"src/cowboy_protocol.erl"},{line,91}]}]}

2014-09-11 00:00:27.304 [error] emulator Error in process <0.27049.9> on node '[email protected]' with exit value: {closed,[{ranch_ssl,accept_ack,2,[{file,"src/ranch_ssl.erl"},{line,115}]},{cowboy_protocol,init,4,[{file,"src/cowboy_protocol.erl"},{line,91}]}]}

Some API for detecting bind errors

When I start ranch acceptor, there may be errors like {error, eaccess} or {error, eaddrinuse}.

There is no way to catch this error in upper application and make proper user request.

I think it would be good to have some API like:

case ranch:start(...) of
  {ok, _} -> ok;
  {error, eaccess} -> error_logger:error_msg("Bind to privileged port");
  {error, eaddrinuse} -> error_logger:error_msg("Port is used")
end 

ranch_listener_sup fails to restart ranch_listener

I tried chaos_monkey on ranch.
Eventually the chaos_monkey killed ranch_listener.
And I got a supervisor report by ranch_listener_sup followed by a crash report saying,

{exit, {{badmatch,false},
        [{ranch_server,insert_listener,2,
                              [{file,"src/ranch_server.erl"},{line,57},

Guess the line true = ets:insert_new(...) can be changed to true = ets:insert(...) from ranch_server:insert_listener/2, or there must be a ranch_server:remove_listener or so defined and be called from ranch_lister:terminate/2.

Ranch 2

I believe these are the main improvements that should be brought to Ranch 2:

  • Merge acceptors with connection supervisor: #110
  • Better ssl_accept support (I will have to dig up suggestions, I believe @asabil had something good in mind.)
  • Graceful and brutal shutdown of connections.
  • Restart a listener with new options without service interruption.
  • Handle cases where matching on the socket is not possible: https://github.com/ninenines/cowboy/pull/912/files#r44168355
  • Release upgrades

{error, overload} observed in ranch_acceptor

supervisor:start_child/2 may return {error, Reason}.
Experienced many such phenomena in ranch_acceptor:loop/7 under frequent connect/disconnect,
which caused supervisor down due to restart policy set as 10, 10

TCP keepalive

Hello Loïc,

Any reason tcp keepalive are not allowed in ranch_tcp ?

Stop filtering ssl options, or allow all valid ssl options

Related to #34, ranch_ssl currently requires certfile as a transport option, but this is only one way that the Erlang SSL library can be configured for certs.

I am trying to programmatically generate certs for a ranch/cowboy based application and use the ssl cert, key, cacerts ssl options, but ranch_ssl both tests for certfile which I don't pass, and filters out the mentioned options.

I can write a patch for this if required - do you want one that completely relaxes the option validation, and filtering, or do you want a patch that specifically tests for cert or certfile, and additionally allows cert, key, cacerts through the option filtering?

Add the ability to stop listening, without killing existing connections

Using ranch, is there any way to stop the listener (and acceptors)
without dropping the existing connections?

I ask because I'd like to start another instance of my server on the
same box and have the old instance continue to handle its existing
connections for a while.

It looks to me that if I call ranch:stop_listener, it'll kill the
ranch_listener_sup, which will also kill the ranch_conns_sup (and the
existing connections).

If I manually do a supervisor:terminate_child(ListenerPid,
ranch_acceptors_sup), the acceptors go away, but the socket is still
open, which means that I can't start another instance of my server.

Scaling ranch across servers

Hello @essen,

A friend of mine just introduced me to ranch, it seems to be a good first step resolving my use-case.

Let me add a little context. My use case is as follows: I currently have a server that needs to handle a growing number of persistent TCP connections.
Is ranch able to scale across servers? Do you know any projects/patterns (not necessarily in Erlang) that are able to do so?

The kind of pattern I'm looking for in ranch would be to have a kind of "router" that can forward a client to the right ranch server that maintains the TCP connection. Is it possible with ranch?

Support Server Name Indication (SNI)

SNI is a TLS extension which allows clients to indicate the hostname while handshaking, making it possible for servers to present multiple certificates on a single IP address and port pair.
Erlang's ssl module has added support for SNI, via the ssl:ssloption() server_name_indication.
But I didn't see similar options in module ranch_ssl.
Please add support to it.
Thanks.

compile error

src/ranch_tcp.erl:21: behaviour ranch_transport undefined

Starting/stopping listeners on a single port in a tight loop crashes ranch_listener_sup

This is kind of corner-case behavior, but thought you might want to know.

Observed on current master (c1d0c45) and tag 0.8.3, running R15B03.

Starting and then immediately stopping ranch listeners seems to cause a crash (I know the modules make no sense, it's just there to pass the code:ensure_loaded/1):

> [
    begin
        {ok, _} = ranch:start_listener(foo, 100,
            ranch_tcp, [{port, 8080}], ranch_tcp , []),
        ok = ranch:stop_listener(foo),
        io:format("~p~n", [N])
    end ||

    N <- lists:seq(1, 100)
].

1
2
3
<..>
21
** exception error: no match of right hand side value
{error,{shutdown,{child,undefined, {ranch_listener_sup,foo},
    {ranch_listener_sup,start_link,
    [foo,100,ranch_tcp,[{port,8080}],ranch_tcp,[]]},
        permanent,infinity,supervisor, [ranch_listener_sup]}}}

The key to reproducing this is a fairly large pool size and a single port, which might mean this is something to do with the OS not being happy about binding sockets so intensively. So hopefully you can reproduce it.

I bumped into this because I was running lots of small tests where cowboy was stopped and started multiple times in each of them, and I had nondeterministic failures.

ranch_server:count_connections/1 results in a negative number

I'm using Cowboy 1.0.1/Ranch 1.0.0 inside ejabberd on R16B02 to handle websocket connections. One issue I've noticed is this:

(ejabberd@localhost)4> ranch_server:count_connections(ejabberd_sockjs_https).
-3250
(ejabberd@localhost)5> sys:get_status(erlang:list_to_pid("<0.608.0>")).      
{status,<0.608.0>,
        {module,ranch_conns_sup},
        [[{<0.21963.15>,true},
          {<0.15373.18>,true},
          {<0.30534.17>,true},
          {<0.24334.18>,true},
          {<0.4224.13>,true},
          {<0.9254.17>,true},
          {<0.5404.16>,true},
          {<0.26013.18>,true},
          {<0.30583.11>,true},
          {<0.24617.16>,true},
          {<0.1497.0>,true},
          {<0.18870.10>,true},
          {<0.23083.9>,true},
          {<0.26760.6>,true},
          {<0.3010.0>,true},
          {<0.27421.10>,true},
          {<0.26321.13>,true},
          {<0.17893.18>,true},
          {<0.20894.2>,true},
          {<0.7611.11>,true},
          {<0.23505.4>,true},
          {<0.16921.8>,true},
          {<0.32702.10>,...},
          {...}|...],
         running,<0.605.0>,[],
         {{state,<0.605.0>,ejabberd_sockjs_https,worker,5000,
                 ranch_ssl,cowboy_protocol,
                 [{env,[{dispatch,[{'_',[],[{...}]}]}]}],
                 5000,1024},
          -3255,680,[]}]}

I assume the mechanism that's used to keep track of active connections is failing somewhere.

"missing" max_connections in ranch_tcp:opts()?

I'm having Dialyzer 'non local return' warning in my app when I'm calling cowboy:start_http/4 with max_connections key val in transport options.

start_http/4 calls ranch:start_listener/6 where TransOpts is being passed through to newly spawned child ranch_listener_sup under ranch_sup[1]. ranch_listener_sup passed it along to ranch_acceptors_sup[2] which in turn calls Transport:listen[3] withTransOpts. in cowboy:start_http/4 case it's ranch_tcp, thus it expects options to be matching types in ranch_tcp:opts() which is missing max_connections.

From what I can see, ranch_tcp doesn't care about max_connections and that property is only needed to update ranch_server[4] from ranch_listener_sup.

I see two options here; either adding the max_connections to the ranch_tcp:opts() which would be 'wrong' since it's not used in ranch_tcp or filter max_connections out in ranch_listener_sup before it passes it to ranch_acceptors_sup.

What do you think?

[1]

ranch/src/ranch.erl

Lines 85 to 86 in 02ff653

{{ranch_listener_sup, Ref}, {ranch_listener_sup, start_link, [
Ref, NbAcceptors, Transport, TransOpts, Protocol, ProtoOpts

[2]
{ranch_acceptors_sup, {ranch_acceptors_sup, start_link,

[3]
{ok, Socket} = Transport:listen(TransOpts),

[4]
MaxConns = proplists:get_value(max_connections, TransOpts, 1024),
ranch_server:set_new_listener_opts(Ref, MaxConns, ProtoOpts),

Send Timeout not work

My settings
ranch:start_listener(tcp_reverse, 10,
ranch_tcp, [{port, 5555},{active, true}, {packet, 0}, {keepalive,
true}, {reuseaddr, true},{nodelay, true},{send_timeout, 360000}, {send_timeout_close, true},{backlog, 4096}], reverse_protocol, []),

No matter what value I set for send_timeout, the tcp_error still only arrives after about 16 minutes:
{tcp_error,#Port<0.420>,etimedout}

Please tell me how to set up, so I just learned about the loss of Internet connection at the client

Allow using more than one connection supervisor

Currently accepting a socket involves the following steps:

  • acceptor accepts
  • acceptor sets controlling_process to supervisor
  • send socket to supervisor
  • supervisor creates connection process
  • supervisor sets controlling_process to connection process
  • connection process acks to know it can start reading the socket

What if, instead, the supervisor was also the one accepting?

  • supervisor accepts
  • supervisor creates connection process
  • supervisor sets controlling_process to connection process
  • connection process acks to know it can start reading the socket

We could also do this:

  • supervisor preemptively creates connection process
  • supervisor accepts
  • supervisor sets controlling_process to connection process
  • connection process receives socket and continues

Regardless of the solution chosen, this means that we end up with more than one supervisor for all connections, which is also something that was desirable, as this gets rid of the unique supervisor bottleneck (ie when we have 1 supervisor handling million of connections). This however means that limits need to be per supervisor (or at least to divide limits by number of supervisors or something).

Thoughts?

Questions about TLS concurrent performance

Hello,

this is not actually an issue but 4 open questions. I'd like to know more about ranch TLS connections performance in a scaling context.
1)To my understanding ranch uses OTP ssl:listen/2 and I don't know how OTP implements its ssl app. Is this full pure Erlang or C-NIFed or otherwise upspeeded ?
2)Can somebody share some numbers about TLS concurrency with ranch say in some banking/broker or similar "full httpS" service ?
3)Has anybody tried to benchmark OTP/ranch/cowboy TLS vs some specialized TLS offloading proxy like https://github.com/bumptech/stud ?
4)And (in such a last case) how have stud & ranch/cowboy been connected together ? I mean in order to keep the originating client IP forwarded ?

Thanks in advance for experience sharing. Have fun all.

maybe_wait in ranch_acceptor's loop

Now the code is using erlang:yield() and comparing MaxConns with the current connection numbers in the ETS table constantly. Would it consume more CPU than necessary?

I think we can create a new process for ''pending'' acceptors registered. When some connections are released, this process will be informed and it will select one ''pending'' acceptor to reinit itself.

FTP example?

I've been following the FTP example & think it would be nice if this could be added to the examples as there is only Echo right now.

Also:
in the FTP example it says
"The attentive reader will also take note that in the case of text- based protocols where commands are separated by line breaks, you can set an option using Transport:setopts/2 and have all the buffering done for you for free by Erlang itself."

I must not be being attentive because I couldn't figure this out. If I'm implementing an RFC text protocol and I'm receiving strings, e.g. like "415 X\r\n" and "LST \r\n", how do I set the Transport:setopts/2 so buffering is done automatically?

Lots of connections hanging in CLOSE_WAIT

Hi,
We've recently starting experiencing lots of connections hanging in CLOSE_WAIT.

These connection are HTTP 1.1 and most of them should be long-live (keep-alive).

netstat -tn | grep 38.71.**.** | awk '{print $6}' | sort | uniq -c | sort -n
     25 LAST_ACK
   1724 SYN_RECV
   3030 ESTABLISHED
   6589 TIME_WAIT
  56304 CLOSE_WAIT

We were able to mitigate this issue by adding send_timeout and send_timeout_close to the gen_tcp socket options.

lpgauth@8fdce34

It would be nice to be able to configure these in Ranch/Cowboy and possibly adding them by default.

Setting max connections causes problems when switching between numbers and infinity

There is a race condition that can cause acceptors to crash after using set_max_connections/2, due to an attempt to update an element on a non-existing ets object (deleted by the listener).

After switching from integers to infinity the listener gen_server may attempt to write to the (non existing) ets object and crash. This occurs when rm_diff =0 and a monitored process (from before the switch) dies.

It is possible there are other issues involved here too.

Edit: This should be solved when custom supervisor implemented.

Provide access for setting options in ranch_ssl:accept_ack/2

I needed a way of dynamically changing the SSL certificate for a listener WITHOUT having to restart the listener again.

I did the following "hack" where I wrapped ranch_ssl in my own module and switched to use ssl_accept/3 instead of ssl_accept/2 in accept_ack/2.

It would be nice to be able to set these options directly from ranch_ssl instead of having to wrap the module.

Maybe this could also mean that ranch_ssl doesn't HAVE TO require a cert or certfile to be set during initialisation.

https://gist.github.com/ahf/bfb80dbf2801d78b7826#file-gistfile1-erl-L52-L78

Handle {error, emfile} in ranch_acceptor.erl

Ranch consumes a lot of CPU when you hit the limit of open files.

To reproduce with the cowboy/websocket example application:

ulimit -n 100 && ./start.sh

And connect a lot of clients.

I discovered the issue by accident when starting ranch in a shell with -sname (increases the required number of open files). dtrace'd when i saw the CPU go through the roof and noticed that multiple calls to accept were made in rapid succession.

Please let me know if I can provide more information.

Leak in ranch_conns_sup

In ranch_conns_sup.erl line 110 in version 0.8.3,
It calls Protocol:start_link/4.
Suppose the Protocol be a gen_server and Protocol:init/1 returned {stop, Reason}.
Then it matches in line 125 of ranch_conns_sup,

_ ->
To ! self(),
loop(State, CurConns2, NbChildren + 1, [To|Sleepers])

It doesn't close the socket.
I found phenomena ranch_conns_sup process stuck with a large amount of {tcp_closed, ...} messages in it's message queue.

examples/tcp_echo crashes on startup.

I am unable to run the example, finding that it crashes when the listener is started in tcp_echo_app. From a clean tree at commit 53be20d, I do:

> pwd
/tmp/ranch/examples/tcp_echo 

> git log -1
commit 53be20d25f94095ec9b44c5bd03cf158dd5a6820
Author: Loïc Hoguin <[email protected]>
Date:   Thu May 16 19:07:54 2013 +0200

    Update Ranch to 0.8.3

> rebar version
rebar 2.1.0-pre R15B02 20130414_082239 git 2.1.0-pre-46-g78fa8fc

> rebar get-deps compile
==> ranch (get-deps)
==> tcp_echo (get-deps)
==> ranch (compile)
Compiled src/ranch_transport.erl
Compiled src/ranch_protocol.erl
Compiled src/ranch_sup.erl
Compiled src/ranch_tcp.erl
Compiled src/ranch_listener_sup.erl
Compiled src/ranch_ssl.erl
Compiled src/ranch_app.erl
Compiled src/ranch_server.erl
Compiled src/ranch_acceptors_sup.erl
Compiled src/ranch_acceptor.erl
Compiled src/ranch_conns_sup.erl
Compiled src/ranch.erl
==> tcp_echo (compile)
Compiled src/tcp_echo.erl
Compiled src/tcp_echo_sup.erl
Compiled src/tcp_echo_app.erl
Compiled src/echo_protocol.erl

> ./start.sh 
Erlang R15B03 (erts-5.9.3) [source] [64-bit] [smp:8:8] [async-threads:0] [hipe] [kernel-poll:false]

Eshell V5.9.3  (abort with ^G)
1> {"init terminating in do_boot",{{badmatch,{error,{bad_return,{{tcp_echo_app,start,[normal,[]]},{'EXIT',{{badmatch,{error,{shutdown,{child,undefined,{ranch_listener_sup,tcp_echo},{ranch_listener_sup,start_link,[tcp_echo,1,ranch_tcp,[{port,5555}],echo_protocol,[]]},permanent,5000,supervisor,[ranch_listener_sup]}}}},[{tcp_echo_app,start,2,[{file,"src/tcp_echo_app.erl"},{line,14}]},{application_master,start_it_old,4,[{file,"application_master.erl"},{line,274}]}]}}}}}},[{tcp_echo,start,0,[{file,"src/tcp_echo.erl"},{line,12}]},{init,start_it,1,[]},{init,start_em,1,[]}]}}
init terminating in do_boot ()

Validate transport name

Ranch should be able to ensure that transport name has proper value and display informative error message if it isn't.

Recommend Projects

  • React photo React

    A declarative, efficient, and flexible JavaScript library for building user interfaces.

  • Vue.js photo Vue.js

    🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.

  • Typescript photo Typescript

    TypeScript is a superset of JavaScript that compiles to clean JavaScript output.

  • TensorFlow photo TensorFlow

    An Open Source Machine Learning Framework for Everyone

  • Django photo Django

    The Web framework for perfectionists with deadlines.

  • D3 photo D3

    Bring data to life with SVG, Canvas and HTML. 📊📈🎉

Recommend Topics

  • javascript

    JavaScript (JS) is a lightweight interpreted programming language with first-class functions.

  • web

    Some thing interesting about web. New door for the world.

  • server

    A server is a program made to process requests and deliver data to clients.

  • Machine learning

    Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.

  • Game

    Some thing interesting about game, make everyone happy.

Recommend Org

  • Facebook photo Facebook

    We are working to build community through open source technology. NB: members must have two-factor auth.

  • Microsoft photo Microsoft

    Open source projects and samples from Microsoft.

  • Google photo Google

    Google ❤️ Open Source for everyone.

  • D3 photo D3

    Data-Driven Documents codes.