2009-08-12 09:40:16 +02:00
|
|
|
/*
|
2011-10-31 16:20:30 +01:00
|
|
|
Copyright (c) 2009-2011 250bpm s.r.o.
|
2011-11-01 18:06:11 +01:00
|
|
|
Copyright (c) 2007-2009 iMatix Corporation
|
2011-11-01 13:39:54 +01:00
|
|
|
Copyright (c) 2011 VMware, Inc.
|
2011-03-02 16:30:40 +01:00
|
|
|
Copyright (c) 2007-2011 Other contributors as noted in the AUTHORS file
|
2009-08-12 09:40:16 +02:00
|
|
|
|
|
|
|
This file is part of 0MQ.
|
|
|
|
|
|
|
|
0MQ is free software; you can redistribute it and/or modify it under
|
2010-10-30 15:08:28 +02:00
|
|
|
the terms of the GNU Lesser General Public License as published by
|
2009-08-12 09:40:16 +02:00
|
|
|
the Free Software Foundation; either version 3 of the License, or
|
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
0MQ is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2010-10-30 15:08:28 +02:00
|
|
|
GNU Lesser General Public License for more details.
|
2009-08-12 09:40:16 +02:00
|
|
|
|
2010-10-30 15:08:28 +02:00
|
|
|
You should have received a copy of the GNU Lesser General Public License
|
2009-08-12 09:40:16 +02:00
|
|
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
2012-05-13 20:49:05 +08:00
|
|
|
#include <stdarg.h>
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
#include "session_base.hpp"
|
2010-08-11 14:09:56 +02:00
|
|
|
#include "socket_base.hpp"
|
2009-08-30 08:18:31 +02:00
|
|
|
#include "i_engine.hpp"
|
2009-08-12 09:40:16 +02:00
|
|
|
#include "err.hpp"
|
2009-08-27 10:54:28 +02:00
|
|
|
#include "pipe.hpp"
|
2010-10-08 21:42:55 +02:00
|
|
|
#include "likely.hpp"
|
2011-07-26 00:43:57 +02:00
|
|
|
#include "tcp_connecter.hpp"
|
2011-07-28 13:19:55 +02:00
|
|
|
#include "ipc_connecter.hpp"
|
2011-07-24 18:25:30 +02:00
|
|
|
#include "pgm_sender.hpp"
|
|
|
|
#include "pgm_receiver.hpp"
|
2012-02-02 14:56:51 +01:00
|
|
|
#include "address.hpp"
|
2009-08-12 09:40:16 +02:00
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
#include "req.hpp"
|
2012-03-22 11:36:19 -05:00
|
|
|
#include "dealer.hpp"
|
2011-09-15 10:00:23 +02:00
|
|
|
#include "rep.hpp"
|
2012-03-22 11:36:19 -05:00
|
|
|
#include "router.hpp"
|
2011-09-15 10:00:23 +02:00
|
|
|
#include "pub.hpp"
|
|
|
|
#include "xpub.hpp"
|
|
|
|
#include "sub.hpp"
|
|
|
|
#include "xsub.hpp"
|
|
|
|
#include "push.hpp"
|
|
|
|
#include "pull.hpp"
|
|
|
|
#include "pair.hpp"
|
|
|
|
|
|
|
|
//  Factory: instantiates the session subclass that matches the socket
//  type recorded in options_. On an unknown type, returns NULL with
//  errno set to EINVAL; on allocation failure, alloc_assert aborts.
zmq::session_base_t *zmq::session_base_t::create (class io_thread_t *io_thread_,
    bool connect_, class socket_base_t *socket_, const options_t &options_,
    const address_t *addr_)
{
    session_base_t *session = NULL;

    switch (options_.type) {
    case ZMQ_REQ:
        session = new (std::nothrow) req_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_DEALER:
        session = new (std::nothrow) dealer_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_REP:
        session = new (std::nothrow) rep_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_ROUTER:
        session = new (std::nothrow) router_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_PUB:
        session = new (std::nothrow) pub_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_XPUB:
        session = new (std::nothrow) xpub_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_SUB:
        session = new (std::nothrow) sub_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_XSUB:
        session = new (std::nothrow) xsub_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_PUSH:
        session = new (std::nothrow) push_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_PULL:
        session = new (std::nothrow) pull_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    case ZMQ_PAIR:
        session = new (std::nothrow) pair_session_t (io_thread_, connect_,
            socket_, options_, addr_);
        break;
    default:
        //  Caller supplied an unrecognised socket type.
        errno = EINVAL;
        return NULL;
    }

    alloc_assert (session);
    return session;
}
|
|
|
|
|
|
|
|
//  Sets up a session bound to the given I/O thread. No pipe or engine is
//  attached yet. 'connect_' marks a connecting session (one that owns a
//  connecter and reconnects on failure) as opposed to a transient session
//  created for an accepted peer. Ownership of 'addr_' passes to the
//  session (it is deleted in the destructor).
zmq::session_base_t::session_base_t (class io_thread_t *io_thread_,
    bool connect_, class socket_base_t *socket_, const options_t &options_,
    const address_t *addr_) :
    own_t (io_thread_, options_),
    io_object_t (io_thread_),
    connect (connect_),
    pipe (NULL),
    //  Outbound pipe held back until connection succeeds
    //  (ZMQ_DELAY_ATTACH_ON_CONNECT).
    outpipe (NULL),
    //  True while a multipart message is partially read from the pipe.
    incomplete_in (false),
    //  True while waiting for in-flight messages during termination.
    pending (false),
    engine (NULL),
    socket (socket_),
    io_thread (io_thread_),
    has_linger_timer (false),
    send_identity (options_.send_identity),
    recv_identity (options_.recv_identity),
    addr (addr_)
{
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Tears the session down. The pipe must already have been detached by
//  the termination protocol; the engine and the owned address are
//  released here.
zmq::session_base_t::~session_base_t ()
{
    zmq_assert (!pipe);

    //  If there's still a pending linger timer, remove it.
    if (has_linger_timer) {
        cancel_timer (linger_timer_id);
        has_linger_timer = false;
    }

    //  Close the engine.
    if (engine)
        engine->terminate ();

    //  'delete' on a null pointer is a no-op, so no null check is needed.
    delete addr;
}
|
2010-10-08 17:23:21 +02:00
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Adopts the session's end of the session<->socket pipe. May be called
//  exactly once, and never after termination has begun.
void zmq::session_base_t::attach_pipe (pipe_t *pipe_)
{
    //  A valid pipe must be supplied, and only while the session is live
    //  and still pipe-less.
    zmq_assert (pipe_);
    zmq_assert (!is_terminating ());
    zmq_assert (!pipe);

    //  Take ownership and start receiving the pipe's events.
    pipe = pipe_;
    pipe->set_event_sink (this);
}
|
|
|
|
|
After speaking with Ben Gray and the discussion on the mailing list, this is an attempt to create a sockopt to allow connecting pipes to not immediately be available for traffic. The problem is in a PUSH to many PULL situation, where there is a connect to a PULL which is not there. This connect will immediately create a pipe (unlike bind), and traffic will be load balanced to that pipe. This means if there is a persistently unavailable end point then the traffic will queue until HWM is hit, and older messages will be lost.
This patch adds a sockopt ZMQ_DELAY_ATTACH_ON_CONNECT, which if set to 1 will attempt to preempt this behavior. It does this by extending the use of the session_base to include in the outbound as well as the inbound pipe, and only associates the pipe with the socket once it receives the connected callback via a process_attach message. This works, and a test has been added to show so, but may introduce unexpected complications. The shutdown logic in this class has become marginally more awkward because of this, requiring the session to serve as the sink for both pipes if shutdown occurs with a still-connecting pipe in place. It is also possible there could be issues around flushing the messages, but as I could not directly think how to create such an issue I have not written any code with regards to that.
The documentation has been updated to reflect the change, but please do check over the code and test and review.
2012-06-01 17:58:19 +01:00
|
|
|
//  Parks an outbound pipe that must not reach the socket until the
//  connection actually succeeds (ZMQ_DELAY_ATTACH_ON_CONNECT). It is
//  handed over in process_attach ().
void zmq::session_base_t::onconnect_attach_pipe (pipe_t *pipe_)
{
    zmq_assert (pipe_);
    zmq_assert (!is_terminating ());

    //  Just remember it; no event sink is installed yet.
    outpipe = pipe_;
}
|
|
|
|
|
2011-09-16 09:29:43 +02:00
|
|
|
//  Pulls the next message to be sent to the wire into *msg_. Returns 0 on
//  success; -1 with errno set to EAGAIN when nothing is available.
int zmq::session_base_t::read (msg_t *msg_)
{
    //  First message to send is identity (if required).
    if (send_identity) {
        zmq_assert (!(msg_->flags () & msg_t::more));
        int rc = msg_->init_size (options.identity_size);
        errno_assert (rc == 0);
        memcpy (msg_->data (), options.identity, options.identity_size);
        send_identity = false;
        incomplete_in = false;
        return 0;
    }

    //  No pipe attached, or nothing queued in it.
    if (pipe == NULL || !pipe->read (msg_)) {
        errno = EAGAIN;
        return -1;
    }

    //  Track whether we are in the middle of a multipart message.
    incomplete_in = (msg_->flags () & msg_t::more) != 0;

    return 0;
}
|
|
|
|
|
2011-09-16 09:29:43 +02:00
|
|
|
//  Pushes a message received from the wire towards the socket. Returns 0
//  on success; -1 with errno set to EAGAIN when the pipe is full or
//  missing.
int zmq::session_base_t::write (msg_t *msg_)
{
    //  First message to receive is identity (if required).
    if (recv_identity) {
        msg_->set_flags (msg_t::identity);
        recv_identity = false;
    }

    //  Fail early if there is no pipe or it refuses the message.
    if (pipe == NULL || !pipe->write (msg_)) {
        errno = EAGAIN;
        return -1;
    }

    //  The pipe took ownership of the content; leave *msg_ reusable.
    int rc = msg_->init ();
    errno_assert (rc == 0);
    return 0;
}
|
|
|
|
|
2012-05-29 21:59:22 +02:00
|
|
|
void zmq::session_base_t::reset ()
|
|
|
|
{
|
|
|
|
// Restore identity flags.
|
|
|
|
send_identity = options.send_identity;
|
|
|
|
recv_identity = options.recv_identity;
|
|
|
|
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
void zmq::session_base_t::flush ()
|
2009-08-12 09:40:16 +02:00
|
|
|
{
|
2011-05-22 17:26:53 +02:00
|
|
|
if (pipe)
|
|
|
|
pipe->flush ();
|
2009-08-27 10:54:28 +02:00
|
|
|
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
void zmq::session_base_t::clean_pipes ()
|
2009-08-27 16:24:21 +02:00
|
|
|
{
|
2011-05-22 17:26:53 +02:00
|
|
|
if (pipe) {
|
2010-03-20 15:04:30 +01:00
|
|
|
|
2011-05-22 17:26:53 +02:00
|
|
|
// Get rid of half-processed messages in the out pipe. Flush any
|
|
|
|
// unflushed messages upstream.
|
|
|
|
pipe->rollback ();
|
|
|
|
pipe->flush ();
|
|
|
|
|
|
|
|
// Remove any half-read message from the in pipe.
|
2010-03-20 15:04:30 +01:00
|
|
|
while (incomplete_in) {
|
2011-04-21 22:27:48 +02:00
|
|
|
msg_t msg;
|
|
|
|
int rc = msg.init ();
|
|
|
|
errno_assert (rc == 0);
|
2010-03-20 15:04:30 +01:00
|
|
|
if (!read (&msg)) {
|
|
|
|
zmq_assert (!incomplete_in);
|
|
|
|
break;
|
|
|
|
}
|
2011-04-21 22:27:48 +02:00
|
|
|
rc = msg.close ();
|
|
|
|
errno_assert (rc == 0);
|
2010-03-20 15:04:30 +01:00
|
|
|
}
|
|
|
|
}
|
2009-12-15 17:49:40 +01:00
|
|
|
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Pipe event: the given pipe has finished its termination sequence.
void zmq::session_base_t::terminated (pipe_t *pipe_)
{
    //  A term signal from the still-unattached outbound pipe is safe to
    //  ignore.
    if (pipe_ == outpipe)
        return;

    //  Drop the reference to the deallocated pipe.
    zmq_assert (pipe_ == pipe);
    pipe = NULL;

    //  If we are waiting for pending messages to be sent, at this point
    //  we are sure that there will be no more messages and we can proceed
    //  with termination safely.
    if (pending)
        proceed_with_term ();
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Pipe event: messages became available for reading from the pipe.
void zmq::session_base_t::read_activated (pipe_t *pipe_)
{
    zmq_assert (pipe_ == pipe);

    //  With an engine attached, kick it to start pulling messages out of
    //  the pipe; without one, still poke the pipe so a lone delimiter is
    //  noticed.
    if (likely (engine != NULL))
        engine->activate_out ();
    else
        pipe_->check_read ();
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Pipe event: the socket freed space in the pipe, so writing is possible
//  again.
void zmq::session_base_t::write_activated (pipe_t *pipe_)
{
    zmq_assert (pipe_ == pipe);

    //  Let the engine resume reading from the wire, if one is attached.
    if (engine != NULL)
        engine->activate_in ();
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
void zmq::session_base_t::hiccuped (pipe_t *pipe_)
|
2011-05-30 10:07:34 +02:00
|
|
|
{
|
|
|
|
// Hiccups are always sent from session to socket, not the other
|
|
|
|
// way round.
|
|
|
|
zmq_assert (false);
|
|
|
|
}
|
|
|
|
|
2012-05-04 02:32:46 +01:00
|
|
|
int zmq::session_base_t::get_address (std::string &addr_)
|
|
|
|
{
|
|
|
|
if (addr)
|
|
|
|
return addr->to_string (addr_);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
//  Forwards a monitor event, together with its variadic payload, to the
//  owning socket, which performs the actual event delivery.
void zmq::session_base_t::monitor_event (int event_, ...)
{
    va_list args;
    va_start (args, event_);
    socket->monitor_event (event_, args);
    va_end (args);
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
void zmq::session_base_t::process_plug ()
|
2009-08-12 09:40:16 +02:00
|
|
|
{
|
2011-07-24 18:25:30 +02:00
|
|
|
if (connect)
|
|
|
|
start_connecting (false);
|
2010-02-13 13:07:33 +01:00
|
|
|
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Command handler: an engine (freshly connected or accepted) is handed
//  to this session. Lazily creates the session<->socket pipe pair, hands
//  over any delayed outbound pipe, and plugs the engine in.
void zmq::session_base_t::process_attach (i_engine *engine_)
{
    zmq_assert (engine_ != NULL);

    //  Create the pipe if it does not exist yet.
    if (!pipe && !is_terminating ()) {
        object_t *parents [2] = {this, socket};
        pipe_t *pipes [2] = {NULL, NULL};
        //  Pipe 0 is the session's end, pipe 1 the socket's; note the
        //  swapped HWM/delay pairs reflecting direction.
        int hwms [2] = {options.rcvhwm, options.sndhwm};
        bool delays [2] = {options.delay_on_close, options.delay_on_disconnect};
        int rc = pipepair (parents, pipes, hwms, delays);
        errno_assert (rc == 0);

        //  Plug the local end of the pipe.
        pipes [0]->set_event_sink (this);

        //  Remember the local end of the pipe.
        zmq_assert (!pipe);
        pipe = pipes [0];

        //  Ask socket to plug into the remote end of the pipe.
        send_bind (socket, pipes [1]);
    }

    //  With ZMQ_DELAY_ATTACH_ON_CONNECT the outbound pipe was held back
    //  until the connection succeeded; hand it to the socket now.
    if (outpipe && options.delay_attach_on_connect) {
        send_bind (socket, outpipe);

        //  Forget the outpipe
        outpipe = NULL;
    }

    //  Plug in the engine.
    zmq_assert (!engine);
    engine = engine_;
    engine->plug (io_thread, this);
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Invoked when the engine reports a broken connection: forgets the dead
//  engine, discards half-processed traffic, and notifies the subclass so
//  it can reconnect or self-destruct.
void zmq::session_base_t::detach ()
{
    //  Engine is dead. Let's forget about it.
    engine = NULL;

    //  Remove any half-done messages from the pipes.
    clean_pipes ();

    //  Send the event to the derived class.
    detached ();

    //  Just in case there's only a delimiter in the pipe.
    if (pipe)
        pipe->check_read ();
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Command handler: starts shutdown of the session. 'linger_' is the
//  linger interval in milliseconds: 0 terminates immediately, a positive
//  value delays termination until messages are sent or the timer fires,
//  and a negative value waits indefinitely.
void zmq::session_base_t::process_term (int linger_)
{
    zmq_assert (!pending);

    //  If the termination of the pipe happens before the term command is
    //  delivered there's nothing much to do. We can proceed with the
    //  standard termination immediately.
    if (!pipe) {
        proceed_with_term ();
        return;
    }

    pending = true;

    //  If there's finite linger value, delay the termination.
    //  If linger is infinite (negative) we don't even have to set
    //  the timer.
    if (linger_ > 0) {
        zmq_assert (!has_linger_timer);
        add_timer (linger_, linger_timer_id);
        has_linger_timer = true;
    }

    //  Start pipe termination process. Delay the termination till all messages
    //  are processed in case the linger time is non-zero.
    pipe->terminate (linger_ != 0);

    //  If we're storing to a pipe to be connected, we can clear that as well.
    //  The session becomes the pipe's event sink so it receives the
    //  'terminated' callback for it too.
    if (outpipe) {
        outpipe->set_event_sink (this);
        outpipe->terminate (linger_ != 0);
    }

    //  TODO: Should this go into pipe_t::terminate ?
    //  In case there's no engine and there's only delimiter in the
    //  pipe it wouldn't be ever read. Thus we check for it explicitly.
    pipe->check_read ();
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
void zmq::session_base_t::proceed_with_term ()
|
2011-05-26 11:30:25 +02:00
|
|
|
{
|
|
|
|
// The pending phase have just ended.
|
|
|
|
pending = false;
|
|
|
|
|
|
|
|
// Continue with standard termination.
|
2011-05-25 10:25:51 +02:00
|
|
|
own_t::process_term (0);
|
2010-08-11 14:09:56 +02:00
|
|
|
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
void zmq::session_base_t::timer_event (int id_)
|
2010-10-16 10:53:29 +02:00
|
|
|
{
|
2012-04-21 18:36:20 +04:00
|
|
|
|
2010-10-16 10:53:29 +02:00
|
|
|
// Linger period expired. We can proceed with termination even though
|
|
|
|
// there are still pending messages to be sent.
|
|
|
|
zmq_assert (id_ == linger_timer_id);
|
|
|
|
has_linger_timer = false;
|
2011-05-25 10:25:51 +02:00
|
|
|
|
|
|
|
// Ask pipe to terminate even though there may be pending messages in it.
|
|
|
|
zmq_assert (pipe);
|
2011-05-31 14:36:51 +02:00
|
|
|
pipe->terminate (false);
|
After speaking with Ben Gray and the discussion on the mailing list, this is an attempt to create a sockopt to allow connecting pipes to not immediately be available for traffic. The problem is in a PUSH to many PULL situation, where there is a connect to a PULL which is not there. This connect will immediately create a pipe (unlike bind), and traffic will be load balanced to that pipe. This means if there is a persistently unavailable end point then the traffic will queue until HWM is hit, and older messages will be lost.
This patch adds a sockopt ZMQ_DELAY_ATTACH_ON_CONNECT, which if set to 1 will attempt to preempt this behavior. It does this by extending the use of the session_base to include in the outbound as well as the inbound pipe, and only associates the pipe with the socket once it receives the connected callback via a process_attach message. This works, and a test has been added to show so, but may introduce unexpected complications. The shutdown logic in this class has become marginally more awkward because of this, requiring the session to serve as the sink for both pipes if shutdown occurs with a still-connecting pipe in place. It is also possible there could be issues around flushing the messages, but as I could not directly think how to create such an issue I have not written any code with regards to that.
The documentation has been updated to reflect the change, but please do check over the code and test and review.
2012-06-01 17:58:19 +01:00
|
|
|
|
|
|
|
if (outpipe)
|
|
|
|
outpipe->terminate (false);
|
2010-10-16 10:53:29 +02:00
|
|
|
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Hook invoked after the engine disconnects (see detach ()). Transient
//  (accepted) sessions destroy themselves; connecting sessions reset
//  per-connection state and schedule a reconnect.
void zmq::session_base_t::detached ()
{
    //  Transient session self-destructs after peer disconnects.
    if (!connect) {
        terminate ();
        return;
    }

    //  A new engine will renegotiate the identity exchange from scratch.
    reset ();

    //  Reconnect.
    if (options.reconnect_ivl != -1)
        start_connecting (true);

    //  For subscriber sockets we hiccup the inbound pipe, which will cause
    //  the socket object to resend all the subscriptions.
    if (pipe && (options.type == ZMQ_SUB || options.type == ZMQ_XSUB))
        pipe->hiccup ();
}
|
|
|
|
|
2011-09-15 10:00:23 +02:00
|
|
|
//  Creates the transport-specific connecter (tcp/ipc) as a child object,
//  or, for pgm/epgm, attaches the sender/receiver engine directly (PGM
//  has no 'connect' phase). 'wait_' tells the connecter to apply the
//  reconnect interval before its first attempt. An unsupported protocol
//  is a fatal logic error.
void zmq::session_base_t::start_connecting (bool wait_)
{
    zmq_assert (connect);

    //  Choose I/O thread to run connecter in. Given that we are already
    //  running in an I/O thread, there must be at least one available.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    zmq_assert (io_thread);

    //  Create the connecter object.

    if (addr->protocol == "tcp") {
        tcp_connecter_t *connecter = new (std::nothrow) tcp_connecter_t (
            io_thread, this, options, addr, wait_);
        alloc_assert (connecter);
        launch_child (connecter);
        return;
    }

    //  IPC transport is unavailable on Windows and OpenVMS.
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    if (addr->protocol == "ipc") {
        ipc_connecter_t *connecter = new (std::nothrow) ipc_connecter_t (
            io_thread, this, options, addr, wait_);
        alloc_assert (connecter);
        launch_child (connecter);
        return;
    }
#endif

#if defined ZMQ_HAVE_OPENPGM

    //  Both PGM and EPGM transports are using the same infrastructure.
    if (addr->protocol == "pgm" || addr->protocol == "epgm") {

        //  For EPGM transport with UDP encapsulation of PGM is used.
        bool udp_encapsulation = (addr->protocol == "epgm");

        //  At this point we'll create message pipes to the session straight
        //  away. There's no point in delaying it as no concept of 'connect'
        //  exists with PGM anyway.
        if (options.type == ZMQ_PUB || options.type == ZMQ_XPUB) {

            //  PGM sender.
            pgm_sender_t *pgm_sender = new (std::nothrow) pgm_sender_t (
                io_thread, options);
            alloc_assert (pgm_sender);

            int rc = pgm_sender->init (udp_encapsulation, addr->address.c_str ());
            errno_assert (rc == 0);

            send_attach (this, pgm_sender);
        }
        else if (options.type == ZMQ_SUB || options.type == ZMQ_XSUB) {

            //  PGM receiver.
            pgm_receiver_t *pgm_receiver = new (std::nothrow) pgm_receiver_t (
                io_thread, options);
            alloc_assert (pgm_receiver);

            int rc = pgm_receiver->init (udp_encapsulation, addr->address.c_str ());
            errno_assert (rc == 0);

            send_attach (this, pgm_receiver);
        }
        else
            //  PGM supports only pub/sub-style socket types.
            zmq_assert (false);

        return;
    }
#endif

    //  Unsupported transport protocol for a connecting session.
    zmq_assert (false);
}
|
2011-05-25 10:25:51 +02:00
|
|
|
|