diff --git a/3party/eventbus/include/detail/leftright.h b/3party/eventbus/include/detail/leftright.h new file mode 100644 index 0000000..75b5046 --- /dev/null +++ b/3party/eventbus/include/detail/leftright.h @@ -0,0 +1,388 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef MPM_LEFTRIGHT_CACHE_LINE_SIZE +#define MPM_LEFTRIGHT_CACHE_LINE_SIZE 64 +#endif + +namespace mpm { +//! \defgroup Concepts +//! Concept is a term that describes a named set of requirements for a type. + +//! \defgroup ReaderRegistry +//! \ingroup Concepts +//! \{ +//! +//! Keeps track of active readers such that it can efficiently +//! indicate whether there are any active readers when queried. +//! +//! \par Extends +//! DefaultConstructible +//! +//! \par Requirements +//! Given:\n +//! R, an implementation of the ReaderRegistry concept \n +//! r, an instance of R +//! +//! |Expression | Requirements | Return type | +//! |:-----------|:---------------------------------------------------------|:-------------| +//! | R() | R is default constructible. | R | +//! | r.arrive() | Notes the arival of a reader. Wait-free and noexcept. | void | +//! | r.depart() | Notes the departure of a reader. Wait-free and noexcept. | void | +//! | r.empty() | const and noexcept. | true if there are no readers; false otherwise. | +//! \} + +struct in_place_t {}; + +constexpr in_place_t in_place{}; + +//! Wrap any single-threaded datastructure with Left-Right +//! concurrency control +//! +//! Left-Right concurrency allows wait-free population-oblivious reads +//! and blocking writes. Writers never block readers. +//! +//! Instances of this class maintain two full copies of the underlying +//! datastructure and all modifications are performed twice. Consequently, +//! uses of this class should be limited to small amounts of data where +//! the number of reads dominates the number of writes. +//! +//! Left-Right concurrency control is described in depth in +//! A. Correia and P. Ramalhete. Left-Right: A Concurrency Control +//! Technique with Wait-Free Population Oblivious Reads +template +class basic_leftright { + static_assert(noexcept(std::declval().arrive()), + "ReaderRegistry::arrive() must be noexcept"); + static_assert(noexcept(std::declval().depart()), + "ReaderRegistry::depart() must be noexcept"); + static_assert(noexcept(std::declval().empty()), + "ReaderRegistry::empty() must be noexcept"); + +public: + using value_type = T; + using reference = value_type &; + using const_reference = const value_type &; + + basic_leftright() = default; + + //! Construct the two underlying instances of T + //! by moving a seed instance + basic_leftright(T &&seed) noexcept( + std::is_nothrow_copy_constructible::value + && std::is_nothrow_move_constructible::value); + + //! Construct the two underlying instances of T + //! by copying a seed instance + basic_leftright(const value_type &seed) noexcept( + std::is_nothrow_copy_constructible::value); + + //! Construct the two underlying instances of T in place, forwarding + //! the args after the mpm::in_place tag + template + basic_leftright(in_place_t, Args &&...) noexcept( + std::is_nothrow_copy_constructible::value + && std::is_nothrow_constructible::value); + + //! \internal + //! Need a use-case for these. It seems that you would never want to + //! move/copy/swap the full leftright instance but rather apply those + //! operations to the encapsulated instance, in which case the relevant + //! 
operation is accessed via modify() + + basic_leftright(const basic_leftright &other) = delete; + basic_leftright(basic_leftright &&other) = delete; + basic_leftright &operator=(const basic_leftright &rhs) = delete; + basic_leftright &operator=(basic_leftright &&rhs) = delete; + void swap(basic_leftright &other) noexcept = delete; + + //! Modify the state of the managed datastructure + //! + //! Blocks/is-blocked-by other concurrent writers; does not + //! block concurrent readers + //! + //! This function requires that execution of the supplied functor + //! be noexcept. + //! + //! The function passed will be executed twice and *must* + //! result in the exact same mutation operation being applied + //! in both cases. For example it would be incorrect to supply + //! a function here that inserted a random number into the + //! underlying datastructure if said random number were calculated + //! for each invocation (i.e. each invocation would insert a + //! different value). + //! + //! \throws std::system_error on failure to lock internal mutex + //! + //! \internal I wanted the declaration to be as below so that this + //! function doesn't even exist for non-noexcept functors, + //! but g++ has not yet implemented noexcept mangling + //! as of version 5.2.1. Instead we just static_assert the + //! noexcept-ness of the functor in the body of the function + //! + //! template + //! auto modify(F f) + //! -> typename std::enable_if())), + //! typename std::result_of::type>::type; + template + typename std::result_of::type modify(F f); + + //! Observe the state of the managed datastructure + //! + //! Always wait-free provided ReaderRegistry::arrive() and + //! ReaderRegistry::depart() are truly wait-free + //! + //! \throws Whatever the provided functor throws and nothing else + template + typename std::result_of::type observe(F f) const + noexcept(noexcept(f(std::declval()))); + +private: + class scoped_read_indication { + public: + scoped_read_indication(ReaderRegistry &rr) noexcept; + ~scoped_read_indication() noexcept; + + private: + ReaderRegistry &m_reg; + }; + + template + void toggle_reader_registry(Lock &l) noexcept; + + enum lr { read_left, read_right }; + + mutable std::array m_reader_registries; + + std::atomic_size_t m_registry_index{0}; + + std::atomic m_leftright{read_left}; + + T m_left alignas(MPM_LEFTRIGHT_CACHE_LINE_SIZE); + T m_right alignas(MPM_LEFTRIGHT_CACHE_LINE_SIZE); + std::mutex m_writemutex; +}; + +//! Simple implementation of ReaderRegistry concept +//! +//! This implemtation is wait-free but readers will contend +//! on a single cache line due to the use of a shared counter. +//! +//! \concept{ReaderRegistry} +class alignas(MPM_LEFTRIGHT_CACHE_LINE_SIZE) atomic_reader_registry { +public: + void arrive() noexcept; + void depart() noexcept; + bool empty() const noexcept; + +private: + std::atomic_uint_fast32_t m_count{0}; +}; + +//! Distributed implementation of ReaderRegistry +//! +//! Uses an array of N counters (suitably padded) and hashes reader +//! thread ids to indices into said array so that concurrent reader +//! registration can be made unlikely to contend. The likelihood +//! of a collision is dependent on the number of concurrent readers +//! relative to N. +//! +//! arrive() and depart() will perform better if N is a power of two. +//! +//! 
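+//!
+//! Illustrative sketch (editor's addition; names are hypothetical, not part
+//! of this header): a distributed registry can be paired with
+//! basic_leftright when many threads read concurrently.
+//! \code
+//! mpm::basic_leftright<std::set<int>,
+//!                      mpm::distributed_atomic_reader_registry<16>> lr;
+//! lr.modify([](std::set<int> &s) noexcept { s.insert(42); });
+//! bool found = lr.observe([](const std::set<int> &s) noexcept {
+//!     return s.count(42) > 0;
+//! });
+//! \endcode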
\concept{ReaderRegistry} +template> +class alignas(MPM_LEFTRIGHT_CACHE_LINE_SIZE) + distributed_atomic_reader_registry { +public: + void arrive() noexcept; + void depart() noexcept; + bool empty() const noexcept; + +private: + class alignas(MPM_LEFTRIGHT_CACHE_LINE_SIZE) counter { + public: + void incr() noexcept; + void decr() noexcept; + std::uint_fast32_t relaxed_read() const noexcept; + + private: + std::atomic_uint_fast32_t m_value{0}; + }; + + std::array m_counters{}; +}; + +//! Default leftright uses the simpler reader registry; prefer +//! distributed_atomic_reader_registry when reads are highly contended +template +using leftright = basic_leftright; + +#ifndef DOCS + +template +basic_leftright::basic_leftright(T &&seed) noexcept( + std::is_nothrow_copy_constructible::value + && std::is_nothrow_move_constructible::value) + : m_left(std::move(seed)), + m_right(m_left) +{} + +template +basic_leftright::basic_leftright(const T &seed) noexcept( + std::is_nothrow_copy_constructible::value) + : m_left(seed), + m_right(seed) +{} + +template +template +basic_leftright::basic_leftright(in_place_t, Args &&...args) noexcept( + std::is_nothrow_copy_constructible::value + && std::is_nothrow_constructible::value) + : m_left(std::forward(args)...), + m_right(m_left) +{} + +template +template +typename std::result_of::type +basic_leftright::observe(F f) const + noexcept(noexcept(f(std::declval()))) +{ + std::size_t idx = m_registry_index.load(std::memory_order_acquire); + scoped_read_indication sri(m_reader_registries[idx]); + return read_left == m_leftright.load(std::memory_order_acquire) + ? f(m_left) + : f(m_right); +} + +template +template +typename std::result_of::type +basic_leftright::modify(F f) +{ + static_assert(noexcept(f(std::declval())), + "Modify functor must be noexcept"); + std::unique_lock xlock(m_writemutex); + if (read_left == m_leftright.load(std::memory_order_relaxed)) { + f(m_right); + m_leftright.store(read_right, std::memory_order_release); + toggle_reader_registry(xlock); + return f(m_left); + } else { + f(m_left); + m_leftright.store(read_left, std::memory_order_release); + toggle_reader_registry(xlock); + return f(m_right); + } +} + +template +template +void +basic_leftright::toggle_reader_registry(Lock &l) noexcept +{ + assert(l); + const std::size_t current = + m_registry_index.load(std::memory_order_acquire); + const std::size_t next = (current + 1) & 0x1; + + while (!m_reader_registries[next].empty()) { std::this_thread::yield(); } + + m_registry_index.store(next, std::memory_order_release); + + while (!m_reader_registries[current].empty()) { std::this_thread::yield(); } +} + +template +basic_leftright::scoped_read_indication::scoped_read_indication( + R &r) noexcept + : m_reg(r) +{ + m_reg.arrive(); +} + +template +basic_leftright::scoped_read_indication::~scoped_read_indication() noexcept +{ + m_reg.depart(); +} + +inline void +atomic_reader_registry::arrive() noexcept +{ + m_count.fetch_add(1, std::memory_order_acq_rel); +} + +inline void +atomic_reader_registry::depart() noexcept +{ + m_count.fetch_sub(1, std::memory_order_acq_rel); +} + +inline bool +atomic_reader_registry::empty() const noexcept +{ + return 0 == m_count.load(std::memory_order_acquire); +} + +template +void +distributed_atomic_reader_registry::arrive() noexcept +{ + std::size_t index = Hasher()(std::this_thread::get_id()) % N; + m_counters[index].incr(); +} + +template +void +distributed_atomic_reader_registry::depart() noexcept +{ + std::size_t index = Hasher()(std::this_thread::get_id()) % N; 
+ m_counters[index].decr(); +} + +template +bool +distributed_atomic_reader_registry::empty() const noexcept +{ + bool retval = + std::none_of(begin(m_counters), end(m_counters), + [](const counter &ctr) { return ctr.relaxed_read(); }); + std::atomic_thread_fence(std::memory_order_acquire); + return retval; +} + +template +void +distributed_atomic_reader_registry::counter::incr() noexcept +{ + (void) m_value.fetch_add(1, std::memory_order_acq_rel); +} + +template +void +distributed_atomic_reader_registry::counter::decr() noexcept +{ + (void) m_value.fetch_sub(1, std::memory_order_acq_rel); +} + +template +std::uint_fast32_t +distributed_atomic_reader_registry::counter::relaxed_read() const noexcept +{ + return m_value.load(std::memory_order_relaxed); +} + +#endif + +}// namespace mpm diff --git a/3party/eventbus/include/detail/typelist.h b/3party/eventbus/include/detail/typelist.h new file mode 100644 index 0000000..1b6d726 --- /dev/null +++ b/3party/eventbus/include/detail/typelist.h @@ -0,0 +1,49 @@ +#pragma once + +namespace mpm { +namespace detail { +//! A type used to indicate an empty typelist tail. I.e. the end marker +//! of a typelist. +struct null_t {}; + +//! Typelist a la [Alexandrescu](http://www.drdobbs.com/generic-programmingtypelists-and-applica/184403813) +template +struct typelist { + //! The first element, a non-typelist by convention + using head = Head; + + //! The second element, can be another typelist + using tail = Tail; +}; + +template +struct for_each_helper { + void operator()(F f) const + { + f.template operator()(); + for_each_helper()(f); + } +}; + +template +struct for_each_helper { + void operator()(const F &) {} +}; + +//! Iterate over a typelist at runtime. +//! The supplied functor, f, is invoked for each element in the typlist TL +//! +//! \tparam TL A typelist +//! \tparam F A FunctionObject +//! \param f A functor that will be called as f.template operator()\() +//! where T is an element in TL. This functor will be invoked once +//! for each element of TL +//! \relates typelist +template +void +for_each_type(const F &f) +{ + for_each_helper()(f); +} +}// namespace detail +}// namespace mpm diff --git a/3party/eventbus/include/enable_polymorphic_dispatch.h b/3party/eventbus/include/enable_polymorphic_dispatch.h new file mode 100644 index 0000000..ce07b83 --- /dev/null +++ b/3party/eventbus/include/enable_polymorphic_dispatch.h @@ -0,0 +1,41 @@ +#pragma once + +#include "detail/typelist.h" + +namespace mpm { +namespace detail { +/// root class for polymorphic event hierarchies +class event { +protected: + //don't dispatch as this type - it's an internal + //implementation detail + using dispatch_as = null_t; + + virtual ~event() {} +}; + +/// forward decl from eventbus.h +template +struct dispatch_typelist; +}// namespace detail + +//! Publicly inherit from this class to obtain polymorphic +//! event delivery. 
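+//!
+//! Illustrative sketch (editor's addition; event names are hypothetical,
+//! assuming the usual CRTP form enable_polymorphic_dispatch<Derived, Base>):
+//! a derived event names its base as the second template argument so that
+//! subscribers to the base type also receive the derived event.
+//! \code
+//! struct base_event : mpm::enable_polymorphic_dispatch<base_event> {};
+//! struct derived_event
+//!     : mpm::enable_polymorphic_dispatch<derived_event, base_event> {};
+//! \endcode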
+template +class enable_polymorphic_dispatch : public Base { +protected: + // \internal + // Couldn't use a typelist based on std::tuple here because + // std::tuple_cat won't work with an incomplete type, and the + // derived event types are incomplete when decltype(tuple_cat) + // would be used + +#ifdef DOCS + using dispatch_as = implementation - defined; +#else + template + friend class detail::dispatch_typelist; + using dispatch_as = detail::typelist; +#endif +}; +}// namespace mpm diff --git a/3party/eventbus/include/eventbus.h b/3party/eventbus/include/eventbus.h new file mode 100644 index 0000000..4f4ec92 --- /dev/null +++ b/3party/eventbus/include/eventbus.h @@ -0,0 +1,433 @@ +#pragma once + +#include "enable_polymorphic_dispatch.h" +#include "detail/typelist.h" +#include +#include "detail/leftright.h" +#include +#include +#include + +//! The mpm namespace +namespace mpm { +//! \defgroup Concepts +//! Concept is a term that describes a named set of requirements for a type + +//! \defgroup Event +//! \ingroup Concepts +//! \{ +//! +//! An instance of any C++ object type. That is, anything but a function, +//! reference, or void type +//! +//! \par Requirements +//! Given:\n +//! E, an implementation of the Event concept +//! +//! |Expression | Requirements | Return type | +//! |:--------------------------------|:-------------------------|:-------------------| +//! |std::is_object::value == true | E must be an object type | bool, must be true | +//! \} + +//! \defgroup EventHandler +//! \ingroup Concepts +//! \{ +//! +//! A Callable that can be invoked to handle an instance of Event. +//! Callable's INVOKE operation must be noexcept. +//! +//! \par Extends +//! Callable +//! +//! \par Requirements +//! Given:\n +//! E an implementation of the Event concept, +//! e and instance of E, +//! H an implementation of the EventHandler concept handling events of type E, +//! h an instance of H +//! +//! |Expression | Requirements | +//! |:-------------------------------|:-----------------------| +//! |h(e) | well-formed | +//! |noexcept(h(e)) == true | h(e) must be noexcept | +//! \} + +namespace detail { +//! Adapts an instance of the Event concept into the +//! class hierarchy rooted at mpm::detail::event +template +class adapted_event : public detail::event { +public: + using dispatch_as = detail::typelist; + + adapted_event(const Event &event) : m_event{event} {} + + operator const Event &() const { return m_event; } + +private: + const Event &m_event; +}; + +//! Holds the subscriber event type and the handler instance +//! in a type-erased manner so that they can be put into a +//! homogeneous container (e.g. the std::unordered_multimap as below) +class subscription { +public: + using id_t = std::intptr_t; + + template + subscription(const H &handler, const Alloc &alloc, E *) + : m_self{std::allocate_shared>(alloc, handler)} + {} + + void deliver(const event &e) { m_self->deliver(e); } + + id_t id() const { return reinterpret_cast(m_self.get()); } + +private: + struct concept + { + virtual ~concept(){}; + virtual void deliver(const event &e) = 0; + }; + + // base template for events that extend detail::event (i.e. for + // polymorphic dispatchable events). These can be safely + // static_cast to the subscribed event type + template + struct model : concept { + explicit model(H h) : handler(std::move(h)) {} + + void deliver(const event &e) final + { + handler(static_cast(e)); + } + + H handler; + }; + + // Specialization for events that do not use + // enable_polymorphic_dispatch. 
The diffence is that we static_cast + // to adapted_event. + template + struct model::value>::type> + : concept { + explicit model(H h) : handler(std::move(h)) {} + + void deliver(const event &e) final + { + handler(static_cast &>(e)); + } + + H handler; + }; + + std::shared_ptr m_self; +}; + +struct cookie { + cookie() = default; + + cookie(subscription::id_t _id, std::type_index _ti) : id(_id), ti(_ti) {} + + subscription::id_t id = 0; + std::type_index ti = typeid(std::nullptr_t); +}; + +//! Extract a dispatch typelist for an event supplying a dispatch_as +//! typedef. If the supplied event type is not in the head position of +//! the dispatch_as typelist then it is prepended to said list +template +struct dispatch_typelist { + // todo C++14 can be std::conditional_t + // most derived type goes first + using type = typename std::conditional< + std::is_same::value, + typename E::dispatch_as, + detail::typelist>::type; +}; + +template +struct deliver { + deliver(const event &e, const SubsMap &subs) : m_subs{subs}, m_event{e} {} + + template + void operator()() + { + auto handlers = m_subs.equal_range(std::type_index(typeid(T))); + for (auto pos = handlers.first; pos != handlers.second; ++pos) { + const_cast(pos->second).deliver(m_event); + } + } + + const SubsMap &m_subs; + const event &m_event; +}; + +// not meant to be used as a virtual base class - nothing should +// destruct via this type +class unsubscribable { +public: + virtual void unsubscribe(const cookie &c) = 0; + +protected: + ~unsubscribable() {} +}; +}// namespace detail + +//! A small POD type representing a subscription +using cookie = detail::cookie; + +//! Accepts events from publishers and delivers them to subscribers +template +class basic_eventbus : public detail::unsubscribable { +public: + //! The type of the Allocator used by this eventbus + using allocator_type = Allocator; + + //! Delegates to basic_eventbus(allocator_type) using + //! default construction for the allocator_type instance. + basic_eventbus(); + + //! Constructs an instance using the supplied allocator_type. + explicit basic_eventbus(allocator_type alloc); + +#ifdef DOCS + + //! Publish an instance of E. + //! + //! The event instance will be delivered either as only type E or + //! if E has been defined with mpm::enable_polymorphic_dispatch it + //! will be delivered as every type in its inheritance chain. + //! + //! \tparam E An instance of the Event concept + //! \param event The event to publish + //! \returns void + template + void publish(const E &event) noexcept; + +#else + + template + typename std::enable_if::value>::type + publish(const E &event) noexcept; + + template + typename std::enable_if::value>::type + publish(const E &event) noexcept; + +#endif + + //! Subscribe to instances of Event + //! + //! The supplied handler will be invoked when events of type Event + //! are published or when classes derived from Event that have chosen + //! to enable polymorphic dispatch are published. + //! + //! \tparam Event The type for which to subscribe + //! \tparam EventHandler an instance of the EventHandler concept + //! \param handler An instance of EventHandler + //! \returns A cookie which will allow for this handler to be + //! unsubscribed later via basic_eventbus::unsubscribe + template + cookie subscribe(const EventHandler &handler); + + //! Unsubscribes an event handler + //! + //! \param c A cookie obtained when basic_eventbus::subscribe was called + //! 
\returns void + void unsubscribe(const cookie &c) override; + +private: + using subscriptions = + leftright, + std::equal_to, + allocator_type>>; + allocator_type m_alloc; + subscriptions m_subscriptions; +}; + +//! Default eventbus uses std::allocator +using eventbus = basic_eventbus>; + +template +basic_eventbus::basic_eventbus() : basic_eventbus(allocator_type()) +{} + +template +basic_eventbus::basic_eventbus(allocator_type alloc) + : m_alloc{std::move(alloc)}, + m_subscriptions{in_place, alloc} +{} + +#ifndef DOCS + +template +template +typename std::enable_if::value>::type +basic_eventbus::publish(const Event &event) noexcept +{ + using types = typename detail::dispatch_typelist::type; + m_subscriptions.observe([&](typename subscriptions::const_reference subs) { + detail::for_each_type( + detail::deliver{event, subs}); + }); +} + +template +template +typename std::enable_if::value>::type +basic_eventbus::publish(const Event &event) noexcept +{ + publish(detail::adapted_event{event}); +} + +#endif + +template +template +cookie +basic_eventbus::subscribe(const EventHandler &handler) +{ + static_assert(std::is_object::value, "Events must be object types"); + static_assert(noexcept(handler(std::declval())), + "Need noexcept handler for Event"); + + Event *ptr{}; + detail::subscription sub{handler, m_alloc, ptr}; + return m_subscriptions.modify( + [&](typename subscriptions::reference subs) noexcept { + auto idx = std::type_index(typeid(Event)); + subs.emplace(idx, sub); + return cookie{sub.id(), idx}; + }); +} + +template +void +basic_eventbus::unsubscribe(const cookie &c) +{ + m_subscriptions.modify( + [=](typename subscriptions::reference subs) noexcept { + auto range = subs.equal_range(c.ti); + for (auto pos = range.first; pos != range.second; ++pos) { + if (pos->second.id() == c.id) { + subs.erase(pos); + return; + } + } + }); +} + +//! An RAII type for eventbus subscriptions +//! +//! This type will ensure that the encapsulated eventbus subscription +//! is released when it goes out of scope. +//! +//! \tparam Event the type of event for which this subscription subscribes +template +class scoped_subscription { +public: + using event_type = Event; + + //! Construct a scoped_subscription not managing any subscription + scoped_subscription() = default; + + //! Move from another scoped_subscription. + scoped_subscription(scoped_subscription &&other) noexcept + : m_ebus{other.m_ebus}, + m_cookie{other.m_cookie} + { + other.m_ebus = nullptr; + } + + //! Copying a scoped_subscription is prohibited + scoped_subscription(const scoped_subscription &) = delete; + + //! Move-assign from another scoped_subscription + scoped_subscription &operator=(scoped_subscription &&other) noexcept + { + reset(); + m_ebus = other.m_ebus; + m_cookie = other.m_cookie; + other.m_ebus = nullptr; + } + + void swap(scoped_subscription &other) noexcept + { + using std::swap; + swap(m_ebus, other.m_ebus); + swap(m_cookie, other.m_cookie); + } + + //! Initializes a subscription that will be usubscribed when + //! this object goes out of scope. + //! + //! Subscription is constructed with ebus.subscribe(h) + //! + //! \tparam Alloc The allocator type of the event bus supplied + //! \tparam Handler the type of the handler supplied + //! \param ebus An instance of basic_eventbus to use for subscribing + //! \param h An instance of an event handler + template + scoped_subscription(basic_eventbus &ebus, const Handler &h) + : m_ebus{&ebus}, + m_cookie{ebus.template subscribe(h)} + {} + + //! 
Initializes a subscription that will be usubscribed when + //! this object goes out of scope. + //! + //! Subscription is constructed with ebus.subscribe(h). If + //! this object currently manages a subscription, that subscription + //! will be cleared as though reset() had been called. + //! + //! \tparam Alloc The allocator type of the event bus supplied + //! \tparam Handler the type of the handler supplied + //! \param ebus An instance of basic_eventbus to use for subscribing + //! \param h An instance of an event handler + template + void assign(basic_eventbus &ebus, const Handler &h) + { + reset(); + m_ebus = &ebus; + m_cookie = ebus.template subscribe(h); + } + + //! Unsubscribes this objects managed subscription if one exists + ~scoped_subscription() { reset(); } + + //! Unsubscribes this objects managed subscription if one exists + //! \returns void + void reset() + { + if (m_ebus) { m_ebus->unsubscribe(m_cookie); } + } + +private: + friend void swap(scoped_subscription &lhs, scoped_subscription &rhs) + { + lhs.swap(rhs); + } + + detail::unsubscribable *m_ebus = nullptr;//non-owning + cookie m_cookie{}; +}; +}// namespace mpm + +namespace ulib { +template +using enable_polymorphic_dispatch = mpm::enable_polymorphic_dispatch; + +// using eventbus = mpm::basic_eventbus>; +using eventbus = mpm::basic_eventbus>>; + +template +using scoped_subscription = mpm::scoped_subscription; +}// namespace ulib diff --git a/3party/nonstd/optional.h b/3party/nonstd/optional.h index ce64a90..a9bb01e 100644 --- a/3party/nonstd/optional.h +++ b/3party/nonstd/optional.h @@ -2500,7 +2500,7 @@ template using hash = std::hash>; // if <= C++14 -#if __cplusplus < 201703L +// #if __cplusplus < 201703L template using optional = tl::optional; @@ -2530,7 +2530,7 @@ make_optional(std::initializer_list il, Args &&...args) } constexpr tl::nullopt_t nullopt{tl::nullopt}; -#endif +// #endif }// namespace ulib #endif diff --git a/CMakeLists.txt b/CMakeLists.txt index 3a858d9..d78fd22 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,17 +16,17 @@ option(ULIB_SHARED_LIB "Build shared library" OFF) option(ULIB_BUILD_EXAMPLES "Build examples" OFF) if(NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE "Release") - # set stripped - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -s") + set(CMAKE_BUILD_TYPE "Release") + # set stripped + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -s") endif() # add -static-libgcc and -static-libstdc++ to link flags set(CMAKE_POSITION_INDEPENDENT_CODE ON) if(ULIB_SHARED_LIB) - add_library(${PROJECT_NAME} SHARED "") + add_library(${PROJECT_NAME} SHARED "") else() - add_library(${PROJECT_NAME} STATIC "" src/ulib/system/timer.cpp + add_library(${PROJECT_NAME} STATIC "" src/ulib/system/timer.cpp src/ulib/system/timer.h) endif() target_sources( @@ -63,10 +63,10 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") # CMP0063 if(POLICY CMP0063) - cmake_policy(SET CMP0063 NEW) + cmake_policy(SET CMP0063 NEW) endif() if(POLICY CMP0048) - cmake_policy(SET CMP0048 NEW) + cmake_policy(SET CMP0048 NEW) endif() set(FMT_USE_CPP11 @@ -97,6 +97,7 @@ target_compile_definitions(${PROJECT_NAME} PRIVATE ULIB_LIBRARY_IMPL) target_include_directories( ${PROJECT_NAME} PUBLIC 3party/bnflite + 3party/eventbus/include 3party/inja 3party/mongoose 3party/nlohmann @@ -109,14 +110,14 @@ target_include_directories( install(TARGETS ${PROJECT_NAME} DESTINATION lib) if(ULIB_BUILD_TESTS) - enable_testing() - set(GTEST_LANG_CXX11 OFF) - set(GTEST_HAS_TR1_TUPLE OFF) - 
set(GTEST_USE_OWN_TR1_TUPLE ON)
-    add_subdirectory(3party/googletest)
-    add_subdirectory(tests)
+  enable_testing()
+  set(GTEST_LANG_CXX11 OFF)
+  set(GTEST_HAS_TR1_TUPLE OFF)
+  set(GTEST_USE_OWN_TR1_TUPLE ON)
+  add_subdirectory(3party/googletest)
+  add_subdirectory(tests)
 endif()
 
 if(ULIB_BUILD_EXAMPLES)
-    add_subdirectory(examples)
+  add_subdirectory(examples)
 endif()
diff --git a/tests/3party/eventbus/eventbus_unittest.cpp b/tests/3party/eventbus/eventbus_unittest.cpp
new file mode 100644
index 0000000..2d58412
--- /dev/null
+++ b/tests/3party/eventbus/eventbus_unittest.cpp
@@ -0,0 +1,20 @@
+#include <eventbus.h>
+#include <gtest/gtest.h>
+
+TEST(EventBus, BaseTest)
+{
+    struct Event {
+        int x;
+    };
+
+    ulib::eventbus bus;
+    int calls = 0;
+    ulib::scoped_subscription<Event> ss(bus, [&](const Event &e) noexcept {
+        EXPECT_EQ(calls, e.x);
+        ++calls;
+    });
+    bus.publish(Event{0});
+    EXPECT_EQ(calls, 1);
+    bus.publish(Event{1});
+    EXPECT_EQ(calls, 2);
+}
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 59260d1..95601d8 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -13,6 +13,7 @@ add_executable(
   ulib/system/thread_unittest.cpp
   ulib/system/thread_pool_unittest.cpp
   ulib/system/timer_unittest.cpp
+  3party/eventbus/eventbus_unittest.cpp
   3party/inja/inja_unittest.cpp
   3party/optional/optional_unittest.cpp
   3party/sqlpp11/sqlpp11_unittest.cpp
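
Usage note (editor's addition, not part of the diff): the new unit test only exercises plain struct events. The sketch below illustrates the polymorphic-dispatch path that the vendored headers also provide. It assumes the include directory added above (3party/eventbus/include) is on the compiler's include path and that ulib::enable_polymorphic_dispatch follows the usual CRTP form enable_polymorphic_dispatch<Derived, Base>; the event names and the main() driver are hypothetical, for illustration only.

// polymorphic_dispatch_example.cpp -- illustrative sketch, not part of this patch
#include <eventbus.h>
#include <cassert>

namespace {
// Base event opts into polymorphic delivery (assumed CRTP form).
struct connection_event : ulib::enable_polymorphic_dispatch<connection_event> {
    int fd = -1;
};

// Derived event names its base as the second template argument, so a
// subscriber to connection_event also receives connection_lost.
struct connection_lost
    : ulib::enable_polymorphic_dispatch<connection_lost, connection_event> {};
}// namespace

int main()
{
    ulib::eventbus bus;
    int any = 0;
    int lost = 0;

    // Handlers must be noexcept; basic_eventbus::subscribe static_asserts this.
    auto c = bus.subscribe<connection_event>(
        [&](const connection_event &) noexcept { ++any; });

    // RAII subscription: unsubscribes itself when ss leaves scope.
    ulib::scoped_subscription<connection_lost> ss(
        bus, [&](const connection_lost &) noexcept { ++lost; });

    bus.publish(connection_lost{});// delivered as connection_lost and connection_event
    assert(any == 1 && lost == 1);

    bus.unsubscribe(c);
    bus.publish(connection_lost{});// only the scoped subscription fires now
    assert(any == 1 && lost == 2);

    return 0;
}

Design-wise, publish() runs through leftright::observe() and so never blocks readers, while subscribe() and unsubscribe() go through leftright::modify() and serialize on the internal write mutex.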