fix async deadlock
All checks were successful
linux-x64-gcc / linux-gcc (Release) (push) Successful in 56s
linux-x64-gcc / linux-gcc (Debug) (push) Successful in 3m36s

tqcq 2024-03-28 22:19:28 +08:00
parent fd46ca62ae
commit 181823d4fe
6 changed files with 126 additions and 88 deletions

View File

@@ -19,7 +19,7 @@
// THE SOFTWARE.
#ifndef ASYNCXX_H_
# error "Do not include this header directly, include <async++.h> instead."
#error "Do not include this header directly, include <async++.h> instead."
#endif
namespace async {
@@ -37,17 +37,19 @@ namespace detail {
// Detect whether an object is a scheduler
template<typename T, typename = decltype(std::declval<T>().schedule(std::declval<task_run_handle>()))>
two& is_scheduler_helper(int);
two &is_scheduler_helper(int);
template<typename T>
one& is_scheduler_helper(...);
one &is_scheduler_helper(...);
template<typename T>
struct is_scheduler: public std::integral_constant<bool, sizeof(is_scheduler_helper<T>(0)) - 1> {};
struct is_scheduler : public std::integral_constant<bool, sizeof(is_scheduler_helper<T>(0)) - 1> {};
// Singleton scheduler classes
class thread_scheduler_impl {
public:
LIBASYNC_EXPORT static void schedule(task_run_handle t);
};
class inline_scheduler_impl {
public:
static void schedule(task_run_handle t);
@@ -59,19 +61,22 @@ typedef ref_count_ptr<task_base> task_ptr;
// Helper function to schedule a task using a scheduler
template<typename Sched>
void schedule_task(Sched& sched, task_ptr t);
void schedule_task(Sched &sched, task_ptr t);
// Wait for the given task to finish. This will call the wait handler currently
// active for this thread, which causes the thread to sleep by default.
LIBASYNC_EXPORT void wait_for_task(task_base* wait_task);
#ifndef LIBASYNC_CUSTOM_WAIT_FOR_TASK
LIBASYNC_EXPORT void wait_for_task(task_base *wait_task);
#endif
// Forward-declaration for data used by threadpool_scheduler
struct threadpool_data;
} // namespace detail
}// namespace detail
// Run a task in the current thread as soon as it is scheduled
inline detail::inline_scheduler_impl& inline_scheduler()
inline detail::inline_scheduler_impl &
inline_scheduler()
{
static detail::inline_scheduler_impl instance;
return instance;
@@ -80,7 +85,8 @@ inline detail::inline_scheduler_impl& inline_scheduler()
// Run a task in a separate thread. Note that this scheduler does not wait for
// threads to finish at process exit. You must ensure that all threads finish
// before ending the process.
inline detail::thread_scheduler_impl& thread_scheduler()
inline detail::thread_scheduler_impl &
thread_scheduler()
{
static detail::thread_scheduler_impl instance;
return instance;
@@ -89,15 +95,17 @@ inline detail::thread_scheduler_impl& thread_scheduler()
// Built-in thread pool scheduler with a size that is configurable from the
// LIBASYNC_NUM_THREADS environment variable. If that variable does not exist
// then the number of CPUs in the system is used instead.
LIBASYNC_EXPORT threadpool_scheduler& default_threadpool_scheduler();
LIBASYNC_EXPORT threadpool_scheduler &default_threadpool_scheduler();
// Default scheduler that is used when one isn't specified. This defaults to
// default_threadpool_scheduler(), but can be overridden by defining
// LIBASYNC_CUSTOM_DEFAULT_SCHEDULER before including async++.h. Keep in mind
// that in that case async::default_scheduler should be declared before
// including async++.h.
#ifndef LIBASYNC_CUSTOM_DEFAULT_SCHEDULER
inline threadpool_scheduler& default_scheduler()
inline threadpool_scheduler &
default_scheduler()
{
return default_threadpool_scheduler();
}
@@ -130,16 +138,15 @@ class threadpool_scheduler {
std::unique_ptr<detail::threadpool_data> impl;
public:
LIBASYNC_EXPORT threadpool_scheduler(threadpool_scheduler&& other);
LIBASYNC_EXPORT threadpool_scheduler(threadpool_scheduler &&other);
// Create a thread pool with the given number of threads
LIBASYNC_EXPORT threadpool_scheduler(std::size_t num_threads);
// Create a thread pool with the given number of threads. Call the `prerun`
// function before the execution loop and `postrun` after it.
LIBASYNC_EXPORT threadpool_scheduler(std::size_t num_threads,
std::function<void()>&& prerun_,
std::function<void()>&& postrun_);
LIBASYNC_EXPORT
threadpool_scheduler(std::size_t num_threads, std::function<void()> &&prerun_, std::function<void()> &&postrun_);
// Destroy the thread pool; tasks that haven't been started are dropped
LIBASYNC_EXPORT ~threadpool_scheduler();
@@ -153,5 +160,5 @@ namespace detail {
// Work-around for Intel compiler handling decltype poorly in function returns
typedef std::remove_reference<decltype(::async::default_scheduler())>::type default_scheduler_type;
} // namespace detail
} // namespace async
}// namespace detail
}// namespace async
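For context on the guard added above: LIBASYNC_CUSTOM_WAIT_FOR_TASK mirrors the existing LIBASYNC_CUSTOM_DEFAULT_SCHEDULER hook, letting an application declare its own async::detail::wait_for_task() before including the library and supply the definition itself. A minimal sketch of the opt-in pattern, assuming a hypothetical scheduler type MyScheduler (only the macro names and the wait_for_task signature come from the header above):

// Sketch only: both customization points opted into before the include.
// MyScheduler is a hypothetical placeholder; its schedule(task_run_handle)
// member is what is_scheduler<T> above detects.
#define LIBASYNC_CUSTOM_DEFAULT_SCHEDULER
#define LIBASYNC_CUSTOM_WAIT_FOR_TASK
namespace myapp { class MyScheduler; }
namespace async {
myapp::MyScheduler &default_scheduler();  // must be declared before the include
namespace detail {
class task_base;
void wait_for_task(task_base *wait_task); // replaces the built-in blocking wait
}// namespace detail
}// namespace async
#include <async++.h>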

View File

@@ -141,6 +141,9 @@ if(SLED_BUILD_TESTS)
src/system/thread_pool_test.cc
src/rx_test.cc
src/uri_test.cc)
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
target_compile_options(sled_tests PRIVATE -Wthread-safety)
endif()
target_link_libraries(sled_tests PRIVATE sled GTest::gtest GTest::gtest_main)
add_test(NAME sled_tests COMMAND sled_tests)
endif(SLED_BUILD_TESTS)
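The -Wthread-safety flag enables Clang's thread safety analysis, which statically checks the capability annotations (SLED_LOCKABLE, SLED_ACQUIRE, SLED_RELEASE, and friends) introduced in the mutex header later in this commit. A self-contained sketch of the kind of error it reports, written with the raw Clang attributes that the SLED_* macros presumably wrap:

// Sketch: what -Wthread-safety flags. Raw Clang attributes are used here so
// the example stands alone.
#include <mutex>

class __attribute__((capability("mutex"))) Mutex {
public:
    void Lock() __attribute__((acquire_capability())) { m_.lock(); }
    void Unlock() __attribute__((release_capability())) { m_.unlock(); }
private:
    std::mutex m_;
};

class Counter {
public:
    void Increment()
    {
        ++value_;  // warning: writing variable 'value_' requires holding mutex 'mu_'
    }
private:
    Mutex mu_;
    int value_ __attribute__((guarded_by(mu_))) = 0;
};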

View File

@@ -3,18 +3,28 @@
namespace sled {
class FiberScheduler;
}
namespace async {
sled::FiberScheduler &default_scheduler();
}
class task_base;
namespace detail {
void wait_for_task(task_base *wait_task);
}
}// namespace async
#define LIBASYNC_CUSTON_EVENT
#define LIBASYNC_CUSTOM_DEFAULT_SCHEDULER
#include <async++.h>
namespace sled {
void SleepWaitHandler(async::task_wait_handle t);
class FiberScheduler {
public:
FiberScheduler();
void schedule(async::task_run_handle t);
};
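Since this header defines LIBASYNC_CUSTOM_DEFAULT_SCHEDULER before including async++.h, plain Async++ calls are routed through FiberScheduler::schedule() without naming the scheduler at the call site. A hedged usage sketch (the include path is a hypothetical placeholder):

#include "sled/async/async.h"  // hypothetical path for the header above

void Example()
{
    // spawn() with no explicit scheduler uses async::default_scheduler(),
    // i.e. the FiberScheduler declared above; get() blocks through the
    // wait_for_task override defined in the .cc file.
    auto t = async::spawn([] { return 40; }).then([](int v) { return v + 2; });
    int result = t.get();  // == 42
    (void) result;
}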

View File

@@ -33,28 +33,29 @@ struct HasLockAndUnlock {
};
}// namespace internal
using Mutex = marl::mutex;
// using Mutex = marl::mutex;
// class Mutex final {
// public:
// Mutex() = default;
// Mutex(const Mutex &) = delete;
// Mutex &operator=(const Mutex &) = delete;
//
// inline void Lock() { impl_.lock(); };
//
// inline bool TryLock() { return impl_.try_lock(); }
//
// inline void AssertHeld() {}
//
// inline void Unlock() { impl_.unlock(); }
//
// private:
// std::mutex impl_;
// friend class ConditionVariable;
// };
class SLED_LOCKABLE Mutex final {
public:
Mutex() = default;
Mutex(const Mutex &) = delete;
Mutex &operator=(const Mutex &) = delete;
class RecursiveMutex final {
inline void Lock() SLED_EXCLUSIVE_LOCK_FUNCTION(impl_) { impl_.lock(); };
inline bool TryLock() SLED_EXCLUSIVE_TRYLOCK_FUNCTION(true) { return impl_.try_lock(); }
inline void AssertHeld() SLED_ASSERT_EXCLUSIVE_LOCK(impl_) {}
inline void Unlock() SLED_UNLOCK_FUNCTION(impl_) { impl_.unlock(); }
private:
marl::mutex impl_;
friend class ConditionVariable;
friend class MutexLock;
};
class SLED_LOCKABLE RecursiveMutex final {
public:
RecursiveMutex() = default;
RecursiveMutex(const RecursiveMutex &) = delete;
@@ -72,17 +73,14 @@ private:
std::recursive_mutex impl_;
};
class RecursiveMutexLock final {
class SLED_SCOPED_CAPABILITY RecursiveMutexLock final {
public:
RecursiveMutexLock(const RecursiveMutexLock &) = delete;
RecursiveMutexLock &operator=(const RecursiveMutexLock &) = delete;
explicit RecursiveMutexLock(RecursiveMutex *mutex) SLED_EXCLUSIVE_LOCK_FUNCTION(mutex) : mutex_(mutex)
{
mutex->Lock();
}
explicit RecursiveMutexLock(RecursiveMutex *mutex) SLED_ACQUIRE_SHARED(mutex) : mutex_(mutex) { mutex->Lock(); }
~RecursiveMutexLock() SLED_UNLOCK_FUNCTION() { mutex_->Unlock(); }
~RecursiveMutexLock() SLED_RELEASE_SHARED(mutex_) { mutex_->Unlock(); }
private:
RecursiveMutex *mutex_;
@@ -101,11 +99,11 @@ private:
// friend class ConditionVariable;
// };
//
class MutexLock final {
class SLED_SCOPED_CAPABILITY MutexLock final {
public:
MutexLock(Mutex *mutex) SLED_EXCLUSIVE_LOCK_FUNCTION(mutex) : lock_(*mutex) {}
MutexLock(Mutex *mutex) SLED_ACQUIRE(mutex) : lock_(mutex->impl_) {}
~MutexLock() SLED_UNLOCK_FUNCTION() = default;
~MutexLock() SLED_RELEASE() = default;
MutexLock(const MutexLock &) = delete;
MutexLock &operator=(const MutexLock &) = delete;
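SLED_SCOPED_CAPABILITY on the two lock guards is what lets the analysis model RAII locking: the constructor counts as acquiring the mutex and the destructor as releasing it. A caller-side sketch (names are illustrative; assumes the types live in namespace sled as elsewhere in the project):

// Sketch: RAII locking the analysis can follow end to end.
sled::Mutex g_mu;
int g_counter = 0;  // a real field would also carry a guarded_by-style annotation

void Bump()
{
    sled::MutexLock lock(&g_mu);  // SLED_ACQUIRE: g_mu is held from here
    ++g_counter;
}                                 // SLED_RELEASE: released when lock is destroyed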

View File

@@ -5,15 +5,6 @@
// clang-format off
#include <async++.h>
// clang-format on
namespace async {
sled::FiberScheduler &
default_scheduler()
{
static sled::FiberScheduler scheduler;
return scheduler;
}
}// namespace async
namespace sled {
@@ -25,13 +16,33 @@ SleepWaitHandler(async::task_wait_handle t)
event.Wait(sled::Event::kForever);
}
FiberScheduler::FiberScheduler()
{
}
void
FiberScheduler::schedule(async::task_run_handle t)
{
static ThreadPool thread_pool;
auto move_on_copy = sled::MakeMoveOnCopy(t);
// thread_pool.PostTask([move_on_copy] { move_on_copy.value.run_with_wait_handler(SleepWaitHandler); });
thread_pool.submit([move_on_copy] { move_on_copy.value.run_with_wait_handler(SleepWaitHandler); });
// thread_pool.submit([move_on_copy] { move_on_copy.value.run(); });
}
}// namespace sled
// clang-format on
namespace async {
sled::FiberScheduler &
default_scheduler()
{
static sled::FiberScheduler scheduler;
return scheduler;
}
void
detail::wait_for_task(task_base *wait_task)
{
sled::SleepWaitHandler(task_wait_handle(wait_task));
}
}// namespace async
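Only the event.Wait(sled::Event::kForever) line of SleepWaitHandler is visible in the hunk above. For readability, a hedged reconstruction of the usual wait-handler shape (the on_finish/Set() pairing is assumed, not shown in this diff):

// Hedged sketch of the wait handler routed to by detail::wait_for_task above.
void SleepWaitHandler(async::task_wait_handle t)
{
    sled::Event event;
    t.on_finish([&] { event.Set(); });  // async++ fires this when the task completes
    event.Wait(sled::Event::kForever);  // the Wait() call visible in the diff
}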

View File

@@ -28,3 +28,12 @@ TEST(Async, parallel_for)
// wg.Wait();
for (int i = 0; i < count; i++) { EXPECT_TRUE(values[i]) << i; }
}
TEST(Async, parallel_reduce)
{
auto r = async::parallel_reduce(async::irange(1, 5), 0, [](int x, int y) {
LOGD("", "{},{}", x, y);
return x + y;
});
LOGD("", "{}", r);
}
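The new parallel_reduce test only logs the result. Assuming async::irange(1, 5) is half-open (yielding 1, 2, 3, 4), the reduction with an initial value of 0 should produce 10, so a self-checking assertion could look like this (suggestion only, not part of the commit):

EXPECT_EQ(10, r);  // 0 + (1 + 2 + 3 + 4), assuming irange(1, 5) excludes 5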