thread_isolate() and thread_isolate_full() were relying on a set of thread masks for all threads in different states (rdv, harmless, idle). This cannot work anymore when the number of threads increases beyond LONGBITS so we need to change the mechanism.

What is done here is to have a counter of requesters and the number of the current isolated thread. Threads which want to isolate themselves increment the request counter and wait for all threads to be marked harmless (or idle) by scanning all groups and watching the respective masks. This is possible because threads cannot escape once they discover this counter, unless they also want to isolate and possibly pass first. Once all threads are harmless, the requesting thread tries to self-assign the isolated thread number, and if it fails it loops back to checking all threads. If it wins it's guaranteed to be alone, and can drop its harmless bit, so that other competing threads go back to the loop waiting for all threads to be harmless. The benefit of proceeding this way is that there's very little write contention on the thread number (none during work), hence no cache line moves between caches, thus frozen threads do not slow down the isolated one.

Once it's done, the isolated thread resets the thread number (hence lets another thread take the place) and decrements the requester count, thus possibly releasing all harmless threads.

With this change there's no more need for any global mask to synchronize any thread, and we only need to loop over a number of groups to check 64 threads at a time per iteration. As such, tinfo's threads_want_rdv could be dropped.

This was tested with 64 threads spread into 2 groups, running 64 tasks (from the debug dev command), 20 "show sess" (thread_isolate()), 20 "add server blah/blah" (thread_isolate()), and 20 "del server blah/blah" (thread_isolate_full()). The load remained very low (limited by external socat forks) and no stuck or starved threads were found.
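
For illustration, here is a minimal sketch of that rendez-vous protocol in C, using the rdv_requests and isolated_thread variables exported by the header below. The all_threads_harmless() helper, the group field names it scans, and the use of ~0U as the "nobody is isolated" marker are assumptions made for readability, not the exact code from thread.c:

/* returns non-zero once every enabled thread of every group is harmless or
 * idle (hypothetical helper for this sketch).
 */
static int all_threads_harmless(void)
{
	int grp;

	for (grp = 0; grp < global.nbtgroups; grp++) {
		ulong enabled  = _HA_ATOMIC_LOAD(&ha_tgroup_info[grp].threads_enabled);
		ulong harmless = _HA_ATOMIC_LOAD(&ha_tgroup_ctx[grp].threads_harmless) |
		                 _HA_ATOMIC_LOAD(&ha_tgroup_ctx[grp].threads_idle);

		if ((harmless & enabled) != enabled)
			return 0;
	}
	return 1;
}

void thread_isolate_sketch(void)
{
	uint expected;

	/* advertise the request: harmless threads can no longer escape */
	HA_ATOMIC_INC(&rdv_requests);

	/* mark ourselves harmless so that a competing requester can win */
	HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);

	while (1) {
		/* scan all groups, checking 64 threads per iteration */
		while (!all_threads_harmless())
			ha_thread_relax();

		/* try to self-assign the isolated thread number; on failure
		 * another requester won, so loop back to checking all threads.
		 */
		expected = ~0U;
		if (HA_ATOMIC_CAS(&isolated_thread, &expected, tid))
			break;
	}

	/* we won and are guaranteed to be alone: drop our harmless bit so
	 * that other competing requesters wait for us in the loop above.
	 */
	HA_ATOMIC_AND(&tg_ctx->threads_harmless, ~ti->ltid_bit);
}

void thread_release_sketch(void)
{
	/* let another requester take the place, then possibly release all
	 * harmless threads once no request remains.
	 */
	HA_ATOMIC_STORE(&isolated_thread, ~0U);
	HA_ATOMIC_DEC(&rdv_requests);
}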

/*
 * include/haproxy/thread.h
 * definitions, macros and inline functions used by threads.
 *
 * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
 * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _HAPROXY_THREAD_H
#define _HAPROXY_THREAD_H

#include <haproxy/api.h>
#include <haproxy/thread-t.h>
#include <haproxy/tinfo.h>

/* Note: this file mainly contains 5 sections:
 *   - a small common part, which also corresponds to the common API
 *   - one used solely when USE_THREAD is *not* set
 *   - one used solely when USE_THREAD is set
 *   - one used solely when USE_THREAD is set WITHOUT debugging
 *   - one used solely when USE_THREAD is set WITH debugging
 */

/* Generic exports */
int parse_nbthread(const char *arg, char **err);
void ha_tkill(unsigned int thr, int sig);
void ha_tkillall(int sig);
void ha_thread_relax(void);
int thread_map_to_groups();
int thread_resolve_group_mask(uint igid, ulong imask, uint *ogid, ulong *omask, char **err);
extern int thread_cpus_enabled_at_boot;

#ifndef USE_THREAD

/********************** THREADS DISABLED ************************/

/* Only way found to replace variables with constants that are optimized away
 * at build time.
 */
enum { all_threads_mask = 1UL };
enum { all_tgroups_mask = 1UL };
enum { tid_bit = 1UL };
enum { tid = 0 };
enum { tgid = 1 };

#define HA_SPIN_INIT(l)               do { /* do nothing */ } while(0)
#define HA_SPIN_DESTROY(l)            do { /* do nothing */ } while(0)
#define HA_SPIN_LOCK(lbl, l)          do { /* do nothing */ } while(0)
#define HA_SPIN_TRYLOCK(lbl, l)       ({ 0; })
#define HA_SPIN_UNLOCK(lbl, l)        do { /* do nothing */ } while(0)

#define HA_RWLOCK_INIT(l)             do { /* do nothing */ } while(0)
#define HA_RWLOCK_DESTROY(l)          do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRLOCK(lbl, l)      do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYWRLOCK(lbl, l)   ({ 0; })
#define HA_RWLOCK_WRUNLOCK(lbl, l)    do { /* do nothing */ } while(0)
#define HA_RWLOCK_RDLOCK(lbl, l)      do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYRDLOCK(lbl, l)   ({ 0; })
#define HA_RWLOCK_RDUNLOCK(lbl, l)    do { /* do nothing */ } while(0)

#define HA_RWLOCK_SKLOCK(lbl,l)       do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKTOWR(lbl,l)       do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRTOSK(lbl,l)       do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKTORD(lbl,l)       do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRTORD(lbl,l)       do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKUNLOCK(lbl,l)     do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYSKLOCK(lbl,l)    ({ 0; })
#define HA_RWLOCK_TRYRDTOSK(lbl,l)    ({ 0; })

#define ha_sigmask(how, set, oldset)  sigprocmask(how, set, oldset)

/* Sets the current thread to a valid one described by <thr>, or to any thread
 * and any group if NULL (e.g. for use during boot where they're not totally
 * initialized).
 */
static inline void ha_set_thread(const struct thread_info *thr)
{
	if (thr) {
		ti     = thr;
		tg     = ti->tg;
		th_ctx = &ha_thread_ctx[ti->tid];
	} else {
		ti     = &ha_thread_info[0];
		tg     = &ha_tgroup_info[0];
		th_ctx = &ha_thread_ctx[0];
	}
}

static inline void thread_idle_now()
{
}

static inline void thread_idle_end()
{
}

static inline void thread_harmless_now()
{
}

static inline void thread_harmless_end()
{
}

static inline void thread_isolate()
{
}

static inline void thread_isolate_full()
{
}

static inline void thread_release()
{
}

static inline unsigned long thread_isolated()
{
	return 1;
}

static inline void setup_extra_threads(void *(*handler)(void *))
{
}

static inline void wait_for_threads_completion()
{
}

static inline void set_thread_cpu_affinity()
{
}

static inline unsigned long long ha_get_pthread_id(unsigned int thr)
{
	return 0;
}

#else /* !USE_THREAD */

/********************** THREADS ENABLED ************************/

#include <import/plock.h>

void thread_harmless_till_end(void);
void thread_isolate(void);
void thread_isolate_full(void);
void thread_release(void);
void ha_spin_init(HA_SPINLOCK_T *l);
void ha_rwlock_init(HA_RWLOCK_T *l);
void setup_extra_threads(void *(*handler)(void *));
void wait_for_threads_completion();
void set_thread_cpu_affinity();
unsigned long long ha_get_pthread_id(unsigned int thr);

extern volatile unsigned long all_threads_mask;
extern volatile unsigned long all_tgroups_mask;
extern volatile unsigned int rdv_requests;
extern volatile unsigned int isolated_thread;
extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
extern THREAD_LOCAL unsigned int  tid;     /* The thread id */
extern THREAD_LOCAL unsigned int  tgid;    /* The thread group id (starts at 1) */

#define ha_sigmask(how, set, oldset)  pthread_sigmask(how, set, oldset)

/* Sets the current thread to a valid one described by <thr>, or to any thread
 * and any group if NULL (e.g. for use during boot where they're not totally
 * initialized).
 */
static inline void ha_set_thread(const struct thread_info *thr)
{
	if (thr) {
		BUG_ON(!thr->ltid_bit);
		BUG_ON(!thr->tg);
		BUG_ON(!thr->tgid);

		ti      = thr;
		tg      = thr->tg;
		tid     = thr->tid;
		tgid    = thr->tgid;
		tid_bit = 1UL << tid; /* FIXME: must become thr->ltid_bit */
		th_ctx  = &ha_thread_ctx[tid];
		tg_ctx  = &ha_tgroup_ctx[tgid-1];
	} else {
		tgid    = 1;
		tid     = 0;
		tid_bit = 1;
		ti      = &ha_thread_info[0];
		tg      = &ha_tgroup_info[0];
		th_ctx  = &ha_thread_ctx[0];
		tg_ctx  = &ha_tgroup_ctx[0];
	}
}

/* Marks the thread as idle, which means that not only it's not doing anything
 * dangerous, but in addition it has not started anything sensitive either.
 * This essentially means that the thread currently is in the poller, thus
 * outside of any execution block. Needs to be terminated using
 * thread_idle_end(). This is needed to release a concurrent call to
 * thread_isolate_full().
 */
static inline void thread_idle_now()
{
	HA_ATOMIC_OR(&tg_ctx->threads_idle, ti->ltid_bit);
}

/* Ends the idle period started by thread_idle_now(), i.e. the thread is
 * about to restart engaging in sensitive operations. This must not be done on
 * a thread marked harmless, as it could cause a deadlock between another
 * thread waiting for idle again and thread_harmless_end() in this thread.
 *
 * The right sequence is thus:
 *    thread_idle_now();
 *    thread_harmless_now();
 *    poll();
 *    thread_harmless_end();
 *    thread_idle_end();
 */
static inline void thread_idle_end()
{
	HA_ATOMIC_AND(&tg_ctx->threads_idle, ~ti->ltid_bit);
}

/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
 * not be touching any unprotected shared resource during this period. Usually
 * this is called before poll(), but it may also be placed around very slow
 * calls (eg: some crypto operations). Needs to be terminated using
 * thread_harmless_end().
 */
static inline void thread_harmless_now()
{
	HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);
}

/* Ends the harmless period started by thread_harmless_now(). Usually this is
 * placed after the poll() call. If it is discovered that a job was running and
 * is relying on the thread still being harmless, the thread waits for the
 * other one to finish.
 */
static inline void thread_harmless_end()
{
	while (1) {
		HA_ATOMIC_AND(&tg_ctx->threads_harmless, ~ti->ltid_bit);
		if (likely(_HA_ATOMIC_LOAD(&rdv_requests) == 0))
			break;
		thread_harmless_till_end();
	}
}
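
/* For illustration only: a minimal event-loop iteration following the
 * sequence documented above. The do_poll() call is a hypothetical stand-in
 * for the real poller entry point.
 */
static inline void loop_iteration_sketch(void)
{
	thread_idle_now();      /* nothing sensitive has been started yet */
	thread_harmless_now();  /* not touching any shared resource anymore */
	/* do_poll(timeout); */ /* may sleep; an isolated thread may now work */
	thread_harmless_end();  /* may wait here while a thread is isolated */
	thread_idle_end();      /* back to normal, sensitive processing */
}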

/* an isolated thread has its ID in isolated_thread */
static inline unsigned long thread_isolated()
{
	return _HA_ATOMIC_LOAD(&isolated_thread) == tid;
}
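
/* For illustration only: the typical pattern for a change that must not run
 * concurrently with any other thread (the function name is hypothetical).
 */
static inline void update_shared_state_sketch(void)
{
	thread_isolate();            /* wait for all others to be harmless */
	BUG_ON(!thread_isolated());  /* we now own the isolated thread number */
	/* ... safely walk or modify shared structures without locks ... */
	thread_release();            /* wake waiting requesters and threads */
}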

/* Returns 1 if the cpu set is currently restricted for the process else 0.
 * Currently only implemented for the Linux platform.
 */
int thread_cpu_mask_forced(void);

#if !defined(DEBUG_THREAD) && !defined(DEBUG_FULL)

/* Thread debugging is DISABLED, these are the regular locking functions */

#define HA_SPIN_INIT(l)            ({ (*l) = 0; })
#define HA_SPIN_DESTROY(l)         ({ (*l) = 0; })
#define HA_SPIN_LOCK(lbl, l)       pl_take_s(l)
#define HA_SPIN_TRYLOCK(lbl, l)    (!pl_try_s(l))
#define HA_SPIN_UNLOCK(lbl, l)     pl_drop_s(l)

#define HA_RWLOCK_INIT(l)          ({ (*l) = 0; })
#define HA_RWLOCK_DESTROY(l)       ({ (*l) = 0; })
#define HA_RWLOCK_WRLOCK(lbl,l)    pl_take_w(l)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) (!pl_try_w(l))
#define HA_RWLOCK_WRUNLOCK(lbl,l)  pl_drop_w(l)
#define HA_RWLOCK_RDLOCK(lbl,l)    pl_take_r(l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) (!pl_try_r(l))
#define HA_RWLOCK_RDUNLOCK(lbl,l)  pl_drop_r(l)

/* rwlock upgrades via seek locks */
#define HA_RWLOCK_SKLOCK(lbl,l)    pl_take_s(l)      /* N --> S */
#define HA_RWLOCK_SKTOWR(lbl,l)    pl_stow(l)        /* S --> W */
#define HA_RWLOCK_WRTOSK(lbl,l)    pl_wtos(l)        /* W --> S */
#define HA_RWLOCK_SKTORD(lbl,l)    pl_stor(l)        /* S --> R */
#define HA_RWLOCK_WRTORD(lbl,l)    pl_wtor(l)        /* W --> R */
#define HA_RWLOCK_SKUNLOCK(lbl,l)  pl_drop_s(l)      /* S --> N */
#define HA_RWLOCK_TRYSKLOCK(lbl,l) (!pl_try_s(l))    /* N -?> S */
#define HA_RWLOCK_TRYRDTOSK(lbl,l) (!pl_try_rtos(l)) /* R -?> S */
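
/* For illustration only: declaring and using these locks. The label (here
 * OTHER_LOCK) is dropped by the macros in this build but feeds the lock
 * profiling in debug builds. The rwlock part walks the seek-lock upgrade
 * path N --> S --> W --> R --> N commented above. Zero-initialized statics
 * are equivalent to HA_SPIN_INIT()/HA_RWLOCK_INIT() here.
 */
static inline void lock_usage_sketch(void)
{
	static HA_SPINLOCK_T spin_sketch;
	static HA_RWLOCK_T rwlock_sketch;

	HA_SPIN_LOCK(OTHER_LOCK, &spin_sketch);
	/* ... short exclusive critical section ... */
	HA_SPIN_UNLOCK(OTHER_LOCK, &spin_sketch);

	HA_RWLOCK_SKLOCK(OTHER_LOCK, &rwlock_sketch);   /* N --> S: reserve write access */
	HA_RWLOCK_SKTOWR(OTHER_LOCK, &rwlock_sketch);   /* S --> W: wait for readers to leave */
	/* ... modify the protected structure ... */
	HA_RWLOCK_WRTORD(OTHER_LOCK, &rwlock_sketch);   /* W --> R: downgrade, keep reading */
	HA_RWLOCK_RDUNLOCK(OTHER_LOCK, &rwlock_sketch); /* R --> N: fully released */
}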

#else /* !defined(DEBUG_THREAD) && !defined(DEBUG_FULL) */

/* Thread debugging is ENABLED, these are the instrumented functions */

#define __SPIN_INIT(l)         ({ (*l) = 0; })
#define __SPIN_DESTROY(l)      ({ (*l) = 0; })
#define __SPIN_LOCK(l)         pl_take_s(l)
#define __SPIN_TRYLOCK(l)      (!pl_try_s(l))
#define __SPIN_UNLOCK(l)       pl_drop_s(l)

#define __RWLOCK_INIT(l)       ({ (*l) = 0; })
#define __RWLOCK_DESTROY(l)    ({ (*l) = 0; })
#define __RWLOCK_WRLOCK(l)     pl_take_w(l)
#define __RWLOCK_TRYWRLOCK(l)  (!pl_try_w(l))
#define __RWLOCK_WRUNLOCK(l)   pl_drop_w(l)
#define __RWLOCK_RDLOCK(l)     pl_take_r(l)
#define __RWLOCK_TRYRDLOCK(l)  (!pl_try_r(l))
#define __RWLOCK_RDUNLOCK(l)   pl_drop_r(l)

/* rwlock upgrades via seek locks */
#define __RWLOCK_SKLOCK(l)     pl_take_s(l)      /* N --> S */
#define __RWLOCK_SKTOWR(l)     pl_stow(l)        /* S --> W */
#define __RWLOCK_WRTOSK(l)     pl_wtos(l)        /* W --> S */
#define __RWLOCK_SKTORD(l)     pl_stor(l)        /* S --> R */
#define __RWLOCK_WRTORD(l)     pl_wtor(l)        /* W --> R */
#define __RWLOCK_SKUNLOCK(l)   pl_drop_s(l)      /* S --> N */
#define __RWLOCK_TRYSKLOCK(l)  (!pl_try_s(l))    /* N -?> S */
#define __RWLOCK_TRYRDTOSK(l)  (!pl_try_rtos(l)) /* R -?> S */

#define HA_SPIN_INIT(l)            __spin_init(l)
#define HA_SPIN_DESTROY(l)         __spin_destroy(l)

#define HA_SPIN_LOCK(lbl, l)       __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_TRYLOCK(lbl, l)    __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_UNLOCK(lbl, l)     __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)

#define HA_RWLOCK_INIT(l)          __ha_rwlock_init((l))
#define HA_RWLOCK_DESTROY(l)       __ha_rwlock_destroy((l))
#define HA_RWLOCK_WRLOCK(lbl,l)    __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRUNLOCK(lbl,l)  __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_RDLOCK(lbl,l)    __ha_rwlock_rdlock(lbl, l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
#define HA_RWLOCK_RDUNLOCK(lbl,l)  __ha_rwlock_rdunlock(lbl, l)

#define HA_RWLOCK_SKLOCK(lbl,l)    __ha_rwlock_sklock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_SKTOWR(lbl,l)    __ha_rwlock_sktowr(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRTOSK(lbl,l)    __ha_rwlock_wrtosk(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_SKTORD(lbl,l)    __ha_rwlock_sktord(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRTORD(lbl,l)    __ha_rwlock_wrtord(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_SKUNLOCK(lbl,l)  __ha_rwlock_skunlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYSKLOCK(lbl,l) __ha_rwlock_trysklock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYRDTOSK(lbl,l) __ha_rwlock_tryrdtosk(lbl, l, __func__, __FILE__, __LINE__)

/* WARNING!!! if you update this enum, please also keep lock_label() up to date
 * below.
 */
enum lock_label {
	TASK_RQ_LOCK,
	TASK_WQ_LOCK,
	LISTENER_LOCK,
	PROXY_LOCK,
	SERVER_LOCK,
	LBPRM_LOCK,
	SIGNALS_LOCK,
	STK_TABLE_LOCK,
	STK_SESS_LOCK,
	APPLETS_LOCK,
	PEER_LOCK,
	SHCTX_LOCK,
	SSL_LOCK,
	SSL_GEN_CERTS_LOCK,
	PATREF_LOCK,
	PATEXP_LOCK,
	VARS_LOCK,
	COMP_POOL_LOCK,
	LUA_LOCK,
	NOTIF_LOCK,
	SPOE_APPLET_LOCK,
	DNS_LOCK,
	PID_LIST_LOCK,
	EMAIL_ALERTS_LOCK,
	PIPES_LOCK,
	TLSKEYS_REF_LOCK,
	AUTH_LOCK,
	LOGSRV_LOCK,
	DICT_LOCK,
	PROTO_LOCK,
	QUEUE_LOCK,
	CKCH_LOCK,
	SNI_LOCK,
	SSL_SERVER_LOCK,
	SFT_LOCK, /* sink forward target */
	IDLE_CONNS_LOCK,
	QUIC_LOCK,
	OTHER_LOCK,
	/* WT: make sure never to use these ones outside of development,
	 * we need them for lock profiling!
	 */
	DEBUG1_LOCK,
	DEBUG2_LOCK,
	DEBUG3_LOCK,
	DEBUG4_LOCK,
	DEBUG5_LOCK,
	LOCK_LABELS
};

/* Following functions are used to collect some stats about locks. We wrap
 * pthread functions to know how much time we wait in a lock. */

void show_lock_stats();
void __ha_rwlock_init(struct ha_rwlock *l);
void __ha_rwlock_destroy(struct ha_rwlock *l);
void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line);
void __ha_rwlock_wrunlock(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line);
void __ha_rwlock_rdlock(enum lock_label lbl, struct ha_rwlock *l);
int __ha_rwlock_tryrdlock(enum lock_label lbl, struct ha_rwlock *l);
void __ha_rwlock_rdunlock(enum lock_label lbl, struct ha_rwlock *l);
void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_wrtosk(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_sklock(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_sktowr(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_sktord(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_skunlock(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line);
int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line);
int __ha_rwlock_tryrdtosk(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line);
void __spin_init(struct ha_spinlock *l);
void __spin_destroy(struct ha_spinlock *l);
void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
                 const char *func, const char *file, int line);
int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
                   const char *func, const char *file, int line);
void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
                   const char *func, const char *file, int line);

#endif /* DEBUG_THREAD */

#endif /* USE_THREAD */

/* returns a mask if set, otherwise all_threads_mask */
static inline unsigned long thread_mask(unsigned long mask)
{
	return mask ? mask : all_threads_mask;
}

#endif /* _HAPROXY_THREAD_H */