diff --git a/include/proto/fd.h b/include/proto/fd.h
index a3ec5e854..7123377f9 100644
--- a/include/proto/fd.h
+++ b/include/proto/fd.h
@@ -289,9 +289,11 @@ static inline void fd_stop_recv(int fd)
 	if ((old ^ new) & FD_EV_POLLED_R)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Disable processing send events on fd */
@@ -310,9 +312,11 @@ static inline void fd_stop_send(int fd)
 	if ((old ^ new) & FD_EV_POLLED_W)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Disable processing of events on fd for both directions. */
@@ -331,9 +335,11 @@ static inline void fd_stop_both(int fd)
 	if ((old ^ new) & FD_EV_POLLED_RW)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD cannot receive anymore without polling (EAGAIN detected). */
@@ -353,9 +359,11 @@ static inline void fd_cant_recv(const int fd)
 	if ((old ^ new) & FD_EV_POLLED_R)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD can receive anymore without polling. */
@@ -364,9 +372,11 @@ static inline void fd_may_recv(const int fd)
 	/* marking ready never changes polled status */
 	HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_R);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Disable readiness when polled. This is useful to interrupt reading when it
@@ -390,9 +400,11 @@ static inline void fd_done_recv(const int fd)
 	if ((old ^ new) & FD_EV_POLLED_R)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD cannot send anymore without polling (EAGAIN detected). */
@@ -412,9 +424,11 @@ static inline void fd_cant_send(const int fd)
 	if ((old ^ new) & FD_EV_POLLED_W)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD can send anymore without polling (EAGAIN detected). */
@@ -423,9 +437,11 @@ static inline void fd_may_send(const int fd)
 	/* marking ready never changes polled status */
 	HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_W);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Prepare FD to try to receive */
@@ -445,9 +461,11 @@ static inline void fd_want_recv(int fd)
 	if ((old ^ new) & FD_EV_POLLED_R)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Prepare FD to try to send */
@@ -467,19 +485,23 @@ static inline void fd_want_send(int fd)
 	if ((old ^ new) & FD_EV_POLLED_W)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Update events seen for FD and its state if needed. This should be called
  * by the poller to set FD_POLL_* flags. */
 static inline void fd_update_events(int fd, int evts)
 {
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fdtab[fd].ev &= FD_POLL_STICKY;
 	fdtab[fd].ev |= evts;
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
 	if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
 		fd_may_recv(fd);
@@ -491,7 +513,8 @@ static inline void fd_update_events(int fd, int evts)
 /* Prepares for being polled */
 static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned long thread_mask)
 {
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fdtab[fd].owner = owner;
 	fdtab[fd].iocb = iocb;
 	fdtab[fd].ev = 0;
@@ -501,7 +524,8 @@ static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned
 	/* note: do not reset polled_mask here as it indicates which poller
 	 * still knows this FD from a possible previous round.
 	 */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* These are replacements for FD_SET, FD_CLR, FD_ISSET, working on uints */
diff --git a/src/fd.c b/src/fd.c
index 1a5419da3..c46748986 100644
--- a/src/fd.c
+++ b/src/fd.c
@@ -359,7 +359,10 @@ done:
  */
 static void fd_dodelete(int fd, int do_close)
 {
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	unsigned long locked = atleast2(fdtab[fd].thread_mask);
+
+	if (locked)
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (fdtab[fd].linger_risk) {
 		/* this is generally set when connecting to servers */
 		setsockopt(fd, SOL_SOCKET, SO_LINGER,
@@ -379,7 +382,8 @@ static void fd_dodelete(int fd, int do_close)
 		polled_mask[fd] = 0;
 		close(fd);
 	}
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (locked)
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Deletes an FD from the fdsets.
@@ -417,7 +421,7 @@ static inline void fdlist_process_cached_events(volatile struct fdlist *fdlist)
 			continue;
 
 		HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
-		if (HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
+		if (atleast2(fdtab[fd].thread_mask) && HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
 			activity[tid].fd_lock++;
 			continue;
 		}
@@ -432,12 +436,14 @@ static inline void fdlist_process_cached_events(volatile struct fdlist *fdlist)
 			fdtab[fd].ev |= FD_POLL_OUT;
 
 		if (fdtab[fd].iocb && fdtab[fd].owner && fdtab[fd].ev) {
-			HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+			if (atleast2(fdtab[fd].thread_mask))
+				HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 			fdtab[fd].iocb(fd);
 		}
 		else {
 			fd_release_cache_entry(fd);
-			HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+			if (atleast2(fdtab[fd].thread_mask))
+				HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 		}
 	}
 }
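
Note: every site in this patch hinges on atleast2() returning non-zero only when the FD's thread_mask has two or more bits set, i.e. when the FD may be touched by more than one thread and the per-FD spinlock is still required; an FD owned by a single thread skips the lock entirely. The snippet below is a minimal, illustrative sketch of that test, assuming atleast2() is the usual "x & (x - 1)" bit trick; the program, mask values and output strings are made up for demonstration and are not part of the patch.

/* Illustrative sketch only -- not part of the patch. Assumes atleast2()
 * clears the lowest set bit, so the result is non-zero iff at least two
 * bits are set in x.
 */
#include <stdio.h>

#define atleast2(x) ((x) & ((x) - 1))

int main(void)
{
	unsigned long single = 0x4UL;         /* FD bound to one thread only */
	unsigned long shared = 0x4UL | 0x8UL; /* FD shared by two threads */

	/* one bit set: no other thread can race on this FD, the lock is skipped */
	printf("single -> %s\n", atleast2(single) ? "lock" : "no lock");

	/* two or more bits set: another thread may touch the cache, keep the lock */
	printf("shared -> %s\n", atleast2(shared) ? "lock" : "no lock");
	return 0;
}

The test is a couple of branch-free instructions, which is what makes it cheap enough to guard every lock/unlock pair in the fast path.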