Bug #1202 » 0001-Fix-lttng-perf-counter-deadlock.patch

Mathieu Desnoyers, 10/09/2019 12:44 PM

liblttng-ust/lttng-context-perf-counters.c
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"
#include "lttng-tracer-core.h"
......
static pthread_key_t perf_counter_key;
/*
* lttng_perf_lock - Protect lttng-ust perf counter data structures
*
* Nests within the ust_lock, and therefore within the libc dl lock.
* Consequently, we need to fix up the TLS before nesting into this lock.
* Nests inside RCU bp read-side lock. Protects against concurrent
* fork.
*/
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
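(Reader's note, not part of the patch.) The comment above pins down the nesting order: ust_perf_mutex is always the innermost lock. A minimal sketch of a conforming caller, assuming lttng-ust's usual ust_lock()/ust_unlock() pattern, where ust_lock() returns nonzero when tracing is shutting down:
/* Hypothetical caller, shown only to illustrate the nesting order. */
static void example_perf_data_update(void)
{
	if (ust_lock())		/* libc dl lock -> ust_lock ... */
		goto end;
	lttng_perf_lock();	/* ... -> ust_perf_mutex (innermost) */
	/* ... modify perf counter data structures ... */
	lttng_perf_unlock();
end:
	ust_unlock();
}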
/*
* Cancel state when grabbing the ust_perf_mutex. Saved when locking,
* restored on unlock. Protected by ust_perf_mutex.
*/
static int ust_perf_saved_cancelstate;
/*
* Track whether we are tracing from a signal handler nested on an
* application thread.
*/
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
/*
* Force a read (implying a TLS fixup for dlopen) of TLS variables.
*/
void lttng_ust_fixup_perf_counter_tls(void)
{
asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}
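This forced read makes the dynamic linker allocate the TLS block for ust_perf_mutex_nest at a controlled point, rather than lazily on first access, which could otherwise trigger memory allocation at an async-signal-unsafe time. The same idiom works for any TLS variable; a generic sketch (illustrative name, relying on the urcu/tls-compat.h header already included above):
static DEFINE_URCU_TLS(int, some_tls_var);

void fixup_some_tls(void)
{
	/*
	 * The "m" input forces a read of the TLS variable; the empty
	 * asm body keeps the compiler from optimizing it away.
	 */
	asm volatile ("" : : "m" (URCU_TLS(some_tls_var)));
}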
void lttng_perf_lock(void)
{
sigset_t sig_all_blocked, orig_mask;
int ret, oldstate;
ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
if (ret) {
ERR("pthread_setcancelstate: %s", strerror(ret));
}
sigfillset(&sig_all_blocked);
ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
if (ret) {
ERR("pthread_sigmask: %s", strerror(ret));
}
if (!URCU_TLS(ust_perf_mutex_nest)++) {
/*
* Ensure the compiler doesn't move the store after the close()
* call in case close() would be marked as leaf.
*/
cmm_barrier();
pthread_mutex_lock(&ust_perf_mutex);
ust_perf_saved_cancelstate = oldstate;
}
ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
if (ret) {
ERR("pthread_sigmask: %s", strerror(ret));
}
}
void lttng_perf_unlock(void)
{
sigset_t sig_all_blocked, orig_mask;
int ret, newstate, oldstate;
bool restore_cancel = false;
sigfillset(&sig_all_blocked);
ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
if (ret) {
ERR("pthread_sigmask: %s", strerror(ret));
}
/*
* Ensure the compiler doesn't move the store before the close()
* call, in case close() would be marked as leaf.
*/
cmm_barrier();
if (!--URCU_TLS(ust_perf_mutex_nest)) {
newstate = ust_perf_saved_cancelstate;
restore_cancel = true;
pthread_mutex_unlock(&ust_perf_mutex);
}
ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
if (ret) {
ERR("pthread_sigmask: %s", strerror(ret));
}
if (restore_cancel) {
ret = pthread_setcancelstate(newstate, &oldstate);
if (ret) {
ERR("pthread_setcancelstate: %s", strerror(ret));
}
}
}
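The nesting counter is the heart of the fix: only the outermost acquisition on a given thread takes the mutex, so a signal handler that traces while its thread already holds the perf lock does not deadlock trying to take it again. A self-contained sketch of the idiom, with illustrative names and omitting the signal blocking and cancel-state handling done above:
#include <pthread.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int demo_nest;	/* per-thread nesting count */

static void demo_lock(void)
{
	if (!demo_nest++)	/* only the outermost acquisition locks */
		pthread_mutex_lock(&demo_mutex);
}

static void demo_unlock(void)
{
	if (!--demo_nest)	/* only the outermost release unlocks */
		pthread_mutex_unlock(&demo_mutex);
}

In the real functions, all signals are blocked around the counter update, which is what makes the read-modify-write of ust_perf_mutex_nest safe against a handler interrupting it.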
static
size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
......
* Note: thread_field->pc can be NULL if setup_perf() fails.
* Also, thread_field->fd can be -1 if open_perf_fd() fails.
*/
-ust_lock_nocheck();
+lttng_perf_lock();
cds_list_add_rcu(&thread_field->rcu_field_node,
&perf_thread->rcu_field_list);
cds_list_add(&thread_field->thread_field_node,
&perf_field->thread_field_list);
-ust_unlock();
+lttng_perf_unlock();
skip:
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
if (ret)
......
value->u.s64 = wrapper_perf_counter_read(field);
}
-/* Called with UST lock held */
+/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
struct lttng_perf_counter_thread_field *thread_field)
......
struct lttng_perf_counter_thread *perf_thread = _key;
struct lttng_perf_counter_thread_field *pos, *p;
-ust_lock_nocheck();
+lttng_perf_lock();
cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
rcu_field_node)
lttng_destroy_perf_thread_field(pos);
-ust_unlock();
+lttng_perf_unlock();
free(perf_thread);
}
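For context: this appears to be the destructor attached to perf_counter_key, so it runs at thread exit for every thread that registered per-thread perf fields. A minimal sketch of how such a destructor is wired up with the standard pthread API (illustrative names, not part of the patch):
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t demo_key;

/* Runs at thread exit when the thread's value for demo_key is non-NULL. */
static void demo_destructor(void *priv)
{
	free(priv);
}

static void demo_init(void)
{
	(void) pthread_key_create(&demo_key, demo_destructor);
}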
......
/*
* This put is performed when no threads can concurrently
* perform a "get", thanks to the urcu-bp grace
-* period.
+* period. Holding the lttng perf lock protects against
+* concurrent modification of the per-thread thread field
+* list.
*/
lttng_perf_lock();
cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
thread_field_node)
lttng_destroy_perf_thread_field(pos);
lttng_perf_unlock();
free(perf_field);
}
liblttng-ust/lttng-tracer-core.h
int lttng_context_is_app(const char *name);
void lttng_ust_fixup_tls(void);
#ifdef LTTNG_UST_HAVE_PERF_EVENT
void lttng_ust_fixup_perf_counter_tls(void);
void lttng_perf_lock(void);
void lttng_perf_unlock(void);
#else /* #ifdef LTTNG_UST_HAVE_PERF_EVENT */
static inline
void lttng_ust_fixup_perf_counter_tls(void)
{
}
static inline
void lttng_perf_lock(void)
{
}
static inline
void lttng_perf_unlock(void)
{
}
#endif /* #else #ifdef LTTNG_UST_HAVE_PERF_EVENT */
#endif /* _LTTNG_TRACER_CORE_H */
liblttng-ust/lttng-ust-comm.c
lttng_fixup_nest_count_tls();
lttng_fixup_procname_tls();
lttng_fixup_ust_mutex_nest_tls();
lttng_ust_fixup_perf_counter_tls();
lttng_ust_fixup_fd_tracker_tls();
}
......
ust_lock_nocheck();
urcu_bp_before_fork();
lttng_ust_lock_fd_tracker();
lttng_perf_lock();
}
static void ust_after_fork_common(sigset_t *restore_sigset)
......
int ret;
DBG("process %d", getpid());
lttng_perf_unlock();
lttng_ust_unlock_fd_tracker();
ust_unlock();
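Taken together, the fork hooks acquire every tracer lock (ust lock, urcu-bp, fd tracker, and now the perf lock) before fork() and release them afterwards in reverse order, so the child never inherits a mutex that some other thread held at fork time. A generic sketch of that discipline using the standard pthread_atfork() hook (LTTng-UST wires its handlers through its own fork support rather than this call; names are illustrative):
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* nests inside lock_a */

static void demo_before_fork(void)
{
	pthread_mutex_lock(&lock_a);	/* acquire in nesting order */
	pthread_mutex_lock(&lock_b);
}

static void demo_after_fork(void)
{
	pthread_mutex_unlock(&lock_b);	/* release in reverse order */
	pthread_mutex_unlock(&lock_a);
}

static void demo_install(void)
{
	/* Parent and child handlers both just release the locks. */
	pthread_atfork(demo_before_fork, demo_after_fork, demo_after_fork);
}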