#ifndef _LINUX_RWSEM_RT_H
#define _LINUX_RWSEM_RT_H

#ifndef _LINUX_RWSEM_H
#error "Include rwsem.h"
#endif

/*
 * An rw_semaphore here is an rtmutex plus a reader-depth count.
 *
 * Note that the semantics differ from the usual Linux rw-sems:
 * in PREEMPT_RT mode we do not allow multiple readers to hold
 * the lock at once; we only allow the read-lock owner to
 * read-lock recursively. This is better for latency, makes the
 * implementation inherently fair, and keeps it simpler as well.
 */
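
/*
 * A plausible walk-through of the recursion rule above (hypothetical
 * caller, assuming the usual RT scheme where read_depth counts the
 * owner's recursion). The same task may re-take the read side; any
 * other task, reader or writer, blocks on the underlying rtmutex:
 *
 *	down_read(sem);		read_depth 0 -> 1, acquires sem->lock
 *	down_read(sem);		read_depth 1 -> 2, owner recurses, no blocking
 *	up_read(sem);		read_depth 2 -> 1, lock still held
 *	up_read(sem);		read_depth 1 -> 0, sem->lock released
 */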

#include <linux/rtmutex.h>

struct rw_semaphore {
	struct rt_mutex		lock;		/* underlying PI-aware rtmutex */
	int			read_depth;	/* read-recursion depth of the owner */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name) \
	{ .lock = __RT_MUTEX_INITIALIZER(name.lock), \
	  RW_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
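
/*
 * Typical static use (the name is hypothetical):
 *
 *	static DECLARE_RWSEM(foo_sem);
 *
 * This yields a fully initialized rw_semaphore: the embedded rtmutex is
 * set up by __RT_MUTEX_INITIALIZER and read_depth starts at zero, so no
 * runtime init_rwsem() call is needed.
 */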

extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
			    struct lock_class_key *key);

#define __rt_init_rwsem(sem, name, key)			\
	do {						\
		rt_mutex_init(&(sem)->lock);		\
		__rt_rwsem_init((sem), (name), (key));	\
	} while (0)

#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)

#define rt_init_rwsem(sem)				\
do {							\
	static struct lock_class_key __key;		\
							\
	__rt_init_rwsem((sem), #sem, &__key);		\
} while (0)
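
/*
 * For a dynamically allocated object the embedded rwsem is set up at
 * runtime instead; init_rwsem() below maps onto rt_init_rwsem(), so
 * each call site gets its own static lockdep class key. Hypothetical
 * sketch:
 *
 *	struct foo {
 *		struct rw_semaphore sem;
 *	};
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	if (f)
 *		init_rwsem(&f->sem);
 */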

extern void rt_down_write(struct rw_semaphore *rwsem);
extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
				      struct lockdep_map *nest);
extern void rt_down_read(struct rw_semaphore *rwsem);
extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
extern void rt_up_read(struct rw_semaphore *rwsem);
extern void rt_up_write(struct rw_semaphore *rwsem);
extern void rt_downgrade_write(struct rw_semaphore *rwsem);

#define init_rwsem(sem)		rt_init_rwsem(sem)
#define rwsem_is_locked(s)	rt_mutex_is_locked(&(s)->lock)

static inline void down_read(struct rw_semaphore *sem)
{
	rt_down_read(sem);
}

static inline int down_read_trylock(struct rw_semaphore *sem)
{
	return rt_down_read_trylock(sem);
}

static inline void down_write(struct rw_semaphore *sem)
{
	rt_down_write(sem);
}

static inline int down_write_trylock(struct rw_semaphore *sem)
{
	return rt_down_write_trylock(sem);
}
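
/*
 * Both trylock variants return nonzero on success and do not block
 * (they resolve to rt_mutex trylock paths), which suits callers that
 * cannot sleep on the lock. Illustrative pattern:
 *
 *	if (down_write_trylock(sem)) {
 *		...			exclusive section
 *		up_write(sem);
 *	} else {
 *		...			contended: defer or fall back
 *	}
 */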

static inline void up_read(struct rw_semaphore *sem)
{
	rt_up_read(sem);
}

static inline void up_write(struct rw_semaphore *sem)
{
	rt_up_write(sem);
}

static inline void downgrade_write(struct rw_semaphore *sem)
{
	rt_downgrade_write(sem);
}
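
/*
 * downgrade_write() turns a held write lock into a read lock without a
 * window in which another writer could slip in. Illustrative pattern
 * (helper names are hypothetical):
 *
 *	down_write(sem);
 *	update_data();			exclusive phase
 *	downgrade_write(sem);
 *	consume_data();			read phase; on RT still a single
 *					reader, per the semantics above
 *	up_read(sem);
 */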

static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	rt_down_read_nested(sem, subclass);
}

static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	rt_down_write_nested(sem, subclass);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void down_write_nest_lock(struct rw_semaphore *sem,
		struct rw_semaphore *nest_lock)
{
	rt_down_write_nested_lock(sem, &nest_lock->dep_map);
}

#else

static inline void down_write_nest_lock(struct rw_semaphore *sem,
		struct rw_semaphore *nest_lock)
{
	rt_down_write_nested_lock(sem, NULL);
}
#endif
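
/*
 * down_write_nest_lock() annotates the acquisition for lockdep: it
 * records that 'sem' is always taken under 'nest_lock', so acquiring
 * several same-class rwsems beneath one outer lock is not flagged as a
 * deadlock. Hypothetical use:
 *
 *	down_write(&outer_sem);
 *	down_write_nest_lock(&a->sem, &outer_sem);
 *	down_write_nest_lock(&b->sem, &outer_sem);
 */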
#endif /* _LINUX_RWSEM_RT_H */