Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c       21
-rw-r--r--  kernel/cgroup.c            6
-rw-r--r--  kernel/cred.c             44
-rw-r--r--  kernel/exit.c              6
-rw-r--r--  kernel/groups.c           50
-rw-r--r--  kernel/ptrace.c           15
-rw-r--r--  kernel/sched/core.c        7
-rw-r--r--  kernel/signal.c           51
-rw-r--r--  kernel/sys.c             266
-rw-r--r--  kernel/timer.c             8
-rw-r--r--  kernel/uid16.c            48
-rw-r--r--  kernel/user.c             51
-rw-r--r--  kernel/user_namespace.c  595
-rw-r--r--  kernel/utsname.c           2
14 files changed, 883 insertions(+), 287 deletions(-)
diff --git a/kernel/capability.c b/kernel/capability.c
index 3f1adb6c647..493d9725948 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -419,3 +419,24 @@ bool nsown_capable(int cap)
{
return ns_capable(current_user_ns(), cap);
}
+
+/**
+ * inode_capable - Check superior capability over inode
+ * @inode: The inode in question
+ * @cap: The capability in question
+ *
+ * Return true if the current task has the given superior capability
+ * targeted at its own user namespace and the given inode is owned
+ * by the current user namespace or a child namespace.
+ *
+ * Currently we check to see if an inode is owned by the current
+ * user namespace by seeing if the inode's owner maps into the
+ * current user namespace.
+ *
+ */
+bool inode_capable(const struct inode *inode, int cap)
+{
+ struct user_namespace *ns = current_user_ns();
+
+ return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
+}
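
As a rough illustration of the new helper (not part of this diff), a caller that today uses a bare capable(CAP_CHOWN) could combine the namespace-targeted capability check and the ownership-mapping check through inode_capable(). The may_chown() helper below is a hypothetical sketch; only inode_capable(), uid_eq() and current_fsuid() are taken from the API as it stands after this series.

/*
 * Hypothetical sketch: a chown-style permission check built on the new
 * inode_capable() helper. The helper name and surrounding logic are
 * illustrative assumptions, not part of this diff.
 */
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/uidgid.h>

static bool may_chown(const struct inode *inode, kuid_t new_uid)
{
        /* The owner may always "chown" the file to its current owner. */
        if (uid_eq(current_fsuid(), inode->i_uid) &&
            uid_eq(new_uid, inode->i_uid))
                return true;

        /*
         * Otherwise require CAP_CHOWN targeted at the caller's user
         * namespace, and only for inodes whose owner maps into it.
         */
        return inode_capable(inode, CAP_CHOWN);
}
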
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ad8eae5bb80..a0c6af34d50 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2214,9 +2214,9 @@ retry_find_task:
* only need to check permissions on one of them.
*/
tcred = __task_cred(tsk);
- if (cred->euid &&
- cred->euid != tcred->uid &&
- cred->euid != tcred->suid) {
+ if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
+ !uid_eq(cred->euid, tcred->uid) &&
+ !uid_eq(cred->euid, tcred->suid)) {
rcu_read_unlock();
ret = -EACCES;
goto out_unlock_cgroup;
diff --git a/kernel/cred.c b/kernel/cred.c
index e70683d9ec3..430557ea488 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -49,6 +49,14 @@ struct cred init_cred = {
.subscribers = ATOMIC_INIT(2),
.magic = CRED_MAGIC,
#endif
+ .uid = GLOBAL_ROOT_UID,
+ .gid = GLOBAL_ROOT_GID,
+ .suid = GLOBAL_ROOT_UID,
+ .sgid = GLOBAL_ROOT_GID,
+ .euid = GLOBAL_ROOT_UID,
+ .egid = GLOBAL_ROOT_GID,
+ .fsuid = GLOBAL_ROOT_UID,
+ .fsgid = GLOBAL_ROOT_GID,
.securebits = SECUREBITS_DEFAULT,
.cap_inheritable = CAP_EMPTY_SET,
.cap_permitted = CAP_FULL_SET,
@@ -148,6 +156,7 @@ static void put_cred_rcu(struct rcu_head *rcu)
if (cred->group_info)
put_group_info(cred->group_info);
free_uid(cred->user);
+ put_user_ns(cred->user_ns);
kmem_cache_free(cred_jar, cred);
}
@@ -303,6 +312,7 @@ struct cred *prepare_creds(void)
set_cred_subscribers(new, 0);
get_group_info(new->group_info);
get_uid(new->user);
+ get_user_ns(new->user_ns);
#ifdef CONFIG_KEYS
key_get(new->thread_keyring);
@@ -414,11 +424,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
goto error_put;
}
- /* cache user_ns in cred. Doesn't need a refcount because it will
- * stay pinned by cred->user
- */
- new->user_ns = new->user->user_ns;
-
#ifdef CONFIG_KEYS
/* new threads get their own thread keyrings if their parent already
* had one */
@@ -493,10 +498,10 @@ int commit_creds(struct cred *new)
get_cred(new); /* we will require a ref for the subj creds too */
/* dumpability changes */
- if (old->euid != new->euid ||
- old->egid != new->egid ||
- old->fsuid != new->fsuid ||
- old->fsgid != new->fsgid ||
+ if (!uid_eq(old->euid, new->euid) ||
+ !gid_eq(old->egid, new->egid) ||
+ !uid_eq(old->fsuid, new->fsuid) ||
+ !gid_eq(old->fsgid, new->fsgid) ||
!cap_issubset(new->cap_permitted, old->cap_permitted)) {
if (task->mm)
set_dumpable(task->mm, suid_dumpable);
@@ -505,9 +510,9 @@ int commit_creds(struct cred *new)
}
/* alter the thread keyring */
- if (new->fsuid != old->fsuid)
+ if (!uid_eq(new->fsuid, old->fsuid))
key_fsuid_changed(task);
- if (new->fsgid != old->fsgid)
+ if (!gid_eq(new->fsgid, old->fsgid))
key_fsgid_changed(task);
/* do it
@@ -524,16 +529,16 @@ int commit_creds(struct cred *new)
alter_cred_subscribers(old, -2);
/* send notifications */
- if (new->uid != old->uid ||
- new->euid != old->euid ||
- new->suid != old->suid ||
- new->fsuid != old->fsuid)
+ if (!uid_eq(new->uid, old->uid) ||
+ !uid_eq(new->euid, old->euid) ||
+ !uid_eq(new->suid, old->suid) ||
+ !uid_eq(new->fsuid, old->fsuid))
proc_id_connector(task, PROC_EVENT_UID);
- if (new->gid != old->gid ||
- new->egid != old->egid ||
- new->sgid != old->sgid ||
- new->fsgid != old->fsgid)
+ if (!gid_eq(new->gid, old->gid) ||
+ !gid_eq(new->egid, old->egid) ||
+ !gid_eq(new->sgid, old->sgid) ||
+ !gid_eq(new->fsgid, old->fsgid))
proc_id_connector(task, PROC_EVENT_GID);
/* release the old obj and subj refs both */
@@ -678,6 +683,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
atomic_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_uid(new->user);
+ get_user_ns(new->user_ns);
get_group_info(new->group_info);
#ifdef CONFIG_KEYS
diff --git a/kernel/exit.c b/kernel/exit.c
index d8bd3b425fa..910a0716e17 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1214,7 +1214,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
unsigned long state;
int retval, status, traced;
pid_t pid = task_pid_vnr(p);
- uid_t uid = __task_cred(p)->uid;
+ uid_t uid = from_kuid_munged(current_user_ns(), __task_cred(p)->uid);
struct siginfo __user *infop;
if (!likely(wo->wo_flags & WEXITED))
@@ -1427,7 +1427,7 @@ static int wait_task_stopped(struct wait_opts *wo,
if (!unlikely(wo->wo_flags & WNOWAIT))
*p_code = 0;
- uid = task_uid(p);
+ uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
spin_unlock_irq(&p->sighand->siglock);
if (!exit_code)
@@ -1500,7 +1500,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
}
if (!unlikely(wo->wo_flags & WNOWAIT))
p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
- uid = task_uid(p);
+ uid = from_kuid_munged(current_user_ns(), task_uid(p));
spin_unlock_irq(&p->sighand->siglock);
pid = task_pid_vnr(p);
diff --git a/kernel/groups.c b/kernel/groups.c
index 99b53d1eb7e..6b2588dd04f 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -31,7 +31,7 @@ struct group_info *groups_alloc(int gidsetsize)
group_info->blocks[0] = group_info->small_block;
else {
for (i = 0; i < nblocks; i++) {
- gid_t *b;
+ kgid_t *b;
b = (void *)__get_free_page(GFP_USER);
if (!b)
goto out_undo_partial_alloc;
@@ -66,18 +66,15 @@ EXPORT_SYMBOL(groups_free);
static int groups_to_user(gid_t __user *grouplist,
const struct group_info *group_info)
{
+ struct user_namespace *user_ns = current_user_ns();
int i;
unsigned int count = group_info->ngroups;
- for (i = 0; i < group_info->nblocks; i++) {
- unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
- unsigned int len = cp_count * sizeof(*grouplist);
-
- if (copy_to_user(grouplist, group_info->blocks[i], len))
+ for (i = 0; i < count; i++) {
+ gid_t gid;
+ gid = from_kgid_munged(user_ns, GROUP_AT(group_info, i));
+ if (put_user(gid, grouplist+i))
return -EFAULT;
-
- grouplist += NGROUPS_PER_BLOCK;
- count -= cp_count;
}
return 0;
}
@@ -86,18 +83,21 @@ static int groups_to_user(gid_t __user *grouplist,
static int groups_from_user(struct group_info *group_info,
gid_t __user *grouplist)
{
+ struct user_namespace *user_ns = current_user_ns();
int i;
unsigned int count = group_info->ngroups;
- for (i = 0; i < group_info->nblocks; i++) {
- unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
- unsigned int len = cp_count * sizeof(*grouplist);
-
- if (copy_from_user(group_info->blocks[i], grouplist, len))
+ for (i = 0; i < count; i++) {
+ gid_t gid;
+ kgid_t kgid;
+ if (get_user(gid, grouplist+i))
return -EFAULT;
- grouplist += NGROUPS_PER_BLOCK;
- count -= cp_count;
+ kgid = make_kgid(user_ns, gid);
+ if (!gid_valid(kgid))
+ return -EINVAL;
+
+ GROUP_AT(group_info, i) = kgid;
}
return 0;
}
@@ -117,9 +117,9 @@ static void groups_sort(struct group_info *group_info)
for (base = 0; base < max; base++) {
int left = base;
int right = left + stride;
- gid_t tmp = GROUP_AT(group_info, right);
+ kgid_t tmp = GROUP_AT(group_info, right);
- while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
+ while (left >= 0 && gid_gt(GROUP_AT(group_info, left), tmp)) {
GROUP_AT(group_info, right) =
GROUP_AT(group_info, left);
right = left;
@@ -132,7 +132,7 @@ static void groups_sort(struct group_info *group_info)
}
/* a simple bsearch */
-int groups_search(const struct group_info *group_info, gid_t grp)
+int groups_search(const struct group_info *group_info, kgid_t grp)
{
unsigned int left, right;
@@ -143,9 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left+right)/2;
- if (grp > GROUP_AT(group_info, mid))
+ if (gid_gt(grp, GROUP_AT(group_info, mid)))
left = mid + 1;
- else if (grp < GROUP_AT(group_info, mid))
+ else if (gid_lt(grp, GROUP_AT(group_info, mid)))
right = mid;
else
return 1;
@@ -256,24 +256,24 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
/*
* Check whether we're fsgid/egid or in the supplemental group..
*/
-int in_group_p(gid_t grp)
+int in_group_p(kgid_t grp)
{
const struct cred *cred = current_cred();
int retval = 1;
- if (grp != cred->fsgid)
+ if (!gid_eq(grp, cred->fsgid))
retval = groups_search(cred->group_info, grp);
return retval;
}
EXPORT_SYMBOL(in_group_p);
-int in_egroup_p(gid_t grp)
+int in_egroup_p(kgid_t grp)
{
const struct cred *cred = current_cred();
int retval = 1;
- if (grp != cred->egid)
+ if (!gid_eq(grp, cred->egid))
retval = groups_search(cred->group_info, grp);
return retval;
}
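
A minimal sketch (not part of this diff) of how a caller adapts to the kgid_t-based group interfaces: a gid arriving from userspace is converted with make_kgid(), rejected if it has no mapping, and only then handed to in_group_p(). The check_in_group() helper name is an assumption for illustration.

/*
 * Hypothetical sketch: checking membership for a userspace-supplied gid
 * now goes through make_kgid() instead of comparing raw integers.
 */
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/uidgid.h>

static int check_in_group(gid_t gid)
{
        kgid_t kgid = make_kgid(current_user_ns(), gid);

        if (!gid_valid(kgid))
                return -EINVAL;         /* no mapping in this user namespace */

        return in_group_p(kgid) ? 0 : -EPERM;
}
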
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index ee8d49b9c30..a232bb59d93 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -198,15 +198,14 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
return 0;
rcu_read_lock();
tcred = __task_cred(task);
- if (cred->user->user_ns == tcred->user->user_ns &&
- (cred->uid == tcred->euid &&
- cred->uid == tcred->suid &&
- cred->uid == tcred->uid &&
- cred->gid == tcred->egid &&
- cred->gid == tcred->sgid &&
- cred->gid == tcred->gid))
+ if (uid_eq(cred->uid, tcred->euid) &&
+ uid_eq(cred->uid, tcred->suid) &&
+ uid_eq(cred->uid, tcred->uid) &&
+ gid_eq(cred->gid, tcred->egid) &&
+ gid_eq(cred->gid, tcred->sgid) &&
+ gid_eq(cred->gid, tcred->gid))
goto ok;
- if (ptrace_has_cap(tcred->user->user_ns, mode))
+ if (ptrace_has_cap(tcred->user_ns, mode))
goto ok;
rcu_read_unlock();
return -EPERM;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a5a9d39b845..39eb6011bc3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4070,11 +4070,8 @@ static bool check_same_owner(struct task_struct *p)
rcu_read_lock();
pcred = __task_cred(p);
- if (cred->user->user_ns == pcred->user->user_ns)
- match = (cred->euid == pcred->euid ||
- cred->euid == pcred->uid);
- else
- match = false;
+ match = (uid_eq(cred->euid, pcred->euid) ||
+ uid_eq(cred->euid, pcred->uid));
rcu_read_unlock();
return match;
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 1a006b5d9d9..21ebe75ff85 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -767,14 +767,13 @@ static int kill_ok_by_cred(struct task_struct *t)
const struct cred *cred = current_cred();
const struct cred *tcred = __task_cred(t);
- if (cred->user->user_ns == tcred->user->user_ns &&
- (cred->euid == tcred->suid ||
- cred->euid == tcred->uid ||
- cred->uid == tcred->suid ||
- cred->uid == tcred->uid))
+ if (uid_eq(cred->euid, tcred->suid) ||
+ uid_eq(cred->euid, tcred->uid) ||
+ uid_eq(cred->uid, tcred->suid) ||
+ uid_eq(cred->uid, tcred->uid))
return 1;
- if (ns_capable(tcred->user->user_ns, CAP_KILL))
+ if (ns_capable(tcred->user_ns, CAP_KILL))
return 1;
return 0;
@@ -1020,15 +1019,6 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
-/*
- * map the uid in struct cred into user namespace *ns
- */
-static inline uid_t map_cred_ns(const struct cred *cred,
- struct user_namespace *ns)
-{
- return user_ns_map_uid(ns, cred, cred->uid);
-}
-
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
@@ -1038,8 +1028,10 @@ static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_str
if (SI_FROMKERNEL(info))
return;
- info->si_uid = user_ns_map_uid(task_cred_xxx(t, user_ns),
- current_cred(), info->si_uid);
+ rcu_read_lock();
+ info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
+ make_kuid(current_user_ns(), info->si_uid));
+ rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
@@ -1106,7 +1098,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
q->info.si_code = SI_USER;
q->info.si_pid = task_tgid_nr_ns(current,
task_active_pid_ns(t));
- q->info.si_uid = current_uid();
+ q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
break;
case (unsigned long) SEND_SIG_PRIV:
q->info.si_signo = sig;
@@ -1387,10 +1379,8 @@ static int kill_as_cred_perm(const struct cred *cred,
struct task_struct *target)
{
const struct cred *pcred = __task_cred(target);
- if (cred->user_ns != pcred->user_ns)
- return 0;
- if (cred->euid != pcred->suid && cred->euid != pcred->uid &&
- cred->uid != pcred->suid && cred->uid != pcred->uid)
+ if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
+ !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
return 0;
return 1;
}
@@ -1678,8 +1668,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
- info.si_uid = map_cred_ns(__task_cred(tsk),
- task_cred_xxx(tsk->parent, user_ns));
+ info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
+ task_uid(tsk));
rcu_read_unlock();
info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
@@ -1762,8 +1752,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
- info.si_uid = map_cred_ns(__task_cred(tsk),
- task_cred_xxx(parent, user_ns));
+ info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
rcu_read_unlock();
info.si_utime = cputime_to_clock_t(tsk->utime);
@@ -1973,7 +1962,7 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
info.si_signo = signr;
info.si_code = exit_code;
info.si_pid = task_pid_vnr(current);
- info.si_uid = current_uid();
+ info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
/* Let the debugger run. */
ptrace_stop(exit_code, why, 1, &info);
@@ -2181,8 +2170,8 @@ static int ptrace_signal(int signr, siginfo_t *info,
info->si_code = SI_USER;
rcu_read_lock();
info->si_pid = task_pid_vnr(current->parent);
- info->si_uid = map_cred_ns(__task_cred(current->parent),
- current_user_ns());
+ info->si_uid = from_kuid_munged(current_user_ns(),
+ task_uid(current->parent));
rcu_read_unlock();
}
@@ -2835,7 +2824,7 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
info.si_errno = 0;
info.si_code = SI_USER;
info.si_pid = task_tgid_vnr(current);
- info.si_uid = current_uid();
+ info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
return kill_something_info(sig, &info, pid);
}
@@ -2878,7 +2867,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig)
info.si_errno = 0;
info.si_code = SI_TKILL;
info.si_pid = task_tgid_vnr(current);
- info.si_uid = current_uid();
+ info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
return do_send_specific(tgid, pid, sig, &info);
}
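
The si_uid fix-ups above all perform the same translation: produce a kuid in the sender's user namespace and convert it back into the receiver's namespace with from_kuid_munged(), falling back to overflowuid when no mapping exists. A minimal sketch of that round trip follows; the translate_uid() helper is hypothetical, not part of this diff.

/*
 * Hypothetical sketch: translating a uid between two user namespaces
 * via the kernel-internal kuid_t, as the si_uid fix-ups above do.
 */
#include <linux/uidgid.h>
#include <linux/user_namespace.h>

static uid_t translate_uid(struct user_namespace *from,
                           struct user_namespace *to, uid_t uid)
{
        kuid_t kuid = make_kuid(from, uid);     /* sender's view -> kernel */

        /* receiver's view; munged to overflowuid if unmapped */
        return from_kuid_munged(to, kuid);
}
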
diff --git a/kernel/sys.c b/kernel/sys.c
index ba0ae8eea6f..6df42624e45 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -93,10 +93,8 @@
int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;
-#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
-#endif
/*
* the same as above, but for filesystems which can only store a 16-bit
@@ -133,11 +131,10 @@ static bool set_one_prio_perm(struct task_struct *p)
{
const struct cred *cred = current_cred(), *pcred = __task_cred(p);
- if (pcred->user->user_ns == cred->user->user_ns &&
- (pcred->uid == cred->euid ||
- pcred->euid == cred->euid))
+ if (uid_eq(pcred->uid, cred->euid) ||
+ uid_eq(pcred->euid, cred->euid))
return true;
- if (ns_capable(pcred->user->user_ns, CAP_SYS_NICE))
+ if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
return true;
return false;
}
@@ -177,6 +174,7 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
const struct cred *cred = current_cred();
int error = -EINVAL;
struct pid *pgrp;
+ kuid_t uid;
if (which > PRIO_USER || which < PRIO_PROCESS)
goto out;
@@ -209,18 +207,19 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case PRIO_USER:
- user = (struct user_struct *) cred->user;
+ uid = make_kuid(cred->user_ns, who);
+ user = cred->user;
if (!who)
- who = cred->uid;
- else if ((who != cred->uid) &&
- !(user = find_user(who)))
+ uid = cred->uid;
+ else if (!uid_eq(uid, cred->uid) &&
+ !(user = find_user(uid)))
goto out_unlock; /* No processes for this user */
do_each_thread(g, p) {
- if (__task_cred(p)->uid == who)
+ if (uid_eq(task_uid(p), uid))
error = set_one_prio(p, niceval, error);
} while_each_thread(g, p);
- if (who != cred->uid)
+ if (!uid_eq(uid, cred->uid))
free_uid(user); /* For find_user() */
break;
}
@@ -244,6 +243,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
const struct cred *cred = current_cred();
long niceval, retval = -ESRCH;
struct pid *pgrp;
+ kuid_t uid;
if (which > PRIO_USER || which < PRIO_PROCESS)
return -EINVAL;
@@ -274,21 +274,22 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case PRIO_USER:
- user = (struct user_struct *) cred->user;
+ uid = make_kuid(cred->user_ns, who);
+ user = cred->user;
if (!who)
- who = cred->uid;
- else if ((who != cred->uid) &&
- !(user = find_user(who)))
+ uid = cred->uid;
+ else if (!uid_eq(uid, cred->uid) &&
+ !(user = find_user(uid)))
goto out_unlock; /* No processes for this user */
do_each_thread(g, p) {
- if (__task_cred(p)->uid == who) {
+ if (uid_eq(task_uid(p), uid)) {
niceval = 20 - task_nice(p);
if (niceval > retval)
retval = niceval;
}
} while_each_thread(g, p);
- if (who != cred->uid)
+ if (!uid_eq(uid, cred->uid))
free_uid(user); /* for find_user() */
break;
}
@@ -553,9 +554,19 @@ void ctrl_alt_del(void)
*/
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
+ struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
+ kgid_t krgid, kegid;
+
+ krgid = make_kgid(ns, rgid);
+ kegid = make_kgid(ns, egid);
+
+ if ((rgid != (gid_t) -1) && !gid_valid(krgid))
+ return -EINVAL;
+ if ((egid != (gid_t) -1) && !gid_valid(kegid))
+ return -EINVAL;
new = prepare_creds();
if (!new)
@@ -564,25 +575,25 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
retval = -EPERM;
if (rgid != (gid_t) -1) {
- if (old->gid == rgid ||
- old->egid == rgid ||
+ if (gid_eq(old->gid, krgid) ||
+ gid_eq(old->egid, krgid) ||
nsown_capable(CAP_SETGID))
- new->gid = rgid;
+ new->gid = krgid;
else
goto error;
}
if (egid != (gid_t) -1) {
- if (old->gid == egid ||
- old->egid == egid ||
- old->sgid == egid ||
+ if (gid_eq(old->gid, kegid) ||
+ gid_eq(old->egid, kegid) ||
+ gid_eq(old->sgid, kegid) ||
nsown_capable(CAP_SETGID))
- new->egid = egid;
+ new->egid = kegid;
else
goto error;
}
if (rgid != (gid_t) -1 ||
- (egid != (gid_t) -1 && egid != old->gid))
+ (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
new->sgid = new->egid;
new->fsgid = new->egid;
@@ -600,9 +611,15 @@ error:
*/
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
+ struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
+ kgid_t kgid;
+
+ kgid = make_kgid(ns, gid);
+ if (!gid_valid(kgid))
+ return -EINVAL;
new = prepare_creds();
if (!new)
@@ -611,9 +628,9 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
retval = -EPERM;
if (nsown_capable(CAP_SETGID))
- new->gid = new->egid = new->sgid = new->fsgid = gid;
- else if (gid == old->gid || gid == old->sgid)
- new->egid = new->fsgid = gid;
+ new->gid = new->egid = new->sgid = new->fsgid = kgid;
+ else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
+ new->egid = new->fsgid = kgid;
else
goto error;
@@ -631,7 +648,7 @@ static int set_user(struct cred *new)
{
struct user_struct *new_user;
- new_user = alloc_uid(current_user_ns(), new->uid);
+ new_user = alloc_uid(new->uid);
if (!new_user)
return -EAGAIN;
@@ -670,9 +687,19 @@ static int set_user(struct cred *new)
*/
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
+ struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
+ kuid_t kruid, keuid;
+
+ kruid = make_kuid(ns, ruid);
+ keuid = make_kuid(ns, euid);
+
+ if ((ruid != (uid_t) -1) && !uid_valid(kruid))
+ return -EINVAL;
+ if ((euid != (uid_t) -1) && !uid_valid(keuid))
+ return -EINVAL;
new = prepare_creds();
if (!new)
@@ -681,29 +708,29 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
retval = -EPERM;
if (ruid != (uid_t) -1) {
- new->uid = ruid;
- if (old->uid != ruid &&
- old->euid != ruid &&
+ new->uid = kruid;
+ if (!uid_eq(old->uid, kruid) &&
+ !uid_eq(old->euid, kruid) &&
!nsown_capable(CAP_SETUID))
goto error;
}
if (euid != (uid_t) -1) {
- new->euid = euid;
- if (old->uid != euid &&
- old->euid != euid &&
- old->suid != euid &&
+ new->euid = keuid;
+ if (!uid_eq(old->uid, keuid) &&
+ !uid_eq(old->euid, keuid) &&
+ !uid_eq(old->suid, keuid) &&
!nsown_capable(CAP_SETUID))
goto error;
}
- if (new->uid != old->uid) {
+ if (!uid_eq(new->uid, old->uid)) {
retval = set_user(new);
if (retval < 0)
goto error;
}
if (ruid != (uid_t) -1 ||
- (euid != (uid_t) -1 && euid != old->uid))
+ (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
new->suid = new->euid;
new->fsuid = new->euid;
@@ -731,9 +758,15 @@ error:
*/
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
+ struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
+ kuid_t kuid;
+
+ kuid = make_kuid(ns, uid);
+ if (!uid_valid(kuid))
+ return -EINVAL;
new = prepare_creds();
if (!new)
@@ -742,17 +775,17 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
retval = -EPERM;
if (nsown_capable(CAP_SETUID)) {
- new->suid = new->uid = uid;
- if (uid != old->uid) {
+ new->suid = new->uid = kuid;
+ if (!uid_eq(kuid, old->uid)) {
retval = set_user(new);
if (retval < 0)
goto error;
}
- } else if (uid != old->uid && uid != new->suid) {
+ } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
goto error;
}
- new->fsuid = new->euid = uid;
+ new->fsuid = new->euid = kuid;
retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
if (retval < 0)
@@ -772,9 +805,24 @@ error:
*/
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
+ struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
+ kuid_t kruid, keuid, ksuid;
+
+ kruid = make_kuid(ns, ruid);
+ keuid = make_kuid(ns, euid);
+ ksuid = make_kuid(ns, suid);
+
+ if ((ruid != (uid_t) -1) && !uid_valid(kruid))
+ return -EINVAL;
+
+ if ((euid != (uid_t) -1) && !uid_valid(keuid))
+ return -EINVAL;
+
+ if ((suid != (uid_t) -1) && !uid_valid(ksuid))
+ return -EINVAL;
new = prepare_creds();
if (!new)
@@ -784,29 +832,29 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
retval = -EPERM;
if (!nsown_capable(CAP_SETUID)) {
- if (ruid != (uid_t) -1 && ruid != old->uid &&
- ruid != old->euid && ruid != old->suid)
+ if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
+ !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
goto error;
- if (euid != (uid_t) -1 && euid != old->uid &&
- euid != old->euid && euid != old->suid)
+ if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
+ !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
goto error;
- if (suid != (uid_t) -1 && suid != old->uid &&
- suid != old->euid && suid != old->suid)
+ if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
+ !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
goto error;
}
if (ruid != (uid_t) -1) {
- new->uid = ruid;
- if (ruid != old->uid) {
+ new->uid = kruid;
+ if (!uid_eq(kruid, old->uid)) {
retval = set_user(new);
if (retval < 0)
goto error;
}
}
if (euid != (uid_t) -1)
- new->euid = euid;
+ new->euid = keuid;
if (suid != (uid_t) -1)
- new->suid = suid;
+ new->suid = ksuid;
new->fsuid = new->euid;
retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
@@ -820,14 +868,19 @@ error:
return retval;
}
-SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
+SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
const struct cred *cred = current_cred();
int retval;
+ uid_t ruid, euid, suid;
+
+ ruid = from_kuid_munged(cred->user_ns, cred->uid);
+ euid = from_kuid_munged(cred->user_ns, cred->euid);
+ suid = from_kuid_munged(cred->user_ns, cred->suid);
- if (!(retval = put_user(cred->uid, ruid)) &&
- !(retval = put_user(cred->euid, euid)))
- retval = put_user(cred->suid, suid);
+ if (!(retval = put_user(ruid, ruidp)) &&
+ !(retval = put_user(euid, euidp)))
+ retval = put_user(suid, suidp);
return retval;
}
@@ -837,9 +890,22 @@ SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __u
*/
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
+ struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
+ kgid_t krgid, kegid, ksgid;
+
+ krgid = make_kgid(ns, rgid);
+ kegid = make_kgid(ns, egid);
+ ksgid = make_kgid(ns, sgid);
+
+ if ((rgid != (gid_t) -1) && !gid_valid(krgid))
+ return -EINVAL;
+ if ((egid != (gid_t) -1) && !gid_valid(kegid))
+ return -EINVAL;
+ if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
+ return -EINVAL;
new = prepare_creds();
if (!new)
@@ -848,23 +914,23 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
retval = -EPERM;
if (!nsown_capable(CAP_SETGID)) {
- if (rgid != (gid_t) -1 && rgid != old->gid &&
- rgid != old->egid && rgid != old->sgid)
+ if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
+ !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
goto error;
- if (egid != (gid_t) -1 && egid != old->gid &&
- egid != old->egid && egid != old->sgid)
+ if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
+ !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
goto error;
- if (sgid != (gid_t) -1 && sgid != old->gid &&
- sgid != old->egid && sgid != old->sgid)
+ if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
+ !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
goto error;
}
if (rgid != (gid_t) -1)
- new->gid = rgid;
+ new->gid = krgid;
if (egid != (gid_t) -1)
- new->egid = egid;
+ new->egid = kegid;
if (sgid != (gid_t) -1)
- new->sgid = sgid;
+ new->sgid = ksgid;
new->fsgid = new->egid;
return commit_creds(new);
@@ -874,14 +940,19 @@ error:
return retval;
}
-SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
+SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
const struct cred *cred = current_cred();
int retval;
+ gid_t rgid, egid, sgid;
+
+ rgid = from_kgid_munged(cred->user_ns, cred->gid);
+ egid = from_kgid_munged(cred->user_ns, cred->egid);
+ sgid = from_kgid_munged(cred->user_ns, cred->sgid);
- if (!(retval = put_user(cred->gid, rgid)) &&
- !(retval = put_user(cred->egid, egid)))
- retval = put_user(cred->sgid, sgid);
+ if (!(retval = put_user(rgid, rgidp)) &&
+ !(retval = put_user(egid, egidp)))
+ retval = put_user(sgid, sgidp);
return retval;
}
@@ -898,18 +969,24 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
const struct cred *old;
struct cred *new;
uid_t old_fsuid;
+ kuid_t kuid;
+
+ old = current_cred();
+ old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
+
+ kuid = make_kuid(old->user_ns, uid);
+ if (!uid_valid(kuid))
+ return old_fsuid;
new = prepare_creds();
if (!new)
- return current_fsuid();
- old = current_cred();
- old_fsuid = old->fsuid;
+ return old_fsuid;
- if (uid == old->uid || uid == old->euid ||
- uid == old->suid || uid == old->fsuid ||
+ if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
+ uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
nsown_capable(CAP_SETUID)) {
- if (uid != old_fsuid) {
- new->fsuid = uid;
+ if (!uid_eq(kuid, old->fsuid)) {
+ new->fsuid = kuid;
if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
goto change_okay;
}
@@ -931,18 +1008,24 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
const struct cred *old;
struct cred *new;
gid_t old_fsgid;
+ kgid_t kgid;
+
+ old = current_cred();
+ old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
+
+ kgid = make_kgid(old->user_ns, gid);
+ if (!gid_valid(kgid))
+ return old_fsgid;
new = prepare_creds();
if (!new)
- return current_fsgid();
- old = current_cred();
- old_fsgid = old->fsgid;
+ return old_fsgid;
- if (gid == old->gid || gid == old->egid ||
- gid == old->sgid || gid == old->fsgid ||
+ if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
+ gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
nsown_capable(CAP_SETGID)) {
- if (gid != old_fsgid) {
- new->fsgid = gid;
+ if (!gid_eq(kgid, old->fsgid)) {
+ new->fsgid = kgid;
goto change_okay;
}
}
@@ -1498,15 +1581,14 @@ static int check_prlimit_permission(struct task_struct *task)
return 0;
tcred = __task_cred(task);
- if (cred->user->user_ns == tcred->user->user_ns &&
- (cred->uid == tcred->euid &&
- cred->uid == tcred->suid &&
- cred->uid == tcred->uid &&
- cred->gid == tcred->egid &&
- cred->gid == tcred->sgid &&
- cred->gid == tcred->gid))
+ if (uid_eq(cred->uid, tcred->euid) &&
+ uid_eq(cred->uid, tcred->suid) &&
+ uid_eq(cred->uid, tcred->uid) &&
+ gid_eq(cred->gid, tcred->egid) &&
+ gid_eq(cred->gid, tcred->sgid) &&
+ gid_eq(cred->gid, tcred->gid))
return 0;
- if (ns_capable(tcred->user->user_ns, CAP_SYS_RESOURCE))
+ if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
return 0;
return -EPERM;
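
The set*uid()/set*gid() conversions above all share one pattern: convert each userspace id with make_kuid()/make_kgid(), return -EINVAL if it has no mapping (unless the caller passed the -1 "leave unchanged" sentinel), and compare with uid_eq()/gid_eq() rather than ==. Below is a condensed, hypothetical sketch of that pattern; example_check_uid() is not a real kernel function.

/*
 * Hypothetical sketch: the validation pattern shared by the converted
 * set*uid syscalls above.
 */
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/uidgid.h>

static int example_check_uid(uid_t uid)
{
        const struct cred *cred = current_cred();
        kuid_t kuid;

        if (uid == (uid_t) -1)                  /* "leave unchanged" sentinel */
                return 0;

        kuid = make_kuid(cred->user_ns, uid);
        if (!uid_valid(kuid))                   /* no mapping in this namespace */
                return -EINVAL;

        if (!uid_eq(kuid, cred->uid) && !uid_eq(kuid, cred->euid) &&
            !uid_eq(kuid, cred->suid) && !nsown_capable(CAP_SETUID))
                return -EPERM;

        return 0;
}
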
diff --git a/kernel/timer.c b/kernel/timer.c
index 09de9a941cd..6ec7e7e0db4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1435,25 +1435,25 @@ SYSCALL_DEFINE0(getppid)
SYSCALL_DEFINE0(getuid)
{
/* Only we change this so SMP safe */
- return current_uid();
+ return from_kuid_munged(current_user_ns(), current_uid());
}
SYSCALL_DEFINE0(geteuid)
{
/* Only we change this so SMP safe */
- return current_euid();
+ return from_kuid_munged(current_user_ns(), current_euid());
}
SYSCALL_DEFINE0(getgid)
{
/* Only we change this so SMP safe */
- return current_gid();
+ return from_kgid_munged(current_user_ns(), current_gid());
}
SYSCALL_DEFINE0(getegid)
{
/* Only we change this so SMP safe */
- return current_egid();
+ return from_kgid_munged(current_user_ns(), current_egid());
}
#endif
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 51c6e89e861..d7948eb1022 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -81,14 +81,19 @@ SYSCALL_DEFINE3(setresuid16, old_uid_t, ruid, old_uid_t, euid, old_uid_t, suid)
return ret;
}
-SYSCALL_DEFINE3(getresuid16, old_uid_t __user *, ruid, old_uid_t __user *, euid, old_uid_t __user *, suid)
+SYSCALL_DEFINE3(getresuid16, old_uid_t __user *, ruidp, old_uid_t __user *, euidp, old_uid_t __user *, suidp)
{
const struct cred *cred = current_cred();
int retval;
+ old_uid_t ruid, euid, suid;
- if (!(retval = put_user(high2lowuid(cred->uid), ruid)) &&
- !(retval = put_user(high2lowuid(cred->euid), euid)))
- retval = put_user(high2lowuid(cred->suid), suid);
+ ruid = high2lowuid(from_kuid_munged(cred->user_ns, cred->uid));
+ euid = high2lowuid(from_kuid_munged(cred->user_ns, cred->euid));
+ suid = high2lowuid(from_kuid_munged(cred->user_ns, cred->suid));
+
+ if (!(retval = put_user(ruid, ruidp)) &&
+ !(retval = put_user(euid, euidp)))
+ retval = put_user(suid, suidp);
return retval;
}
@@ -103,14 +108,19 @@ SYSCALL_DEFINE3(setresgid16, old_gid_t, rgid, old_gid_t, egid, old_gid_t, sgid)
}
-SYSCALL_DEFINE3(getresgid16, old_gid_t __user *, rgid, old_gid_t __user *, egid, old_gid_t __user *, sgid)
+SYSCALL_DEFINE3(getresgid16, old_gid_t __user *, rgidp, old_gid_t __user *, egidp, old_gid_t __user *, sgidp)
{
const struct cred *cred = current_cred();
int retval;
+ old_gid_t rgid, egid, sgid;
+
+ rgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->gid));
+ egid = high2lowgid(from_kgid_munged(cred->user_ns, cred->egid));
+ sgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->sgid));
- if (!(retval = put_user(high2lowgid(cred->gid), rgid)) &&
- !(retval = put_user(high2lowgid(cred->egid), egid)))
- retval = put_user(high2lowgid(cred->sgid), sgid);
+ if (!(retval = put_user(rgid, rgidp)) &&
+ !(retval = put_user(egid, egidp)))
+ retval = put_user(sgid, sgidp);
return retval;
}
@@ -134,11 +144,14 @@ SYSCALL_DEFINE1(setfsgid16, old_gid_t, gid)
static int groups16_to_user(old_gid_t __user *grouplist,
struct group_info *group_info)
{
+ struct user_namespace *user_ns = current_user_ns();
int i;
old_gid_t group;
+ kgid_t kgid;
for (i = 0; i < group_info->ngroups; i++) {
- group = high2lowgid(GROUP_AT(group_info, i));
+ kgid = GROUP_AT(group_info, i);
+ group = high2lowgid(from_kgid_munged(user_ns, kgid));
if (put_user(group, grouplist+i))
return -EFAULT;
}
@@ -149,13 +162,20 @@ static int groups16_to_user(old_gid_t __user *grouplist,
static int groups16_from_user(struct group_info *group_info,
old_gid_t __user *grouplist)
{
+ struct user_namespace *user_ns = current_user_ns();
int i;
old_gid_t group;
+ kgid_t kgid;
for (i = 0; i < group_info->ngroups; i++) {
if (get_user(group, grouplist+i))
return -EFAULT;
- GROUP_AT(group_info, i) = low2highgid(group);
+
+ kgid = make_kgid(user_ns, low2highgid(group));
+ if (!gid_valid(kgid))
+ return -EINVAL;
+
+ GROUP_AT(group_info, i) = kgid;
}
return 0;
@@ -211,20 +231,20 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
SYSCALL_DEFINE0(getuid16)
{
- return high2lowuid(current_uid());
+ return high2lowuid(from_kuid_munged(current_user_ns(), current_uid()));
}
SYSCALL_DEFINE0(geteuid16)
{
- return high2lowuid(current_euid());
+ return high2lowuid(from_kuid_munged(current_user_ns(), current_euid()));
}
SYSCALL_DEFINE0(getgid16)
{
- return high2lowgid(current_gid());
+ return high2lowgid(from_kgid_munged(current_user_ns(), current_gid()));
}
SYSCALL_DEFINE0(getegid16)
{
- return high2lowgid(current_egid());
+ return high2lowgid(from_kgid_munged(current_user_ns(), current_egid()));
}
diff --git a/kernel/user.c b/kernel/user.c
index 71dd2363ab0..b815fefbe76 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -22,10 +22,27 @@
* and 1 for... ?
*/
struct user_namespace init_user_ns = {
+ .uid_map = {
+ .nr_extents = 1,
+ .extent[0] = {
+ .first = 0,
+ .lower_first = 0,
+ .count = 4294967295U,
+ },
+ },
+ .gid_map = {
+ .nr_extents = 1,
+ .extent[0] = {
+ .first = 0,
+ .lower_first = 0,
+ .count = 4294967295U,
+ },
+ },
.kref = {
.refcount = ATOMIC_INIT(3),
},
- .creator = &root_user,
+ .owner = GLOBAL_ROOT_UID,
+ .group = GLOBAL_ROOT_GID,
};
EXPORT_SYMBOL_GPL(init_user_ns);
@@ -34,11 +51,14 @@ EXPORT_SYMBOL_GPL(init_user_ns);
* when changing user ID's (ie setuid() and friends).
*/
+#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7)
+#define UIDHASH_SZ (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid)))
+#define uidhashentry(uid) (uidhash_table + __uidhashfn((__kuid_val(uid))))
static struct kmem_cache *uid_cachep;
+struct hlist_head uidhash_table[UIDHASH_SZ];
/*
* The uidhash_lock is mostly taken from process context, but it is
@@ -51,14 +71,14 @@ static struct kmem_cache *uid_cachep;
*/
static DEFINE_SPINLOCK(uidhash_lock);
-/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->user_ns */
+/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
- .__count = ATOMIC_INIT(2),
+ .__count = ATOMIC_INIT(1),
.processes = ATOMIC_INIT(1),
.files = ATOMIC_INIT(0),
.sigpending = ATOMIC_INIT(0),
.locked_shm = 0,
- .user_ns = &init_user_ns,
+ .uid = GLOBAL_ROOT_UID,
};
/*
@@ -72,16 +92,15 @@ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
static void uid_hash_remove(struct user_struct *up)
{
hlist_del_init(&up->uidhash_node);
- put_user_ns(up->user_ns);
}
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
struct hlist_node *h;
hlist_for_each_entry(user, h, hashent, uidhash_node) {
- if (user->uid == uid) {
+ if (uid_eq(user->uid, uid)) {
atomic_inc(&user->__count);
return user;
}
@@ -110,14 +129,13 @@ static void free_user(struct user_struct *up, unsigned long flags)
*
* If the user_struct could not be found, return NULL.
*/
-struct user_struct *find_user(uid_t uid)
+struct user_struct *find_user(kuid_t uid)
{
struct user_struct *ret;
unsigned long flags;
- struct user_namespace *ns = current_user_ns();
spin_lock_irqsave(&uidhash_lock, flags);
- ret = uid_hash_find(uid, uidhashentry(ns, uid));
+ ret = uid_hash_find(uid, uidhashentry(uid));
spin_unlock_irqrestore(&uidhash_lock, flags);
return ret;
}
@@ -136,9 +154,9 @@ void free_uid(struct user_struct *up)
local_irq_restore(flags);
}
-struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
+struct user_struct *alloc_uid(kuid_t uid)
{
- struct hlist_head *hashent = uidhashentry(ns, uid);
+ struct hlist_head *hashent = uidhashentry(uid);
struct user_struct *up, *new;
spin_lock_irq(&uidhash_lock);
@@ -153,8 +171,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
new->uid = uid;
atomic_set(&new->__count, 1);
- new->user_ns = get_user_ns(ns);
-
/*
* Before adding this, check whether we raced
* on adding the same user already..
@@ -162,7 +178,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
- put_user_ns(ns);
key_put(new->uid_keyring);
key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
@@ -187,11 +202,11 @@ static int __init uid_cache_init(void)
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
for(n = 0; n < UIDHASH_SZ; ++n)
- INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);
+ INIT_HLIST_HEAD(uidhash_table + n);
/* Insert the root user immediately (init already runs as root) */
spin_lock_irq(&uidhash_lock);
- uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
+ uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
spin_unlock_irq(&uidhash_lock);
return 0;
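
With the per-namespace uid hash gone, find_user() and alloc_uid() key on the kernel-internal kuid_t, so identical kuids reached from different namespaces share one user_struct. A small hypothetical sketch of a lookup follows; processes_of() is illustrative only.

/*
 * Hypothetical sketch: looking up per-user accounting by kuid_t and
 * dropping the reference taken by find_user().
 */
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/uidgid.h>

static long processes_of(kuid_t uid)
{
        struct user_struct *user = find_user(uid);      /* takes a reference */
        long nr = -1;

        if (user) {
                nr = atomic_read(&user->processes);
                free_uid(user);                         /* drop the reference */
        }
        return nr;
}
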
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 3b906e98b1d..86602316422 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -11,9 +11,20 @@
#include <linux/user_namespace.h>
#include <linux/highuid.h>
#include <linux/cred.h>
+#include <linux/securebits.h>
+#include <linux/keyctl.h>
+#include <linux/key-type.h>
+#include <keys/user-type.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
static struct kmem_cache *user_ns_cachep __read_mostly;
+static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
+ struct uid_gid_map *map);
+
/*
* Create a new user namespace, deriving the creator from the user in the
* passed credentials, and replacing that user with the new root user for the
@@ -24,109 +35,565 @@ static struct kmem_cache *user_ns_cachep __read_mostly;
*/
int create_user_ns(struct cred *new)
{
- struct user_namespace *ns;
- struct user_struct *root_user;
- int n;
+ struct user_namespace *ns, *parent_ns = new->user_ns;
+ kuid_t owner = new->euid;
+ kgid_t group = new->egid;
+
+ /* The creator needs a mapping in the parent user namespace
+ * or else we won't be able to reasonably tell userspace who
+ * created a user_namespace.
+ */
+ if (!kuid_has_mapping(parent_ns, owner) ||
+ !kgid_has_mapping(parent_ns, group))
+ return -EPERM;
- ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL);
+ ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
if (!ns)
return -ENOMEM;
kref_init(&ns->kref);
+ ns->parent = parent_ns;
+ ns->owner = owner;
+ ns->group = group;
- for (n = 0; n < UIDHASH_SZ; ++n)
- INIT_HLIST_HEAD(ns->uidhash_table + n);
-
- /* Alloc new root user. */
- root_user = alloc_uid(ns, 0);
- if (!root_user) {
- kmem_cache_free(user_ns_cachep, ns);
- return -ENOMEM;
- }
-
- /* set the new root user in the credentials under preparation */
- ns->creator = new->user;
- new->user = root_user;
- new->uid = new->euid = new->suid = new->fsuid = 0;
- new->gid = new->egid = new->sgid = new->fsgid = 0;
- put_group_info(new->group_info);
- new->group_info = get_group_info(&init_groups);
+ /* Start with the same capabilities as init but useless for doing
+ * anything as the capabilities are bound to the new user namespace.
+ */
+ new->securebits = SECUREBITS_DEFAULT;
+ new->cap_inheritable = CAP_EMPTY_SET;
+ new->cap_permitted = CAP_FULL_SET;
+ new->cap_effective = CAP_FULL_SET;
+ new->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
key_put(new->request_key_auth);
new->request_key_auth = NULL;
#endif
/* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */
- /* root_user holds a reference to ns, our reference can be dropped */
- put_user_ns(ns);
+ /* Leave the new->user_ns reference with the new user namespace. */
+ /* Leave the reference to our user_ns with the new cred. */
+ new->user_ns = ns;
return 0;
}
-/*
- * Deferred destructor for a user namespace. This is required because
- * free_user_ns() may be called with uidhash_lock held, but we need to call
- * back to free_uid() which will want to take the lock again.
- */
-static void free_user_ns_work(struct work_struct *work)
+void free_user_ns(struct kref *kref)
{
- struct user_namespace *ns =
- container_of(work, struct user_namespace, destroyer);
- free_uid(ns->creator);
+ struct user_namespace *parent, *ns =
+ container_of(kref, struct user_namespace, kref);
+
+ parent = ns->parent;
kmem_cache_free(user_ns_cachep, ns);
+ put_user_ns(parent);
}
+EXPORT_SYMBOL(free_user_ns);
-void free_user_ns(struct kref *kref)
+static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
- struct user_namespace *ns =
- container_of(kref, struct user_namespace, kref);
+ unsigned idx, extents;
+ u32 first, last, id2;
+
+ id2 = id + count - 1;
+
+ /* Find the matching extent */
+ extents = map->nr_extents;
+ smp_read_barrier_depends();
+ for (idx = 0; idx < extents; idx++) {
+ first = map->extent[idx].first;
+ last = first + map->extent[idx].count - 1;
+ if (id >= first && id <= last &&
+ (id2 >= first && id2 <= last))
+ break;
+ }
+ /* Map the id or note failure */
+ if (idx < extents)
+ id = (id - first) + map->extent[idx].lower_first;
+ else
+ id = (u32) -1;
- INIT_WORK(&ns->destroyer, free_user_ns_work);
- schedule_work(&ns->destroyer);
+ return id;
}
-EXPORT_SYMBOL(free_user_ns);
-uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid)
+static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
- struct user_namespace *tmp;
+ unsigned idx, extents;
+ u32 first, last;
- if (likely(to == cred->user->user_ns))
- return uid;
+ /* Find the matching extent */
+ extents = map->nr_extents;
+ smp_read_barrier_depends();
+ for (idx = 0; idx < extents; idx++) {
+ first = map->extent[idx].first;
+ last = first + map->extent[idx].count - 1;
+ if (id >= first && id <= last)
+ break;
+ }
+ /* Map the id or note failure */
+ if (idx < extents)
+ id = (id - first) + map->extent[idx].lower_first;
+ else
+ id = (u32) -1;
+ return id;
+}
- /* Is cred->user the creator of the target user_ns
- * or the creator of one of it's parents?
- */
- for ( tmp = to; tmp != &init_user_ns;
- tmp = tmp->creator->user_ns ) {
- if (cred->user == tmp->creator) {
- return (uid_t)0;
- }
+static u32 map_id_up(struct uid_gid_map *map, u32 id)
+{
+ unsigned idx, extents;
+ u32 first, last;
+
+ /* Find the matching extent */
+ extents = map->nr_extents;
+ smp_read_barrier_depends();
+ for (idx = 0; idx < extents; idx++) {
+ first = map->extent[idx].lower_first;
+ last = first + map->extent[idx].count - 1;
+ if (id >= first && id <= last)
+ break;
}
+ /* Map the id or note failure */
+ if (idx < extents)
+ id = (id - first) + map->extent[idx].first;
+ else
+ id = (u32) -1;
+
+ return id;
+}
+
+/**
+ * make_kuid - Map a user-namespace uid pair into a kuid.
+ * @ns: User namespace that the uid is in
+ * @uid: User identifier
+ *
+ * Maps a user-namespace uid pair into a kernel internal kuid,
+ * and returns that kuid.
+ *
+ * When there is no mapping defined for the user-namespace uid
+ * pair INVALID_UID is returned. Callers are expected to test
+ * for and handle INVALID_UID being returned. INVALID_UID
+ * may be tested for using uid_valid().
+ */
+kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
+{
+ /* Map the uid to a global kernel uid */
+ return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
+}
+EXPORT_SYMBOL(make_kuid);
+
+/**
+ * from_kuid - Create a uid from a kuid user-namespace pair.
+ * @targ: The user namespace we want a uid in.
+ * @kuid: The kernel internal uid to start with.
+ *
+ * Map @kuid into the user-namespace specified by @targ and
+ * return the resulting uid.
+ *
+ * There is always a mapping into the initial user_namespace.
+ *
+ * If @kuid has no mapping in @targ (uid_t)-1 is returned.
+ */
+uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
+{
+ /* Map the uid from a global kernel uid */
+ return map_id_up(&targ->uid_map, __kuid_val(kuid));
+}
+EXPORT_SYMBOL(from_kuid);
- /* No useful relationship so no mapping */
- return overflowuid;
+/**
+ * from_kuid_munged - Create a uid from a kuid user-namespace pair.
+ * @targ: The user namespace we want a uid in.
+ * @kuid: The kernel internal uid to start with.
+ *
+ * Map @kuid into the user-namespace specified by @targ and
+ * return the resulting uid.
+ *
+ * There is always a mapping into the initial user_namespace.
+ *
+ * Unlike from_kuid, from_kuid_munged never fails and always
+ * returns a valid uid. This makes from_kuid_munged appropriate
+ * for use in syscalls like stat and getuid where failing the
+ * system call and failing to provide a valid uid are not
+ * options.
+ *
+ * If @kuid has no mapping in @targ overflowuid is returned.
+ */
+uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
+{
+ uid_t uid;
+ uid = from_kuid(targ, kuid);
+
+ if (uid == (uid_t) -1)
+ uid = overflowuid;
+ return uid;
}
+EXPORT_SYMBOL(from_kuid_munged);
-gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid)
+/**
+ * make_kgid - Map a user-namespace gid pair into a kgid.
+ * @ns: User namespace that the gid is in
+ * @gid: group identifier
+ *
+ * Maps a user-namespace gid pair into a kernel internal kgid,
+ * and returns that kgid.
+ *
+ * When there is no mapping defined for the user-namespace gid
+ * pair INVALID_GID is returned. Callers are expected to test
+ * for and handle INVALID_GID being returned. INVALID_GID may be
+ * tested for using gid_valid().
+ */
+kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
- struct user_namespace *tmp;
+ /* Map the gid to a global kernel gid */
+ return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
+}
+EXPORT_SYMBOL(make_kgid);
- if (likely(to == cred->user->user_ns))
- return gid;
+/**
+ * from_kgid - Create a gid from a kgid user-namespace pair.
+ * @targ: The user namespace we want a gid in.
+ * @kgid: The kernel internal gid to start with.
+ *
+ * Map @kgid into the user-namespace specified by @targ and
+ * return the resulting gid.
+ *
+ * There is always a mapping into the initial user_namespace.
+ *
+ * If @kgid has no mapping in @targ (gid_t)-1 is returned.
+ */
+gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
+{
+ /* Map the gid from a global kernel gid */
+ return map_id_up(&targ->gid_map, __kgid_val(kgid));
+}
+EXPORT_SYMBOL(from_kgid);
+
+/**
+ * from_kgid_munged - Create a gid from a kgid user-namespace pair.
+ * @targ: The user namespace we want a gid in.
+ * @kgid: The kernel internal gid to start with.
+ *
+ * Map @kgid into the user-namespace specified by @targ and
+ * return the resulting gid.
+ *
+ * There is always a mapping into the initial user_namespace.
+ *
+ * Unlike from_kgid, from_kgid_munged never fails and always
+ * returns a valid gid. This makes from_kgid_munged appropriate
+ * for use in syscalls like stat and getgid where failing the
+ * system call and failing to provide a valid gid are not options.
+ *
+ * If @kgid has no mapping in @targ overflowgid is returned.
+ */
+gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
+{
+ gid_t gid;
+ gid = from_kgid(targ, kgid);
- /* Is cred->user the creator of the target user_ns
- * or the creator of one of it's parents?
+ if (gid == (gid_t) -1)
+ gid = overflowgid;
+ return gid;
+}
+EXPORT_SYMBOL(from_kgid_munged);
+
+static int uid_m_show(struct seq_file *seq, void *v)
+{
+ struct user_namespace *ns = seq->private;
+ struct uid_gid_extent *extent = v;
+ struct user_namespace *lower_ns;
+ uid_t lower;
+
+ lower_ns = current_user_ns();
+ if ((lower_ns == ns) && lower_ns->parent)
+ lower_ns = lower_ns->parent;
+
+ lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
+
+ seq_printf(seq, "%10u %10u %10u\n",
+ extent->first,
+ lower,
+ extent->count);
+
+ return 0;
+}
+
+static int gid_m_show(struct seq_file *seq, void *v)
+{
+ struct user_namespace *ns = seq->private;
+ struct uid_gid_extent *extent = v;
+ struct user_namespace *lower_ns;
+ gid_t lower;
+
+ lower_ns = current_user_ns();
+ if ((lower_ns == ns) && lower_ns->parent)
+ lower_ns = lower_ns->parent;
+
+ lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
+
+ seq_printf(seq, "%10u %10u %10u\n",
+ extent->first,
+ lower,
+ extent->count);
+
+ return 0;
+}
+
+static void *m_start(struct seq_file *seq, loff_t *ppos, struct uid_gid_map *map)
+{
+ struct uid_gid_extent *extent = NULL;
+ loff_t pos = *ppos;
+
+ if (pos < map->nr_extents)
+ extent = &map->extent[pos];
+
+ return extent;
+}
+
+static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
+{
+ struct user_namespace *ns = seq->private;
+
+ return m_start(seq, ppos, &ns->uid_map);
+}
+
+static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
+{
+ struct user_namespace *ns = seq->private;
+
+ return m_start(seq, ppos, &ns->gid_map);
+}
+
+static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return seq->op->start(seq, pos);
+}
+
+static void m_stop(struct seq_file *seq, void *v)
+{
+ return;
+}
+
+struct seq_operations proc_uid_seq_operations = {
+ .start = uid_m_start,
+ .stop = m_stop,
+ .next = m_next,
+ .show = uid_m_show,
+};
+
+struct seq_operations proc_gid_seq_operations = {
+ .start = gid_m_start,
+ .stop = m_stop,
+ .next = m_next,
+ .show = gid_m_show,
+};
+
+static DEFINE_MUTEX(id_map_mutex);
+
+static ssize_t map_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos,
+ int cap_setid,
+ struct uid_gid_map *map,
+ struct uid_gid_map *parent_map)
+{
+ struct seq_file *seq = file->private_data;
+ struct user_namespace *ns = seq->private;
+ struct uid_gid_map new_map;
+ unsigned idx;
+ struct uid_gid_extent *extent, *last = NULL;
+ unsigned long page = 0;
+ char *kbuf, *pos, *next_line;
+ ssize_t ret = -EINVAL;
+
+ /*
+ * The id_map_mutex serializes all writes to any given map.
+ *
+ * Any map is only ever written once.
+ *
+ * An id map fits within 1 cache line on most architectures.
+ *
+ * On read nothing needs to be done unless you are on an
+ * architecture with a crazy cache coherency model like alpha.
+ *
+ * There is a one time data dependency between reading the
+ * count of the extents and the values of the extents. The
+ * desired behavior is to see the values of the extents that
+ * were written before the count of the extents.
+ *
+ * To achieve this, smp_wmb() is used to guarantee the write
+ * order and smp_read_barrier_depends() is used to guarantee that
+ * we don't have crazy architectures returning stale data.
+ *
*/
- for ( tmp = to; tmp != &init_user_ns;
- tmp = tmp->creator->user_ns ) {
- if (cred->user == tmp->creator) {
- return (gid_t)0;
+ mutex_lock(&id_map_mutex);
+
+ ret = -EPERM;
+ /* Only allow one successful write to the map */
+ if (map->nr_extents != 0)
+ goto out;
+
+ /* Require the appropriate privilege CAP_SETUID or CAP_SETGID
+ * over the user namespace in order to set the id mapping.
+ */
+ if (!ns_capable(ns, cap_setid))
+ goto out;
+
+ /* Get a buffer */
+ ret = -ENOMEM;
+ page = __get_free_page(GFP_TEMPORARY);
+ kbuf = (char *) page;
+ if (!page)
+ goto out;
+
+ /* Only allow <= page size writes at the beginning of the file */
+ ret = -EINVAL;
+ if ((*ppos != 0) || (count >= PAGE_SIZE))
+ goto out;
+
+ /* Slurp in the user data */
+ ret = -EFAULT;
+ if (copy_from_user(kbuf, buf, count))
+ goto out;
+ kbuf[count] = '\0';
+
+ /* Parse the user data */
+ ret = -EINVAL;
+ pos = kbuf;
+ new_map.nr_extents = 0;
+ for (;pos; pos = next_line) {
+ extent = &new_map.extent[new_map.nr_extents];
+
+ /* Find the end of line and ensure I don't look past it */
+ next_line = strchr(pos, '\n');
+ if (next_line) {
+ *next_line = '\0';
+ next_line++;
+ if (*next_line == '\0')
+ next_line = NULL;
}
+
+ pos = skip_spaces(pos);
+ extent->first = simple_strtoul(pos, &pos, 10);
+ if (!isspace(*pos))
+ goto out;
+
+ pos = skip_spaces(pos);
+ extent->lower_first = simple_strtoul(pos, &pos, 10);
+ if (!isspace(*pos))
+ goto out;
+
+ pos = skip_spaces(pos);
+ extent->count = simple_strtoul(pos, &pos, 10);
+ if (*pos && !isspace(*pos))
+ goto out;
+
+ /* Verify there is no trailing junk on the line */
+ pos = skip_spaces(pos);
+ if (*pos != '\0')
+ goto out;
+
+ /* Verify we have been given valid starting values */
+ if ((extent->first == (u32) -1) ||
+ (extent->lower_first == (u32) -1 ))
+ goto out;
+
+ /* Verify count is not zero and does not cause the extent to wrap */
+ if ((extent->first + extent->count) <= extent->first)
+ goto out;
+ if ((extent->lower_first + extent->count) <= extent->lower_first)
+ goto out;
+
+ /* For now only accept extents that are strictly in order */
+ if (last &&
+ (((last->first + last->count) > extent->first) ||
+ ((last->lower_first + last->count) > extent->lower_first)))
+ goto out;
+
+ new_map.nr_extents++;
+ last = extent;
+
+ /* Fail if the file contains too many extents */
+ if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
+ (next_line != NULL))
+ goto out;
+ }
+ /* Be very certain the new map actually exists */
+ if (new_map.nr_extents == 0)
+ goto out;
+
+ ret = -EPERM;
+ /* Validate the user is allowed to use the user ids being mapped to. */
+ if (!new_idmap_permitted(ns, cap_setid, &new_map))
+ goto out;
+
+ /* Map the lower ids from the parent user namespace to the
+ * kernel global id space.
+ */
+ for (idx = 0; idx < new_map.nr_extents; idx++) {
+ u32 lower_first;
+ extent = &new_map.extent[idx];
+
+ lower_first = map_id_range_down(parent_map,
+ extent->lower_first,
+ extent->count);
+
+ /* Fail if we can not map the specified extent to
+ * the kernel global id space.
+ */
+ if (lower_first == (u32) -1)
+ goto out;
+
+ extent->lower_first = lower_first;
}
- /* No useful relationship so no mapping */
- return overflowgid;
+ /* Install the map */
+ memcpy(map->extent, new_map.extent,
+ new_map.nr_extents*sizeof(new_map.extent[0]));
+ smp_wmb();
+ map->nr_extents = new_map.nr_extents;
+
+ *ppos = count;
+ ret = count;
+out:
+ mutex_unlock(&id_map_mutex);
+ if (page)
+ free_page(page);
+ return ret;
+}
+
+ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
+{
+ struct seq_file *seq = file->private_data;
+ struct user_namespace *ns = seq->private;
+
+ if (!ns->parent)
+ return -EPERM;
+
+ return map_write(file, buf, size, ppos, CAP_SETUID,
+ &ns->uid_map, &ns->parent->uid_map);
+}
+
+ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
+{
+ struct seq_file *seq = file->private_data;
+ struct user_namespace *ns = seq->private;
+
+ if (!ns->parent)
+ return -EPERM;
+
+ return map_write(file, buf, size, ppos, CAP_SETGID,
+ &ns->gid_map, &ns->parent->gid_map);
+}
+
+static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
+ struct uid_gid_map *new_map)
+{
+ /* Allow the specified ids if we have the appropriate capability
+ * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
+ */
+ if (ns_capable(ns->parent, cap_setid))
+ return true;
+
+ return false;
}
static __init int user_namespaces_init(void)
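
map_write() above defines the format of the new /proc/<pid>/uid_map and gid_map files: one or more lines of three decimal fields, namely the first id inside the namespace, the first id in the parent namespace, and the length of the range. The map may be written exactly once, by a writer holding CAP_SETUID (or CAP_SETGID) over the namespace, which with new_idmap_permitted() as written here means holding the capability in the parent namespace. Below is a minimal userspace sketch under those assumptions; the 100000 offset and 65536 count are arbitrary example values and write_uid_map() is not an existing interface.

/*
 * Hypothetical userspace sketch (not part of this diff): a privileged
 * parent establishes a uid mapping for a child created with
 * CLONE_NEWUSER. Error handling is minimal.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

static int write_uid_map(pid_t child)
{
        char path[64];
        /* "<first id in ns> <first id in parent> <count>\n", as parsed by map_write() */
        const char *map = "0 100000 65536\n";
        int fd, ret = -1;

        snprintf(path, sizeof(path), "/proc/%d/uid_map", (int)child);

        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        /* Only one successful write is permitted; it must describe the whole map. */
        if (write(fd, map, strlen(map)) == (ssize_t)strlen(map))
                ret = 0;
        close(fd);
        return ret;
}
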
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 405caf91aad..679d97a5d3f 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -43,7 +43,7 @@ static struct uts_namespace *clone_uts_ns(struct task_struct *tsk,
down_read(&uts_sem);
memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
- ns->user_ns = get_user_ns(task_cred_xxx(tsk, user)->user_ns);
+ ns->user_ns = get_user_ns(task_cred_xxx(tsk, user_ns));
up_read(&uts_sem);
return ns;
}