2 * Copyright (C) 1998-2007 Novell/SUSE
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation, version 2 of the
9 * AppArmor userspace policy interface
12 #include <asm/unaligned.h>
18 * This mutex is used to synchronize profile adds, replacements, and
19 * removals: we only allow one of these operations at a time.
20 * We do not use the profile list lock here in order to avoid blocking
21 * exec during those operations. (Exec involves a profile list lookup
22 * for named-profile transitions.)
24 DEFINE_MUTEX(aa_interface_lock);
27 * The AppArmor interface treats data as a type byte followed by the
28 * actual data. The interface has the notion of a named entry
29 * which has a name (AA_NAME typecode followed by name string) followed by
30 * the entries typecode and data. Named types allow for optional
31 * elements and extensions to be added and tested for without breaking
32 * backwards compatibility.
40 AA_NAME, /* same as string except it is items name */
52 * aa_ext is the read of the buffer containing the serialized profile. The
53 * data is copied into a kernel buffer in apparmorfs and then handed off to
54 * the unpack routines.
59 void *pos; /* pointer to current position in the buffer */
/*
 * aa_inbounds - check that @size bytes remain in the serialized stream
 * @e: read head into the serialized data buffer
 * @size: number of bytes that must still be available
 *
 * Returns nonzero if @size bytes fit between e->pos and e->end.
 */
63 static inline int aa_inbounds(struct aa_ext *e, size_t size)
65 return (size <= e->end - e->pos);
69 * aa_is_u16_chunk - test and do bounds checking for a u16 size based chunk
70 * @e: serialized data read head
71 * @chunk: start address for chunk of data
73 * return the size of chunk found with the read head at the end of
/*
 * Reads a little-endian u16 length header (bounds-checked, possibly
 * unaligned), advances the read head past it, then bounds-checks the
 * chunk of that length which follows.
 * NOTE(review): interior lines are elided in this view — the failure
 * paths and the assignment of *chunk are not visible here; confirm
 * against the full file.
 */
76 static size_t aa_is_u16_chunk(struct aa_ext *e, char **chunk)
81 if (!aa_inbounds(e, sizeof(u16)))
83 size = le16_to_cpu(get_unaligned((u16 *)e->pos));
84 e->pos += sizeof(u16);
85 if (!aa_inbounds(e, size))
/*
 * aa_is_X - test whether the next serialized element has typecode @code
 * @e: read head into the serialized data buffer
 * @code: expected aa_code typecode byte
 *
 * Bounds-checks one byte and compares it to @code.
 * NOTE(review): the success path (read-head advance and return value)
 * is elided in this view — presumably the head advances on match.
 */
96 static inline int aa_is_X(struct aa_ext *e, enum aa_code code)
98 if (!aa_inbounds(e, 1))
100 if (*(u8 *) e->pos != code)
107 * aa_is_nameX - check if the next element is of type X with a name of @name
108 * @e: serialized data extent information
110 * @name: name to match to the serialized element.
112 * check that the next serialized data element is of type X and has a tag
113 * name @name. If @name is specified then there must be a matching
114 * name element in the stream. If @name is NULL any name element will be
115 * skipped and only the typecode will be tested.
116 * returns 1 on success (both type code and name tests match) and the read
117 * head is advanced past the headers
118 * returns %0 if either match fails, the read head does not move
/*
 * Consumes an optional AA_NAME tag (u16-sized string chunk) before the
 * typecode.  If @name is non-NULL the tag must be present and match;
 * if @name is NULL any tag is skipped.  Finally the typecode itself is
 * tested with aa_is_X().
 * NOTE(review): the error/rewind paths (restoring e->pos on failure)
 * are elided in this view.
 */
120 static int aa_is_nameX(struct aa_ext *e, enum aa_code code, const char *name)
124 * Check for presence of a tagname, and if present name size
125 * AA_NAME tag value is a u16.
127 if (aa_is_X(e, AA_NAME)) {
129 size_t size = aa_is_u16_chunk(e, &tag);
130 /* if a name is specified it must match. otherwise skip tag */
131 if (name && (!size || strcmp(name, tag)))
134 /* if a name is specified and there is no name tag fail */
138 /* now check if type code matches */
139 if (aa_is_X(e, code))
/*
 * aa_is_u16 - unpack a little-endian u16 element, optionally tagged @name
 * @e: read head into the serialized data buffer
 * @data: out: decoded host-order value
 * @name: required tag name, or NULL to accept any/no tag
 *
 * Uses get_unaligned() since the stream gives no alignment guarantee.
 * NOTE(review): failure-path lines are elided in this view.
 */
150 if (aa_is_nameX(e, AA_U16, name)) {
151 if (!aa_inbounds(e, sizeof(u16)))
154 *data = le16_to_cpu(get_unaligned((u16 *)e->pos));
155 e->pos += sizeof(u16);
147 static int aa_is_u16(struct aa_ext *e, u16 *data, const char *name)
/*
 * aa_is_u32 - unpack a little-endian u32 element, optionally tagged @name
 * @e: read head into the serialized data buffer
 * @data: out: decoded host-order value
 * @name: required tag name, or NULL to accept any/no tag
 *
 * Mirrors aa_is_u16() for 32-bit values.
 * NOTE(review): failure-path lines are elided in this view.
 */
163 static int aa_is_u32(struct aa_ext *e, u32 *data, const char *name)
166 if (aa_is_nameX(e, AA_U32, name)) {
167 if (!aa_inbounds(e, sizeof(u32)))
170 *data = le32_to_cpu(get_unaligned((u32 *)e->pos));
171 e->pos += sizeof(u32);
/*
 * aa_is_array - unpack an AA_ARRAY header and return its element count
 * @e: read head into the serialized data buffer
 * @name: required tag name, or NULL
 *
 * The element count is stored as a little-endian u16 after the AA_ARRAY
 * typecode.  NOTE(review): return statements are elided in this view;
 * presumably 0 indicates "not an array"/failure.
 */
179 static size_t aa_is_array(struct aa_ext *e, const char *name)
182 if (aa_is_nameX(e, AA_ARRAY, name)) {
184 if (!aa_inbounds(e, sizeof(u16)))
186 size = (int) le16_to_cpu(get_unaligned((u16 *)e->pos));
187 e->pos += sizeof(u16);
/*
 * aa_is_blob - unpack an AA_BLOB (u32-sized opaque byte run)
 * @e: read head into the serialized data buffer
 * @blob: out: pointer into the stream at the blob's data
 * @name: required tag name, or NULL
 *
 * Reads the little-endian u32 size, then bounds-checks the payload.
 * NOTE(review): the *blob assignment and return lines are elided in
 * this view.
 */
195 static size_t aa_is_blob(struct aa_ext *e, char **blob, const char *name)
198 if (aa_is_nameX(e, AA_BLOB, name)) {
200 if (!aa_inbounds(e, sizeof(u32)))
202 size = le32_to_cpu(get_unaligned((u32 *)e->pos));
203 e->pos += sizeof(u32);
204 if (aa_inbounds(e, (size_t) size)) {
/*
 * aa_is_dynstring - unpack an AA_STRING into a freshly kmalloc'd buffer
 * @e: read head into the serialized data buffer
 * @string: out: newly allocated copy of the string (caller owns/frees)
 * @name: required tag name, or NULL
 *
 * The string payload is a u16-sized chunk; @size includes whatever
 * terminator the stream carries, since the raw chunk is memcpy'd as-is.
 * NOTE(review): the *string assignment and return lines are elided in
 * this view.
 */
215 static int aa_is_dynstring(struct aa_ext *e, char **string, const char *name)
221 if (aa_is_nameX(e, AA_STRING, name) &&
222 (size = aa_is_u16_chunk(e, &src_str))) {
224 if (!(str = kmalloc(size, GFP_KERNEL)))
226 memcpy(str, src_str, size);
238 * aa_unpack_dfa - unpack a file rule dfa
239 * @e: serialized data extent information
241 * returns dfa or ERR_PTR
/*
 * Pulls the "aadfa" blob out of the stream, then feeds it to
 * unpack_dfa()/verify_dfa().  The dfa payload is 8-byte aligned relative
 * to the START of the stream, so the pad is computed from the blob's
 * offset within the overall buffer, not from the blob pointer alone.
 * On any error the partially built dfa is discarded and an ERR_PTR is
 * returned.  NOTE(review): several lines (blob declaration, cleanup
 * path, final return) are elided in this view.
 */
243 struct aa_dfa *aa_unpack_dfa(struct aa_ext *e)
246 size_t size, error = 0;
247 struct aa_dfa *dfa = NULL;
249 size = aa_is_blob(e, &blob, "aadfa");
251 dfa = aa_match_alloc();
254 * The dfa is aligned with in the blob to 8 bytes
255 * from the beginning of the stream.
257 size_t sz = blob - (char *) e->start;
258 size_t pad = ALIGN(sz, 8) - sz;
259 error = unpack_dfa(dfa, blob + pad, size - pad);
261 error = verify_dfa(dfa);
268 dfa = ERR_PTR(error);
276 * aa_unpack_profile - unpack a serialized profile
277 * @e: serialized data extent information
278 * @operation: operation profile is being unpacked for
/*
 * Unpacks one serialized profile: allocates an aa_profile, then reads,
 * in order, the enclosing AA_STRUCT "profile", the profile name, the
 * "flags" sub-struct (one ignored u32, then complain and audit), the
 * low-order capability word, the optional "net_allowed_af" u16 array,
 * the file-rule dfa blob, and the closing AA_STRUCTEND.  Any unpack
 * failure funnels to a shared audit-and-free error path that returns an
 * ERR_PTR.  NOTE(review): goto labels, several declarations (sa, size,
 * i, error) and the success return are elided in this view.
 */
280 static struct aa_profile *aa_unpack_profile(struct aa_ext *e,
281 const char *operation)
283 struct aa_profile *profile = NULL;
290 profile = alloc_aa_profile();
292 return ERR_PTR(-ENOMEM);
294 /* check that we have the right struct being passed */
295 if (!aa_is_nameX(e, AA_STRUCT, "profile"))
297 if (!aa_is_dynstring(e, &profile->name, NULL))
300 /* per profile debug flags (complain, audit) */
301 if (!aa_is_nameX(e, AA_STRUCT, "flags"))
/* first u32 in the flags struct is read and discarded (NULL data) */
303 if (!aa_is_u32(e, NULL, NULL))
305 if (!aa_is_u32(e, &(profile->flags.complain), NULL))
307 if (!aa_is_u32(e, &(profile->flags.audit), NULL))
309 if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
312 /* XXX: This supports only the low order capabilities. -jeffm */
313 if (!aa_is_u32(e, &(profile->capabilities.cap[0]), NULL))
316 size = aa_is_array(e, "net_allowed_af");
321 for (i = 0; i < size; i++) {
322 if (!aa_is_u16(e, &profile->network_families[i], NULL))
325 if (!aa_is_nameX(e, AA_ARRAYEND, NULL))
327 /* allow unix domain and netlink sockets they are handled
/* unconditionally permit all AF_UNIX/AF_NETLINK socket perms */
331 profile->network_families[AF_UNIX] = 0xffff;
332 profile->network_families[AF_NETLINK] = 0xffff;
335 profile->file_rules = aa_unpack_dfa(e);
336 if (IS_ERR(profile->file_rules)) {
/* keep the errno, clear the ERR_PTR so free_aa_profile won't touch it */
337 error = PTR_ERR(profile->file_rules);
338 profile->file_rules = NULL;
342 if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
/* shared error path: audit the failure, free the partial profile */
348 memset(&sa, 0, sizeof(sa));
349 sa.operation = operation;
350 sa.gfp_mask = GFP_KERNEL;
351 sa.name = profile && profile->name ? profile->name : "unknown";
352 sa.info = "failed to unpack profile";
353 aa_audit_status(NULL, &sa);
356 free_aa_profile(profile);
358 return ERR_PTR(error);
362 * aa_verify_header - unpack serialized stream header
363 * @e: serialized data read head
364 * @operation: operation header is being verified for
366 * returns error or 0 if header is good
/*
 * Reads the leading "version" u32 from the stream and rejects anything
 * other than interface version 3, auditing the specific reason in each
 * failure case.  Returns -EPROTONOSUPPORT on either failure; 0-return
 * line is elided in this view.
 */
368 static int aa_verify_header(struct aa_ext *e, const char *operation)
370 /* get the interface version */
371 if (!aa_is_u32(e, &e->version, "version")) {
373 memset(&sa, 0, sizeof(sa));
374 sa.operation = operation;
375 sa.gfp_mask = GFP_KERNEL;
376 sa.info = "invalid profile format";
377 aa_audit_status(NULL, &sa);
378 return -EPROTONOSUPPORT;
381 /* check that the interface version is currently supported */
382 if (e->version != 3) {
384 memset(&sa, 0, sizeof(sa));
385 sa.operation = operation;
386 sa.gfp_mask = GFP_KERNEL;
387 sa.info = "unsupported interface version";
388 aa_audit_status(NULL, &sa);
389 return -EPROTONOSUPPORT;
395 * aa_add_profile - Unpack and add a new profile to the profile list
396 * @data: serialized data stream
397 * @size: size of the serialized data stream
/*
 * Verifies the stream header, unpacks a profile, and inserts it into
 * the global profile_list under aa_interface_lock + profile_list_lock,
 * rejecting the add if a profile with the same name already exists (the
 * unpacked profile is dropped via aa_put_profile in that case).
 * NOTE(review): the aa_ext initialization, the duplicate-name error
 * return value, and the success return are elided in this view.
 */
399 ssize_t aa_add_profile(void *data, size_t size)
401 struct aa_profile *profile = NULL;
407 ssize_t error = aa_verify_header(&e, "profile_load");
411 profile = aa_unpack_profile(&e, "profile_load");
413 return PTR_ERR(profile);
/* serialize with other add/replace/remove ops, then take list lock */
415 mutex_lock(&aa_interface_lock);
416 write_lock(&profile_list_lock);
417 if (__aa_find_profile(profile->name, &profile_list)) {
418 /* A profile with this name exists already. */
419 write_unlock(&profile_list_lock);
420 mutex_unlock(&aa_interface_lock);
421 aa_put_profile(profile);
424 list_add(&profile->list, &profile_list);
425 write_unlock(&profile_list_lock);
426 mutex_unlock(&aa_interface_lock);
432 * task_replace - replace a task's profile
433 * @task: task to replace profile on
434 * @new_cxt: new aa_task_context to do replacement with
435 * @new_profile: new profile
/*
 * Swaps @task's confinement to @new_profile using the preallocated
 * @new_cxt, preserving the task's existing change_hat cookie and
 * previous_profile from its current context.
 * NOTE(review): the AA_DEBUG format arguments are partially elided in
 * this view.
 */
437 static inline void task_replace(struct task_struct *task,
438 struct aa_task_context *new_cxt,
439 struct aa_profile *new_profile)
441 struct aa_task_context *cxt = aa_task_context(task);
443 AA_DEBUG("%s: replacing profile for task %d "
447 cxt->profile->name, cxt->profile);
449 aa_change_task_context(task, new_cxt, new_profile, cxt->cookie,
450 cxt->previous_profile);
454 * aa_replace_profile - replace a profile on the profile list
455 * @udata: serialized data stream
456 * @size: size of the serialized data stream
458 * unpack and replace a profile on the profile list and uses of that profile
459 * by any aa_task_context. If the profile does not exist on the profile list
460 * it is added. Return %0 or error.
/*
 * Unpacks a replacement profile, swaps it for any same-named profile on
 * the global list (marking the old one stale under its lock), then
 * migrates every task confined by the old profile to the new one, one
 * task at a time, allocating each replacement context with
 * __GFP_NOFAIL outside the profile locks.  The task_contexts list can
 * only shrink during this loop since new tasks won't attach to a stale
 * profile.  NOTE(review): the aa_ext initialization, the no-old-profile
 * branch, the migration loop structure, and the final return are elided
 * in this view.
 */
462 ssize_t aa_replace_profile(void *udata, size_t size)
464 struct aa_profile *old_profile, *new_profile;
465 struct aa_task_context *new_cxt;
471 ssize_t error = aa_verify_header(&e, "profile_replace");
475 new_profile = aa_unpack_profile(&e, "profile_replace");
476 if (IS_ERR(new_profile))
477 return PTR_ERR(new_profile);
479 mutex_lock(&aa_interface_lock);
480 write_lock(&profile_list_lock);
481 old_profile = __aa_find_profile(new_profile->name, &profile_list);
/* mark stale under the profile lock so lockless readers see it */
483 lock_profile(old_profile);
484 old_profile->isstale = 1;
485 unlock_profile(old_profile);
486 list_del_init(&old_profile->list);
488 list_add(&new_profile->list, &profile_list);
489 write_unlock(&profile_list_lock);
495 * Replacement needs to allocate a new aa_task_context for each
496 * task confined by old_profile. To do this the profile locks
497 * are only held when the actual switch is done per task. While
498 * looping to allocate a new aa_task_context the old_task list
499 * may get shorter if tasks exit/change their profile but will
500 * not get longer as new task will not use old_profile detecting
/* __GFP_NOFAIL: allocation loops rather than failing mid-replace */
504 new_cxt = aa_alloc_task_context(GFP_KERNEL | __GFP_NOFAIL);
506 lock_both_profiles(old_profile, new_profile);
507 if (!list_empty(&old_profile->task_contexts)) {
508 struct task_struct *task =
509 list_entry(old_profile->task_contexts.next,
510 struct aa_task_context, list)->task;
512 task_replace(task, new_cxt, new_profile);
516 unlock_both_profiles(old_profile, new_profile);
/* list was empty: the preallocated context wasn't consumed */
518 aa_free_task_context(new_cxt);
519 aa_put_profile(old_profile);
522 mutex_unlock(&aa_interface_lock);
527 * aa_remove_profile - remove a profile from the system
528 * @name: name of the profile to remove
529 * @size: size of the name
531 * remove a profile from the profile list and all aa_task_context references
/*
 * Looks up @name on the global profile list, marks the profile stale,
 * unconfines every task using it, then unlinks it from the list and
 * drops the list's reference.  The not-found early-exit's return value
 * and the success return are elided in this view.
 * NOTE(review): as visible here, the early-exit path at lines 542-543
 * releases both locks while the main path appears to keep
 * profile_list_lock held across lock_profile/aa_unconfine_tasks —
 * confirm lock ordering against the full file.
 */
534 ssize_t aa_remove_profile(const char *name, size_t size)
536 struct aa_profile *profile;
538 mutex_lock(&aa_interface_lock);
539 write_lock(&profile_list_lock);
540 profile = __aa_find_profile(name, &profile_list);
542 write_unlock(&profile_list_lock);
543 mutex_unlock(&aa_interface_lock);
547 /* Remove the profile from each task context it is on. */
548 lock_profile(profile);
549 profile->isstale = 1;
550 aa_unconfine_tasks(profile);
551 unlock_profile(profile);
553 /* Release the profile itself. */
554 list_del_init(&profile->list);
555 aa_put_profile(profile);
556 write_unlock(&profile_list_lock);
557 mutex_unlock(&aa_interface_lock);
563 * free_aa_profile_kref - free aa_profile by kref (called by aa_put_profile)
564 * @kr: kref callback for freeing of a profile
/*
 * free_aa_profile_kref - kref release callback for aa_profile
 * @kref: embedded refcount (aa_profile.count) whose last ref was put
 *
 * Recovers the containing aa_profile from the kref; the actual free
 * (presumably free_aa_profile(p)) is elided in this view.
 */
566 void free_aa_profile_kref(struct kref *kref)
568 struct aa_profile *p=container_of(kref, struct aa_profile, count);
574 * alloc_aa_profile - allocate, initialize and return a new profile
575 * Returns NULL on failure.
/*
 * Zero-allocates a profile and initializes its list heads, refcount
 * (starts at 1 via kref_init) and spinlock.
 * NOTE(review): the NULL-check after kzalloc and the return statement
 * are elided in this view.
 */
577 struct aa_profile *alloc_aa_profile(void)
579 struct aa_profile *profile;
581 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
582 AA_DEBUG("%s(%p)\n", __FUNCTION__, profile);
584 INIT_LIST_HEAD(&profile->list);
585 kref_init(&profile->count);
586 INIT_LIST_HEAD(&profile->task_contexts);
587 spin_lock_init(&profile->lock);
593 * free_aa_profile - free a profile
594 * @profile: the profile to free
596 * Free a profile, its hats and null_profile. All references to the profile,
597 * its hats and null_profile must have been put.
599 * If the profile was referenced from a task context, free_aa_profile() will
600 * be called from an rcu callback routine, so we must not sleep here.
/*
 * Frees a profile's dfa, name and (per the elided lines) the profile
 * itself; must not sleep since it may run from an rcu callback (see the
 * kernel-doc above).  A profile still linked on the global list is an
 * internal error and is reported rather than freed unsafely.
 * NOTE(review): the NULL-profile guard, error-path return, and kfree of
 * the profile struct are elided in this view.
 */
602 void free_aa_profile(struct aa_profile *profile)
604 AA_DEBUG("%s(%p)\n", __FUNCTION__, profile);
609 /* profile is still on global profile list -- invalid */
610 if (!list_empty(&profile->list)) {
611 AA_ERROR("%s: internal error, "
612 "profile '%s' still on global list\n",
618 aa_match_free(profile->file_rules);
621 AA_DEBUG("%s: %s\n", __FUNCTION__, profile->name);
622 kfree(profile->name);
629 * aa_unconfine_tasks - remove tasks on a profile's task context list
630 * @profile: profile to remove tasks from
632 * Assumes that @profile lock is held.
634 void aa_unconfine_tasks(struct aa_profile *profile)
636 while (!list_empty(&profile->task_contexts)) {
637 struct task_struct *task =
638 list_entry(profile->task_contexts.next,
639 struct aa_task_context, list)->task;
641 aa_change_task_context(task, NULL, NULL, 0, NULL);