/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <acpi/acpi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>

#ifdef CONFIG_ACPI_EFI
#include <linux/efi.h>
u64 efi_mem_attributes (u64 phys_addr);
#endif


#define _COMPONENT              ACPI_OS_SERVICES
ACPI_MODULE_NAME        ("osl")

#define PREFIX          "ACPI: "

struct acpi_os_dpc
{
        OSD_EXECUTION_CALLBACK  function;
        void                    *context;
};


#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>
/* stuff for debugger support */
int acpi_in_debugger = 0;
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER*/

static int acpi_irq_irq = 0;
static OSD_HANDLER acpi_irq_handler = NULL;
static void *acpi_irq_context = NULL;

extern struct pci_ops *pci_root_ops;

acpi_status
acpi_os_initialize(void)
{
        /*
         * Initialize PCI configuration space access, as we'll need to access
         * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
         */
#ifdef CONFIG_ACPI_PCI
        if (!pci_root_ops) {
                printk(KERN_ERR PREFIX "Access to PCI configuration space unavailable\n");
                return AE_NULL_ENTRY;
        }
#endif

        return AE_OK;
}

acpi_status
acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_irq_irq,
                                                 acpi_irq_handler);
        }

        return AE_OK;
}

void
acpi_os_printf(const char *fmt,...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}

void
acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        /* Bound the output so a long message cannot overrun the buffer. */
        vsnprintf(buffer, sizeof(buffer), fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                printk("%s", buffer);
        }
#else
        printk("%s", buffer);
#endif
}

void *
acpi_os_allocate(acpi_size size)
{
        return kmalloc(size, GFP_KERNEL);
}

void
acpi_os_free(void *ptr)
{
        kfree(ptr);
}

acpi_status
acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
{
#ifdef CONFIG_ACPI_EFI
        addr->pointer_type = ACPI_PHYSICAL_POINTER;
        if (efi.acpi20)
                addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi20);
        else if (efi.acpi)
                addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi);
        else {
                printk(KERN_ERR PREFIX "System description tables not found\n");
                return AE_NOT_FOUND;
        }
#else
        if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) {
                printk(KERN_ERR PREFIX "System description tables not found\n");
                return AE_NOT_FOUND;
        }
#endif /*CONFIG_ACPI_EFI*/

        return AE_OK;
}

acpi_status
acpi_os_map_memory(acpi_physical_address phys, acpi_size size, void **virt)
{
#ifdef CONFIG_ACPI_EFI
        if (EFI_MEMORY_WB & efi_mem_attributes(phys)) {
                *virt = phys_to_virt(phys);
        } else {
                *virt = ioremap(phys, size);
        }
#else
        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return AE_BAD_PARAMETER;
        }
        /*
         * ioremap checks to ensure this is in reserved space
         */
        *virt = ioremap((unsigned long) phys, size);
#endif

        if (!*virt)
                return AE_NO_MEMORY;

        return AE_OK;
}

void
acpi_os_unmap_memory(void *virt, acpi_size size)
{
        iounmap(virt);
}

acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override (const struct acpi_predefined_names *init_val,
                             acpi_string *new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp (init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                printk(KERN_INFO PREFIX "Overriding _OS definition\n");
                *new_val = acpi_os_name;
        }

        return AE_OK;
}

acpi_status
acpi_os_table_override (struct acpi_table_header *existing_table,
                        struct acpi_table_header **new_table)
{
        if (!existing_table || !new_table)
                return AE_BAD_PARAMETER;

        *new_table = NULL;
        return AE_OK;
}

static irqreturn_t
acpi_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        return (*acpi_irq_handler)(acpi_irq_context);
}

acpi_status
acpi_os_install_interrupt_handler(u32 irq, OSD_HANDLER handler, void *context)
{
        /*
         * Ignore the irq from the core, and use the value in our copy of the
         * FADT. It may not be the same if an interrupt source override exists
         * for the SCI.
         */
        irq = acpi_fadt.sci_int;

#ifdef CONFIG_IA64
        irq = gsi_to_vector(irq);
#endif
        acpi_irq_irq = irq;
        acpi_irq_handler = handler;
        acpi_irq_context = context;
        if (request_irq(irq, acpi_irq, SA_SHIRQ, "acpi", acpi_irq)) {
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
                return AE_NOT_ACQUIRED;
        }

        return AE_OK;
}

acpi_status
acpi_os_remove_interrupt_handler(u32 irq, OSD_HANDLER handler)
{
        if (acpi_irq_handler) {
#ifdef CONFIG_IA64
                irq = gsi_to_vector(irq);
#endif
                free_irq(irq, acpi_irq);
                acpi_irq_handler = NULL;
        }

        return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void
acpi_os_sleep(u32 sec, u32 ms)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule_timeout(HZ * sec + (ms * HZ) / 1000);
}

void
acpi_os_stall(u32 us)
{
        if (us > 10000) {
                mdelay(us / 1000);
        }
        else {
                udelay(us);
        }
}

acpi_status
acpi_os_read_port(
        acpi_io_address port,
        u32             *value,
        u32             width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        switch (width)
        {
        case 8:
                *(u8*)  value = inb(port);
                break;
        case 16:
                *(u16*) value = inw(port);
                break;
        case 32:
                *(u32*) value = inl(port);
                break;
        default:
                BUG();
        }

        return AE_OK;
}

acpi_status
acpi_os_write_port(
        acpi_io_address port,
        u32             value,
        u32             width)
{
        switch (width)
        {
        case 8:
                outb(value, port);
                break;
        case 16:
                outw(value, port);
                break;
        case 32:
                outl(value, port);
                break;
        default:
                BUG();
        }

        return AE_OK;
}

acpi_status
acpi_os_read_memory(
        acpi_physical_address   phys_addr,
        u32                     *value,
        u32                     width)
{
        u32                     dummy;
        void                    *virt_addr;

#ifdef CONFIG_ACPI_EFI
        int                     iomem = 0;

        if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
                virt_addr = phys_to_virt(phys_addr);
        } else {
                iomem = 1;
                virt_addr = ioremap(phys_addr, width);
        }
#else
        virt_addr = phys_to_virt(phys_addr);
#endif
        if (!value)
                value = &dummy;

        switch (width) {
        case 8:
                *(u8*) value = *(u8*) virt_addr;
                break;
        case 16:
                *(u16*) value = *(u16*) virt_addr;
                break;
        case 32:
                *(u32*) value = *(u32*) virt_addr;
                break;
        default:
                BUG();
        }

#ifdef CONFIG_ACPI_EFI
        if (iomem)
                iounmap(virt_addr);
#endif

        return AE_OK;
}

acpi_status
acpi_os_write_memory(
        acpi_physical_address   phys_addr,
        u32                     value,
        u32                     width)
{
        void                    *virt_addr;

#ifdef CONFIG_ACPI_EFI
        int                     iomem = 0;

        if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
                virt_addr = phys_to_virt(phys_addr);
        } else {
                iomem = 1;
                virt_addr = ioremap(phys_addr, width);
        }
#else
        virt_addr = phys_to_virt(phys_addr);
#endif

        switch (width) {
        case 8:
                *(u8*) virt_addr = value;
                break;
        case 16:
                *(u16*) virt_addr = value;
                break;
        case 32:
                *(u32*) virt_addr = value;
                break;
        default:
                BUG();
        }

#ifdef CONFIG_ACPI_EFI
        if (iomem)
                iounmap(virt_addr);
#endif

        return AE_OK;
}

#ifdef CONFIG_ACPI_PCI

acpi_status
acpi_os_read_pci_configuration (
        struct acpi_pci_id      *pci_id,
        u32                     reg,
        void                    *value,
        u32                     width)
{
        int                     result = 0;
        int                     size = 0;
        struct pci_bus          bus;

        if (!value)
                return AE_BAD_PARAMETER;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                BUG();
        }

        bus.number = pci_id->bus;
        result = pci_root_ops->read(&bus, PCI_DEVFN(pci_id->device,
                                                    pci_id->function),
                                    reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration (
        struct acpi_pci_id      *pci_id,
        u32                     reg,
        acpi_integer            value,
        u32                     width)
{
        int                     result = 0;
        int                     size = 0;
        struct pci_bus          bus;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                BUG();
        }

        bus.number = pci_id->bus;
        result = pci_root_ops->write(&bus, PCI_DEVFN(pci_id->device,
                                                     pci_id->function),
                                     reg, size, value);
        return (result ? AE_ERROR : AE_OK);
}

/* TODO: Change code to take advantage of driver model more */
void
acpi_os_derive_pci_id_2 (
        acpi_handle             rhandle,        /* upper bound  */
        acpi_handle             chandle,        /* current node */
        struct acpi_pci_id      **id,
        int                     *is_bridge,
        u8                      *bus_number)
{
        acpi_handle             handle;
        struct acpi_pci_id      *pci_id = *id;
        acpi_status             status;
        unsigned long           temp;
        acpi_object_type        type;
        u8                      tu8;

        acpi_get_parent(chandle, &handle);
        if (handle != rhandle) {
                acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge, bus_number);

                status = acpi_get_type(handle, &type);
                if ( (ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE) )
                        return;

                status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &temp);
                if (ACPI_SUCCESS(status)) {
                        pci_id->device  = ACPI_HIWORD (ACPI_LODWORD (temp));
                        pci_id->function = ACPI_LOWORD (ACPI_LODWORD (temp));

                        if (*is_bridge)
                                pci_id->bus = *bus_number;

                        /* any nicer way to get bus number of bridge ? */
                        status = acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8, 8);
                        if (ACPI_SUCCESS(status) &&
                            ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
                                status = acpi_os_read_pci_configuration(pci_id, 0x18, &tu8, 8);
                                if (!ACPI_SUCCESS(status)) {
                                        /* Certainly broken...  FIX ME */
                                        return;
                                }
                                *is_bridge = 1;
                                pci_id->bus = tu8;
                                status = acpi_os_read_pci_configuration(pci_id, 0x19, &tu8, 8);
                                if (ACPI_SUCCESS(status)) {
                                        *bus_number = tu8;
                                }
                        } else
                                *is_bridge = 0;
                }
        }
}

void
acpi_os_derive_pci_id (
        acpi_handle             rhandle,        /* upper bound  */
        acpi_handle             chandle,        /* current node */
        struct acpi_pci_id      **id)
{
        int is_bridge = 1;
        u8 bus_number = (*id)->bus;

        acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}

#else /*!CONFIG_ACPI_PCI*/

acpi_status
acpi_os_write_pci_configuration (
        struct acpi_pci_id      *pci_id,
        u32                     reg,
        acpi_integer            value,
        u32                     width)
{
        return (AE_SUPPORT);
}

acpi_status
acpi_os_read_pci_configuration (
        struct acpi_pci_id      *pci_id,
        u32                     reg,
        void                    *value,
        u32                     width)
{
        return (AE_SUPPORT);
}

void
acpi_os_derive_pci_id (
        acpi_handle             rhandle,        /* upper bound  */
        acpi_handle             chandle,        /* current node */
        struct acpi_pci_id      **id)
{
}

#endif /*CONFIG_ACPI_PCI*/

static void
acpi_os_execute_deferred (
        void *context)
{
        struct acpi_os_dpc      *dpc = NULL;

        ACPI_FUNCTION_TRACE ("os_execute_deferred");

        dpc = (struct acpi_os_dpc *) context;
        if (!dpc) {
                ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n"));
                return_VOID;
        }

        dpc->function(dpc->context);

        kfree(dpc);

        return_VOID;
}

acpi_status
acpi_os_queue_for_execution(
        u32                     priority,
        OSD_EXECUTION_CALLBACK  function,
        void                    *context)
{
        acpi_status             status = AE_OK;
        struct acpi_os_dpc      *dpc;
        struct work_struct      *task;

        ACPI_FUNCTION_TRACE ("os_queue_for_execution");

        ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context));

        if (!function)
                return_ACPI_STATUS (AE_BAD_PARAMETER);

        /*
         * Allocate/initialize the DPC structure.  Note that this memory is
         * freed by the deferred handler (acpi_os_execute_deferred) once it
         * has run.  Because several callbacks with different parameters may
         * be queued at the same time, we can't use the approach some kernel
         * code takes of having a single static work_struct.  To save time
         * and code, the DPC and its work_struct are allocated together from
         * one block of memory.
         */

        dpc = kmalloc(sizeof(struct acpi_os_dpc)+sizeof(struct work_struct), GFP_ATOMIC);
        if (!dpc)
                return_ACPI_STATUS (AE_NO_MEMORY);

        dpc->function = function;
        dpc->context = context;

        task = (void *)(dpc+1);
        INIT_WORK(task, acpi_os_execute_deferred, (void*)dpc);

        if (!schedule_work(task)) {
                ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to schedule_work() failed.\n"));
                kfree(dpc);
                status = AE_ERROR;
        }

        return_ACPI_STATUS (status);
}

/*
 * Allocate the memory for a spinlock and initialize it.
 */
acpi_status
acpi_os_create_lock (
        acpi_handle     *out_handle)
{
        spinlock_t *lock_ptr;

        ACPI_FUNCTION_TRACE ("os_create_lock");

        lock_ptr = acpi_os_allocate(sizeof(spinlock_t));
        if (!lock_ptr)
                return_ACPI_STATUS (AE_NO_MEMORY);

        spin_lock_init(lock_ptr);

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr));

        *out_handle = lock_ptr;

        return_ACPI_STATUS (AE_OK);
}


/*
 * Deallocate the memory for a spinlock.
 */
void
acpi_os_delete_lock (
        acpi_handle     handle)
{
        ACPI_FUNCTION_TRACE ("os_delete_lock");

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle));

        acpi_os_free(handle);

        return_VOID;
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 * flags is *not* the result of save_flags - it is an ACPI-specific flag variable
 *   that indicates whether we are at interrupt level.
 */
void
acpi_os_acquire_lock (
        acpi_handle     handle,
        u32             flags)
{
        ACPI_FUNCTION_TRACE ("os_acquire_lock");

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquiring spinlock[%p] from %s level\n", handle,
                ((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));

        if (flags & ACPI_NOT_ISR)
                ACPI_DISABLE_IRQS();

        spin_lock((spinlock_t *)handle);

        return_VOID;
}


/*
 * Release a spinlock. See above.
 */
void
acpi_os_release_lock (
        acpi_handle     handle,
        u32             flags)
{
        ACPI_FUNCTION_TRACE ("os_release_lock");

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Releasing spinlock[%p] from %s level\n", handle,
                ((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));

        spin_unlock((spinlock_t *)handle);

        if (flags & ACPI_NOT_ISR)
                ACPI_ENABLE_IRQS();

        return_VOID;
}


acpi_status
acpi_os_create_semaphore(
        u32             max_units,
        u32             initial_units,
        acpi_handle     *handle)
{
        struct semaphore        *sem = NULL;

        ACPI_FUNCTION_TRACE ("os_create_semaphore");

        sem = acpi_os_allocate(sizeof(struct semaphore));
        if (!sem)
                return_ACPI_STATUS (AE_NO_MEMORY);
        memset(sem, 0, sizeof(struct semaphore));

        sema_init(sem, initial_units);

        *handle = (acpi_handle*)sem;

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", *handle, initial_units));

        return_ACPI_STATUS (AE_OK);
}


/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status
acpi_os_delete_semaphore(
        acpi_handle     handle)
{
        struct semaphore *sem = (struct semaphore*) handle;

        ACPI_FUNCTION_TRACE ("os_delete_semaphore");

        if (!sem)
                return_ACPI_STATUS (AE_BAD_PARAMETER);

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        acpi_os_free(sem);
        sem = NULL;

        return_ACPI_STATUS (AE_OK);
}


/*
 * TODO: The kernel doesn't have a 'down_timeout' function -- had to
 * improvise.  The process is to sleep for one scheduler quantum
 * until the semaphore becomes available.  Downside is that this
 * may result in starvation for timeout-based waits when there's
 * lots of semaphore activity.
 *
 * TODO: Support for units > 1?
 */
acpi_status
acpi_os_wait_semaphore(
        acpi_handle             handle,
        u32                     units,
        u16                     timeout)
{
        acpi_status             status = AE_OK;
        struct semaphore        *sem = (struct semaphore*)handle;
        int                     ret = 0;

        ACPI_FUNCTION_TRACE ("os_wait_semaphore");

        if (!sem || (units < 1))
                return_ACPI_STATUS (AE_BAD_PARAMETER);

        if (units > 1)
                return_ACPI_STATUS (AE_SUPPORT);

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", handle, units, timeout));

        if (in_atomic())
                timeout = 0;

        switch (timeout)
        {
                /*
                 * No Wait:
                 * --------
                 * A zero timeout value indicates that we shouldn't wait - just
                 * acquire the semaphore if available otherwise return AE_TIME
                 * (a.k.a. 'would block').
                 */
                case 0:
                if (down_trylock(sem))
                        status = AE_TIME;
                break;

                /*
                 * Wait Indefinitely:
                 * ------------------
                 */
                case ACPI_WAIT_FOREVER:
                down(sem);
                break;

                /*
                 * Wait w/ Timeout:
                 * ----------------
                 */
                default:
                // TODO: A better timeout algorithm?
                {
                        int i = 0;
                        static const int quantum_ms = 1000/HZ;

                        /*
                         * down_trylock() returns 0 on success and non-zero
                         * when the semaphore is unavailable, so keep retrying
                         * while it is still held and time remains.
                         */
                        ret = down_trylock(sem);
                        for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
                                current->state = TASK_INTERRUPTIBLE;
                                schedule_timeout(1);
                                ret = down_trylock(sem);
                        }

                        if (ret != 0)
                                status = AE_TIME;
                }
                break;
        }

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Failed to acquire semaphore[%p|%d|%d], %s\n",
                        handle, units, timeout, acpi_format_exception(status)));
        }
        else {
                ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquired semaphore[%p|%d|%d]\n", handle, units, timeout));
        }

        return_ACPI_STATUS (status);
}


/*
 * TODO: Support for units > 1?
 */
acpi_status
acpi_os_signal_semaphore(
        acpi_handle             handle,
        u32                     units)
{
        struct semaphore *sem = (struct semaphore *) handle;

        ACPI_FUNCTION_TRACE ("os_signal_semaphore");

        if (!sem || (units < 1))
                return_ACPI_STATUS (AE_BAD_PARAMETER);

        if (units > 1)
                return_ACPI_STATUS (AE_SUPPORT);

        ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, units));

        up(sem);

        return_ACPI_STATUS (AE_OK);
}

u32
acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, sizeof(line_buf));

                /* remove the CR kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#endif

        return 0;
}

/*
 * We just have to assume we're dealing with valid memory
 */

BOOLEAN
acpi_os_readable(void *ptr, u32 len)
{
        return 1;
}

BOOLEAN
acpi_os_writable(void *ptr, u32 len)
{
        return 1;
}

u32
acpi_os_get_thread_id (void)
{
        if (!in_atomic())
                return current->pid;

        return 0;
}

acpi_status
acpi_os_signal (
        u32             function,
        void            *info)
{
        switch (function)
        {
        case ACPI_SIGNAL_FATAL:
                printk(KERN_ERR PREFIX "Fatal opcode executed\n");
                break;
        case ACPI_SIGNAL_BREAKPOINT:
                {
                        char *bp_info = (char*) info;

                        printk(KERN_ERR "ACPI breakpoint: %s\n", bp_info);
                }
        default:
                break;
        }

        return AE_OK;
}

int __init
acpi_os_name_setup(char *str)
{
        char *p = acpi_os_name;
        int count = ACPI_MAX_OVERRIDE_LEN-1;

        if (!str || !*str)
                return 0;

        for (; count-- && str && *str; str++) {
                if (isalnum(*str) || *str == ' ')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
                        continue;
                else
                        break;
        }
        *p = 0;

        return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);