/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows using PCI devices that only support 32bit addresses on systems
 * with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
dma_addr_t bad_dma_address;

unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;		/* Remapping table */
int no_iommu;
static int no_agp;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow = 1;
int force_iommu = 1;
#else
int panic_on_overflow = 0;
int force_iommu = 0;
#endif
int iommu_merge = 0;
int iommu_sac_force = 0;
int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
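
/*
 * Worked example (illustrative only, not part of the original source):
 * for a physical address of 0x1_2345_6000 (bit 32 set),
 *   GPTE_ENCODE gives 0x23456000 | (0x1 << 4) | 1 | 2 = 0x23456013,
 * i.e. address bits 39:32 land in PTE bits 11:4 above the valid and
 * coherent bits. GPTE_DECODE reverses it:
 *   0x23456000 | ((0x23456013 & 0xff0) << 28) = 0x1_2345_6000.
 */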
#define to_pages(addr,size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
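
/*
 * Example (illustrative): with 4K pages, a 0x1800 byte buffer starting
 * at page offset 0xc00 needs round_up(0xc00 + 0x1800, 0x1000) >> 12 = 3
 * GART pages, one more than the length alone would suggest, because the
 * mapping must also cover the partial first and last pages.
 */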
#define for_all_nb(dev) \
	dev = NULL;	\
	while ((dev = pci_find_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)\
		if (dev->bus->number == 0 &&				\
		    (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
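
/*
 * Typical use (illustrative sketch): iterate over every K8 northbridge
 * function found on bus 0 in slots 24-31, e.g.
 *
 *	struct pci_dev *nb;
 *	u32 word;
 *	for_all_nb(nb)
 *		pci_read_config_dword(nb, 0x9c, &word);
 *
 * as done below in init_k8_gatt() and pci_iommu_init().
 */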
static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];

#define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;
	if (size == 1) {
		clear_bit(offset, iommu_gart_bitmap);
		return;
	}
	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(struct pci_dev *dev)
{
	unsigned long flags;
	int bus = dev ? dev->bus->number : -1;
	cpumask_const_t bus_cpumask = pcibus_to_cpumask(bus);
	int flushed = 0;
	int i;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		for (i = 0; i < MAX_NB; i++) {
			u32 w;
			if (!northbridges[i])
				continue;
			if (bus >= 0 && !(cpu_isset_const(i, bus_cpumask)))
				continue;
			pci_write_config_dword(northbridges[i], 0x9c,
					       northbridge_flush_word[i] | 1);
			/* Make sure the hardware actually executed the flush. */
			do {
				pci_read_config_dword(northbridges[i], 0x9c, &w);
			} while (w & 1);
			flushed++;
		}
		if (!flushed)
			printk("nothing to flush? %d\n", bus);
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Allocate memory for a consistent mapping.
 * All mappings are consistent here, so this is just a wrapper around
 * pci_map_single.
 */
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			   dma_addr_t *dma_handle)
{
	void *memory;
	int gfp = GFP_ATOMIC;
	unsigned long dma_mask;

	if (hwdev == NULL) {
		gfp |= GFP_DMA;
		dma_mask = 0xffffffff;
	} else {
		dma_mask = hwdev->consistent_dma_mask;
	}
	if (dma_mask == 0)
		dma_mask = 0xffffffff;
	if (dma_mask < 0xffffffff || no_iommu)
		gfp |= GFP_DMA;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_consistent. */
	if (hwdev)	/* hwdev may be NULL, see above */
		dma_mask &= hwdev->dma_mask;

	memory = (void *)__get_free_pages(gfp, get_order(size));
	if (memory == NULL) {
		return NULL;
	} else {
		int high, mmu;
		high = ((unsigned long)virt_to_bus(memory) + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		if (no_iommu) {
			if (high) goto error;
			mmu = 0;
		}
		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	*dma_handle = pci_map_single(hwdev, memory, size, 0);
	if (*dma_handle == bad_dma_address)
		goto error;
	return memory;

error:
	if (panic_on_overflow)
		panic("pci_map_single: overflow %lu bytes\n", size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
/*
 * Unmap consistent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	pci_unmap_single(hwdev, bus, size, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = 0;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;
static void dump_leak(void)
{
	int i;
	static int dump;
	if (dump || !iommu_leak_tab) return;
	dump = 1;
	show_stack(NULL,NULL);
	/* Very crude. Dump some entries from the end of the table too. */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i+=2) {
		printk("%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
		printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif
static void iommu_full(struct pci_dev *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	  "PCI-DMA: Out of IOMMU space for %lu bytes at device %s[%s]\n",
	       size, dev ? pci_pretty_name(dev) : "", dev ? dev->slot_name : "?");

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory will be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory will be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
static inline int need_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
	u64 mask = dev ? dev->dma_mask : 0xffffffff;
	int high = addr + size >= mask;
	int mmu = high;
	if (force_iommu)
		mmu = 1;
	if (no_iommu) {
		if (high)
			panic("PCI-DMA: high address but no IOMMU.\n");
		mmu = 0;
	}
	return mmu;
}

static inline int nonforced_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
	u64 mask = dev ? dev->dma_mask : 0xffffffff;
	int high = addr + size >= mask;
	int mmu = high;
	if (no_iommu) {
		if (high)
			panic("PCI-DMA: high address but no IOMMU.\n");
		mmu = 0;
	}
	return mmu;
}
/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t pci_map_area(struct pci_dev *dev, unsigned long phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("pci_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
/* Map a single area into the IOMMU */
dma_addr_t pci_map_single(struct pci_dev *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	BUG_ON(dir == PCI_DMA_NONE);

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_map_single(&dev->dev,addr,size,dir);
#endif

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = pci_map_area(dev, phys_mem, size, dir);
	flush_gart(dev);
	return bus;
}
/* Fallback for pci_map_sg in case of overflow */
static int pci_map_sg_nonforce(struct pci_dev *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "pci_map_sg overflow\n");
#endif

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long addr = page_to_phys(s->page) + s->offset;
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = pci_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					pci_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart(dev);
	return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __pci_map_cont(struct scatterlist *sg, int start, int stopat,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	int i;

	if (iommu_start == -1)
		return -1;

	for (i = start; i < stopat; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(i > start && s->offset);
		if (i == start) {
			*sout = *s;
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}
static inline int pci_map_cont(struct scatterlist *sg, int start, int stopat,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(stopat - start != 1);
		*sout = sg[start];
		sout->dma_length = sg[start].length;
		return 0;
	}
	return __pci_map_cont(sg, start, stopat, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
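/*
 * Merging example (illustrative): three entries with zero page offsets
 * and page-aligned lengths can be remapped to consecutive GART pages and
 * returned as a single 12K DMA segment in sg[0], so the caller sees
 * fewer, larger segments. A chunk that does not end on a page boundary,
 * or a successor with a non-zero offset, ends the merge run (see the
 * check against (ps->offset + ps->length) % PAGE_SIZE below).
 */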
int pci_map_sg(struct pci_dev *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;

	BUG_ON(dir == PCI_DMA_NONE);
	if (nents == 0)
		return 0;

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_map_sg(&dev->dev,sg,nents,dir);
#endif

	out = 0;
	start = 0;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		dma_addr_t addr = page_to_phys(s->page) + s->offset;
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			struct scatterlist *ps = &sg[i-1];
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (pci_map_cont(sg, start, i, sg+out, pages,
						 need) < 0)
					goto error;
				out++;
				pages = 0;
				start = i;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
	}
	if (pci_map_cont(sg, start, i, sg+out, pages, need) < 0)
		goto error;
	out++;
	flush_gart(dev);
	if (out < nents)
		sg[out].dma_length = 0;
	return out;

error:
	flush_gart(NULL);
	pci_unmap_sg(dev, sg, nents, dir);
	/* When it was forced try again unforced */
	if (force_iommu)
		return pci_map_sg_nonforce(dev, sg, nents, dir);
	if (panic_on_overflow)
		panic("pci_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for (i = 0; i < nents; i++)
		sg[i].dma_address = bad_dma_address;
	return 0;
}
/*
 * Free a PCI mapping.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

#ifdef CONFIG_SWIOTLB
	if (swiotlb) {
		swiotlb_unmap_single(&hwdev->dev,dma_addr,size,direction);
		return;
	}
#endif

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = 0;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
void pci_unmap_sg(struct pci_dev *dev, struct scatterlist *sg, int nents,
		  int dir)
{
	int i;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		if (!s->dma_length || !s->length)
			break;
		pci_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}
int pci_dma_supported(struct pci_dev *dev, u64 mask)
{
	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_consistent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < 0x00ffffff)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.
	   This allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area
	   and return DAC as fallback address the device may not handle it correctly.

	   As a special case some controllers have a 39bit address mode
	   that is as efficient as 32bit (aic79xx). Don't force SAC for these.
	   Assume all masks <= 40 bits are of this type. Normally this doesn't
	   make any difference, but gives more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->slot_name,mask);
		return 0;
	}

	if (no_iommu && (mask < (end_pfn << PAGE_SHIFT)) && !swiotlb)
		return 0;

	return 1;
}
EXPORT_SYMBOL(pci_unmap_sg);
EXPORT_SYMBOL(pci_map_sg);
EXPORT_SYMBOL(pci_map_single);
EXPORT_SYMBOL(pci_unmap_single);
EXPORT_SYMBOL(pci_dma_supported);
EXPORT_SYMBOL(no_iommu);
EXPORT_SYMBOL(force_iommu);
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;
	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
  "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",iommu_size>>20);

	return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size >= 0xffffffff || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
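
/*
 * Example decode (illustrative): aper_order = 3 gives
 * aper_size = (32 * 1024 * 1024) << 3 = 256MB; aper_base_32 = 0x40
 * gives aper_base = 0x40 << 25 = 0x80000000 (2GB). An aperture that
 * would end above 4GB, or a zero size, is rejected above.
 */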
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;

	aper_size = aper_base = info->aper_size = 0;
	for_all_nb(dev) {
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size>>20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for_all_nb(dev) {
		u32 ctl;
		u32 gatt_reg;

		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart(NULL);

	printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.");
	return -1;
}
extern int agp_amd64_init(void);
static int __init pci_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	struct pci_dev *dev;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(&info) < 0);
#endif

	if (swiotlb) {
		no_iommu = 1;
		printk(KERN_INFO "PCI-DMA: Using SWIOTLB :-(\n");
		return -1;
	}

	if (no_iommu || (!force_iommu && end_pfn < 0xffffffff>>PAGE_SHIFT) ||
	    !iommu_aperture) {
		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
		no_iommu = 1;
		return -1;
	}

	if (no_agp) {
		int err = -1;
		printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
		if (force_iommu || end_pfn >= 0xffffffff>>PAGE_SHIFT)
			err = init_k8_gatt(&info);
		if (err < 0) {
			printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
			no_iommu = 1;
			return -1;
		}
	}

	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
						    get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size>>20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	for_all_nb(dev) {
		u32 flag;
		int cpu = PCI_SLOT(dev->devfn) - 24;
		if (cpu >= MAX_NB)
			continue;
		northbridges[cpu] = dev;
		pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
		northbridge_flush_word[cpu] = flag;
	}

	flush_gart(NULL);

	return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
	 [,forcesac][,fullflush][,nomerge]
   size		set size of iommu (in bytes)
   noagp	don't initialize the AGP driver and use full aperture.
   off		don't use the IOMMU
   leak		turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
   memaper[=order] allocate its own aperture over RAM with size 32MB << order.
   noforce	don't force IOMMU usage. Default.
   force	Force IOMMU.
   merge	Do SG merging. Implies force (experimental)
   nomerge	Don't do SG merging.
   forcesac	Force SAC mode for masks <40bits (experimental)
   fullflush	Flush IOMMU on each allocation (default)
   nofullflush	Don't use IOMMU fullflush
*/
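
/*
 * Example (illustrative): booting with "iommu=force,nomerge,nofullflush"
 * sets force_iommu and clears iommu_merge and iommu_fullflush in
 * iommu_setup() below; "iommu=memaper=2" requests a private aperture
 * via fallback_aper_force/fallback_aper_order = 2.
 */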
__init int iommu_setup(char *opt)
{
	int arg;
	char *p = opt;

	for (;;) {
		if (!memcmp(p,"noagp", 5))
			no_agp = 1;
		if (!memcmp(p,"off", 3))
			no_iommu = 1;
		if (!memcmp(p,"force", 5))
			force_iommu = 1;
		if (!memcmp(p,"noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}
		if (!memcmp(p, "memaper", 7)) {
			fallback_aper_force = 1;
			p += 7;
			if (*p == '=' && get_option(&p, &arg))
				fallback_aper_order = arg;
		}
		if (!memcmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!memcmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!memcmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!memcmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!memcmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!memcmp(p, "fullflush", 9))
			iommu_fullflush = 1;
		if (!memcmp(p, "nofullflush", 11))
			iommu_fullflush = 0;
#ifdef CONFIG_IOMMU_LEAK
		if (!memcmp(p,"leak", 4)) {
			leak_trace = 1;
			p += 4;
			if (isdigit(*p) && get_option(&p, &arg))
				iommu_leak_pages = arg;
		} else
#endif
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_size = arg;
		do {
			if (*p == ' ' || *p == 0)
				return 0;
		} while (*p++ != ',');
	}
	return 1;
}