1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
31 #include "drm_crtc_helper.h"
32 #include "drm_fb_helper.h"
33 #include "intel_drv.h"
36 #include "i915_trace.h"
37 #include <linux/pci.h>
38 #include <linux/vgaarb.h>
39 #include <linux/acpi.h>
40 #include <linux/pnp.h>
41 #include <linux/vga_switcheroo.h>
42 #include <linux/slab.h>
45 * Sets up the hardware status page for devices that need a physical address in the register.
48 static int i915_init_phys_hws(struct drm_device *dev)
50 drm_i915_private_t *dev_priv = dev->dev_private;
51 /* Program Hardware Status Page */
52 dev_priv->status_page_dmah =
53 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
55 if (!dev_priv->status_page_dmah) {
56 DRM_ERROR("Can not allocate hardware status page\n");
59 dev_priv->render_ring.status_page.page_addr
60 = dev_priv->status_page_dmah->vaddr;
61 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
63 memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
66 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
69 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
70 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
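/*
 * Usage sketch: once HWS_PGA holds the page's bus address, the GPU writes
 * status dwords into the page and the CPU simply reads the kernel mapping.
 * A minimal, hypothetical helper (name and use are assumptions), reading the
 * breadcrumb slot used by i915_emit_breadcrumb() below:
 *
 *	static u32 read_hws_dword(drm_i915_private_t *dev_priv, int index)
 *	{
 *		volatile u32 *hws =
 *			(volatile u32 *)dev_priv->render_ring.status_page.page_addr;
 *		return hws[index];	// e.g. index == I915_BREADCRUMB_INDEX
 *	}
 */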
75 * Frees the hardware status page, whether it's a physical address or a virtual
76 * address set up by the X Server.
78 static void i915_free_hws(struct drm_device *dev)
80 drm_i915_private_t *dev_priv = dev->dev_private;
81 if (dev_priv->status_page_dmah) {
82 drm_pci_free(dev, dev_priv->status_page_dmah);
83 dev_priv->status_page_dmah = NULL;
86 if (dev_priv->render_ring.status_page.gfx_addr) {
87 dev_priv->render_ring.status_page.gfx_addr = 0;
88 drm_core_ioremapfree(&dev_priv->hws_map, dev);
91 /* Need to rewrite hardware status page */
92 I915_WRITE(HWS_PGA, 0x1ffff000);
95 void i915_kernel_lost_context(struct drm_device * dev)
97 drm_i915_private_t *dev_priv = dev->dev_private;
98 struct drm_i915_master_private *master_priv;
99 struct intel_ring_buffer *ring = &dev_priv->render_ring;
102 * We should never lose context on the ring with modesetting
103 * as we don't expose it to userspace
105 if (drm_core_check_feature(dev, DRIVER_MODESET))
108 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
109 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
110 ring->space = ring->head - (ring->tail + 8);
112 ring->space += ring->size;
114 if (!dev->primary->master)
117 master_priv = dev->primary->master->driver_priv;
118 if (ring->head == ring->tail && master_priv->sarea_priv)
119 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
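/*
 * Worked example of the ring-space arithmetic above (values assumed): with a
 * 64 KiB ring, head == 0x100 and tail == 0x2000,
 *
 *	space = 0x100 - (0x2000 + 8) = -0x1f08;	// negative, so wrap:
 *	space += 64 * 1024;			// 0xe0f8 bytes still free
 *
 * i.e. free space is measured from tail (plus a small guard) forward to head,
 * wrapping around the ring size whenever tail is ahead of head.
 */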
122 static int i915_dma_cleanup(struct drm_device * dev)
124 drm_i915_private_t *dev_priv = dev->dev_private;
125 /* Make sure interrupts are disabled here because the uninstall ioctl
126 * may not have been called from userspace and after dev_private
127 * is freed, it's too late.
129 if (dev->irq_enabled)
130 drm_irq_uninstall(dev);
132 mutex_lock(&dev->struct_mutex);
133 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
135 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
136 mutex_unlock(&dev->struct_mutex);
138 /* Clear the HWS virtual address at teardown */
139 if (I915_NEED_GFX_HWS(dev))
145 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
147 drm_i915_private_t *dev_priv = dev->dev_private;
148 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
150 master_priv->sarea = drm_getsarea(dev);
151 if (master_priv->sarea) {
152 master_priv->sarea_priv = (drm_i915_sarea_t *)
153 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
155 DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
158 if (init->ring_size != 0) {
159 if (dev_priv->render_ring.gem_object != NULL) {
160 i915_dma_cleanup(dev);
161 DRM_ERROR("Client tried to initialize ringbuffer in "
166 dev_priv->render_ring.size = init->ring_size;
168 dev_priv->render_ring.map.offset = init->ring_start;
169 dev_priv->render_ring.map.size = init->ring_size;
170 dev_priv->render_ring.map.type = 0;
171 dev_priv->render_ring.map.flags = 0;
172 dev_priv->render_ring.map.mtrr = 0;
174 drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
176 if (dev_priv->render_ring.map.handle == NULL) {
177 i915_dma_cleanup(dev);
178 DRM_ERROR("can not ioremap virtual address for"
184 dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
186 dev_priv->cpp = init->cpp;
187 dev_priv->back_offset = init->back_offset;
188 dev_priv->front_offset = init->front_offset;
189 dev_priv->current_page = 0;
190 if (master_priv->sarea_priv)
191 master_priv->sarea_priv->pf_current_page = 0;
193 /* Allow hardware batchbuffers unless told otherwise.
195 dev_priv->allow_batchbuffer = 1;
200 static int i915_dma_resume(struct drm_device * dev)
202 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
204 struct intel_ring_buffer *ring;
205 DRM_DEBUG_DRIVER("%s\n", __func__);
207 ring = &dev_priv->render_ring;
209 if (ring->map.handle == NULL) {
210 DRM_ERROR("can not ioremap virtual address for"
215 /* Program Hardware Status Page */
216 if (!ring->status_page.page_addr) {
217 DRM_ERROR("Can not find hardware status page\n");
220 DRM_DEBUG_DRIVER("hw status page @ %p\n",
221 ring->status_page.page_addr);
222 if (ring->status_page.gfx_addr != 0)
223 ring->setup_status_page(dev, ring);
225 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
227 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
232 static int i915_dma_init(struct drm_device *dev, void *data,
233 struct drm_file *file_priv)
235 drm_i915_init_t *init = data;
238 switch (init->func) {
240 retcode = i915_initialize(dev, init);
242 case I915_CLEANUP_DMA:
243 retcode = i915_dma_cleanup(dev);
245 case I915_RESUME_DMA:
246 retcode = i915_dma_resume(dev);
256 /* Implement basically the same security restrictions as hardware does
257 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
259 * Most of the calculations below involve calculating the size of a
260 * particular instruction. It's important to get the size right as
261 * that tells us where the next instruction to check is. Any illegal
262 * instruction detected will be given a size of zero, which is a
263 * signal to abort the rest of the buffer.
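/*
 * Sketch of how the sizes returned below are consumed (see i915_emit_cmds()
 * further down): the checker walks the buffer one instruction at a time,
 * advancing by the reported length and bailing out on zero.
 *
 *	int i = 0;
 *	while (i < dwords) {
 *		int sz = validate_cmd(buffer[i]);
 *		if (sz == 0 || i + sz > dwords)
 *			return -EINVAL;		// illegal or truncated command
 *		i += sz;			// sz includes the header dword
 *	}
 *
 * For example, a 2D command reports (cmd & 0xff) + 2 dwords, i.e. the header
 * plus its encoded payload length.
 */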
265 static int do_validate_cmd(int cmd)
267 switch (((cmd >> 29) & 0x7)) {
269 switch ((cmd >> 23) & 0x3f) {
271 return 1; /* MI_NOOP */
273 return 1; /* MI_FLUSH */
275 return 0; /* disallow everything else */
279 return 0; /* reserved */
281 return (cmd & 0xff) + 2; /* 2d commands */
283 if (((cmd >> 24) & 0x1f) <= 0x18)
286 switch ((cmd >> 24) & 0x1f) {
290 switch ((cmd >> 16) & 0xff) {
292 return (cmd & 0x1f) + 2;
294 return (cmd & 0xf) + 2;
296 return (cmd & 0xffff) + 2;
300 return (cmd & 0xffff) + 1;
304 if ((cmd & (1 << 23)) == 0) /* inline vertices */
305 return (cmd & 0x1ffff) + 2;
306 else if (cmd & (1 << 17)) /* indirect random */
307 if ((cmd & 0xffff) == 0)
308 return 0; /* unknown length, too hard */
310 return (((cmd & 0xffff) + 1) / 2) + 1;
312 return 2; /* indirect sequential */
323 static int validate_cmd(int cmd)
325 int ret = do_validate_cmd(cmd);
327 /* printk("validate_cmd( %x ): %d\n", cmd, ret); */
332 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
334 drm_i915_private_t *dev_priv = dev->dev_private;
337 if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
340 BEGIN_LP_RING((dwords+1)&~1);
342 for (i = 0; i < dwords;) {
347 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
366 i915_emit_box(struct drm_device *dev,
367 struct drm_clip_rect *boxes,
368 int i, int DR1, int DR4)
370 struct drm_clip_rect box = boxes[i];
372 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
373 DRM_ERROR("Bad box %d,%d..%d,%d\n",
374 box.x1, box.y1, box.x2, box.y2);
380 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
381 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
382 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
387 OUT_RING(GFX_OP_DRAWRECT_INFO);
389 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
390 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
399 /* XXX: Emitting the counter should really be moved to part of the IRQ
400 * emit. For now, do it in both places:
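/*
 * The breadcrumb protocol in brief: every submission bumps dev_priv->counter,
 * records it in the sarea as last_enqueue, and asks the GPU to store the same
 * value into the status page (MI_STORE_DWORD_INDEX at I915_BREADCRUMB_INDEX).
 * A consumer can then compare the two, e.g. (sketch only, helper name is an
 * assumption and ignores counter wrap):
 *
 *	static bool batch_retired(drm_i915_private_t *dev_priv, u32 enqueue)
 *	{
 *		return READ_BREADCRUMB(dev_priv) >= enqueue;
 *	}
 *
 * which is why the dispatch ioctls below copy READ_BREADCRUMB() back into
 * sarea_priv->last_dispatch after submitting work.
 */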
403 static void i915_emit_breadcrumb(struct drm_device *dev)
405 drm_i915_private_t *dev_priv = dev->dev_private;
406 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
409 if (dev_priv->counter > 0x7FFFFFFFUL)
410 dev_priv->counter = 0;
411 if (master_priv->sarea_priv)
412 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
415 OUT_RING(MI_STORE_DWORD_INDEX);
416 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
417 OUT_RING(dev_priv->counter);
422 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
423 drm_i915_cmdbuffer_t *cmd,
424 struct drm_clip_rect *cliprects,
427 int nbox = cmd->num_cliprects;
428 int i = 0, count, ret;
431 DRM_ERROR("alignment");
435 i915_kernel_lost_context(dev);
437 count = nbox ? nbox : 1;
439 for (i = 0; i < count; i++) {
441 ret = i915_emit_box(dev, cliprects, i,
447 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
452 i915_emit_breadcrumb(dev);
456 static int i915_dispatch_batchbuffer(struct drm_device * dev,
457 drm_i915_batchbuffer_t * batch,
458 struct drm_clip_rect *cliprects)
460 int nbox = batch->num_cliprects;
463 if ((batch->start | batch->used) & 0x7) {
464 DRM_ERROR("alignment");
468 i915_kernel_lost_context(dev);
470 count = nbox ? nbox : 1;
472 for (i = 0; i < count; i++) {
474 int ret = i915_emit_box(dev, cliprects, i,
475 batch->DR1, batch->DR4);
480 if (!IS_I830(dev) && !IS_845G(dev)) {
483 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
484 OUT_RING(batch->start);
486 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
487 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
492 OUT_RING(MI_BATCH_BUFFER);
493 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
494 OUT_RING(batch->start + batch->used - 4);
500 i915_emit_breadcrumb(dev);
505 static int i915_dispatch_flip(struct drm_device * dev)
507 drm_i915_private_t *dev_priv = dev->dev_private;
508 struct drm_i915_master_private *master_priv =
509 dev->primary->master->driver_priv;
511 if (!master_priv->sarea_priv)
514 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
516 dev_priv->current_page,
517 master_priv->sarea_priv->pf_current_page);
519 i915_kernel_lost_context(dev);
522 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
527 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
529 if (dev_priv->current_page == 0) {
530 OUT_RING(dev_priv->back_offset);
531 dev_priv->current_page = 1;
533 OUT_RING(dev_priv->front_offset);
534 dev_priv->current_page = 0;
540 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
544 master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
547 OUT_RING(MI_STORE_DWORD_INDEX);
548 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
549 OUT_RING(dev_priv->counter);
553 master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
557 static int i915_quiescent(struct drm_device * dev)
559 drm_i915_private_t *dev_priv = dev->dev_private;
561 i915_kernel_lost_context(dev);
562 return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
563 dev_priv->render_ring.size - 8);
566 static int i915_flush_ioctl(struct drm_device *dev, void *data,
567 struct drm_file *file_priv)
571 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
573 mutex_lock(&dev->struct_mutex);
574 ret = i915_quiescent(dev);
575 mutex_unlock(&dev->struct_mutex);
580 static int i915_batchbuffer(struct drm_device *dev, void *data,
581 struct drm_file *file_priv)
583 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
584 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
585 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
586 master_priv->sarea_priv;
587 drm_i915_batchbuffer_t *batch = data;
589 struct drm_clip_rect *cliprects = NULL;
591 if (!dev_priv->allow_batchbuffer) {
592 DRM_ERROR("Batchbuffer ioctl disabled\n");
596 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
597 batch->start, batch->used, batch->num_cliprects);
599 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
601 if (batch->num_cliprects < 0)
604 if (batch->num_cliprects) {
605 cliprects = kcalloc(batch->num_cliprects,
606 sizeof(struct drm_clip_rect),
608 if (cliprects == NULL)
611 ret = copy_from_user(cliprects, batch->cliprects,
612 batch->num_cliprects *
613 sizeof(struct drm_clip_rect));
618 mutex_lock(&dev->struct_mutex);
619 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
620 mutex_unlock(&dev->struct_mutex);
623 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
631 static int i915_cmdbuffer(struct drm_device *dev, void *data,
632 struct drm_file *file_priv)
634 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
635 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
636 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
637 master_priv->sarea_priv;
638 drm_i915_cmdbuffer_t *cmdbuf = data;
639 struct drm_clip_rect *cliprects = NULL;
643 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
644 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
646 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
648 if (cmdbuf->num_cliprects < 0)
651 batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
652 if (batch_data == NULL)
655 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
657 goto fail_batch_free;
659 if (cmdbuf->num_cliprects) {
660 cliprects = kcalloc(cmdbuf->num_cliprects,
661 sizeof(struct drm_clip_rect), GFP_KERNEL);
662 if (cliprects == NULL) {
664 goto fail_batch_free;
667 ret = copy_from_user(cliprects, cmdbuf->cliprects,
668 cmdbuf->num_cliprects *
669 sizeof(struct drm_clip_rect));
674 mutex_lock(&dev->struct_mutex);
675 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
676 mutex_unlock(&dev->struct_mutex);
678 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
683 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
693 static int i915_flip_bufs(struct drm_device *dev, void *data,
694 struct drm_file *file_priv)
698 DRM_DEBUG_DRIVER("%s\n", __func__);
700 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
702 mutex_lock(&dev->struct_mutex);
703 ret = i915_dispatch_flip(dev);
704 mutex_unlock(&dev->struct_mutex);
709 static int i915_getparam(struct drm_device *dev, void *data,
710 struct drm_file *file_priv)
712 drm_i915_private_t *dev_priv = dev->dev_private;
713 drm_i915_getparam_t *param = data;
717 DRM_ERROR("called with no initialization\n");
721 switch (param->param) {
722 case I915_PARAM_IRQ_ACTIVE:
723 value = dev->pdev->irq ? 1 : 0;
725 case I915_PARAM_ALLOW_BATCHBUFFER:
726 value = dev_priv->allow_batchbuffer ? 1 : 0;
728 case I915_PARAM_LAST_DISPATCH:
729 value = READ_BREADCRUMB(dev_priv);
731 case I915_PARAM_CHIPSET_ID:
732 value = dev->pci_device;
734 case I915_PARAM_HAS_GEM:
735 value = dev_priv->has_gem;
737 case I915_PARAM_NUM_FENCES_AVAIL:
738 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
740 case I915_PARAM_HAS_OVERLAY:
741 value = dev_priv->overlay ? 1 : 0;
743 case I915_PARAM_HAS_PAGEFLIPPING:
746 case I915_PARAM_HAS_EXECBUF2:
748 value = dev_priv->has_gem;
750 case I915_PARAM_HAS_BSD:
751 value = HAS_BSD(dev);
754 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
759 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
760 DRM_ERROR("DRM_COPY_TO_USER failed\n");
767 static int i915_setparam(struct drm_device *dev, void *data,
768 struct drm_file *file_priv)
770 drm_i915_private_t *dev_priv = dev->dev_private;
771 drm_i915_setparam_t *param = data;
774 DRM_ERROR("called with no initialization\n");
778 switch (param->param) {
779 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
781 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
782 dev_priv->tex_lru_log_granularity = param->value;
784 case I915_SETPARAM_ALLOW_BATCHBUFFER:
785 dev_priv->allow_batchbuffer = param->value;
787 case I915_SETPARAM_NUM_USED_FENCES:
788 if (param->value > dev_priv->num_fence_regs ||
791 /* Userspace can use first N regs */
792 dev_priv->fence_reg_start = param->value;
795 DRM_DEBUG_DRIVER("unknown parameter %d\n",
803 static int i915_set_status_page(struct drm_device *dev, void *data,
804 struct drm_file *file_priv)
806 drm_i915_private_t *dev_priv = dev->dev_private;
807 drm_i915_hws_addr_t *hws = data;
808 struct intel_ring_buffer *ring = &dev_priv->render_ring;
810 if (!I915_NEED_GFX_HWS(dev))
814 DRM_ERROR("called with no initialization\n");
818 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
819 WARN(1, "tried to set status page when mode setting active\n");
823 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
825 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
827 dev_priv->hws_map.offset = dev->agp->base + hws->addr;
828 dev_priv->hws_map.size = 4*1024;
829 dev_priv->hws_map.type = 0;
830 dev_priv->hws_map.flags = 0;
831 dev_priv->hws_map.mtrr = 0;
833 drm_core_ioremap_wc(&dev_priv->hws_map, dev);
834 if (dev_priv->hws_map.handle == NULL) {
835 i915_dma_cleanup(dev);
836 ring->status_page.gfx_addr = 0;
837 DRM_ERROR("can not ioremap virtual address for"
838 " G33 hw status page\n");
841 ring->status_page.page_addr = dev_priv->hws_map.handle;
842 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
843 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
845 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
846 ring->status_page.gfx_addr);
847 DRM_DEBUG_DRIVER("load hws at %p\n",
848 ring->status_page.page_addr);
852 static int i915_get_bridge_dev(struct drm_device *dev)
854 struct drm_i915_private *dev_priv = dev->dev_private;
856 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
857 if (!dev_priv->bridge_dev) {
858 DRM_ERROR("bridge device not found\n");
864 #define MCHBAR_I915 0x44
865 #define MCHBAR_I965 0x48
866 #define MCHBAR_SIZE (4*4096)
868 #define DEVEN_REG 0x54
869 #define DEVEN_MCHBAR_EN (1 << 28)
871 /* Allocate space for the MCH regs if needed, return nonzero on error */
873 intel_alloc_mchbar_resource(struct drm_device *dev)
875 drm_i915_private_t *dev_priv = dev->dev_private;
876 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
877 u32 temp_lo, temp_hi = 0;
882 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
883 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
884 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
886 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
889 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
895 /* Get some space for it */
896 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
897 MCHBAR_SIZE, MCHBAR_SIZE,
899 0, pcibios_align_resource,
900 dev_priv->bridge_dev);
902 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
903 dev_priv->mch_res.start = 0;
908 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
909 upper_32_bits(dev_priv->mch_res.start));
911 pci_write_config_dword(dev_priv->bridge_dev, reg,
912 lower_32_bits(dev_priv->mch_res.start));
917 /* Set up MCHBAR if possible; mchbar_need_disable records whether we should disable it again on teardown */
919 intel_setup_mchbar(struct drm_device *dev)
921 drm_i915_private_t *dev_priv = dev->dev_private;
922 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
926 dev_priv->mchbar_need_disable = false;
928 if (IS_I915G(dev) || IS_I915GM(dev)) {
929 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
930 enabled = !!(temp & DEVEN_MCHBAR_EN);
932 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
936 /* If it's already enabled, don't have to do anything */
940 if (intel_alloc_mchbar_resource(dev))
943 dev_priv->mchbar_need_disable = true;
945 /* Space is allocated or reserved, so enable it. */
946 if (IS_I915G(dev) || IS_I915GM(dev)) {
947 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
948 temp | DEVEN_MCHBAR_EN);
950 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
951 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
956 intel_teardown_mchbar(struct drm_device *dev)
958 drm_i915_private_t *dev_priv = dev->dev_private;
959 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
962 if (dev_priv->mchbar_need_disable) {
963 if (IS_I915G(dev) || IS_I915GM(dev)) {
964 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
965 temp &= ~DEVEN_MCHBAR_EN;
966 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
968 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
970 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
974 if (dev_priv->mch_res.start)
975 release_resource(&dev_priv->mch_res);
979 * i915_probe_agp - get AGP bootup configuration
981 * @aperture_size: returns AGP aperture configured size
982 * @preallocated_size: returns size of BIOS preallocated AGP space
984 * Since Intel integrated graphics are UMA, the BIOS has to set aside
985 * some RAM for the framebuffer at early boot. This code figures out
986 * how much was set aside so we can use it for our own purposes.
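/*
 * Worked example of the sizing below (numbers assumed): with a 256 MiB
 * aperture and the BIOS reporting 32 MiB of stolen memory, the chipsets that
 * take the generic overhead formula get
 *
 *	overhead           = (256 MiB / 1024) + 4096  = 256 KiB + 4 KiB
 *	*preallocated_size = 32 MiB - overhead        = ~31.75 MiB usable
 *
 * i.e. the GTT (1 KiB per MiB of aperture) and the 4 KiB popup are carved out
 * of the stolen region before the remainder is handed to the driver.
 */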
988 static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
989 uint32_t *preallocated_size,
992 struct drm_i915_private *dev_priv = dev->dev_private;
994 unsigned long overhead;
995 unsigned long stolen;
997 /* Get the fb aperture size and "stolen" memory amount. */
998 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
1000 *aperture_size = 1024 * 1024;
1001 *preallocated_size = 1024 * 1024;
1003 switch (dev->pdev->device) {
1004 case PCI_DEVICE_ID_INTEL_82830_CGC:
1005 case PCI_DEVICE_ID_INTEL_82845G_IG:
1006 case PCI_DEVICE_ID_INTEL_82855GM_IG:
1007 case PCI_DEVICE_ID_INTEL_82865_IG:
1008 if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
1009 *aperture_size *= 64;
1011 *aperture_size *= 128;
1014 /* 9xx supports large sizes, just look at the length */
1015 *aperture_size = pci_resource_len(dev->pdev, 2);
1020 * Some of the preallocated space is taken by the GTT
1021 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
1023 if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
1026 overhead = (*aperture_size / 1024) + 4096;
1029 /* SNB has memory control reg at 0x50.w */
1030 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
1032 switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
1033 case INTEL_855_GMCH_GMS_DISABLED:
1034 DRM_ERROR("video memory is disabled\n");
1036 case SNB_GMCH_GMS_STOLEN_32M:
1037 stolen = 32 * 1024 * 1024;
1039 case SNB_GMCH_GMS_STOLEN_64M:
1040 stolen = 64 * 1024 * 1024;
1042 case SNB_GMCH_GMS_STOLEN_96M:
1043 stolen = 96 * 1024 * 1024;
1045 case SNB_GMCH_GMS_STOLEN_128M:
1046 stolen = 128 * 1024 * 1024;
1048 case SNB_GMCH_GMS_STOLEN_160M:
1049 stolen = 160 * 1024 * 1024;
1051 case SNB_GMCH_GMS_STOLEN_192M:
1052 stolen = 192 * 1024 * 1024;
1054 case SNB_GMCH_GMS_STOLEN_224M:
1055 stolen = 224 * 1024 * 1024;
1057 case SNB_GMCH_GMS_STOLEN_256M:
1058 stolen = 256 * 1024 * 1024;
1060 case SNB_GMCH_GMS_STOLEN_288M:
1061 stolen = 288 * 1024 * 1024;
1063 case SNB_GMCH_GMS_STOLEN_320M:
1064 stolen = 320 * 1024 * 1024;
1066 case SNB_GMCH_GMS_STOLEN_352M:
1067 stolen = 352 * 1024 * 1024;
1069 case SNB_GMCH_GMS_STOLEN_384M:
1070 stolen = 384 * 1024 * 1024;
1072 case SNB_GMCH_GMS_STOLEN_416M:
1073 stolen = 416 * 1024 * 1024;
1075 case SNB_GMCH_GMS_STOLEN_448M:
1076 stolen = 448 * 1024 * 1024;
1078 case SNB_GMCH_GMS_STOLEN_480M:
1079 stolen = 480 * 1024 * 1024;
1081 case SNB_GMCH_GMS_STOLEN_512M:
1082 stolen = 512 * 1024 * 1024;
1085 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1086 tmp & SNB_GMCH_GMS_STOLEN_MASK);
1090 switch (tmp & INTEL_GMCH_GMS_MASK) {
1091 case INTEL_855_GMCH_GMS_DISABLED:
1092 DRM_ERROR("video memory is disabled\n");
1094 case INTEL_855_GMCH_GMS_STOLEN_1M:
1095 stolen = 1 * 1024 * 1024;
1097 case INTEL_855_GMCH_GMS_STOLEN_4M:
1098 stolen = 4 * 1024 * 1024;
1100 case INTEL_855_GMCH_GMS_STOLEN_8M:
1101 stolen = 8 * 1024 * 1024;
1103 case INTEL_855_GMCH_GMS_STOLEN_16M:
1104 stolen = 16 * 1024 * 1024;
1106 case INTEL_855_GMCH_GMS_STOLEN_32M:
1107 stolen = 32 * 1024 * 1024;
1109 case INTEL_915G_GMCH_GMS_STOLEN_48M:
1110 stolen = 48 * 1024 * 1024;
1112 case INTEL_915G_GMCH_GMS_STOLEN_64M:
1113 stolen = 64 * 1024 * 1024;
1115 case INTEL_GMCH_GMS_STOLEN_128M:
1116 stolen = 128 * 1024 * 1024;
1118 case INTEL_GMCH_GMS_STOLEN_256M:
1119 stolen = 256 * 1024 * 1024;
1121 case INTEL_GMCH_GMS_STOLEN_96M:
1122 stolen = 96 * 1024 * 1024;
1124 case INTEL_GMCH_GMS_STOLEN_160M:
1125 stolen = 160 * 1024 * 1024;
1127 case INTEL_GMCH_GMS_STOLEN_224M:
1128 stolen = 224 * 1024 * 1024;
1130 case INTEL_GMCH_GMS_STOLEN_352M:
1131 stolen = 352 * 1024 * 1024;
1134 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1135 tmp & INTEL_GMCH_GMS_MASK);
1140 *preallocated_size = stolen - overhead;
1146 #define PTE_ADDRESS_MASK 0xfffff000
1147 #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
1148 #define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
1149 #define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
1150 #define PTE_MAPPING_TYPE_CACHED (3 << 1)
1151 #define PTE_MAPPING_TYPE_MASK (3 << 1)
1152 #define PTE_VALID (1 << 0)
1155 * i915_gtt_to_phys - take a GTT address and turn it into a physical one
1157 * @gtt_addr: address to translate
1159 * Some chip functions require allocations from stolen space but need the
1160 * physical address of the memory in question. We use this routine
1161 * to get a physical address suitable for register programming from a given GTT address.
1164 static unsigned long i915_gtt_to_phys(struct drm_device *dev,
1165 unsigned long gtt_addr)
1168 unsigned long entry, phys;
1169 int gtt_bar = IS_I9XX(dev) ? 0 : 1;
1170 int gtt_offset, gtt_size;
1172 if (IS_I965G(dev)) {
1173 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
1174 gtt_offset = 2*1024*1024;
1175 gtt_size = 2*1024*1024;
1177 gtt_offset = 512*1024;
1178 gtt_size = 512*1024;
1183 gtt_size = pci_resource_len(dev->pdev, gtt_bar);
1186 gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
1189 DRM_ERROR("ioremap of GTT failed\n");
1193 entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
1195 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
1197 /* Mask out these reserved bits on this hardware. */
1198 if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
1199 IS_I945G(dev) || IS_I945GM(dev)) {
1200 entry &= ~PTE_ADDRESS_MASK_HIGH;
1203 /* If it's not a mapping type we know, then bail. */
1204 if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
1205 (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
1210 if (!(entry & PTE_VALID)) {
1211 DRM_ERROR("bad GTT entry in stolen space\n");
1218 phys = (entry & PTE_ADDRESS_MASK) |
1219 ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
1221 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
1226 static void i915_warn_stolen(struct drm_device *dev)
1228 DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
1229 DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
1232 static void i915_setup_compression(struct drm_device *dev, int size)
1234 struct drm_i915_private *dev_priv = dev->dev_private;
1235 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
1236 unsigned long cfb_base;
1237 unsigned long ll_base = 0;
1239 /* Leave 1M for line length buffer & misc. */
1240 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
1241 if (!compressed_fb) {
1242 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1243 i915_warn_stolen(dev);
1247 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1248 if (!compressed_fb) {
1249 i915_warn_stolen(dev);
1250 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1254 cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
1256 DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
1257 drm_mm_put_block(compressed_fb);
1260 if (!IS_GM45(dev)) {
1261 compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
1263 if (!compressed_llb) {
1264 i915_warn_stolen(dev);
1268 compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
1269 if (!compressed_llb) {
1270 i915_warn_stolen(dev);
1274 ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
1276 DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
1277 drm_mm_put_block(compressed_fb);
1278 drm_mm_put_block(compressed_llb);
1282 dev_priv->cfb_size = size;
1284 intel_disable_fbc(dev);
1285 dev_priv->compressed_fb = compressed_fb;
1288 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
1290 I915_WRITE(FBC_CFB_BASE, cfb_base);
1291 I915_WRITE(FBC_LL_BASE, ll_base);
1292 dev_priv->compressed_llb = compressed_llb;
1295 DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
1296 ll_base, size >> 20);
1299 static void i915_cleanup_compression(struct drm_device *dev)
1301 struct drm_i915_private *dev_priv = dev->dev_private;
1303 drm_mm_put_block(dev_priv->compressed_fb);
1305 drm_mm_put_block(dev_priv->compressed_llb);
1308 /* true = enable VGA decode, false = disable VGA decode */
1309 static unsigned int i915_vga_set_decode(void *cookie, bool state)
1311 struct drm_device *dev = cookie;
1313 intel_modeset_vga_set_state(dev, state);
1315 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1316 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1318 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1321 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1323 struct drm_device *dev = pci_get_drvdata(pdev);
1324 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1325 if (state == VGA_SWITCHEROO_ON) {
1326 printk(KERN_INFO "i915: switched on\n");
1327 /* i915 resume handler doesn't set to D0 */
1328 pci_set_power_state(dev->pdev, PCI_D0);
1330 drm_kms_helper_poll_enable(dev);
1332 printk(KERN_ERR "i915: switched off\n");
1333 drm_kms_helper_poll_disable(dev);
1334 i915_suspend(dev, pmm);
1338 static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1340 struct drm_device *dev = pci_get_drvdata(pdev);
1343 spin_lock(&dev->count_lock);
1344 can_switch = (dev->open_count == 0);
1345 spin_unlock(&dev->count_lock);
1349 static int i915_load_modeset_init(struct drm_device *dev,
1350 unsigned long prealloc_start,
1351 unsigned long prealloc_size,
1352 unsigned long agp_size)
1354 struct drm_i915_private *dev_priv = dev->dev_private;
1355 int fb_bar = IS_I9XX(dev) ? 2 : 0;
1358 dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
1361 /* Basic memrange allocator for stolen space (aka vram) */
1362 drm_mm_init(&dev_priv->vram, 0, prealloc_size);
1363 DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
1365 /* We're off and running w/KMS */
1366 dev_priv->mm.suspended = 0;
1368 /* Let GEM manage from the end of the prealloc space to the end of the aperture.
1370 * However, leave one page at the end still bound to the scratch page.
1371 * There are a number of places where the hardware apparently
1372 * prefetches past the end of the object, and we've seen multiple
1373 * hangs with the GPU head pointer stuck in a batchbuffer bound
1374 * at the last page of the aperture. One page should be enough to
1375 * keep any prefetching inside of the aperture.
1377 i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
1379 mutex_lock(&dev->struct_mutex);
1380 ret = i915_gem_init_ringbuffer(dev);
1381 mutex_unlock(&dev->struct_mutex);
1385 /* Try to set up FBC with a reasonable compressed buffer size */
1386 if (I915_HAS_FBC(dev) && i915_powersave) {
1389 /* Try to get an 8M buffer... */
1390 if (prealloc_size > (9*1024*1024))
1391 cfb_size = 8*1024*1024;
1392 else /* fall back to 7/8 of the stolen space */
1393 cfb_size = prealloc_size * 7 / 8;
1394 i915_setup_compression(dev, cfb_size);
1397 /* Allow hardware batchbuffers unless told otherwise.
1399 dev_priv->allow_batchbuffer = 1;
1401 ret = intel_init_bios(dev);
1403 DRM_INFO("failed to find VBIOS tables\n");
1405 /* if we have more than one VGA card, disable the other cards' legacy VGA resources */
1406 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1408 goto cleanup_ringbuffer;
1410 ret = vga_switcheroo_register_client(dev->pdev,
1411 i915_switcheroo_set_state,
1412 i915_switcheroo_can_switch);
1414 goto cleanup_vga_client;
1416 /* IIR "flip pending" bit means done if this bit is set */
1417 if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
1418 dev_priv->flip_pending_is_done = true;
1420 intel_modeset_init(dev);
1422 ret = drm_irq_install(dev);
1424 goto cleanup_vga_switcheroo;
1426 /* Always safe in the mode setting case. */
1427 /* FIXME: do pre/post-mode set stuff in core KMS code */
1428 dev->vblank_disable_allowed = 1;
1431 * Initialize the hardware status page IRQ location.
1434 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
1436 ret = intel_fbdev_init(dev);
1440 drm_kms_helper_poll_init(dev);
1444 drm_irq_uninstall(dev);
1445 cleanup_vga_switcheroo:
1446 vga_switcheroo_unregister_client(dev->pdev);
1448 vga_client_register(dev->pdev, NULL, NULL, NULL);
1450 mutex_lock(&dev->struct_mutex);
1451 i915_gem_cleanup_ringbuffer(dev);
1452 mutex_unlock(&dev->struct_mutex);
1457 int i915_master_create(struct drm_device *dev, struct drm_master *master)
1459 struct drm_i915_master_private *master_priv;
1461 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1465 master->driver_priv = master_priv;
1469 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1471 struct drm_i915_master_private *master_priv = master->driver_priv;
1478 master->driver_priv = NULL;
1481 static void i915_pineview_get_mem_freq(struct drm_device *dev)
1483 drm_i915_private_t *dev_priv = dev->dev_private;
1486 tmp = I915_READ(CLKCFG);
1488 switch (tmp & CLKCFG_FSB_MASK) {
1489 case CLKCFG_FSB_533:
1490 dev_priv->fsb_freq = 533; /* 133*4 */
1492 case CLKCFG_FSB_800:
1493 dev_priv->fsb_freq = 800; /* 200*4 */
1495 case CLKCFG_FSB_667:
1496 dev_priv->fsb_freq = 667; /* 167*4 */
1498 case CLKCFG_FSB_400:
1499 dev_priv->fsb_freq = 400; /* 100*4 */
1503 switch (tmp & CLKCFG_MEM_MASK) {
1504 case CLKCFG_MEM_533:
1505 dev_priv->mem_freq = 533;
1507 case CLKCFG_MEM_667:
1508 dev_priv->mem_freq = 667;
1510 case CLKCFG_MEM_800:
1511 dev_priv->mem_freq = 800;
1515 /* detect pineview DDR3 setting */
1516 tmp = I915_READ(CSHRDDR3CTL);
1517 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
1520 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
1522 drm_i915_private_t *dev_priv = dev->dev_private;
1525 ddrpll = I915_READ16(DDRMPLL1);
1526 csipll = I915_READ16(CSIPLL0);
1528 switch (ddrpll & 0xff) {
1530 dev_priv->mem_freq = 800;
1533 dev_priv->mem_freq = 1066;
1536 dev_priv->mem_freq = 1333;
1539 dev_priv->mem_freq = 1600;
1542 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
1544 dev_priv->mem_freq = 0;
1548 dev_priv->r_t = dev_priv->mem_freq;
1550 switch (csipll & 0x3ff) {
1552 dev_priv->fsb_freq = 3200;
1555 dev_priv->fsb_freq = 3733;
1558 dev_priv->fsb_freq = 4266;
1561 dev_priv->fsb_freq = 4800;
1564 dev_priv->fsb_freq = 5333;
1567 dev_priv->fsb_freq = 5866;
1570 dev_priv->fsb_freq = 6400;
1573 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
1575 dev_priv->fsb_freq = 0;
1579 if (dev_priv->fsb_freq == 3200) {
1581 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
1590 unsigned long vd; /* in .1 mil */
1591 unsigned long vm; /* in .1 mil */
1595 static struct v_table v_table[] = {
1596 { 0, 16125, 15000, 0x7f, },
1597 { 1, 16000, 14875, 0x7e, },
1598 { 2, 15875, 14750, 0x7d, },
1599 { 3, 15750, 14625, 0x7c, },
1600 { 4, 15625, 14500, 0x7b, },
1601 { 5, 15500, 14375, 0x7a, },
1602 { 6, 15375, 14250, 0x79, },
1603 { 7, 15250, 14125, 0x78, },
1604 { 8, 15125, 14000, 0x77, },
1605 { 9, 15000, 13875, 0x76, },
1606 { 10, 14875, 13750, 0x75, },
1607 { 11, 14750, 13625, 0x74, },
1608 { 12, 14625, 13500, 0x73, },
1609 { 13, 14500, 13375, 0x72, },
1610 { 14, 14375, 13250, 0x71, },
1611 { 15, 14250, 13125, 0x70, },
1612 { 16, 14125, 13000, 0x6f, },
1613 { 17, 14000, 12875, 0x6e, },
1614 { 18, 13875, 12750, 0x6d, },
1615 { 19, 13750, 12625, 0x6c, },
1616 { 20, 13625, 12500, 0x6b, },
1617 { 21, 13500, 12375, 0x6a, },
1618 { 22, 13375, 12250, 0x69, },
1619 { 23, 13250, 12125, 0x68, },
1620 { 24, 13125, 12000, 0x67, },
1621 { 25, 13000, 11875, 0x66, },
1622 { 26, 12875, 11750, 0x65, },
1623 { 27, 12750, 11625, 0x64, },
1624 { 28, 12625, 11500, 0x63, },
1625 { 29, 12500, 11375, 0x62, },
1626 { 30, 12375, 11250, 0x61, },
1627 { 31, 12250, 11125, 0x60, },
1628 { 32, 12125, 11000, 0x5f, },
1629 { 33, 12000, 10875, 0x5e, },
1630 { 34, 11875, 10750, 0x5d, },
1631 { 35, 11750, 10625, 0x5c, },
1632 { 36, 11625, 10500, 0x5b, },
1633 { 37, 11500, 10375, 0x5a, },
1634 { 38, 11375, 10250, 0x59, },
1635 { 39, 11250, 10125, 0x58, },
1636 { 40, 11125, 10000, 0x57, },
1637 { 41, 11000, 9875, 0x56, },
1638 { 42, 10875, 9750, 0x55, },
1639 { 43, 10750, 9625, 0x54, },
1640 { 44, 10625, 9500, 0x53, },
1641 { 45, 10500, 9375, 0x52, },
1642 { 46, 10375, 9250, 0x51, },
1643 { 47, 10250, 9125, 0x50, },
1644 { 48, 10125, 9000, 0x4f, },
1645 { 49, 10000, 8875, 0x4e, },
1646 { 50, 9875, 8750, 0x4d, },
1647 { 51, 9750, 8625, 0x4c, },
1648 { 52, 9625, 8500, 0x4b, },
1649 { 53, 9500, 8375, 0x4a, },
1650 { 54, 9375, 8250, 0x49, },
1651 { 55, 9250, 8125, 0x48, },
1652 { 56, 9125, 8000, 0x47, },
1653 { 57, 9000, 7875, 0x46, },
1654 { 58, 8875, 7750, 0x45, },
1655 { 59, 8750, 7625, 0x44, },
1656 { 60, 8625, 7500, 0x43, },
1657 { 61, 8500, 7375, 0x42, },
1658 { 62, 8375, 7250, 0x41, },
1659 { 63, 8250, 7125, 0x40, },
1660 { 64, 8125, 7000, 0x3f, },
1661 { 65, 8000, 6875, 0x3e, },
1662 { 66, 7875, 6750, 0x3d, },
1663 { 67, 7750, 6625, 0x3c, },
1664 { 68, 7625, 6500, 0x3b, },
1665 { 69, 7500, 6375, 0x3a, },
1666 { 70, 7375, 6250, 0x39, },
1667 { 71, 7250, 6125, 0x38, },
1668 { 72, 7125, 6000, 0x37, },
1669 { 73, 7000, 5875, 0x36, },
1670 { 74, 6875, 5750, 0x35, },
1671 { 75, 6750, 5625, 0x34, },
1672 { 76, 6625, 5500, 0x33, },
1673 { 77, 6500, 5375, 0x32, },
1674 { 78, 6375, 5250, 0x31, },
1675 { 79, 6250, 5125, 0x30, },
1676 { 80, 6125, 5000, 0x2f, },
1677 { 81, 6000, 4875, 0x2e, },
1678 { 82, 5875, 4750, 0x2d, },
1679 { 83, 5750, 4625, 0x2c, },
1680 { 84, 5625, 4500, 0x2b, },
1681 { 85, 5500, 4375, 0x2a, },
1682 { 86, 5375, 4250, 0x29, },
1683 { 87, 5250, 4125, 0x28, },
1684 { 88, 5125, 4000, 0x27, },
1685 { 89, 5000, 3875, 0x26, },
1686 { 90, 4875, 3750, 0x25, },
1687 { 91, 4750, 3625, 0x24, },
1688 { 92, 4625, 3500, 0x23, },
1689 { 93, 4500, 3375, 0x22, },
1690 { 94, 4375, 3250, 0x21, },
1691 { 95, 4250, 3125, 0x20, },
1692 { 96, 4125, 3000, 0x1f, },
1693 { 97, 4125, 3000, 0x1e, },
1694 { 98, 4125, 3000, 0x1d, },
1695 { 99, 4125, 3000, 0x1c, },
1696 { 100, 4125, 3000, 0x1b, },
1697 { 101, 4125, 3000, 0x1a, },
1698 { 102, 4125, 3000, 0x19, },
1699 { 103, 4125, 3000, 0x18, },
1700 { 104, 4125, 3000, 0x17, },
1701 { 105, 4125, 3000, 0x16, },
1702 { 106, 4125, 3000, 0x15, },
1703 { 107, 4125, 3000, 0x14, },
1704 { 108, 4125, 3000, 0x13, },
1705 { 109, 4125, 3000, 0x12, },
1706 { 110, 4125, 3000, 0x11, },
1707 { 111, 4125, 3000, 0x10, },
1708 { 112, 4125, 3000, 0x0f, },
1709 { 113, 4125, 3000, 0x0e, },
1710 { 114, 4125, 3000, 0x0d, },
1711 { 115, 4125, 3000, 0x0c, },
1712 { 116, 4125, 3000, 0x0b, },
1713 { 117, 4125, 3000, 0x0a, },
1714 { 118, 4125, 3000, 0x09, },
1715 { 119, 4125, 3000, 0x08, },
1716 { 120, 1125, 0, 0x07, },
1717 { 121, 1000, 0, 0x06, },
1718 { 122, 875, 0, 0x05, },
1719 { 123, 750, 0, 0x04, },
1720 { 124, 625, 0, 0x03, },
1721 { 125, 500, 0, 0x02, },
1722 { 126, 375, 0, 0x01, },
1723 { 127, 0, 0, 0x00, },
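/*
 * Example lookup in the table above: a PXVID readback of 0x40 matches the
 * entry { 63, 8250, 7125, 0x40 }, so pvid_to_extvid() below yields
 *
 *	val = IS_MOBILE(dev_priv->dev) ? 7125 : 8250;	// vm vs. vd column
 */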
1733 static struct cparams cparams[] = {
1734 { 1, 1333, 301, 28664 },
1735 { 1, 1066, 294, 24460 },
1736 { 1, 800, 294, 25192 },
1737 { 0, 1333, 276, 27605 },
1738 { 0, 1066, 276, 27605 },
1739 { 0, 800, 231, 23784 },
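/*
 * Sketch of how the table above feeds i915_chipset_val() below: the three
 * energy counters are summed, differenced against the previous sample,
 * normalised by the elapsed time, then scaled by the m/c pair of the row
 * whose (i, t) fields match dev_priv->c_m and dev_priv->r_t:
 *
 *	diff  = (DMIEC + DDREC + CSIEC) - last_count1;	// counter delta
 *	rate  = div_u64(diff, msecs_since_last_sample);
 *	value = m * rate + c;				// slope + offset
 */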
1742 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1744 u64 total_count, diff, ret;
1745 u32 count1, count2, count3, m = 0, c = 0;
1746 unsigned long now = jiffies_to_msecs(jiffies), diff1;
1749 diff1 = now - dev_priv->last_time1;
1751 count1 = I915_READ(DMIEC);
1752 count2 = I915_READ(DDREC);
1753 count3 = I915_READ(CSIEC);
1755 total_count = count1 + count2 + count3;
1757 /* FIXME: handle per-counter overflow */
1758 if (total_count < dev_priv->last_count1) {
1759 diff = ~0UL - dev_priv->last_count1;
1760 diff += total_count;
1762 diff = total_count - dev_priv->last_count1;
1765 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
1766 if (cparams[i].i == dev_priv->c_m &&
1767 cparams[i].t == dev_priv->r_t) {
1774 diff = div_u64(diff, diff1);
1775 ret = ((m * diff) + c);
1778 dev_priv->last_count1 = total_count;
1779 dev_priv->last_time1 = now;
1784 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
1786 unsigned long m, x, b;
1789 tsfs = I915_READ(TSFS);
1791 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
1792 x = I915_READ8(TR1);
1794 b = tsfs & TSFS_INTR_MASK;
1796 return ((m * x) / 127) - b;
1799 static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
1801 unsigned long val = 0;
1804 for (i = 0; i < ARRAY_SIZE(v_table); i++) {
1805 if (v_table[i].pvid == pxvid) {
1806 if (IS_MOBILE(dev_priv->dev))
1807 val = v_table[i].vm;
1809 val = v_table[i].vd;
1816 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1818 struct timespec now, diff1;
1820 unsigned long diffms;
1823 getrawmonotonic(&now);
1824 diff1 = timespec_sub(now, dev_priv->last_time2);
1826 /* Don't divide by 0 */
1827 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
1831 count = I915_READ(GFXEC);
1833 if (count < dev_priv->last_count2) {
1834 diff = ~0UL - dev_priv->last_count2;
1837 diff = count - dev_priv->last_count2;
1840 dev_priv->last_count2 = count;
1841 dev_priv->last_time2 = now;
1843 /* More magic constants... */
1845 diff = div_u64(diff, diffms * 10);
1846 dev_priv->gfx_power = diff;
1849 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
1851 unsigned long t, corr, state1, corr2, state2;
1854 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
1855 pxvid = (pxvid >> 24) & 0x7f;
1856 ext_v = pvid_to_extvid(dev_priv, pxvid);
1860 t = i915_mch_val(dev_priv);
1862 /* Revel in the empirically derived constants */
1864 /* Correction factor in 1/100000 units */
1866 corr = ((t * 2349) + 135940);
1868 corr = ((t * 964) + 29317);
1870 corr = ((t * 301) + 1004);
1872 corr = corr * ((150142 * state1) / 10000 - 78642);
1874 corr2 = (corr * dev_priv->corr);
1876 state2 = (corr2 * state1) / 10000;
1877 state2 /= 100; /* convert to mW */
1879 i915_update_gfx_val(dev_priv);
1881 return dev_priv->gfx_power + state2;
1884 /* Global for IPS driver to get at the current i915 device */
1885 static struct drm_i915_private *i915_mch_dev;
1887 * Lock protecting IPS related data structures
1889 * - dev_priv->max_delay
1890 * - dev_priv->min_delay
1892 * - dev_priv->gpu_busy
1894 DEFINE_SPINLOCK(mchdev_lock);
1897 * i915_read_mch_val - return value for IPS use
1899 * Calculate and return a value for the IPS driver to use when deciding whether
1900 * we have thermal and power headroom to increase CPU or GPU power budget.
1902 unsigned long i915_read_mch_val(void)
1904 struct drm_i915_private *dev_priv;
1905 unsigned long chipset_val, graphics_val, ret = 0;
1907 spin_lock(&mchdev_lock);
1910 dev_priv = i915_mch_dev;
1912 chipset_val = i915_chipset_val(dev_priv);
1913 graphics_val = i915_gfx_val(dev_priv);
1915 ret = chipset_val + graphics_val;
1918 spin_unlock(&mchdev_lock);
1922 EXPORT_SYMBOL_GPL(i915_read_mch_val);
1925 * i915_gpu_raise - raise GPU frequency limit
1927 * Raise the limit; IPS indicates we have thermal headroom.
1929 bool i915_gpu_raise(void)
1931 struct drm_i915_private *dev_priv;
1934 spin_lock(&mchdev_lock);
1935 if (!i915_mch_dev) {
1939 dev_priv = i915_mch_dev;
1941 if (dev_priv->max_delay > dev_priv->fmax)
1942 dev_priv->max_delay--;
1945 spin_unlock(&mchdev_lock);
1949 EXPORT_SYMBOL_GPL(i915_gpu_raise);
1952 * i915_gpu_lower - lower GPU frequency limit
1954 * IPS indicates we're close to a thermal limit, so throttle back the GPU
1955 * frequency maximum.
1957 bool i915_gpu_lower(void)
1959 struct drm_i915_private *dev_priv;
1962 spin_lock(&mchdev_lock);
1963 if (!i915_mch_dev) {
1967 dev_priv = i915_mch_dev;
1969 if (dev_priv->max_delay < dev_priv->min_delay)
1970 dev_priv->max_delay++;
1973 spin_unlock(&mchdev_lock);
1977 EXPORT_SYMBOL_GPL(i915_gpu_lower);
1980 * i915_gpu_busy - indicate GPU busyness to IPS
1982 * Tell the IPS driver whether or not the GPU is busy.
1984 bool i915_gpu_busy(void)
1986 struct drm_i915_private *dev_priv;
1989 spin_lock(&mchdev_lock);
1992 dev_priv = i915_mch_dev;
1994 ret = dev_priv->busy;
1997 spin_unlock(&mchdev_lock);
2001 EXPORT_SYMBOL_GPL(i915_gpu_busy);
2004 * i915_gpu_turbo_disable - disable graphics turbo
2006 * Disable graphics turbo by resetting the max frequency and setting the
2007 * current frequency to the default.
2009 bool i915_gpu_turbo_disable(void)
2011 struct drm_i915_private *dev_priv;
2014 spin_lock(&mchdev_lock);
2015 if (!i915_mch_dev) {
2019 dev_priv = i915_mch_dev;
2021 dev_priv->max_delay = dev_priv->fstart;
2023 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
2027 spin_unlock(&mchdev_lock);
2031 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
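/*
 * Sketch of how an IPS-style consumer is expected to drive the exports above
 * (the policy and package_budget threshold are assumptions, not part of this
 * driver):
 *
 *	unsigned long mch_power = i915_read_mch_val();	// chipset + graphics
 *	if (mch_power < package_budget && i915_gpu_busy())
 *		i915_gpu_raise();	// thermal/power headroom: lift the cap
 *	else if (mch_power > package_budget)
 *		i915_gpu_lower();	// near the limit: throttle back
 *	// when turbo must be disabled entirely:
 *	i915_gpu_turbo_disable();
 */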
2034 * i915_driver_load - setup chip and create an initial config
2036 * @flags: startup flags
2038 * The driver load routine has to do several things:
2039 * - drive output discovery via intel_modeset_init()
2040 * - initialize the memory manager
2041 * - allocate initial config memory
2042 * - setup the DRM framebuffer with the allocated memory
2044 int i915_driver_load(struct drm_device *dev, unsigned long flags)
2046 struct drm_i915_private *dev_priv;
2047 resource_size_t base, size;
2048 int ret = 0, mmio_bar;
2049 uint32_t agp_size, prealloc_size, prealloc_start;
2050 /* i915 has 4 more counters */
2052 dev->types[6] = _DRM_STAT_IRQ;
2053 dev->types[7] = _DRM_STAT_PRIMARY;
2054 dev->types[8] = _DRM_STAT_SECONDARY;
2055 dev->types[9] = _DRM_STAT_DMA;
2057 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
2058 if (dev_priv == NULL)
2061 dev->dev_private = (void *)dev_priv;
2062 dev_priv->dev = dev;
2063 dev_priv->info = (struct intel_device_info *) flags;
2065 /* Add register map (needed for suspend/resume) */
2066 mmio_bar = IS_I9XX(dev) ? 0 : 1;
2067 base = pci_resource_start(dev->pdev, mmio_bar);
2068 size = pci_resource_len(dev->pdev, mmio_bar);
2070 if (i915_get_bridge_dev(dev)) {
2075 dev_priv->regs = ioremap(base, size);
2076 if (!dev_priv->regs) {
2077 DRM_ERROR("failed to map registers\n");
2082 dev_priv->mm.gtt_mapping =
2083 io_mapping_create_wc(dev->agp->base,
2084 dev->agp->agp_info.aper_size * 1024*1024);
2085 if (dev_priv->mm.gtt_mapping == NULL) {
2090 /* Set up a WC MTRR for non-PAT systems. This is more common than
2091 * one would think, because the kernel disables PAT on first
2092 * generation Core chips because WC PAT gets overridden by a UC
2093 * MTRR if present. Even if a UC MTRR isn't present.
2095 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
2096 dev->agp->agp_info.aper_size *
2098 MTRR_TYPE_WRCOMB, 1);
2099 if (dev_priv->mm.gtt_mtrr < 0) {
2100 DRM_INFO("MTRR allocation failed. Graphics "
2101 "performance may suffer.\n");
2104 ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
2108 dev_priv->wq = create_singlethread_workqueue("i915");
2109 if (dev_priv->wq == NULL) {
2110 DRM_ERROR("Failed to create our workqueue.\n");
2115 /* enable GEM by default */
2116 dev_priv->has_gem = 1;
2118 if (prealloc_size > agp_size * 3 / 4) {
2119 DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
2121 prealloc_size / 1024, agp_size / 1024);
2122 DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
2123 "updating the BIOS to fix).\n");
2124 dev_priv->has_gem = 0;
2127 if (dev_priv->has_gem == 0 &&
2128 drm_core_check_feature(dev, DRIVER_MODESET)) {
2129 DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
2134 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2135 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2136 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
2137 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2138 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2141 /* Try to make sure MCHBAR is enabled before poking at it */
2142 intel_setup_mchbar(dev);
2147 if (!I915_NEED_GFX_HWS(dev)) {
2148 ret = i915_init_phys_hws(dev);
2150 goto out_workqueue_free;
2153 if (IS_PINEVIEW(dev))
2154 i915_pineview_get_mem_freq(dev);
2155 else if (IS_IRONLAKE(dev))
2156 i915_ironlake_get_mem_freq(dev);
2158 /* On the 945G/GM, the chipset reports the MSI capability on the
2159 * integrated graphics even though the support isn't actually there
2160 * according to the published specs. It doesn't appear to function
2161 * correctly in testing on 945G.
2162 * This may be a side effect of MSI having been made available for PEG
2163 * and the registers being closely associated.
2165 * According to chipset errata, on the 965GM, MSI interrupts may
2166 * be lost or delayed, but we use them anyway to avoid
2167 * stuck interrupts on some machines.
2169 if (!IS_I945G(dev) && !IS_I945GM(dev))
2170 pci_enable_msi(dev->pdev);
2172 spin_lock_init(&dev_priv->user_irq_lock);
2173 spin_lock_init(&dev_priv->error_lock);
2174 dev_priv->trace_irq_seqno = 0;
2176 ret = drm_vblank_init(dev, I915_NUM_PIPE);
2179 (void) i915_driver_unload(dev);
2183 /* Start out suspended */
2184 dev_priv->mm.suspended = 1;
2186 intel_detect_pch(dev);
2188 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2189 ret = i915_load_modeset_init(dev, prealloc_start,
2190 prealloc_size, agp_size);
2192 DRM_ERROR("failed to init modeset\n");
2193 goto out_workqueue_free;
2197 /* Must be done after probing outputs */
2198 intel_opregion_init(dev, 0);
2200 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2201 (unsigned long) dev);
2203 spin_lock(&mchdev_lock);
2204 i915_mch_dev = dev_priv;
2205 dev_priv->mchdev_lock = &mchdev_lock;
2206 spin_unlock(&mchdev_lock);
2211 destroy_workqueue(dev_priv->wq);
2213 io_mapping_free(dev_priv->mm.gtt_mapping);
2215 iounmap(dev_priv->regs);
2217 pci_dev_put(dev_priv->bridge_dev);
2223 int i915_driver_unload(struct drm_device *dev)
2225 struct drm_i915_private *dev_priv = dev->dev_private;
2227 i915_destroy_error_state(dev);
2229 spin_lock(&mchdev_lock);
2230 i915_mch_dev = NULL;
2231 spin_unlock(&mchdev_lock);
2233 destroy_workqueue(dev_priv->wq);
2234 del_timer_sync(&dev_priv->hangcheck_timer);
2236 io_mapping_free(dev_priv->mm.gtt_mapping);
2237 if (dev_priv->mm.gtt_mtrr >= 0) {
2238 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
2239 dev->agp->agp_info.aper_size * 1024 * 1024);
2240 dev_priv->mm.gtt_mtrr = -1;
2243 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2244 intel_modeset_cleanup(dev);
2247 * free the memory space allocated for the child device
2248 * config parsed from VBT
2250 if (dev_priv->child_dev && dev_priv->child_dev_num) {
2251 kfree(dev_priv->child_dev);
2252 dev_priv->child_dev = NULL;
2253 dev_priv->child_dev_num = 0;
2255 drm_irq_uninstall(dev);
2256 vga_switcheroo_unregister_client(dev->pdev);
2257 vga_client_register(dev->pdev, NULL, NULL, NULL);
2260 if (dev->pdev->msi_enabled)
2261 pci_disable_msi(dev->pdev);
2263 if (dev_priv->regs != NULL)
2264 iounmap(dev_priv->regs);
2266 intel_opregion_free(dev, 0);
2268 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2269 i915_gem_free_all_phys_object(dev);
2271 mutex_lock(&dev->struct_mutex);
2272 i915_gem_cleanup_ringbuffer(dev);
2273 mutex_unlock(&dev->struct_mutex);
2274 if (I915_HAS_FBC(dev) && i915_powersave)
2275 i915_cleanup_compression(dev);
2276 drm_mm_takedown(&dev_priv->vram);
2277 i915_gem_lastclose(dev);
2279 intel_cleanup_overlay(dev);
2282 intel_teardown_mchbar(dev);
2284 pci_dev_put(dev_priv->bridge_dev);
2285 kfree(dev->dev_private);
2290 int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
2292 struct drm_i915_file_private *i915_file_priv;
2294 DRM_DEBUG_DRIVER("\n");
2295 i915_file_priv = (struct drm_i915_file_private *)
2296 kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
2298 if (!i915_file_priv)
2301 file_priv->driver_priv = i915_file_priv;
2303 INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
2309 * i915_driver_lastclose - clean up after all DRM clients have exited
2312 * Take care of cleaning up after all DRM clients have exited. In the
2313 * mode setting case, we want to restore the kernel's initial mode (just
2314 * in case the last client left us in a bad state).
2316 * Additionally, in the non-mode setting case, we'll tear down the AGP
2317 * and DMA structures, since the kernel won't be using them, and clean up any state that may be set.
2320 void i915_driver_lastclose(struct drm_device * dev)
2322 drm_i915_private_t *dev_priv = dev->dev_private;
2324 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
2325 drm_fb_helper_restore();
2326 vga_switcheroo_process_delayed_switch();
2330 i915_gem_lastclose(dev);
2332 if (dev_priv->agp_heap)
2333 i915_mem_takedown(&(dev_priv->agp_heap));
2335 i915_dma_cleanup(dev);
2338 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
2340 drm_i915_private_t *dev_priv = dev->dev_private;
2341 i915_gem_release(dev, file_priv);
2342 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2343 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
2346 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
2348 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
2350 kfree(i915_file_priv);
2353 struct drm_ioctl_desc i915_ioctls[] = {
2354 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2355 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
2356 DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
2357 DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
2358 DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
2359 DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
2360 DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
2361 DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2362 DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
2363 DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
2364 DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2365 DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
2366 DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
2367 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
2368 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
2369 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
2370 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2371 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2372 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
2373 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
2374 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
2375 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
2376 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
2377 DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
2378 DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2379 DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2380 DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
2381 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
2382 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
2383 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
2384 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
2385 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
2386 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
2387 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
2388 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
2389 DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
2390 DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
2391 DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
2392 DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2393 DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2396 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
2399 * Determine if the device really is AGP or not.
2401 * All Intel graphics chipsets are treated as AGP, even if they are really PCI-e.
2404 * \param dev The device to be tested.
2407 * A value of 1 is always returned to indicate every i9x5 is AGP.
2409 int i915_driver_device_is_agp(struct drm_device * dev)