drm/nv50-nvc0: work around an evo channel hang that some people see
drivers/gpu/drm/nouveau/nv50_evo.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

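/* Tear down an EVO channel: release its channel id, drop its gpuobjs,
 * unmap and release the pushbuf BO and control registers, then free it.
 */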
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_channel *evo = *pevo;

	if (!evo)
		return;
	*pevo = NULL;

	dev_priv = evo->dev->dev_private;
	dev_priv->evo_alloc &= ~(1 << evo->id);

	nouveau_gpuobj_channel_takedown(evo);
	nouveau_bo_unmap(evo->pushbuf_bo);
	nouveau_bo_ref(NULL, &evo->pushbuf_bo);

	if (evo->user)
		iounmap(evo->user);

	kfree(evo);
}

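/* Build a DMA object covering [offset, limit] with the given class and
 * flags, and hang it off the EVO channel's RAMHT under "name".
 */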
int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
		    u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
		    u32 flags5)
{
	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
	struct drm_device *dev = evo->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
	if (ret)
		return ret;
	obj->engine = NVOBJ_ENGINE_DISPLAY;

	nv_wo32(obj,  0, (tile_flags << 22) | (magic_flags << 16) | class);
	nv_wo32(obj,  4, limit);
	nv_wo32(obj,  8, offset);
	nv_wo32(obj, 12, 0x00000000);
	nv_wo32(obj, 16, 0x00000000);
	nv_wo32(obj, 20, flags5);
	dev_priv->engine.instmem.flush(dev);

	ret = nouveau_ramht_insert(evo, name, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	if (ret)
		return ret;

	return 0;
}

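/* Allocate an EVO channel: claim a free channel id, create and map a
 * pushbuf in VRAM, and map the channel's USER control registers.
 */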
static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo;
	int ret;

	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!evo)
		return -ENOMEM;
	*pevo = evo;

	for (evo->id = 0; evo->id < 5; evo->id++) {
		if (dev_priv->evo_alloc & (1 << evo->id))
			continue;

		dev_priv->evo_alloc |= (1 << evo->id);
		break;
	}

	if (evo->id == 5) {
		/* no free channel id; don't leave *pevo pointing at freed memory */
		*pevo = NULL;
		kfree(evo);
		return -ENODEV;
	}

	evo->dev = dev;
	evo->user_get = 4;
	evo->user_put = 0;

	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
			     false, true, &evo->pushbuf_bo);
	if (ret == 0)
		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	ret = nouveau_bo_map(evo->pushbuf_bo);
	if (ret) {
		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
			    NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
	if (!evo->user) {
		NV_ERROR(dev, "Error mapping EVO control regs.\n");
		nv50_evo_channel_del(pevo);
		return -ENOMEM;
	}

	/* bind primary evo channel's ramht to the channel */
	if (dev_priv->evo && evo != dev_priv->evo)
		nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);

	return 0;
}

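/* Bring a channel up: point the hardware at its pushbuf, enable DMA and
 * error reporting, and prime the software ring state.
 */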
static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;
	int id = evo->id, ret, i;
	u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
	u32 tmp;

	/* if the channel looks wedged in one of these states, poke it to
	 * try and un-stick it before (re)initialisation
	 */
	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x009f0000) == 0x00020000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);

	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x003f0000) == 0x00030000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);

	/* initialise fifo */
	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);

	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
		return -EBUSY;
	}

	/* enable error reporting on the channel */
	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);

	/* prime the software view of the push buffer ring */
	evo->dma.max = (4096/4) - 2;
	evo->dma.max &= ~7;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	return 0;
}

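/* Quiesce a channel: mask its error reporting, ack pending interrupts,
 * disable DMA and wait for the hardware to idle.
 */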
static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;
	int id = evo->id;

	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
	nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
	}
}

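/* Create the primary EVO channel along with the instance memory heap,
 * RAMHT and default DMA objects shared by all EVO channels.
 */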
static int
nv50_evo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramht = NULL;
	struct nouveau_channel *evo;
	int ret;

	/* create primary evo channel, the one we use for modesetting
	 * purposes
	 */
	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
	if (ret)
		return ret;
	evo = dev_priv->evo;

	/* setup object management on it, any other evo channel will
	 * use this also as there's no per-channel support on the
	 * hardware
	 */
	ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
				 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
		nv50_evo_channel_del(&dev_priv->evo);
		return ret;
	}

	ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
	if (ret) {
		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
		nv50_evo_channel_del(&dev_priv->evo);
		return ret;
	}

	ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
	if (ret) {
		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
		nv50_evo_channel_del(&dev_priv->evo);
		return ret;
	}

	ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	if (ret) {
		nv50_evo_channel_del(&dev_priv->evo);
		return ret;
	}

	/* create some default objects for the scanout memtypes we support */
	if (dev_priv->card_type >= NV_C0) {
		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
					  0, 0xffffffff, 0x00000000);
		if (ret) {
			nv50_evo_channel_del(&dev_priv->evo);
			return ret;
		}

		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
					  0, dev_priv->vram_size, 0x00020000);
		if (ret) {
			nv50_evo_channel_del(&dev_priv->evo);
			return ret;
		}

		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
					  0, dev_priv->vram_size, 0x00000000);
		if (ret) {
			nv50_evo_channel_del(&dev_priv->evo);
			return ret;
		}
	} else {
		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
					  0, 0xffffffff, 0x00010000);
		if (ret) {
			nv50_evo_channel_del(&dev_priv->evo);
			return ret;
		}

		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
					  0, 0xffffffff, 0x00010000);
		if (ret) {
			nv50_evo_channel_del(&dev_priv->evo);
			return ret;
		}

		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
					  0, dev_priv->vram_size, 0x00010000);
		if (ret) {
			nv50_evo_channel_del(&dev_priv->evo);
			return ret;
		}

		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
					  0, dev_priv->vram_size, 0x00010000);
		if (ret) {
			nv50_evo_channel_del(&dev_priv->evo);
			return ret;
		}
	}

	return 0;
}

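/* Create the primary EVO channel on first use, then (re)initialise it. */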
int
nv50_evo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->evo) {
		ret = nv50_evo_create(dev);
		if (ret)
			return ret;
	}

	return nv50_evo_channel_init(dev_priv->evo);
}

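/* Shut down and destroy the primary EVO channel, if it was ever created. */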
void
nv50_evo_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->evo) {
		nv50_evo_channel_fini(dev_priv->evo);
		nv50_evo_channel_del(&dev_priv->evo);
	}
}