/*
 * hw_mmu.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * API definitions to set up MMU TLB and PTE
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/io.h>
#include "MMURegAcM.h"
#include <hw_defs.h>
#include <hw_mmu.h>
#include <linux/types.h>
#include <linux/err.h>

#define MMU_BASE_VAL_MASK       0xFC00
#define MMU_PAGE_MAX            3
#define MMU_ELEMENTSIZE_MAX     3
#define MMU_ADDR_MASK           0xFFFFF000
#define MMU_TTB_MASK            0xFFFFC000
#define MMU_SECTION_ADDR_MASK   0xFFF00000
#define MMU_SSECTION_ADDR_MASK  0xFF000000
#define MMU_PAGE_TABLE_MASK     0xFFFFFC00
#define MMU_LARGE_PAGE_MASK     0xFFFF0000
#define MMU_SMALL_PAGE_MASK     0xFFFFF000

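/*
 * MMU_LOAD_TLB is the value written to the LD_TLB register to trigger a
 * TLB load; MMU_GFLUSH is the byte offset of the global flush register
 * (both inferred from their use later in this file).
 */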
#define MMU_LOAD_TLB            0x00000001
#define MMU_GFLUSH              0x60

/*
 * hw_mmu_page_size_t: Enumerated type used to specify the MMU page size (SLSS)
 */
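/*
 * Note: the enumerator values (0-3) are written directly into the page-size
 * field of the MMU_CAM register by mmu_set_cam_entry(), so their order
 * matters.
 */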
enum hw_mmu_page_size_t {
        HW_MMU_SECTION,
        HW_MMU_LARGE_PAGE,
        HW_MMU_SMALL_PAGE,
        HW_MMU_SUPERSECTION
};

/*
 * FUNCTION           : mmu_flush_entry
 *
 * INPUTS:
 *
 *       Identifier      : base_address
 *       Type            : const void __iomem *
 *       Description     : Base address of the MMU module instance
 *
 * RETURNS:
 *
 *       Type            : hw_status
 *       Description     : 0                      -- No errors occurred
 *                         RET_BAD_NULL_PARAM     -- A pointer parameter
 *                                                   was set to NULL
 *
 * PURPOSE              : Flush the TLB entry pointed to by the
 *                        lock counter register,
 *                        even if this entry is marked as protected
 *
 * METHOD               : Check the input parameter and flush a
 *                        single entry in the TLB.
 */
static hw_status mmu_flush_entry(const void __iomem *base_address);

/*
 * FUNCTION           : mmu_set_cam_entry
 *
 * INPUTS:
 *
 *       Identifier      : base_address
 *       Type            : const void __iomem *
 *       Description     : Base address of the MMU module instance
 *
 *       Identifier      : page_sz
 *       Type            : const u32
 *       Description     : Indicates the page size
 *
 *       Identifier      : preserved_bit
 *       Type            : const u32
 *       Description     : Indicates whether the TLB entry is a preserved
 *                         entry or not
 *
 *       Identifier      : valid_bit
 *       Type            : const u32
 *       Description     : Indicates whether the TLB entry is a valid entry
 *                         or not
 *
 *       Identifier      : virtual_addr_tag
 *       Type            : const u32
 *       Description     : Virtual address tag
 *
 * RETURNS:
 *
 *       Type            : hw_status
 *       Description     : 0                      -- No errors occurred
 *                         RET_BAD_NULL_PARAM     -- A pointer parameter
 *                                                   was set to NULL
 *                         RET_PARAM_OUT_OF_RANGE -- Input parameter out
 *                                                   of range
 *
 * PURPOSE              : Set the MMU_CAM register
 *
 * METHOD               : Check the input parameters and set the CAM entry.
 */
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
                                   const u32 page_sz,
                                   const u32 preserved_bit,
                                   const u32 valid_bit,
                                   const u32 virtual_addr_tag);

/*
 * FUNCTION           : mmu_set_ram_entry
 *
 * INPUTS:
 *
 *       Identifier      : base_address
 *       Type            : const void __iomem *
 *       Description     : Base address of the MMU module instance
 *
 *       Identifier      : physical_addr
 *       Type            : const u32
 *       Description     : Physical address to which the corresponding
 *                         virtual address should point
 *
 *       Identifier      : endianism
 *       Type            : hw_endianism_t
 *       Description     : Endianness of the given page
 *
 *       Identifier      : element_size
 *       Type            : hw_element_size_t
 *       Description     : The element size (8, 16, 32 or 64 bit)
 *
 *       Identifier      : mixed_size
 *       Type            : hw_mmu_mixed_size_t
 *       Description     : Whether the element size follows the CPU or the TLB
 *
 * RETURNS:
 *
 *       Type            : hw_status
 *       Description     : 0                      -- No errors occurred
 *                         RET_BAD_NULL_PARAM     -- A pointer parameter
 *                                                   was set to NULL
 *                         RET_PARAM_OUT_OF_RANGE -- Input parameter
 *                                                   out of range
 *
 * PURPOSE              : Set the MMU_RAM register
 *
 * METHOD               : Check the input parameters and set the RAM entry.
 */
static hw_status mmu_set_ram_entry(const void __iomem *base_address,
                                   const u32 physical_addr,
                                   enum hw_endianism_t endianism,
                                   enum hw_element_size_t element_size,
                                   enum hw_mmu_mixed_size_t mixed_size);

/* HW FUNCTIONS */

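/*
 * Typical bring-up sequence for these helpers (a rough sketch; error
 * handling omitted, and "mmu_base"/"l1_pa" are assumed names for an
 * ioremap()'d MMU register base and the physical address of the L1
 * page table):
 *
 *      hw_mmu_ttb_set(mmu_base, l1_pa);
 *      hw_mmu_twl_enable(mmu_base);
 *      hw_mmu_enable(mmu_base);
 */
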
hw_status hw_mmu_enable(const void __iomem *base_address)
{
        hw_status status = 0;

        MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);

        return status;
}

hw_status hw_mmu_disable(const void __iomem *base_address)
{
        hw_status status = 0;

        MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);

        return status;
}

hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
                                u32 num_locked_entries)
{
        hw_status status = 0;

        MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);

        return status;
}

hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
                                u32 victim_entry_num)
{
        hw_status status = 0;

        MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);

        return status;
}

hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
{
        hw_status status = 0;

        MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);

        return status;
}

hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
{
        hw_status status = 0;
        u32 irq_reg;

        irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);

        MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);

        return status;
}

hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
{
        hw_status status = 0;
        u32 irq_reg;

        irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);

        MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);

        return status;
}

hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
{
        hw_status status = 0;

        *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);

        return status;
}

hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
{
        hw_status status = 0;

        /* read values from register */
        *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);

        return status;
}

hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
{
        hw_status status = 0;
        u32 load_ttb;

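        /*
         * Only a 128-byte-aligned base is programmed into the TTB register;
         * the low 7 bits of the physical address are masked off (alignment
         * inferred from the mask below).
         */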
        load_ttb = ttb_phys_addr & ~0x7FUL;
        /* write values to register */
        MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);

        return status;
}

hw_status hw_mmu_twl_enable(const void __iomem *base_address)
{
        hw_status status = 0;

        MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);

        return status;
}

hw_status hw_mmu_twl_disable(const void __iomem *base_address)
{
        hw_status status = 0;

        MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);

        return status;
}

hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
                           u32 page_sz)
{
        hw_status status = 0;
        u32 virtual_addr_tag;
        enum hw_mmu_page_size_t pg_size_bits;

        switch (page_sz) {
        case HW_PAGE_SIZE4KB:
                pg_size_bits = HW_MMU_SMALL_PAGE;
                break;

        case HW_PAGE_SIZE64KB:
                pg_size_bits = HW_MMU_LARGE_PAGE;
                break;

        case HW_PAGE_SIZE1MB:
                pg_size_bits = HW_MMU_SECTION;
                break;

        case HW_PAGE_SIZE16MB:
                pg_size_bits = HW_MMU_SUPERSECTION;
                break;

        default:
                return -EINVAL;
        }

        /* Generate the 20-bit tag from virtual address */
        virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);

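        /*
         * Program the CAM with the tag and page size of the entry to be
         * flushed (marked invalid and not preserved), then flush that
         * single entry.
         */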
        mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);

        mmu_flush_entry(base_address);

        return status;
}

hw_status hw_mmu_tlb_add(const void __iomem *base_address,
                         u32 physical_addr,
                         u32 virtual_addr,
                         u32 page_sz,
                         u32 entry_num,
                         struct hw_mmu_map_attrs_t *map_attrs,
                         s8 preserved_bit, s8 valid_bit)
{
        hw_status status = 0;
        u32 lock_reg;
        u32 virtual_addr_tag;
        enum hw_mmu_page_size_t mmu_pg_size;

        /* Check the input parameters */
        switch (page_sz) {
        case HW_PAGE_SIZE4KB:
                mmu_pg_size = HW_MMU_SMALL_PAGE;
                break;

        case HW_PAGE_SIZE64KB:
                mmu_pg_size = HW_MMU_LARGE_PAGE;
                break;

        case HW_PAGE_SIZE1MB:
                mmu_pg_size = HW_MMU_SECTION;
                break;

        case HW_PAGE_SIZE16MB:
                mmu_pg_size = HW_MMU_SUPERSECTION;
                break;

        default:
                return -EINVAL;
        }

        lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);

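        /*
         * The lock register is saved here and restored after the new entry
         * has been loaded, so the caller's lock/victim setting survives.
         */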
        /* Generate the 20-bit tag from virtual address */
        virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);

        /* Write the fields in the CAM entry register */
        mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
                          virtual_addr_tag);

        /*
         * Write the different fields of the RAM entry register:
         * endianness of the page and element size of the page (8, 16, 32
         * or 64 bit).
         */
        mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
                          map_attrs->element_size, map_attrs->mixed_size);

        /*
         * Update the MMU lock register: the current victim must lie between
         * the locked base value and (number of MMU entries - 1).
         */
        MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);

        /*
         * Trigger loading of the entry into the TLB by writing 1 to the
         * LD_TLB register.
         */
        MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);

        MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);

        return status;
}

hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
                         u32 physical_addr,
                         u32 virtual_addr,
                         u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
{
        hw_status status = 0;
        u32 pte_addr, pte_val;
        s32 num_entries = 1;

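        /*
         * Build an ARM-style first- or second-level descriptor for the DSP
         * MMU page table.  The type bits and attribute shifts below follow
         * the encoding used throughout this driver; 64 KB large pages and
         * 16 MB supersections are replicated across 16 consecutive slots.
         */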
        switch (page_sz) {
        case HW_PAGE_SIZE4KB:
                pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SMALL_PAGE_MASK);
                pte_val =
                    ((physical_addr & MMU_SMALL_PAGE_MASK) |
                     (map_attrs->endianism << 9) |
                     (map_attrs->element_size << 4) |
                     (map_attrs->mixed_size << 11) | 2);
                break;

        case HW_PAGE_SIZE64KB:
                num_entries = 16;
                pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
                                              virtual_addr &
                                              MMU_LARGE_PAGE_MASK);
                pte_val =
                    ((physical_addr & MMU_LARGE_PAGE_MASK) |
                     (map_attrs->endianism << 9) |
                     (map_attrs->element_size << 4) |
                     (map_attrs->mixed_size << 11) | 1);
                break;

        case HW_PAGE_SIZE1MB:
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SECTION_ADDR_MASK);
                pte_val =
                    ((((physical_addr & MMU_SECTION_ADDR_MASK) |
                       (map_attrs->endianism << 15) |
                       (map_attrs->element_size << 10) |
                       (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
                break;

        case HW_PAGE_SIZE16MB:
                num_entries = 16;
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SSECTION_ADDR_MASK);
                pte_val =
                    (((physical_addr & MMU_SSECTION_ADDR_MASK) |
                      (map_attrs->endianism << 15) |
                      (map_attrs->element_size << 10) |
                      (map_attrs->mixed_size << 17)) | 0x40000 | 0x2);
                break;

        case HW_MMU_COARSE_PAGE_SIZE:
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SECTION_ADDR_MASK);
                pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
                break;

        default:
                return -EINVAL;
        }

        while (--num_entries >= 0)
                ((u32 *) pte_addr)[num_entries] = pte_val;

        return status;
}

hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
{
        hw_status status = 0;
        u32 pte_addr;
        s32 num_entries = 1;

        switch (page_size) {
        case HW_PAGE_SIZE4KB:
                pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SMALL_PAGE_MASK);
                break;

        case HW_PAGE_SIZE64KB:
                num_entries = 16;
                pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
                                              virtual_addr &
                                              MMU_LARGE_PAGE_MASK);
                break;

        case HW_PAGE_SIZE1MB:
        case HW_MMU_COARSE_PAGE_SIZE:
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SECTION_ADDR_MASK);
                break;

        case HW_PAGE_SIZE16MB:
                num_entries = 16;
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SSECTION_ADDR_MASK);
                break;

        default:
                return -EINVAL;
        }

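        /* Clear the descriptor(s); 64 KB and 16 MB mappings occupy 16 slots. */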
        while (--num_entries >= 0)
                ((u32 *) pte_addr)[num_entries] = 0;

        return status;
}

/* mmu_flush_entry */
static hw_status mmu_flush_entry(const void __iomem *base_address)
{
        hw_status status = 0;
        u32 flush_entry_data = 0x1;

        /* write values to register */
        MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);

        return status;
}

/* mmu_set_cam_entry */
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
                                   const u32 page_sz,
                                   const u32 preserved_bit,
                                   const u32 valid_bit,
                                   const u32 virtual_addr_tag)
{
        hw_status status = 0;
        u32 mmu_cam_reg;

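        /*
         * CAM word layout as used here (inferred from the shifts below):
         * VA tag in bits 31:12, preserved bit 3, valid bit 2, page size in
         * bits 1:0.
         */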
        mmu_cam_reg = (virtual_addr_tag << 12);
        mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
            (preserved_bit << 3);

        /* write values to register */
        MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);

        return status;
}

/* mmu_set_ram_entry */
static hw_status mmu_set_ram_entry(const void __iomem *base_address,
                                   const u32 physical_addr,
                                   enum hw_endianism_t endianism,
                                   enum hw_element_size_t element_size,
                                   enum hw_mmu_mixed_size_t mixed_size)
{
        hw_status status = 0;
        u32 mmu_ram_reg;

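        /*
         * RAM word layout as used here (inferred from the shifts below):
         * physical address in bits 31:12, endianness bit 9, element size in
         * bits 8:7, mixed-size bit 6.
         */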
        mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
        mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
                                       (mixed_size << 6));

        /* write values to register */
        MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);

        return status;
}

void hw_mmu_tlb_flush_all(const void __iomem *base)
{
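        /*
         * A write of 1 to the global flush register is assumed to flush all
         * non-protected TLB entries at once.
         */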
        __raw_writeb(1, base + MMU_GFLUSH);
}