diff -urN --exclude=CVS ../10-May-04/arch/ppc/mm/44x_remap.c ./arch/ppc/mm/44x_remap.c
--- ../10-May-04/arch/ppc/mm/44x_remap.c	Thu Jan  1 01:00:00 1970
+++ ./arch/ppc/mm/44x_remap.c	Thu Jun  3 16:54:44 2004
@@ -0,0 +1,102 @@
+/*
+ * Implementation of remap_page_range() for IBM440GP where physical
+ * addresses are 36 bits.
+ *
+ * This differs from ioremap64() in that it manipulates the
+ * current process's page tables.
+ *
+ * $Id: 44x_remap.c,v 1.1 2004/06/03 15:54:44 medp Exp $
+ * $Log: 44x_remap.c,v $
+ * Revision 1.1  2004/06/03 15:54:44  medp
+ * Added io_remap_page_range64
+ *
+ */
+#include <linux/mm.h>	/* NOTE(review): header name lost in extraction; <linux/mm.h> covers the pte/pmd/pgd APIs used — verify against build */
+
+
+static inline void forget_pte(pte_t page)
+{
+	if (!pte_none(page)) {
+		printk("forget_pte: old mapping existed!\n");
+		BUG();	/* caller must only remap into empty PTE slots */
+	}
+}
+
+/*
+ * maps a range of physical memory into the requested pages. the old
+ * mappings are removed. any references to nonexistent pages results
+ * in null mappings (currently treated as "copy-on-access")
+ */
+static inline void io_remap_pte_range64(pte_t * pte, unsigned long address, unsigned long size,
+	phys_addr_t phys_addr, pgprot_t prot)
+{
+	unsigned long end;
+
+	address &= ~PMD_MASK;	/* offset within this PMD's span */
+	end = address + size;
+	if (end > PMD_SIZE)
+		end = PMD_SIZE;	/* clamp: never walk past this page table */
+	do {
+		pte_t oldpage;
+		oldpage = ptep_get_and_clear(pte);	/* atomically fetch and clear any prior mapping */
+
+		set_pte(pte, mk_pte_phys(phys_addr, prot));
+		forget_pte(oldpage);
+		address += PAGE_SIZE;
+		phys_addr += PAGE_SIZE;
+		pte++;
+	} while (address && (address < end));
+}
+
+static inline int io_remap_pmd_range64(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+	phys_addr_t phys_addr, pgprot_t prot)
+{
+	unsigned long end;
+
+	address &= ~PGDIR_MASK;	/* offset within this PGD's span */
+	end = address + size;
+	if (end > PGDIR_SIZE)
+		end = PGDIR_SIZE;
+	phys_addr -= address;	/* bias so (address + phys_addr) is the target physical address */
+	do {
+		pte_t * pte = pte_alloc(mm, pmd, address);
+		if (!pte)
+			return -ENOMEM;
+		io_remap_pte_range64(pte, address, end - address, address + phys_addr, prot);
+		address = (address + PMD_SIZE) & PMD_MASK;	/* advance to next PMD boundary */
+		pmd++;
+	} while (address && (address < end));
+	return 0;
+}
+
+/* Note: this is only safe if the mm semaphore is held when called. */
+int io_remap_page_range64(unsigned long from, phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
+{
+	int error = 0;
+	pgd_t * dir;
+	unsigned long beg = from;
+	unsigned long end = from + size;
+	struct mm_struct *mm = current->mm;
+
+	phys_addr -= from;	/* bias so (phys_addr + from) is the target physical address */
+	dir = pgd_offset(mm, from);
+	flush_cache_range(mm, beg, end);
+	if (from >= end)
+		BUG();	/* zero-length or wrapping range is a caller bug */
+
+	spin_lock(&mm->page_table_lock);
+	do {
+		pmd_t *pmd = pmd_alloc(mm, dir, from);
+		error = -ENOMEM;
+		if (!pmd)
+			break;
+		error = io_remap_pmd_range64(mm, pmd, from, end - from, phys_addr + from, prot);
+		if (error)
+			break;
+		from = (from + PGDIR_SIZE) & PGDIR_MASK;	/* advance to next PGD boundary */
+		dir++;
+	} while (from && (from < end));
+	spin_unlock(&mm->page_table_lock);
+	flush_tlb_range(mm, beg, end);	/* new translations must be visible before return */
+	return error;
+}
diff -urN --exclude=CVS ../10-May-04/arch/ppc/mm/Makefile ./arch/ppc/mm/Makefile
--- ../10-May-04/arch/ppc/mm/Makefile	Tue Dec 16 20:12:55 2003
+++ ./arch/ppc/mm/Makefile	Thu Jun  3 16:54:44 2004
@@ -19,7 +19,7 @@
 
 obj-$(CONFIG_PPC_STD_MMU)	+= hashtable.o ppc_mmu.o tlb.o
 obj-$(CONFIG_40x)		+= 4xx_mmu.o
-obj-$(CONFIG_44x)		+= 44x_mmu.o
+obj-$(CONFIG_44x)		+= 44x_mmu.o 44x_remap.o
 obj-$(CONFIG_NOT_COHERENT_CACHE)	+= cachemap.o
 
 include $(TOPDIR)/Rules.make