Back to home page

Quest Cross Reference

 
 

    


Warning: cross-references for /kernel/mem/virtual.c need to be fixed.

0001 /*                    The Quest Operating System
0002  *  Copyright (C) 2005-2010  Richard West, Boston University
0003  *
0004  *  This program is free software: you can redistribute it and/or modify
0005  *  it under the terms of the GNU General Public License as published by
0006  *  the Free Software Foundation, either version 3 of the License, or
0007  *  (at your option) any later version.
0008  *
0009  *  This program is distributed in the hope that it will be useful,
0010  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
0011  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
0012  *  GNU General Public License for more details.
0013  *
0014  *  You should have received a copy of the GNU General Public License
0015  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
0016  */
0017 
0018 #include "arch/i386.h"
0019 #include "kernel.h"
0020 #include "types.h"
0021 #include "mem/physical.h"
0022 #include "mem/virtual.h"
0023 
0024 extern uint32 _kernelstart;
0025 
0026 
0027 /* Find free virtual page and map it to a corresponding physical frame
0028  *
0029  * Returns virtual address
0030  *
0031  */
0032 void *
0033 map_virtual_page (uint32 phys_frame)
0034 {
0035 
0036   uint32 *page_table = (uint32 *) KERN_PGT;
0037   int i;
0038   void *va;
0039 
0040   for (i = 0; i < 0x400; i++)
0041     if (!page_table[i]) {       /* Free page */
0042       page_table[i] = phys_frame;
0043 
0044       va = (char *) &_kernelstart + (i << 12);
0045 
0046       /* Invalidate page in case it was cached in the TLB */
0047       invalidate_page (va);
0048 
0049       return va;
0050     }
0051 
0052   return NULL;                  /* Invalid address */
0053 }
0054 
0055 /* Map contiguous physical to virtual memory */
0056 void *
0057 map_contiguous_virtual_pages (uint32 phys_frame, uint32 count)
0058 {
0059   uint32 *page_table = (uint32 *) KERN_PGT;
0060   int i, j;
0061   void *va;
0062 
0063   if (count == 0)
0064     return NULL;
0065 
0066   for (i = 0; i < 0x400 - count + 1; i++) {
0067     if (!page_table[i]) {       /* Free page */
0068       for (j = 0; j < count; j++) {
0069         if (page_table[i + j]) {
0070           /* Not enough pages in this window */
0071           i = i + j;
0072           goto keep_searching;
0073         }
0074       }
0075 
0076       for (j = 0; j < count; j++) {
0077         page_table[i + j] = phys_frame + j * 0x1000;
0078       }
0079 
0080       va = (char *) &_kernelstart + (i << 12);
0081 
0082       /* Invalidate page in case it was cached in the TLB */
0083       for (j = 0; j < count; j++) {
0084         invalidate_page (va + j * 0x1000);
0085       }
0086 
0087       return va;
0088     }
0089   keep_searching:
0090     ;
0091   }
0092 
0093   return NULL;                  /* Invalid address */
0094 }
0095 
0096 /* Map non-contiguous physical memory to contiguous virtual memory */
0097 void *
0098 map_virtual_pages (uint32 * phys_frames, uint32 count)
0099 {
0100   uint32 *page_table = (uint32 *) KERN_PGT;
0101   int i, j;
0102   void *va;
0103 
0104   if (count == 0)
0105     return NULL;
0106 
0107   for (i = 0; i < 0x400 - count + 1; i++) {
0108     if (!page_table[i]) {       /* Free page */
0109       for (j = 0; j < count; j++) {
0110         if (page_table[i + j]) {
0111           /* Not enough pages in this window */
0112           i = i + j;
0113           goto keep_searching;
0114         }
0115       }
0116 
0117       for (j = 0; j < count; j++) {
0118         page_table[i + j] = phys_frames[j];
0119       }
0120 
0121       va = (char *) &_kernelstart + (i << 12);
0122 
0123       /* Invalidate page in case it was cached in the TLB */
0124       for (j = 0; j < count; j++) {
0125         invalidate_page (va + j * 0x1000);
0126       }
0127 
0128       return va;
0129     }
0130   keep_searching:
0131     ;
0132   }
0133 
0134   return NULL;                  /* Invalid address */
0135 }
0136 
0137 
0138 /*
0139  * Release previously mapped virtual page
0140  */
0141 void
0142 unmap_virtual_page (void *virt_addr)
0143 {
0144 
0145   uint32 *page_table = (uint32 *) KERN_PGT;
0146 
0147   page_table[((uint32) virt_addr >> 12) & 0x3FF] = 0;
0148 
0149   /* Invalidate page in case it was cached in the TLB */
0150   invalidate_page (virt_addr);
0151 }
0152 
0153 void
0154 unmap_virtual_pages (void *virt_addr, uint32 count)
0155 {
0156   int j;
0157   for (j = 0; j < count; j++)
0158     unmap_virtual_page (virt_addr + j * 0x1000);
0159 }
0160 
0161 
0162 void *
0163 get_phys_addr (void *virt_addr)
0164 {
0165 
0166   void *pa;
0167   uint32 phys_frame;
0168   uint32 va = (uint32) virt_addr;
0169 
0170   uint32 phys_pdbr = (uint32) get_pdbr (), phys_ptbr;
0171   uint32 *virt_pdbr, *virt_ptbr;
0172 
0173   virt_pdbr = map_virtual_page (phys_pdbr | 3);
0174   phys_ptbr = (virt_pdbr[va >> 22] & 0xFFFFF000);
0175   virt_ptbr = map_virtual_page (phys_ptbr | 3);
0176   phys_frame = virt_ptbr[(va >> 12) & 0x3FF] & 0xFFFFF000;
0177   pa = (void *) (phys_frame + (va & 0x00000FFF));
0178   unmap_virtual_page (virt_ptbr);
0179   unmap_virtual_page (virt_pdbr);
0180 
0181   return pa;
0182 }
0183 
0184 
0185 /* ************************************************** */
0186 
/* Install 'entry' into the first free slot of page table 'tbl'.
 *
 * Returns the virtual address that the new entry maps (starting_va
 * plus the slot offset), or NULL if the table has no virtual mapping
 * or no free slot. */
void *
map_virtual_page_pt (pgtbl_entry_t entry, pgtbl_t *tbl)
{
  uint i;
  void *va;

  /* perhaps use a "primitive" mapping facility for this */
  if (tbl->table_va == NULL) return NULL;

  for (i = 0; i < PGTBL_NUM_ENTRIES; i++)
    if (!tbl->table_va[i].flags.present) { /* Free page */
      tbl->table_va[i].raw = entry.raw;

      /* Slot i of the table maps the page at starting_va + (i << 12). */
      va = (void *) &tbl->starting_va[i << 12];

      /* Invalidate page in case it was cached in the TLB */
      invalidate_page (va);

      return va;
    }

  return NULL;                  /* Invalid address */
}
0210 
0211 /* precondition: entry is valid and dir has valid VA
0212  * postcondition: tbl->table_pa and starting_va are valid */
0213 bool
0214 map_pgdir_entry (/* in */ pgdir_t dir,
0215                  /* in */ bool from_top,
0216                  /* in */ pgdir_entry_t entry,
0217                  /* out */ pgtbl_t *tbl)
0218 {
0219   uint i;
0220   pgdir_entry_t *dir_va = dir.dir_va;
0221 
0222   tbl->table_pa = FRAMENUM_TO_FRAME (entry.table_framenum);
0223   tbl->table_va = NULL;
0224 
0225   if (from_top) {
0226     for (i=PGDIR_NUM_ENTRIES; i>=0; i--) {
0227       if (!dir_va[i].flags.present) { /* Free entry */
0228         dir_va[i] = entry;
0229         tbl->starting_va = (uint8 *) (i << 22);
0230         return TRUE;
0231       }
0232     } 
0233   } else {
0234     for (i=0; i<PGDIR_NUM_ENTRIES; i++) {
0235       if (!dir_va[i].flags.present) { /* Free entry */
0236         dir_va[i] = entry;
0237         tbl->starting_va = (uint8 *) (i << 22);
0238         return TRUE;
0239       }
0240     }
0241   }
0242 
0243   return FALSE;
0244 }
0245 
/* For now the "primitive" mapping facility is just the kernel
 * page-table mapper; these aliases mark the call sites that should
 * switch over if a dedicated primitive mapper is introduced. */
#define _prim_map_virtual_page map_virtual_page
#define _prim_unmap_virtual_page unmap_virtual_page
0249 
/* Look up the physical frame backing virtual address 'va' in page
 * directory 'dir'.  Handles both 4 MiB ("big") pages and regular
 * 4 KiB pages; for the latter the page table is temporarily mapped
 * into kernel virtual space and unmapped on every exit path. */
/* precondition: dir PA and VA are valid, va is aligned */
/* postcondition: returned frame is aligned */
/* failure: -1 */
frame_t
pgdir_get_frame (pgdir_t dir, void *va)
{
  linear_address_t la; la.raw = (u32) va;
  pgdir_entry_t *entry = &dir.dir_va[la.pgdir_i];

  if (!entry->flags.present)
    goto abort;

  if (entry->flags.page_size) {
    /* big page: the frame comes straight from the directory entry */
    return (frame_t) BIGFRAMENUM_TO_FRAME (entry->framenum);
  } else {
    /* regular page: walk one level down.  The table frame is mapped
     * with flags 0x3 (present | writable). */
    pgtbl_entry_t *table = _prim_map_virtual_page (FRAMENUM_TO_FRAME (entry->table_framenum) | 3);
    if (table == NULL)
      goto abort;
    if (!table[la.pgtbl_i].flags.present) {
      /* drop the temporary mapping before reporting failure */
      _prim_unmap_virtual_page (table);
      goto abort;
    } else {
      frame_t frame = FRAMENUM_TO_FRAME (table[la.pgtbl_i].framenum);
      _prim_unmap_virtual_page (table);
      return frame;
    }
  }

 abort:
  return (frame_t) -1;
}
0283 
0284 /* Obtains the physical address of a virtual address in the given
0285  * page directory. */
0286 /* precondition: dir PA and VA are valid */
0287 /* failure: -1 */
0288 phys_addr_t
0289 pgdir_get_phys_addr (pgdir_t dir, void *va)
0290 {
0291   linear_address_t la; la.raw = (u32) va;
0292   frame_t frame = pgdir_get_frame (dir, (void *) (((u32) va) & (~(PAGE_SIZE-1))));
0293   if (frame == -1)
0294     return -1;
0295   return frame + la.offset;
0296 }
0297 
0298 /* Clone the contents of a page table, copying data. */
0299 /* failure result is (-1, 0) */
0300 /* precondition: all of tbl is valid */
0301 /* postcondition: new physical and virtual address are valid in returned pgtbl */
0302 pgtbl_t
0303 clone_page_table (pgtbl_t tbl)
0304 {
0305   pgtbl_t new_tbl;
0306   uint i;
0307 
0308   new_tbl.table_pa = alloc_phys_frame ();
0309   if (new_tbl.table_pa == -1)
0310     goto abort;
0311   new_tbl.table_va = _prim_map_virtual_page (new_tbl.table_pa | 3);
0312   if (new_tbl.table_va == NULL)
0313     goto abort_tbl_pa;
0314   new_tbl.starting_va = tbl.starting_va;
0315 
0316   memset (new_tbl.table_va, 0, PGTBL_NUM_ENTRIES * sizeof (pgtbl_entry_t));
0317 
0318   for (i=0; i<PGTBL_NUM_ENTRIES; i++) {
0319     if (tbl.table_va[i].flags.present) {
0320       frame_t new_frame = alloc_phys_frame ();
0321       frame_t old_frame = FRAMENUM_TO_FRAME (tbl.table_va[i].framenum);
0322 
0323       /* temporarily map frames */
0324       void *old_page_tmp = _prim_map_virtual_page (old_frame | 3);
0325       if (old_page_tmp == NULL)
0326         goto abort_tbl_va;
0327       void *new_page_tmp = _prim_map_virtual_page (new_frame | 3);
0328       if (new_page_tmp == NULL) {
0329         _prim_unmap_virtual_page (old_page_tmp);
0330         goto abort_tbl_va;
0331       }
0332 
0333       /* copy contents of old frame to new frame */
0334       memcpy (new_page_tmp, old_page_tmp, PAGE_SIZE);
0335 
0336       /* setup new page table entry */
0337       new_tbl.table_va[i].flags.raw = tbl.table_va[i].flags.raw;
0338       new_tbl.table_va[i].framenum = FRAME_TO_FRAMENUM (new_frame);
0339 
0340       _prim_unmap_virtual_page (old_page_tmp);
0341       _prim_unmap_virtual_page (new_page_tmp);
0342     }
0343   }
0344 
0345   return new_tbl;
0346 
0347  abort_tbl_va:
0348   _prim_unmap_virtual_page (new_tbl.table_va);
0349  abort_tbl_pa:
0350   free_phys_frame (new_tbl.table_pa);
0351  abort:
0352   new_tbl.table_pa = -1;
0353   new_tbl.table_va = NULL;
0354   return new_tbl;
0355 }
0356 
/* Clone an entire address space making copies of data where
 * appropriate (e.g. userspace and kernel stack). */

/* precondition: dir has valid VA, PA 
 * postcondition: return has valid VA, PA
 * failure result is (-1, 0) */
pgdir_t
clone_page_directory (pgdir_t dir)
{
  frame_t new_pgd_pa;
  pgdir_t new_dir;
  uint i;

  /* Physical frame for the new page directory. */
  new_pgd_pa = alloc_phys_frame ();

  if (new_pgd_pa == -1)
    goto abort;

  new_dir.dir_pa = new_pgd_pa;
  /* Map the new directory (flags 0x3 = present | writable) so it can
   * be filled in below. */
  new_dir.dir_va = _prim_map_virtual_page (new_pgd_pa | 3);
  if (new_dir.dir_va == NULL)
    goto abort_pgd_pa;

  memset (new_dir.dir_va, 0, PGDIR_NUM_ENTRIES * sizeof (pgdir_entry_t));

  /* run through dir and make copies of tables */
  for (i=0; i<PGDIR_NUM_ENTRIES; i++) {
    if (dir.dir_va[i].flags.present) {
      if (i >= PGDIR_KERNEL_BEGIN && i != PGDIR_KERNEL_STACK) {
        /* shared kernel-space: alias the same table rather than
         * copying it; the kernel-stack slot is excluded so each
         * address space keeps a private copy. */
        new_dir.dir_va[i].raw = dir.dir_va[i].raw;
      } else if (dir.dir_va[i].flags.page_size) {
        /* clone 4 MiB page */
        panic ("userspace 4 MiB pages not supported");
      } else {
        /* clone a page table */
        pgtbl_t tbl, new_tbl;

        /* setup a pgtbl struct with physical and virtual addresses of
         * the existing page table */
        tbl.table_pa = FRAMENUM_TO_FRAME (dir.dir_va[i].table_framenum);
        tbl.table_va = _prim_map_virtual_page (tbl.table_pa | 3);
        if (tbl.table_va == NULL)
          goto abort_pgd_va;
        tbl.starting_va = (uint8 *) (i << 22);

        new_tbl = clone_page_table (tbl);

        _prim_unmap_virtual_page (tbl.table_va);

        /* clone_page_table signals failure with table_pa == -1.
         * NOTE(review): page tables cloned in earlier iterations are
         * not freed on this abort path -- they appear to leak; TODO
         * confirm. */
        if (new_tbl.table_pa == -1)
          goto abort_pgd_va;

        /* the clone's temporary VA mapping is no longer needed; only
         * its physical address goes into the directory entry */
        _prim_unmap_virtual_page (new_tbl.table_va);

        /* setup new directory entry with flags and new table address */
        new_dir.dir_va[i].flags = dir.dir_va[i].flags;
        new_dir.dir_va[i].table_framenum = FRAME_TO_FRAMENUM (new_tbl.table_pa);
      }
    }
  }

  return new_dir;

 abort_pgd_va:
  _prim_unmap_virtual_page (new_dir.dir_va);
 abort_pgd_pa:
  free_phys_frame (new_pgd_pa);
 abort:
  new_dir.dir_va = NULL;
  new_dir.dir_pa = -1;
  return new_dir;
}
0430 
0431 
0432 /*
0433  * Local Variables:
0434  * indent-tabs-mode: nil
0435  * mode: C
0436  * c-file-style: "gnu"
0437  * c-basic-offset: 2
0438  * End:
0439  */
0440 
0441 /* vi: set et sw=2 sts=2: */