From: Stanislav Kinsbursky --- include/linux/mm.h | 2 ++ kernel/cpt/rst_mm.c | 4 ++-- mm/mlock.c | 31 ++++++++++++++++++++++--------- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index b3d5c97..fc68a9c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -869,6 +869,8 @@ extern unsigned long shmem_get_unmapped_area(struct file *file, #endif extern int can_do_mlock(void); +extern int __mlock(unsigned long, size_t, bool); +extern int __munlock(unsigned long, size_t, bool); extern int user_shm_lock(size_t, struct user_struct *); extern void user_shm_unlock(size_t, struct user_struct *); diff --git a/kernel/cpt/rst_mm.c b/kernel/cpt/rst_mm.c index fa0bda5..33e7b6f 100644 --- a/kernel/cpt/rst_mm.c +++ b/kernel/cpt/rst_mm.c @@ -911,9 +911,9 @@ check: dprintk_ctx("fixing up VM_LOCKED %Ld\n", vmapos); up_read(&mm->mmap_sem); if (vma->vm_flags&VM_LOCKED) - err = sc_munlock(vmai->cpt_start, vmai->cpt_end-vmai->cpt_start); + err = __munlock(vmai->cpt_start, vmai->cpt_end-vmai->cpt_start, false); else - err = sc_mlock(vmai->cpt_start, vmai->cpt_end-vmai->cpt_start); + err = __mlock(vmai->cpt_start, vmai->cpt_end-vmai->cpt_start, false); /* When mlock fails with EFAULT, it means * that it could not bring in pages. * It can happen after mlock() on unreadable diff --git a/mm/mlock.c b/mm/mlock.c index 13ba93a..90cc44b 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -385,7 +385,8 @@ void __munlock_vma_pages_range(struct vm_area_struct *vma, * For vmas that pass the filters, merge/split as appropriate. 
*/ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, - unsigned long start, unsigned long end, unsigned int newflags) + unsigned long start, unsigned long end, unsigned int newflags, + bool convert_error) { struct mm_struct *mm = vma->vm_mm; pgoff_t pgoff; @@ -449,7 +450,7 @@ success: if (lock) { vma->vm_flags = newflags; ret = __mlock_vma_pages_range(vma, start, end); - if (ret < 0) + if ((ret < 0) && (convert_error == true)) ret = __mlock_posix_error_return(ret); } else { munlock_vma_pages_range(vma, start, end); @@ -465,7 +466,7 @@ out_uncharge: goto out; } -static int do_mlock(unsigned long start, size_t len, int on) +static int do_mlock(unsigned long start, size_t len, int on, bool convert_error) { unsigned long nstart, end, tmp; struct vm_area_struct * vma, * prev; @@ -496,7 +497,7 @@ static int do_mlock(unsigned long start, size_t len, int on) tmp = vma->vm_end; if (tmp > end) tmp = end; - error = mlock_fixup(vma, &prev, nstart, tmp, newflags, convert_error); + error = mlock_fixup(vma, &prev, nstart, tmp, newflags, convert_error); if (error) break; nstart = tmp; @@ -514,7 +515,7 @@ static int do_mlock(unsigned long start, size_t len, int on) return error; } -SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) +int __mlock(unsigned long start, size_t len, bool convert_error) { unsigned long locked; unsigned long lock_limit; @@ -537,23 +538,35 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) - error = do_mlock(start, len, 1); + error = do_mlock(start, len, 1, convert_error); up_write(&current->mm->mmap_sem); return error; } +EXPORT_SYMBOL_GPL(__mlock); + +SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) +{ + return __mlock(start, len, true); +} EXPORT_SYMBOL(sys_mlock); -SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) +int __munlock(unsigned long start, size_t len, bool convert_error) { int ret; down_write(&current->mm->mmap_sem); 
len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; - ret = do_mlock(start, len, 0); + ret = do_mlock(start, len, 0, convert_error); up_write(&current->mm->mmap_sem); return ret; } +EXPORT_SYMBOL_GPL(__munlock); + +SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) +{ + return __munlock(start, len, true); +} EXPORT_SYMBOL(sys_munlock); static int do_mlockall(int flags) @@ -575,7 +588,7 @@ static int do_mlockall(int flags) newflags &= ~VM_LOCKED; /* Ignore errors */ - mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); + mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags, true); } out: return 0;