/*
 * NOTE(review): this chunk is whitespace-mangled by extraction ("staticssize_t",
 * "conststruct", "unsignedlong", "structioveciovstack_l") and is TRUNCATED
 * mid-function: only the kernel-doc, signature and local declarations of
 * process_vm_rw are visible here — the body that imports the local iovecs and
 * calls process_vm_rw_core is not in this view. Restore formatting against the
 * canonical source (mm/process_vm_access.c); do not hand-edit this fragment.
 * The ITER_SOURCE/ITER_DEST selection on the last visible statement picks the
 * iov_iter direction from vm_write (write-to-remote reads from local memory).
 */
/** * process_vm_rw - check iovecs before calling core routine * @pid: PID of process to read/write from/to * @lvec: iovec array specifying where to copy to/from locally * @liovcnt: size of lvec array * @rvec: iovec array specifying where to copy to/from in the other process * @riovcnt: size of rvec array * @flags: currently unused * @vm_write: 0 if reading from other process, 1 if writing to other process * * Returns the number of bytes read/written or error code. May * return less bytes than expected if an error occurs during the copying * process. */ staticssize_tprocess_vm_rw(pid_t pid, conststruct iovec __user *lvec, unsignedlong liovcnt, conststruct iovec __user *rvec, unsignedlong riovcnt, unsignedlong flags, int vm_write) { structioveciovstack_l[UIO_FASTIOV]; structioveciovstack_r[UIO_FASTIOV]; structiovec *iov_l = iovstack_l; structiovec *iov_r; structiov_iteriter; ssize_t rc; int dir = vm_write ? ITER_SOURCE : ITER_DEST;
/** * process_vm_rw_core - core of reading/writing pages from task specified * @pid: PID of process to read/write from/to * @iter: where to copy to/from locally * @rvec: iovec array specifying where to copy to/from in the other process * @riovcnt: size of rvec array * @flags: currently unused * @vm_write: 0 if reading from other process, 1 if writing to other process * * Returns the number of bytes read/written or error code. May * return less bytes than expected if an error occurs during the copying * process. */ staticssize_tprocess_vm_rw_core(pid_t pid, struct iov_iter *iter, conststruct iovec *rvec, unsignedlong riovcnt, unsignedlong flags, int vm_write) { structtask_struct *task; structpage *pp_stack[PVM_MAX_PP_ARRAY_COUNT]; structpage **process_pages = pp_stack; structmm_struct *mm; unsignedlong i; ssize_t rc = 0; unsignedlong nr_pages = 0; unsignedlong nr_pages_iov; ssize_t iov_len; size_t total_len = iov_iter_count(iter);
/* * Work out how many pages of struct pages we're going to need * when eventually calling get_user_pages */ for (i = 0; i < riovcnt; i++) { iov_len = rvec[i].iov_len; if (iov_len > 0) { nr_pages_iov = ((unsignedlong)rvec[i].iov_base + iov_len) / PAGE_SIZE - (unsignedlong)rvec[i].iov_base / PAGE_SIZE + 1; nr_pages = max(nr_pages, nr_pages_iov); } }
if (nr_pages == 0) return0;
if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) { /* For reliability don't try to kmalloc more than 2 pages worth */ process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES, sizeof(struct pages *)*nr_pages), GFP_KERNEL);
if (!process_pages) return -ENOMEM; }
/* Get process information */ task = find_get_task_by_vpid(pid); if (!task) { rc = -ESRCH; goto free_proc_pages; }
mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); if (!mm || IS_ERR(mm)) { rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; /* * Explicitly map EACCES to EPERM as EPERM is a more * appropriate error code for process_vw_readv/writev */ if (rc == -EACCES) rc = -EPERM; goto put_task_struct; }
for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++) rc = process_vm_rw_single_vec( (unsignedlong)rvec[i].iov_base, rvec[i].iov_len, iter, process_pages, mm, task, vm_write);
/* copied = space before - space after */ total_len -= iov_iter_count(iter);
/* If we have managed to copy any data at all then we return the number of bytes copied. Otherwise we return the error code */ if (total_len) rc = total_len;
mmput(mm);
put_task_struct: put_task_struct(task);
free_proc_pages: if (process_pages != pp_stack) kfree(process_pages); return rc; }
/*
 * NOTE(review): whitespace-mangled by extraction ("staticintprocess_vm_rw_single_vec",
 * "unsignedlong", "return0") and TRUNCATED mid-while-loop — the copy/unpin tail
 * of process_vm_rw_single_vec is not visible in this view. Restore formatting
 * against the canonical source; do not hand-edit this fragment.
 * Also note "sizeof(struct pages *)" below: typo for "struct page *"; it only
 * works because every object-pointer type has the same size.
 */
/** * process_vm_rw_single_vec - read/write pages from task specified * @addr: start memory address of target process * @len: size of area to copy to/from * @iter: where to copy to/from locally * @process_pages: struct pages area that can store at least * nr_pages_to_copy struct page pointers * @mm: mm for task * @task: task to read/write from * @vm_write: 0 means copy from, 1 means copy to * Returns 0 on success or on failure error code */ staticintprocess_vm_rw_single_vec(unsignedlong addr, unsignedlong len, struct iov_iter *iter, struct page **process_pages, struct mm_struct *mm, struct task_struct *task, int vm_write) { unsignedlong pa = addr & PAGE_MASK; unsignedlong start_offset = addr - pa; unsignedlong nr_pages; ssize_t rc = 0; unsignedlong max_pages_per_loop = PVM_MAX_KMALLOC_PAGES / sizeof(struct pages *); unsignedint flags = 0;
/* Number of pages the span [addr, addr+len) touches. */
/* Work out address and page range required */ if (len == 0) return0; nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
/* Writing to the remote process requires FOLL_WRITE on the pin. */
if (vm_write) flags |= FOLL_WRITE;
/* Pin and copy in chunks of at most max_pages_per_loop pages. */
while (!rc && nr_pages && iov_iter_count(iter)) { int pinned_pages = min(nr_pages, max_pages_per_loop); int locked = 1; size_t bytes;
/* pin_user_pages_remote() may drop mmap_lock (locked == 0 on return). */
/* * Get the pages we're interested in. We must * access remotely because task/mm might not * current/current->mm */ mmap_read_lock(mm); pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages, flags, process_pages, &locked); if (locked) mmap_read_unlock(mm); if (pinned_pages <= 0) return -EFAULT;
/*
 * NOTE(review): whitespace-mangled ("staticintprocess_vm_rw_pages", "structpage")
 * and cut off at the end of this view — only the kernel-doc, signature and the
 * start of the per-page copy loop of process_vm_rw_pages are visible. Restore
 * formatting against the canonical source; do not hand-edit this fragment.
 */
/** * process_vm_rw_pages - read/write pages from task specified * @pages: array of pointers to pages we want to copy * @offset: offset in page to start copying from/to * @len: number of bytes to copy * @iter: where to copy to/from locally * @vm_write: 0 means copy from, 1 means copy to * Returns 0 on success, error code otherwise */ staticintprocess_vm_rw_pages(struct page **pages, unsigned offset, size_t len, struct iov_iter *iter, int vm_write) { /* Do the copy for each page */ while (len && iov_iter_count(iter)) { structpage *page = *pages++; size_t copy = PAGE_SIZE - offset; size_t copied;