Path:
drivers/char/mem.c
Lines:
1018
Non-empty lines:
905
Non-empty lines covered with requirements:
480 / 905 (53.0%)
Functions:
37
Functions covered by requirements:
6 / 37 (16.2%)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
13
#include <linux/miscdevice.h>
14
#include <linux/slab.h>
15
#include <linux/vmalloc.h>
16
#include <linux/mman.h>
17
#include <linux/random.h>
18
#include <linux/init.h>
19
#include <linux/tty.h>
20
#include <linux/capability.h>
21
#include <linux/ptrace.h>
22
#include <linux/device.h>
23
#include <linux/highmem.h>
24
#include <linux/backing-dev.h>
25
#include <linux/shmem_fs.h>
26
#include <linux/splice.h>
27
#include <linux/pfn.h>
28
#include <linux/export.h>
29
#include <linux/io.h>
30
#include <linux/uio.h>
31
#include <linux/uaccess.h>
32
#include <linux/security.h>
33
34
#define DEVMEM_MINOR 135
#define DEVPORT_MINOR 436
37
static inline unsigned long size_inside_page(unsigned long start,
38
unsigned long size)
39
{40
unsigned long sz;
41
42
sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
43
44
return min(sz, size);
45
}46
47
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
/* Generic fallback: range must end at or below the top of known memory. */
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

/* Generic fallback: no extra restriction on mmap of physical ranges. */
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
/* Ask the architecture whether userspace may access this pfn via /dev/mem. */
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
#else
/* Without CONFIG_STRICT_DEVMEM every pfn is accessible. */
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
#endif

static inline bool should_stop_iteration(void)
72
{73
if (need_resched())
74
cond_resched();
75
return signal_pending(current);
76
}77
78
/**
 * SPDX-Req-ID: a89784c55426aec4b8ba345f281a0ec478d43897a0a248618cb140c03c770c75
 * SPDX-Req-Text:
 * read_mem - read from physical memory (/dev/mem).
 * @file: struct file associated with /dev/mem.
 * @buf: user-space buffer to copy data to.
 * @count: number of bytes to read.
 * @ppos: pointer to the current file position, representing the physical
 *        address to read from.
 *
 * This function checks if the requested physical memory range is valid
 * and accessible by the user, then it copies data to the input
 * user-space buffer up to the requested number of bytes.
 *
 * Function's expectations:
 *
 * 1. This function shall check if the value pointed by ppos exceeds the
 * maximum addressable physical address;
 *
 * 2. This function shall check if the physical address range to be read
 * is valid (i.e. it falls within a memory block and if it can be mapped
 * to the kernel address space);
 *
 * 3. For each memory page falling in the requested physical range
 * [ppos, ppos + count - 1]:
 * 3.1. this function shall check if user space access is allowed (if
 * config STRICT_DEVMEM is not set, access is always granted);
 *
 * 3.2. if access is allowed, the memory content from the page range falling
 * within the requested physical range shall be copied to the user space
 * buffer;
 *
 * 3.3. zeros shall be copied to the user space buffer (for the page range
 * falling within the requested physical range):
 * 3.3.1. if access to the memory page is restricted or,
 * 3.3.2. if the current page is page 0 on HW architectures where page 0 is
 * not mapped.
 *
 * 4. The file position '*ppos' shall be advanced by the number of bytes
 * successfully copied to user space (including zeros).
 *
 * Context: process context.
 *
 * Return:
 * * the number of bytes copied to user on success
 * * %-EFAULT - the requested address range is not valid or a fault happened
 *   when copying to user-space (i.e. copy_from_kernel_nofault() failed)
 * * %-EPERM - access to any of the required physical pages is not allowed
 * * %-ENOMEM - out of memory error for auxiliary kernel buffers supporting
 *   the operation of copying content from the physical pages
 *
 * SPDX-Req-End
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;
	char *bounce;
	int err;

	/* *ppos may not fit in phys_addr_t on 32-bit: treat overflow as EOF */
	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			/* Pretend page 0 reads back as zeros. */
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	/* Bounce buffer so faults on the source are caught, not oopsed on. */
	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	while (count > 0) {
		unsigned long remaining;
		int allowed, probe;

		sz = size_inside_page(p, count);

		err = -EPERM;
		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			goto failed;

		err = -EFAULT;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				goto failed;

			probe = copy_from_kernel_nofault(bounce, ptr, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (probe)
				goto failed;

			remaining = copy_to_user(buf, bounce, sz);
		}

		if (remaining)
			goto failed;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
		if (should_stop_iteration())
			break;
	}
	kfree(bounce);

	*ppos += read;
	return read;

failed:
	kfree(bounce);
	return err;
}

/**
 * SPDX-Req-ID: 6e16917c09ee583de5dc9e8a24a406e75bb229554699a501cfa8efdb308862d7
 * SPDX-Req-Text:
 * write_mem - write to physical memory (/dev/mem).
 * @file: struct file associated with /dev/mem.
 * @buf: user-space buffer containing the data to write.
 * @count: number of bytes to write.
 * @ppos: pointer to the current file position, representing the physical
 *        address to write to.
 *
 * This function checks if the target physical memory range is valid
 * and accessible by the user, then it writes data from the input
 * user-space buffer up to the requested number of bytes.
 *
 * Function's expectations:
 * 1. This function shall check if the value pointed by ppos exceeds the
 * maximum addressable physical address;
 *
 * 2. This function shall check if the physical address range to be written
 * is valid (i.e. it falls within a memory block and if it can be mapped
 * to the kernel address space);
 *
 * 3. For each memory page falling in the physical range to be written
 * [ppos, ppos + count - 1]:
 * 3.1. this function shall check if user space access is allowed (if
 * config STRICT_DEVMEM is not set, access is always granted);
 *
 * 3.2. the content from the user space buffer shall be copied to the page
 * range falling within the physical range to be written if access is
 * allowed;
 *
 * 3.3. the data to be copied from the user space buffer (for the page range
 * falling within the range to be written) shall be skipped:
 * 3.3.1. if access to the memory page is restricted or,
 * 3.3.2. if the current page is page 0 on HW architectures where page 0
 * is not mapped.
 *
 * 4. The file position '*ppos' shall be advanced by the number of bytes
 * successfully copied from user space (including skipped bytes).
 *
 * Context: process context.
 *
 * Return:
 * * the number of bytes copied from user-space on success
 * * %-EFBIG - the value pointed by ppos exceeds the maximum addressable
 *   physical address
 * * %-EFAULT - the physical address range is not valid or no bytes could
 *   be copied from user-space
 * * %-EPERM - access to any of the required pages is not allowed
 *
 * SPDX-Req-End
 */
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	/* *ppos may not fit in phys_addr_t on 32-bit configurations. */
	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				/* Partial copy: report progress or fault. */
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

/* Arch hook: may the given pfn range be mapped with this protection? */
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT350
351
/*352
* Architectures vary in how they handle caching for addresses353
* outside of main memory.354
*355
*/356
#ifdef pgprot_noncached357
static int uncached_access(struct file *file, phys_addr_t addr)
358
{359
/*
360
* Accessing memory above the top the kernel knows about or through a361
* file pointer362
* that was marked O_DSYNC will be done non-cached.363
*/364
if (file->f_flags & O_DSYNC)
365
return 1;
366
return addr >= __pa(high_memory);
367
}368
#endif369
370
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
371
unsigned long size, pgprot_t vma_prot)
372
{373
#ifdef pgprot_noncached374
phys_addr_t offset = pfn << PAGE_SHIFT;
375
376
if (uncached_access(file, offset))
377
return pgprot_noncached(vma_prot);
378
#endif379
return vma_prot;
380
}381
#endif382
383
#ifndef CONFIG_MMU384
static unsigned long get_unmapped_area_mem(struct file *file,
385
unsigned long addr,
386
unsigned long len,
387
unsigned long pgoff,
388
unsigned long flags)
389
{390
if (!valid_mmap_phys_addr_range(pgoff, len))
391
return (unsigned long) -EINVAL;
392
return pgoff << PAGE_SHIFT;
393
}394
395
/* permit direct mmap, for read, write or exec */396
static unsigned memory_mmap_capabilities(struct file *file)
397
{398
return NOMMU_MAP_DIRECT |
399
NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
400
}401
402
static unsigned zero_mmap_capabilities(struct file *file)
403
{404
return NOMMU_MAP_COPY;
405
}406
407
/* can't do an in-place private mapping if there's no MMU */408
static inline int private_mapping_ok(struct vm_area_struct *vma)
409
{410
return is_nommu_shared_mapping(vma->vm_flags);
411
}412
#else413
414
static inline int private_mapping_ok(struct vm_area_struct *vma)
415
{416
return 1;
417
}418
#endif419
420
static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

/**
 * SPDX-Req-ID: 032b3f1c9e61452bf826328d95fae043c4ea4b966ad6583a0377554d3c4f2d76
 * SPDX-Req-Text:
 * mmap_mem - map physical memory into user space (/dev/mem).
 * @file: file structure for the device.
 * @vma: virtual memory area structure describing the user mapping.
 *
 * This function checks if the requested physical memory range is valid
 * and accessible by the user, then it maps the physical memory range to
 * user-mode address space.
 *
 * Function's expectations:
 * 1. This function shall check if the requested physical address range to be
 * mapped fits within the maximum addressable physical range;
 *
 * 2. This function shall check if the requested physical range corresponds to
 * a valid physical range and if access is allowed on it (if config STRICT_DEVMEM
 * is not set, access is always allowed);
 *
 * 3. This function shall check if the input virtual memory area can be used for
 * a private mapping (always OK if there is an MMU);
 *
 * 4. This function shall set the virtual memory area operations to
 * &mmap_mem_ops;
 *
 * 5. This function shall establish a mapping between the user-space
 * virtual memory area described by vma and the physical memory
 * range specified by vma->vm_pgoff and size;
 *
 * Context: process context.
 *
 * Return:
 * * 0 on success
 * * %-EAGAIN - invalid or unsupported mapping requested (remap_pfn_range()
 *   fails)
 * * %-EINVAL - requested physical range to be mapped is not valid
 * * %-EPERM - no permission to access the requested physical range
 *
 * SPDX-Req-End
 */
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
467
{468
size_t size = vma->vm_end - vma->vm_start;
469
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
470
471
/* Does it even fit in phys_addr_t? */
472
if (offset >> PAGE_SHIFT != vma->vm_pgoff)
473
return -EINVAL;
474
475
/* It's illegal to wrap around the end of the physical address space. */
476
if (offset + (phys_addr_t)size - 1 < offset)
477
return -EINVAL;
478
479
if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
480
return -EINVAL;
481
482
if (!private_mapping_ok(vma))
483
return -ENOSYS;
484
485
if (!range_is_allowed(vma->vm_pgoff, size))
486
return -EPERM;
487
488
if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
489
&vma->vm_page_prot))
490
return -EINVAL;
491
492
vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
493
size,
494
vma->vm_page_prot);
495
496
vma->vm_ops = &mmap_mem_ops;
497
498
/* Remap-pfn-range will mark the range VM_IO */
499
if (remap_pfn_range(vma,
500
vma->vm_start,
501
vma->vm_pgoff,
502
size,
503
vma->vm_page_prot)) {
504
return -EAGAIN;
505
}
506
return 0;
507
}508
509
#ifdef CONFIG_DEVPORT
/* Read x86-style I/O port space (/dev/port), one inb() per byte. */
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	/* Port space is 64K; stop at the end regardless of count. */
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

/* Write I/O port space (/dev/port), one outb() per byte. */
static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			/* Report partial progress before faulting. */
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

/* /dev/null read: always at EOF. */
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

/* /dev/null write: swallow everything and claim full success. */
static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
566
{567
return 0;
568
}569
570
static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
571
{572
size_t count = iov_iter_count(from);
573
iov_iter_advance(from, count);
574
return count;
575
}576
577
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
578
struct splice_desc *sd)
579
{580
return sd->len;
581
}582
583
static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
584
loff_t *ppos, size_t len, unsigned int flags)
585
{586
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
587
}588
589
/* io_uring command handler for /dev/null: accept and ignore. */
static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	return 0;
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
595
{596
size_t written = 0;
597
598
while (iov_iter_count(iter)) {
599
size_t chunk = iov_iter_count(iter), n;
600
601
if (chunk > PAGE_SIZE)
602
chunk = PAGE_SIZE; /* Just for latency reasons */
603
n = iov_iter_zero(chunk, iter);
604
if (!n && iov_iter_count(iter))
605
return written ? written : -EFAULT;
606
written += n;
607
if (signal_pending(current))
608
return written ? written : -ERESTARTSYS;
609
if (!need_resched())
610
continue;
611
if (iocb->ki_flags & IOCB_NOWAIT)
612
return written ? written : -EAGAIN;
613
cond_resched();
614
}
615
return written;
616
}617
618
/* Classic read() for /dev/zero: clear the user buffer a page at a time. */
static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t cleared = 0;

	while (count) {
		size_t chunk = min_t(size_t, count, PAGE_SIZE);
		size_t left;

		left = clear_user(buf + cleared, chunk);
		if (unlikely(left)) {
			/* Partial fault: report progress made, if any. */
			cleared += (chunk - left);
			if (!cleared)
				return -EFAULT;
			break;
		}
		cleared += chunk;
		count -= chunk;

		if (signal_pending(current))
			break;
		cond_resched();
	}

	return cleared;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
646
{647
#ifndef CONFIG_MMU648
return -ENOSYS;
649
#endif650
if (vma->vm_flags & VM_SHARED)
651
return shmem_zero_setup(vma);
652
vma_set_anonymous(vma);
653
return 0;
654
}655
656
#ifndef CONFIG_MMU657
static unsigned long get_unmapped_area_zero(struct file *file,
658
unsigned long addr, unsigned long len,
659
unsigned long pgoff, unsigned long flags)
660
{661
return -ENOSYS;
662
}663
#else664
static unsigned long get_unmapped_area_zero(struct file *file,
665
unsigned long addr, unsigned long len,
666
unsigned long pgoff, unsigned long flags)
667
{668
if (flags & MAP_SHARED) {
669
/*
670
* mmap_zero() will call shmem_zero_setup() to create a file,671
* so use shmem's get_unmapped_area in case it can be huge;672
* and pass NULL for file as in mmap.c's get_unmapped_area(),673
* so as not to confuse shmem with our handle on "/dev/zero".674
*/675
return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
676
}
677
678
/*
679
* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it,680
* attempt to map aligned to huge page size if possible, otherwise we681
* fall back to system page size mappings.682
*/683
#ifdef CONFIG_TRANSPARENT_HUGEPAGE684
return thp_get_unmapped_area(file, addr, len, pgoff, flags);
685
#else686
return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
687
#endif688
}689
#endif /* CONFIG_MMU */
690
691
/* /dev/full write: always fails with "no space left on device". */
static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*698
* Special lseek() function for /dev/null and /dev/zero. Most notably, you699
* can fopen() both devices with "a" now. This was previously impossible.700
* -- SRB.701
*/702
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
703
{704
return file->f_pos = 0;
705
}706
707
/**
 * SPDX-Req-ID: feb4cb915f91d319078204293f6cf99eb50e775a8f670e478e30e84bd6979a14
 * SPDX-Req-Text:
 * memory_lseek - change the file position.
 * @file: file structure for the device.
 * @offset: file offset to seek to.
 * @orig: where to start seeking from (see whence in the llseek manpage).
 *
 * This function changes the file position according to the input offset
 * and orig parameters.
 *
 * Function's expectations:
 * 1. This function shall lock the semaphore of the inode corresponding to the
 * input file before any operation and unlock it before returning.
 *
 * 2. This function shall check the orig value and accordingly:
 * 2.1. if it is equal to SEEK_CUR, the current file position shall be
 * incremented by the input offset;
 * 2.2. if it is equal to SEEK_SET, the current file position shall be
 * set to the input offset value;
 * 2.3. any other value shall result in an error condition.
 *
 * 3. Before writing the current file position, the new position value
 * shall be checked to not overlap with Linux ERRNO values.
 *
 * Assumptions of Use:
 * 1. the input file pointer is expected to be valid.
 *
 * Notes:
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so passing orig equal to SEEK_END returns -EINVAL.
 *
 * Context: process context, locks/unlocks inode->i_rwsem
 *
 * Return:
 * * the new file position on success
 * * %-EOVERFLOW - the new position value equals or exceeds
 *   (unsigned long long) -MAX_ERRNO
 * * %-EINVAL - the orig parameter is invalid
 *
 * SPDX-Req-End
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		fallthrough;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}

/**
 * SPDX-Req-ID: 6638528f181fca48b6554dc9dc34a070ce1860a9fd0e2f5857f17f0de915f5d9
 * SPDX-Req-Text:
 * open_port - open the I/O port device (/dev/port).
 * @inode: inode of the device file.
 * @filp: file structure for the device.
 *
 * This function checks if the caller can access the port device and sets
 * the f_mapping pointer of filp to the i_mapping pointer of inode.
 *
 * Function's expectations:
 * 1. This function shall check if the caller has sufficient capabilities to
 * perform raw I/O access;
 *
 * 2. This function shall check if the kernel is locked down with the
 * &LOCKDOWN_DEV_MEM restriction;
 *
 * 3. If the input inode corresponds to /dev/mem, the f_mapping pointer
 * of the input file structure shall be set to the i_mapping pointer
 * of the input inode;
 *
 * Assumptions of Use:
 * 1. The input inode and filp are expected to be valid.
 *
 * Context: process context.
 *
 * Return:
 * * 0 on success
 * * %-EPERM - caller lacks the required capability (CAP_SYS_RAWIO)
 * * any error returned by security_locked_down()
 *
 * SPDX-Req-End
 */
static int open_port(struct inode *inode, struct file *filp)
813
{814
int rc;
815
816
if (!capable(CAP_SYS_RAWIO))
817
return -EPERM;
818
819
rc = security_locked_down(LOCKDOWN_DEV_MEM);
820
if (rc)
821
return rc;
822
823
if (iminor(inode) != DEVMEM_MINOR)
824
return 0;
825
826
/*
827
* Use a unified address space to have a single point to manage828
* revocations when drivers want to take over a /dev/mem mapped829
* range.830
*/831
filp->f_mapping = iomem_get_mapping();
832
833
return 0;
834
}835
836
/* /dev/zero and /dev/full share several trivial operations. */
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define splice_write_zero	splice_write_null
#define open_mem	open_port

static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
	.fop_flags	= FOP_UNSIGNED_OFFSET,
};

static const struct file_operations null_fops = {
857
.llseek = null_lseek,
858
.read = read_null,
859
.write = write_null,
860
.read_iter = read_iter_null,
861
.write_iter = write_iter_null,
862
.splice_write = splice_write_null,
863
.uring_cmd = uring_cmd_null,
864
};865
866
#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
876
.llseek = zero_lseek,
877
.write = write_zero,
878
.read_iter = read_iter_zero,
879
.read = read_zero,
880
.write_iter = write_iter_zero,
881
.splice_read = copy_splice_read,
882
.splice_write = splice_write_zero,
883
.mmap = mmap_zero,
884
.get_unmapped_area = get_unmapped_area_zero,
885
#ifndef CONFIG_MMU886
.mmap_capabilities = zero_mmap_capabilities,
887
#endif888
};889
890
static const struct file_operations full_fops = {
891
.llseek = full_lseek,
892
.read_iter = read_iter_zero,
893
.write = write_full,
894
.splice_read = copy_splice_read,
895
};896
897
static const struct memdev {
898
const char *name;
899
const struct file_operations *fops;
900
fmode_t fmode;
901
umode_t mode;
902
} devlist[] = {
903
#ifdef CONFIG_DEVMEM904
[DEVMEM_MINOR] = { "mem", &mem_fops, 0, 0 },
905
#endif906
[3] = { "null", &null_fops, FMODE_NOWAIT, 0666 },
907
#ifdef CONFIG_DEVPORT908
[4] = { "port", &port_fops, 0, 0 },
909
#endif910
[5] = { "zero", &zero_fops, FMODE_NOWAIT, 0666 },
911
[7] = { "full", &full_fops, 0, 0666 },
912
[8] = { "random", &random_fops, FMODE_NOWAIT, 0666 },
913
[9] = { "urandom", &urandom_fops, FMODE_NOWAIT, 0666 },
914
#ifdef CONFIG_PRINTK915
[11] = { "kmsg", &kmsg_fops, 0, 0644 },
916
#endif917
};918
919
/**
 * SPDX-Req-ID: 46dc914c8b39c0763e1d407cf985e16e68b5c153cdbdae6e03173972db455da0
 * SPDX-Req-Text:
 * memory_open - set the filp f_op to the memory device fops and invoke open().
 * @inode: inode of the device file.
 * @filp: file structure for the device.
 *
 * Function's expectations:
 * 1. This function shall retrieve the minor number associated with the input
 * inode and the memory device corresponding to such minor number;
 *
 * 2. The file operations pointer shall be set to the memory device file operations;
 *
 * 3. The file mode member of the input filp shall be OR'd with the device mode;
 *
 * 4. The memory device open() file operation shall be invoked.
 *
 * Assumptions of Use:
 * 1. The input inode and filp are expected to be non-NULL.
 *
 * Context: process context.
 *
 * Return:
 * * 0 on success
 * * %-ENXIO - the minor number corresponding to the input inode cannot be
 *   associated with any device or the corresponding device has a NULL fops
 *   pointer
 * * any error returned by the device specific open function pointer
 *
 * SPDX-Req-End
 */
static int memory_open(struct inode *inode, struct file *filp)
951
{952
int minor;
953
const struct memdev *dev;
954
955
minor = iminor(inode);
956
if (minor >= ARRAY_SIZE(devlist))
957
return -ENXIO;
958
959
dev = &devlist[minor];
960
if (!dev->fops)
961
return -ENXIO;
962
963
filp->f_op = dev->fops;
964
filp->f_mode |= dev->fmode;
965
966
if (dev->fops->open)
967
return dev->fops->open(inode, filp);
968
969
return 0;
970
}971
972
static const struct file_operations memory_fops = {
973
.open = memory_open,
974
.llseek = noop_llseek,
975
};976
977
/* Supply the default /dev node permissions for a given memory device. */
static char *mem_devnode(const struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;	/* keep the default device name */
}

static const struct class mem_class = {
985
.name = "mem",
986
.devnode = mem_devnode,
987
};988
989
/* Register MEM_MAJOR, the "mem" class and one device node per devlist entry. */
static int __init chr_dev_init(void)
{
	int retval;
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	retval = class_register(&mem_class);
	if (retval)
		return retval;

	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(&mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);