// 620:staticintext2_get_blocks(struct inode *inode,
sector_t iblock, unsignedlong maxblocks,
u32 *bno, bool *new, bool *boundary,
int create)
{
int err;
int offsets[4];
Indirect chain[4];
Indirect *partial;
ext2_fsblk_t goal;
int indirect_blks;
int blocks_to_boundary = 0;
int depth;
struct ext2_inode_info *ei = EXT2_I(inode);
int count = 0;
ext2_fsblk_t first_block = 0;
BUG_ON(maxblocks == 0);
depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
if (depth == 0)
return -EIO;
partial = ext2_get_branch(inode, depth, offsets, chain, &err);
/* Simplest case - block found, no allocation needed */if (!partial) {
first_block = le32_to_cpu(chain[depth - 1].key);
count++;
/*map more blocks*/while (count < maxblocks && count <= blocks_to_boundary) {
ext2_fsblk_t blk;
if (!verify_chain(chain, chain + depth - 1)) {
/* * Indirect block might be removed by * truncate while we were reading it. * Handling of that case: forget what we've * got now, go to reread. */
err = -EAGAIN;
count = 0;
partial = chain + depth - 1;
break;
}
blk = le32_to_cpu(*(chain[depth-1].p + count));
if (blk == first_block + count)
count++;
elsebreak;
}
if (err != -EAGAIN)
goto got_it;
}
/* Next simple case - plain lookup or failed read of indirect block */if (!create || err == -EIO)
goto cleanup;
mutex_lock(&ei->truncate_mutex);
/* * If the indirect block is missing while we are reading * the chain(ext2_get_branch() returns -EAGAIN err), or * if the chain has been changed after we grab the semaphore, * (either because another process truncated this branch, or * another get_block allocated this branch) re-grab the chain to see if * the request block has been allocated or not. * * Since we already block the truncate/other get_block * at this point, we will have the current copy of the chain when we * splice the branch into the tree. */if (err == -EAGAIN || !verify_chain(chain, partial)) {
while (partial > chain) {
brelse(partial->bh);
partial--;
}
partial = ext2_get_branch(inode, depth, offsets, chain, &err);
if (!partial) {
count++;
mutex_unlock(&ei->truncate_mutex);
goto got_it;
}
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
}
}
/* * Okay, we need to do block allocation. Lazily initialize the block * allocation info here if necessary */if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
ext2_init_block_alloc_info(inode);
goal = ext2_find_goal(inode, iblock, partial);
/* the number of blocks need to allocate for [d,t]indirect blocks */
indirect_blks = (chain + depth) - partial - 1;
/* * Next look up the indirect map to count the total number of * direct blocks to allocate for this branch. */
count = ext2_blks_to_allocate(partial, indirect_blks,
maxblocks, blocks_to_boundary);
/* * XXX ???? Block out ext2_truncate while we alter the tree */
err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
offsets + (partial - chain), partial);
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
}
if (IS_DAX(inode)) {
/* * We must unmap blocks before zeroing so that writeback cannot * overwrite zeros with stale data from block device page cache. */clean_bdev_aliases(inode->i_sb->s_bdev,
le32_to_cpu(chain[depth-1].key),
count);
/* * block must be initialised before we put it in the tree * so that it's not found by another thread before it's * initialised */
err = sb_issue_zeroout(inode->i_sb,
le32_to_cpu(chain[depth-1].key), count,
GFP_NOFS);
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
}
}
*new = true;
ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
mutex_unlock(&ei->truncate_mutex);
got_it:
if (count > blocks_to_boundary)
*boundary = true;
err = count;
/* Clean up and exit */
partial = chain + depth - 1; /* the whole chain */cleanup:
while (partial > chain) {
brelse(partial->bh);
partial--;
}
if (err > 0)
*bno = le32_to_cpu(chain[depth-1].key);
return err;
}
// 679:mutex_lock(&ei->truncate_mutex);
/* * If the indirect block is missing while we are reading * the chain(ext2_get_branch() returns -EAGAIN err), or * if the chain has been changed after we grab the semaphore, * (either because another process truncated this branch, or * another get_block allocated this branch) re-grab the chain to see if * the request block has been allocated or not. * * Since we already block the truncate/other get_block * at this point, we will have the current copy of the chain when we * splice the branch into the tree. */if (err == -EAGAIN || !verify_chain(chain, partial)) {
while (partial > chain) {
brelse(partial->bh);
partial--;
}
partial = ext2_get_branch(inode, depth, offsets, chain, &err);
if (!partial) {
count++;
mutex_unlock(&ei->truncate_mutex);
goto got_it;
}
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
}
}
617a618
> # CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set
735c736,737
< # CONFIG_BLK_DEV_INTEGRITY is not set
---
> CONFIG_BLK_DEV_INTEGRITY=y
> CONFIG_BLK_DEV_INTEGRITY_T10=y
2199c2201,2203
< # CONFIG_DM_VERITY is not set
---
> CONFIG_DM_VERITY=m
> # CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
> # CONFIG_DM_VERITY_FEC is not set
2202c2206
< # CONFIG_DM_INTEGRITY is not set
---
> CONFIG_DM_INTEGRITY=m
7414c7418
< CONFIG_CRYPTO_GF128MUL=m
---
> CONFIG_CRYPTO_GF128MUL=y
7448,7449c7452,7453
< # CONFIG_CRYPTO_CFB is not set
< CONFIG_CRYPTO_CTR=m
---
> CONFIG_CRYPTO_CFB=y
> CONFIG_CRYPTO_CTR=y
7452,7454c7456,7458
< # CONFIG_CRYPTO_LRW is not set
< # CONFIG_CRYPTO_OFB is not set
< # CONFIG_CRYPTO_PCBC is not set
---
> CONFIG_CRYPTO_LRW=y
> CONFIG_CRYPTO_OFB=y
> CONFIG_CRYPTO_PCBC=y
7476c7480
< # CONFIG_CRYPTO_CRCT10DIF is not set
---
> CONFIG_CRYPTO_CRCT10DIF=y
7482,7485c7486,7489
< # CONFIG_CRYPTO_RMD128 is not set
< # CONFIG_CRYPTO_RMD160 is not set
< # CONFIG_CRYPTO_RMD256 is not set
< # CONFIG_CRYPTO_RMD320 is not set
---
> CONFIG_CRYPTO_RMD128=y
> CONFIG_CRYPTO_RMD160=y
> CONFIG_CRYPTO_RMD256=y
> CONFIG_CRYPTO_RMD320=y
7487c7491
< CONFIG_CRYPTO_SHA256=m
---
> CONFIG_CRYPTO_SHA256=y
7506c7510
< # CONFIG_CRYPTO_CAST6 is not set
---
> CONFIG_CRYPTO_CAST6=m
7610c7614
< # CONFIG_CRC_T10DIF is not set
---
> CONFIG_CRC_T10DIF=y
pi@raspberrypi:~ $ sudo mount -t vfat -o ro /dev/mapper/dmtest /mnt/
dm-integrityのみ使用する場合
integrity target用のMapping tableを作成する
pi@raspberrypi:~ $ sudo integritysetup format /dev/sda1
Formatted with tag size 4, internal integrity crc32c.
Wiping device to initialize integrity checksum.
You can interrupt this by pressing CTRL+c (rest of not wiped device will contain invalid checksum).
Finished, time 18:26.389, 8064 MiB written, speed 7.3 MiB/s
device-mapper (/dev/mapper/test)を作成する
pi@raspberrypi:~ $ sudo integritysetup open /dev/sda1 test
作成したintegrity targetを確認する
pi@raspberrypi:~ $ sudo integritysetup status test
/dev/mapper/test is active.
type: INTEGRITY
tag size: 4
integrity: crc32c
device: /dev/sda1
sector size: 512 bytes
interleave sectors: 32768
size: 16516984 sectors
mode: read/write
failures: 0
journal size: 67043328 bytes
journal watermark: 50%
journal commit time: 10000 ms
pi@raspberrypi:~ $ sudo cryptsetup luksFormat --type luks2 /dev/sda1 --cipher aes-xts-plain64 --integrity hmac-sha256
Enter passphrase for /dev/sda1:
Verify passphrase:
Wiping device to initialize integrity checksum.
You can interrupt this by pressing CTRL+c (rest of not wiped device will contain invalid checksum).
Finished, time 33:38.376, 7634 MiB written, speed 3.8 MiB/s
// 3715:
/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;

	/* Caller may forbid fs-recursion during allocation (AOP_FLAG_NOFS). */
	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
// 1888:struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp_mask)
{
struct page *page;
repeat:
page = mapping_get_entry(mapping, index);
if (xa_is_value(page)) {
if (fgp_flags & FGP_ENTRY)
return page;
page = NULL;
}
if (!page)
goto no_page;
if (fgp_flags & FGP_LOCK) {
if (fgp_flags & FGP_NOWAIT) {
if (!trylock_page(page)) {
put_page(page);
returnNULL;
}
} else {
lock_page(page);
}
/* Has the page been truncated? */if (unlikely(page->mapping != mapping)) {
unlock_page(page);
put_page(page);
goto repeat;
}
VM_BUG_ON_PAGE(!thp_contains(page, index), page);
}
if (fgp_flags & FGP_ACCESSED)
mark_page_accessed(page);
elseif (fgp_flags & FGP_WRITE) {
/* Clear idle flag for buffer write */if (page_is_idle(page))
clear_page_idle(page);
}
if (!(fgp_flags & FGP_HEAD))
page = find_subpage(page, index);
no_page:
if (!page && (fgp_flags & FGP_CREAT)) {
int err;
if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
gfp_mask |= __GFP_WRITE;
if (fgp_flags & FGP_NOFS)
gfp_mask &= ~__GFP_FS;
page = __page_cache_alloc(gfp_mask);
if (!page)
returnNULL;
if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;
/* Init accessed so avoid atomic mark_page_accessed later */if (fgp_flags & FGP_ACCESSED)
__SetPageReferenced(page);
err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
if (unlikely(err)) {
put_page(page);
page = NULL;
if (err == -EEXIST)
goto repeat;
}
/* * add_to_page_cache_lru locks the page, and for mmap we expect * an unlocked page. */if (page && (fgp_flags & FGP_FOR_MMAP))
unlock_page(page);
}
return page;
}
まずは、mapping_get_entry関数の定義を確認する。
// 1817:
/*
 * Lockless lookup of the entry at @index in @mapping's xarray.
 * Returns the page with an elevated refcount, a shadow/swap value entry
 * without a refcount, or NULL.
 */
static struct page *mapping_get_entry(struct address_space *mapping,
		pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct page *page;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	page = xas_load(&xas);
	if (xas_retry(&xas, page))
		goto repeat;
	/*
	 * A shadow entry of a recently evicted page, or a swap entry from
	 * shmem/tmpfs.  Return it without attempting to raise page count.
	 */
	if (!page || xa_is_value(page))
		goto out;

	if (!page_cache_get_speculative(page))
		goto repeat;

	/*
	 * Has the page moved or been split?
	 * This is part of the lockless pagecache protocol. See
	 * include/linux/pagemap.h for details.
	 */
	if (unlikely(page != xas_reload(&xas))) {
		put_page(page);
		goto repeat;
	}
out:
	rcu_read_unlock();

	return page;
}
// 977:intadd_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
void *shadow = NULL;
int ret;
__SetPageLocked(page);
ret = __add_to_page_cache_locked(page, mapping, offset,
gfp_mask, &shadow);
if (unlikely(ret))
__ClearPageLocked(page);
else {
/* * The page might have been evicted from cache only * recently, in which case it should be activated like * any other repeatedly accessed page. * The exception is pages getting rewritten; evicting other * data from the working set, only to cache data that will * get overwritten with something else, is a waste of memory. */WARN_ON_ONCE(PageActive(page));
if (!(gfp_mask & __GFP_WRITE) && shadow)
workingset_refault(page, shadow);
lru_cache_add(page);
}
return ret;
}
leava@server:~/linux $ make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- menuconfig
-*- Cryptographic API --->
Certificates for signature checking --->
(ca.pem) Additional X.509 keys for default system keyring
// 3832:ssize_t__generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t written = 0;
ssize_t err;
ssize_t status;
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
err = file_remove_privs(file);
if (err)
goto out;
err = file_update_time(file);
if (err)
goto out;
if (iocb->ki_flags & IOCB_DIRECT) {
loff_t pos, endbyte;
written = generic_file_direct_write(iocb, from);
/* * If the write stopped short of completing, fall back to * buffered writes. Some filesystems do this for writes to * holes, for example. For DAX files, a buffered write will * not succeed (even if it did, DAX does not handle dirty * page-cache pages correctly). */if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
goto out;
status = generic_perform_write(file, from, pos = iocb->ki_pos);
/* * If generic_perform_write() returned a synchronous error * then we want to return the number of bytes which were * direct-written, or the error code if that was zero. Note * that this differs from normal direct-io semantics, which * will return -EFOO even if some bytes were written. */if (unlikely(status < 0)) {
err = status;
goto out;
}
/* * We need to ensure that the page cache pages are written to * disk and invalidated to preserve the expected O_DIRECT * semantics. */
endbyte = pos + status - 1;
err = filemap_write_and_wait_range(mapping, pos, endbyte);
if (err == 0) {
iocb->ki_pos = endbyte + 1;
written += status;
invalidate_mapping_pages(mapping,
pos >> PAGE_SHIFT,
endbyte >> PAGE_SHIFT);
} else {
/* * We don't know how much we wrote, so just return * the number of bytes which were direct-written */
}
} else {
written = generic_perform_write(file, from, iocb->ki_pos);
if (likely(written > 0))
iocb->ki_pos += written;
}
out:
current->backing_dev_info = NULL;
return written ? written : err;
}
// 1936:intfile_remove_privs(struct file *file)
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = file_inode(file);
int kill;
int error = 0;
/* * Fast path for nothing security related. * As well for non-regular files, e.g. blkdev inodes. * For example, blkdev_write_iter() might get here * trying to remove privs which it is not allowed to. */if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
return0;
kill = dentry_needs_remove_privs(dentry);
if (kill < 0)
return kill;
if (kill)
error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
if (!error)
inode_has_no_xattr(inode);
return error;
}
// 1785:staticintupdate_time(struct inode *inode, struct timespec64 *time, int flags)
{
if (inode->i_op->update_time)
return inode->i_op->update_time(inode, time, flags);
returngeneric_update_time(inode, time, flags);
}
// 2381:void__mark_inode_dirty(struct inode *inode, int flags)
{
struct super_block *sb = inode->i_sb;
int dirtytime = 0;
trace_writeback_mark_inode_dirty(inode, flags);
if (flags & I_DIRTY_INODE) {
/* * Notify the filesystem about the inode being dirtied, so that * (if needed) it can update on-disk fields and journal the * inode. This is only needed when the inode itself is being * dirtied now. I.e. it's only needed for I_DIRTY_INODE, not * for just I_DIRTY_PAGES or I_DIRTY_TIME. */trace_writeback_dirty_inode_start(inode, flags);
if (sb->s_op->dirty_inode)
sb->s_op->dirty_inode(inode, flags & I_DIRTY_INODE);
trace_writeback_dirty_inode(inode, flags);
/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
flags &= ~I_DIRTY_TIME;
} else {
/* * Else it's either I_DIRTY_PAGES, I_DIRTY_TIME, or nothing. * (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME * in one call to __mark_inode_dirty().) */
dirtytime = flags & I_DIRTY_TIME;
WARN_ON_ONCE(dirtytime && flags != I_DIRTY_TIME);
}
/* * Paired with smp_mb() in __writeback_single_inode() for the * following lockless i_state test. See there for details. */smp_mb();
if (((inode->i_state & flags) == flags) ||
(dirtytime && (inode->i_state & I_DIRTY_INODE)))
return;
spin_lock(&inode->i_lock);
if (dirtytime && (inode->i_state & I_DIRTY_INODE))
goto out_unlock_inode;
if ((inode->i_state & flags) != flags) {
constint was_dirty = inode->i_state & I_DIRTY;
inode_attach_wb(inode, NULL);
/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */if (flags & I_DIRTY_INODE)
inode->i_state &= ~I_DIRTY_TIME;
inode->i_state |= flags;
/* * If the inode is queued for writeback by flush worker, just * update its dirty state. Once the flush worker is done with * the inode it will place it on the appropriate superblock * list, based upon its state. */if (inode->i_state & I_SYNC_QUEUED)
goto out_unlock_inode;
/* * Only add valid (hashed) inodes to the superblock's * dirty list. Add blockdev inodes as well. */if (!S_ISBLK(inode->i_mode)) {
if (inode_unhashed(inode))
goto out_unlock_inode;
}
if (inode->i_state & I_FREEING)
goto out_unlock_inode;
/* * If the inode was already on b_dirty/b_io/b_more_io, don't * reposition it (that would break b_dirty time-ordering). */if (!was_dirty) {
struct bdi_writeback *wb;
struct list_head *dirty_list;
bool wakeup_bdi = false;
wb = locked_inode_to_wb_and_lock_list(inode);
inode->dirtied_when = jiffies;
if (dirtytime)
inode->dirtied_time_when = jiffies;
if (inode->i_state & I_DIRTY)
dirty_list = &wb->b_dirty;
else
dirty_list = &wb->b_dirty_time;
wakeup_bdi = inode_io_list_move_locked(inode, wb,
dirty_list);
spin_unlock(&wb->list_lock);
trace_writeback_dirty_inode_enqueue(inode);
/* * If this is the first dirty inode for this bdi, * we have to wake-up the corresponding bdi thread * to make sure background write-back happens * later. */if (wakeup_bdi &&
(wb->bdi->capabilities & BDI_CAP_WRITEBACK))
wb_wakeup_delayed(wb);
return;
}
}
out_unlock_inode:
spin_unlock(&inode->i_lock);
}
// 2476:
wakeup_bdi = inode_io_list_move_locked(inode, wb,
dirty_list);
spin_unlock(&wb->list_lock);
trace_writeback_dirty_inode_enqueue(inode);
/* * If this is the first dirty inode for this bdi, * we have to wake-up the corresponding bdi thread * to make sure background write-back happens * later. */if (wakeup_bdi &&
(wb->bdi->capabilities & BDI_CAP_WRITEBACK))
wb_wakeup_delayed(wb);
return;
// 3733:ssize_tgeneric_perform_write(struct file *file,
struct iov_iter *i, loff_t pos)
{
struct address_space *mapping = file->f_mapping;
conststruct address_space_operations *a_ops = mapping->a_ops;
long status = 0;
ssize_t written = 0;
unsignedint flags = 0;
do {
struct page *page;
unsignedlong offset; /* Offset into pagecache page */unsignedlong bytes; /* Bytes to write to page */size_t copied; /* Bytes copied from user */void *fsdata;
offset = (pos & (PAGE_SIZE - 1));
bytes = min_t(unsignedlong, PAGE_SIZE - offset,
iov_iter_count(i));
again:
/* * Bring in the user page that we will copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. */if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
status = -EFAULT;
break;
}
if (fatal_signal_pending(current)) {
status = -EINTR;
break;
}
status = a_ops->write_begin(file, mapping, pos, bytes, flags,
&page, &fsdata);
if (unlikely(status < 0))
break;
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
copied = copy_page_from_iter_atomic(page, offset, bytes, i);
flush_dcache_page(page);
status = a_ops->write_end(file, mapping, pos, bytes, copied,
page, fsdata);
if (unlikely(status != copied)) {
iov_iter_revert(i, copied - max(status, 0L));
if (unlikely(status < 0))
break;
}
cond_resched();
if (unlikely(status == 0)) {
/* * A short copy made ->write_end() reject the * thing entirely. Might be memory poisoning * halfway through, might be a race with munmap, * might be severe memory pressure. */if (copied)
bytes = copied;
goto again;
}
pos += status;
written += status;
balance_dirty_pages_ratelimited(mapping);
} while (iov_iter_count(i));
return written ? written : status;
}
// 3754:/* * Bring in the user page that we will copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. * * Not only is this an optimisation, but it is also required * to check that the address is actually valid, when atomic * usercopies are used, below. */if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
status = -EFAULT;
break;
}
// 260:staticinlineunsignedlong__phys_to_virt(phys_addr_t x)
{
unsignedlong t;
/* * 'unsigned long' cast discard upper word when * phys_addr_t is 64 bit, and makes sure that inline * assembler expression receives 32 bit argument * in place where 'r' 32 bit operand is expected. */__pv_stub((unsignedlong) x, t, "sub");
return t;
}
// 296:/* * Ensure cache coherency between kernel mapping and userspace mapping * of this page. * * We have three cases to consider: * - VIPT non-aliasing cache: fully coherent so nothing required. * - VIVT: fully aliasing, so we need to handle every alias in our * current VM view. * - VIPT aliasing: need to handle one alias in our current VM view. * * If we need to handle aliasing: * If the page only exists in the page cache and there are no user * space mappings, we can be lazy and remember that we may have dirty * kernel cache lines for later. Otherwise, we assume we have * aliasing mappings. * * Note that we disable the lazy flush for SMP configurations where * the cache maintenance operations are not automatically broadcasted. */voidflush_dcache_page(struct page *page)
{
struct address_space *mapping;
/* * The zero page is never written to, so never has any dirty * cache lines, and therefore never needs to be flushed. */if (page == ZERO_PAGE(0))
return;
if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
return;
}
mapping = page_mapping_file(page);
if (!cache_ops_need_broadcast() &&
mapping && !page_mapcount(page))
clear_bit(PG_dcache_clean, &page->flags);
else {
__flush_dcache_page(mapping, page);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, page);
elseif (mapping)
__flush_icache_all();
set_bit(PG_dcache_clean, &page->flags);
}
}
EXPORT_SYMBOL(flush_dcache_page);
// 3790:if (unlikely(status == 0)) {
/* * A short copy made ->write_end() reject the * thing entirely. Might be memory poisoning * halfway through, might be a race with munmap, * might be severe memory pressure. */if (copied)
bytes = copied;
goto again;
}
pos += status;
written += status;
balance_dirty_pages_ratelimited(mapping);
} while (iov_iter_count(i));
return written ? written : status;
leava@server:~ $ cd linux
leava@server:~/linux $ KERNEL=kernel7l
leava@server:~/linux $ make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- bcm2711_defconfig
コンフィグを修正する (fs-verityとdm-verityの有効化)
leava@server:~/linux $ make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- menuconfig
File systems --->
[*] FS Verity (read-only file-based authenticity protection)
[*] FS Verity builtin signature support
Device Drivers --->
[*] Multiple devices driver support (RAID and LVM) --->
<*> Device mapper support
[*] Device mapper debugging support
<*> Verity target support
[*] Verity data device root hash signature verification support
root@server:/# apt-get install libssl-dev fio
root@server:/# wget https://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/fsverity-utils.git/snapshot/fsverity-utils-1.2.tar.gz
root@server:/# tar xf fsverity-utils-1.2.tar.gz
root@server:/# cd fsverity-utils-1.2
root@server:/fsverity-utils-1.2# make && make install
Raspbian GNU/Linux 10 raspberrypi ttyS0
raspberrypi login: root
Password:
Last login: Thu Dec 3 16:27:35 GMT 2020 on ttyS0
Linux raspberrypi 5.10.3-v7l+ #1 SMP Mon Dec 28 05:53:15 UTC 2020 armv7l
The programs included with the Debian GNU/Linux system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.
Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent
permitted by applicable law.
SSH is enabled and the default password for the 'pi' user has not been changed.
This is a security risk - please login as the 'pi' user and type 'passwd' to set a new password.
Wi-Fi is currently blocked by rfkill.
Use raspi-config to set the country before use.
root@raspberrypi:~#
おわりに
本記事ではRaspberry Pi 用カーネルのLinux Kernel 5.10を自前でビルドして、ネットワークブートを利用して起動する方法を記載する。
次回は、作成した環境でdm-verityとfs-verityの使い方について確認する。