Run clang-format
Ran clang-format over all of the code using the platform2 clang-format
config. No manual changes were made. Temporarily added these two
statements to platform2/.clang-format so that tabs are reformatted too:
UseTab: Never
IndentWidth: 2
Also turn on a few other options in presubmit checks.
BUG=chromium:886953
TEST=repo upload passes
TEST=CQ passes
Change-Id: Ib499f7db3f2023340134aa311122d0c26869dffa
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/dm-verity/+/2354977
Tested-by: Amin Hassani <ahassani@chromium.org>
Reviewed-by: Jorge Lucangeli Obes <jorgelo@chromium.org>
Reviewed-by: Mike Frysinger <vapier@chromium.org>
diff --git a/.clang-format b/.clang-format
new file mode 120000
index 0000000..c2257f2
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1 @@
+../../platform2/.clang-format
\ No newline at end of file
diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg
index ee52980..a5ff8d3 100644
--- a/PRESUBMIT.cfg
+++ b/PRESUBMIT.cfg
@@ -2,7 +2,7 @@
# Comment out the disable-flags for any checks you want to leave enabled.
[Hook Overrides]
+clang_format_check: true
stray_whitespace_check: true
-#long_line_check: false
+long_line_check: true
cros_license_check: false
-#tab_check: false
diff --git a/dm-bht-userspace.c b/dm-bht-userspace.c
index 677eab5..bb0e74a 100644
--- a/dm-bht-userspace.c
+++ b/dm-bht-userspace.c
@@ -1,4 +1,4 @@
- /*
+/*
* Copyright (C) 2011 The Chromium OS Authors <chromium-os-dev@chromium.org>
*
* Device-Mapper block hash tree interface.
@@ -19,47 +19,48 @@
/**
* dm_bht_compute_hash: hashes a page of data
*/
-static int dm_bht_compute_hash(struct dm_bht *bht, const u8 *buffer, u8 *digest)
-{
- struct hash_desc *hash_desc = &bht->hash_desc[0];
+static int dm_bht_compute_hash(struct dm_bht* bht,
+ const u8* buffer,
+ u8* digest) {
+ struct hash_desc* hash_desc = &bht->hash_desc[0];
- /* Note, this is synchronous. */
- if (crypto_hash_init(hash_desc)) {
- DMCRIT("failed to reinitialize crypto hash");
- return -EINVAL;
- }
- if (crypto_hash_update(hash_desc, buffer, PAGE_SIZE)) {
- DMCRIT("crypto_hash_update failed");
- return -EINVAL;
- }
- if (bht->have_salt) {
- if (crypto_hash_update(hash_desc, bht->salt, sizeof(bht->salt))) {
- DMCRIT("crypto_hash_update failed");
- return -EINVAL;
- }
- }
- if (crypto_hash_final(hash_desc, digest)) {
- DMCRIT("crypto_hash_final failed");
- return -EINVAL;
- }
+ /* Note, this is synchronous. */
+ if (crypto_hash_init(hash_desc)) {
+ DMCRIT("failed to reinitialize crypto hash");
+ return -EINVAL;
+ }
+ if (crypto_hash_update(hash_desc, buffer, PAGE_SIZE)) {
+ DMCRIT("crypto_hash_update failed");
+ return -EINVAL;
+ }
+ if (bht->have_salt) {
+ if (crypto_hash_update(hash_desc, bht->salt, sizeof(bht->salt))) {
+ DMCRIT("crypto_hash_update failed");
+ return -EINVAL;
+ }
+ }
+ if (crypto_hash_final(hash_desc, digest)) {
+ DMCRIT("crypto_hash_final failed");
+ return -EINVAL;
+ }
- return 0;
+ return 0;
}
-void dm_bht_set_buffer(struct dm_bht *bht, void *buffer) {
- int depth;
+void dm_bht_set_buffer(struct dm_bht* bht, void* buffer) {
+ int depth;
- for (depth = 0; depth < bht->depth; ++depth) {
- struct dm_bht_level *level = dm_bht_get_level(bht, depth);
- struct dm_bht_entry *entry_end = level->entries + level->count;
- struct dm_bht_entry *entry;
+ for (depth = 0; depth < bht->depth; ++depth) {
+ struct dm_bht_level* level = dm_bht_get_level(bht, depth);
+ struct dm_bht_entry* entry_end = level->entries + level->count;
+ struct dm_bht_entry* entry;
- for (entry = level->entries; entry < entry_end; ++entry) {
- entry->nodes = buffer;
- memset(buffer, 0, PAGE_SIZE);
- buffer += PAGE_SIZE;
- }
- }
+ for (entry = level->entries; entry < entry_end; ++entry) {
+ entry->nodes = buffer;
+ memset(buffer, 0, PAGE_SIZE);
+ buffer += PAGE_SIZE;
+ }
+ }
}
/**
@@ -72,47 +73,43 @@
* Walks the tree and computes the hashes at each level from the
* hashes below.
*/
-int dm_bht_compute(struct dm_bht *bht)
-{
- int depth, r = 0;
+int dm_bht_compute(struct dm_bht* bht) {
+ int depth, r = 0;
- for (depth = bht->depth - 2; depth >= 0; depth--) {
- struct dm_bht_level *level = dm_bht_get_level(bht, depth);
- struct dm_bht_level *child_level = level + 1;
- struct dm_bht_entry *entry = level->entries;
- struct dm_bht_entry *child = child_level->entries;
- unsigned int i, j;
+ for (depth = bht->depth - 2; depth >= 0; depth--) {
+ struct dm_bht_level* level = dm_bht_get_level(bht, depth);
+ struct dm_bht_level* child_level = level + 1;
+ struct dm_bht_entry* entry = level->entries;
+ struct dm_bht_entry* child = child_level->entries;
+ unsigned int i, j;
- for (i = 0; i < level->count; i++, entry++) {
- unsigned int count = bht->node_count;
+ for (i = 0; i < level->count; i++, entry++) {
+ unsigned int count = bht->node_count;
- memset(entry->nodes, 0, PAGE_SIZE);
- entry->state = DM_BHT_ENTRY_READY;
+ memset(entry->nodes, 0, PAGE_SIZE);
+ entry->state = DM_BHT_ENTRY_READY;
- if (i == (level->count - 1))
- count = child_level->count % bht->node_count;
- if (count == 0)
- count = bht->node_count;
- for (j = 0; j < count; j++, child++) {
- u8 *digest = dm_bht_node(bht, entry, j);
+ if (i == (level->count - 1))
+ count = child_level->count % bht->node_count;
+ if (count == 0)
+ count = bht->node_count;
+ for (j = 0; j < count; j++, child++) {
+ u8* digest = dm_bht_node(bht, entry, j);
- r = dm_bht_compute_hash(bht, child->nodes, digest);
- if (r) {
- DMERR("Failed to update (d=%d,i=%u)",
- depth, i);
- goto out;
- }
- }
- }
- }
- r = dm_bht_compute_hash(bht,
- bht->levels[0].entries->nodes,
- bht->root_digest);
- if (r)
- DMERR("Failed to update root hash");
+ r = dm_bht_compute_hash(bht, child->nodes, digest);
+ if (r) {
+ DMERR("Failed to update (d=%d,i=%u)", depth, i);
+ goto out;
+ }
+ }
+ }
+ }
+ r = dm_bht_compute_hash(bht, bht->levels[0].entries->nodes, bht->root_digest);
+ if (r)
+ DMERR("Failed to update root hash");
out:
- return r;
+ return r;
}
/**
@@ -129,12 +126,10 @@
* It is up to the users of the update interface to ensure the entry data is
* fully populated prior to use. The number of updated entries is NOT tracked.
*/
-int dm_bht_store_block(struct dm_bht *bht, unsigned int block,
- u8 *block_data)
-{
- int depth = bht->depth;
- struct dm_bht_entry *entry = dm_bht_get_entry(bht, depth - 1, block);
- u8 *node = dm_bht_get_node(bht, entry, depth, block);
+int dm_bht_store_block(struct dm_bht* bht, unsigned int block, u8* block_data) {
+ int depth = bht->depth;
+ struct dm_bht_entry* entry = dm_bht_get_entry(bht, depth - 1, block);
+ u8* node = dm_bht_get_node(bht, entry, depth, block);
- return dm_bht_compute_hash(bht, block_data, node);
+ return dm_bht_compute_hash(bht, block_data, node);
}
diff --git a/dm-bht-userspace.h b/dm-bht-userspace.h
index 8df4fda..96f5571 100644
--- a/dm-bht-userspace.h
+++ b/dm-bht-userspace.h
@@ -16,13 +16,12 @@
* should not be directly used for verification. (It should be repopulated.)
* In addition, these functions aren't meant to be called in parallel.
*/
-int dm_bht_compute(struct dm_bht *bht);
-void dm_bht_set_buffer(struct dm_bht *bht, void *buffer);
-int dm_bht_store_block(struct dm_bht *bht, unsigned int block,
- u8 *block_data);
+int dm_bht_compute(struct dm_bht* bht);
+void dm_bht_set_buffer(struct dm_bht* bht, void* buffer);
+int dm_bht_store_block(struct dm_bht* bht, unsigned int block, u8* block_data);
#ifdef __cplusplus
}
#endif
-#endif /* __LINUX_DM_BHT_USERSPACE_H */
+#endif /* __LINUX_DM_BHT_USERSPACE_H */
diff --git a/dm-bht.c b/dm-bht.c
index c2f61cb..7d339e0 100644
--- a/dm-bht.c
+++ b/dm-bht.c
@@ -1,4 +1,4 @@
- /*
+/*
* Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org>
*
* Device-Mapper block hash tree interface.
@@ -11,7 +11,7 @@
#include <string.h>
#include <asm/page.h>
-#include <linux/bitops.h> /* for fls() */
+#include <linux/bitops.h> /* for fls() */
#include <linux/bug.h>
/* #define CONFIG_DM_DEBUG 1 */
#include <linux/device-mapper.h>
@@ -38,24 +38,22 @@
#define nr_cpu_ids 1
#define smp_processor_id(_x) 0
-static inline void *alloc_page(void)
-{
- void *memptr;
+static inline void* alloc_page(void) {
+ void* memptr;
- if (posix_memalign((void **)&memptr, PAGE_SIZE, PAGE_SIZE))
- return NULL;
- return memptr;
+ if (posix_memalign((void**)&memptr, PAGE_SIZE, PAGE_SIZE))
+ return NULL;
+ return memptr;
}
-static u8 from_hex(u8 ch)
-{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- if ((ch >= 'A') && (ch <= 'F'))
- return ch - 'A' + 10;
- return -1;
+static u8 from_hex(u8 ch) {
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ return -1;
}
/**
@@ -63,14 +61,12 @@
* @binary: a byte array of length @binary_len
* @hex: a byte array of length @binary_len * 2 + 1
*/
-static void dm_bht_bin_to_hex(u8 *binary, u8 *hex, unsigned int binary_len)
-{
- while (binary_len-- > 0) {
- sprintf((char *__restrict__)hex, "%02hhx",
- (unsigned char)*binary);
- hex += 2;
- binary++;
- }
+static void dm_bht_bin_to_hex(u8* binary, u8* hex, unsigned int binary_len) {
+ while (binary_len-- > 0) {
+ sprintf((char* __restrict__)hex, "%02hhx", (unsigned char)*binary);
+ hex += 2;
+ binary++;
+ }
}
/**
@@ -78,69 +74,70 @@
* @binary: a byte array of length @binary_len
* @hex: a byte array of length @binary_len * 2 + 1
*/
-static void dm_bht_hex_to_bin(u8 *binary, const u8 *hex,
- unsigned int binary_len)
-{
- while (binary_len-- > 0) {
- *binary = from_hex(*(hex++));
- *binary *= 16;
- *binary += from_hex(*(hex++));
- binary++;
- }
+static void dm_bht_hex_to_bin(u8* binary,
+ const u8* hex,
+ unsigned int binary_len) {
+ while (binary_len-- > 0) {
+ *binary = from_hex(*(hex++));
+ *binary *= 16;
+ *binary += from_hex(*(hex++));
+ binary++;
+ }
}
-static void dm_bht_log_mismatch(struct dm_bht *bht, u8 *given, u8 *computed)
-{
- u8 given_hex[DM_BHT_MAX_DIGEST_SIZE * 2 + 1];
- u8 computed_hex[DM_BHT_MAX_DIGEST_SIZE * 2 + 1];
- dm_bht_bin_to_hex(given, given_hex, bht->digest_size);
- dm_bht_bin_to_hex(computed, computed_hex, bht->digest_size);
- DMERR_LIMIT("%s != %s", given_hex, computed_hex);
+static void dm_bht_log_mismatch(struct dm_bht* bht, u8* given, u8* computed) {
+ u8 given_hex[DM_BHT_MAX_DIGEST_SIZE * 2 + 1];
+ u8 computed_hex[DM_BHT_MAX_DIGEST_SIZE * 2 + 1];
+ dm_bht_bin_to_hex(given, given_hex, bht->digest_size);
+ dm_bht_bin_to_hex(computed, computed_hex, bht->digest_size);
+ DMERR_LIMIT("%s != %s", given_hex, computed_hex);
}
/* Used for turning verifiers into computers */
-typedef int (*dm_bht_compare_cb)(struct dm_bht *, u8 *, u8 *);
+typedef int (*dm_bht_compare_cb)(struct dm_bht*, u8*, u8*);
/**
* dm_bht_compute_hash: hashes a page of data
*/
-static int dm_bht_compute_hash(struct dm_bht *bht, const u8 *buffer, u8 *digest)
-{
- struct hash_desc *hash_desc = &bht->hash_desc[smp_processor_id()];
+static int dm_bht_compute_hash(struct dm_bht* bht,
+ const u8* buffer,
+ u8* digest) {
+ struct hash_desc* hash_desc = &bht->hash_desc[smp_processor_id()];
- /* Note, this is synchronous. */
- if (crypto_hash_init(hash_desc)) {
- DMCRIT("failed to reinitialize crypto hash (proc:%d)",
- smp_processor_id());
- return -EINVAL;
- }
- if (crypto_hash_update(hash_desc, buffer, PAGE_SIZE)) {
- DMCRIT("crypto_hash_update failed");
- return -EINVAL;
- }
- if (bht->have_salt) {
- if (crypto_hash_update(hash_desc, bht->salt, sizeof(bht->salt))) {
- DMCRIT("crypto_hash_update failed");
- return -EINVAL;
- }
- }
- if (crypto_hash_final(hash_desc, digest)) {
- DMCRIT("crypto_hash_final failed");
- return -EINVAL;
- }
+ /* Note, this is synchronous. */
+ if (crypto_hash_init(hash_desc)) {
+ DMCRIT("failed to reinitialize crypto hash (proc:%d)", smp_processor_id());
+ return -EINVAL;
+ }
+ if (crypto_hash_update(hash_desc, buffer, PAGE_SIZE)) {
+ DMCRIT("crypto_hash_update failed");
+ return -EINVAL;
+ }
+ if (bht->have_salt) {
+ if (crypto_hash_update(hash_desc, bht->salt, sizeof(bht->salt))) {
+ DMCRIT("crypto_hash_update failed");
+ return -EINVAL;
+ }
+ }
+ if (crypto_hash_final(hash_desc, digest)) {
+ DMCRIT("crypto_hash_final failed");
+ return -EINVAL;
+ }
- return 0;
+ return 0;
}
/*-----------------------------------------------
* Implementation functions
*-----------------------------------------------*/
-static int dm_bht_initialize_entries(struct dm_bht *bht);
+static int dm_bht_initialize_entries(struct dm_bht* bht);
-static int dm_bht_read_callback_stub(void *ctx, sector_t start, u8 *dst,
- sector_t count,
- struct dm_bht_entry *entry);
+static int dm_bht_read_callback_stub(void* ctx,
+ sector_t start,
+ u8* dst,
+ sector_t count,
+ struct dm_bht_entry* entry);
/**
* dm_bht_create - prepares @bht for us
@@ -154,188 +151,186 @@
* Callers can offset into devices by storing the data in the io callbacks.
* TODO(wad) bust up into smaller helpers
*/
-int dm_bht_create(struct dm_bht *bht, unsigned int block_count,
- const char *alg_name)
-{
- int status = 0;
- int cpu = 0;
+int dm_bht_create(struct dm_bht* bht,
+ unsigned int block_count,
+ const char* alg_name) {
+ int status = 0;
+ int cpu = 0;
- bht->have_salt = false;
+ bht->have_salt = false;
- /* Setup the hash first. Its length determines much of the bht layout */
- for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
- bht->hash_desc[cpu].tfm = crypto_alloc_hash(alg_name, 0, 0);
- if (bht->hash_desc[cpu].tfm == NULL) {
- DMERR("failed to allocate crypto hash '%s'", alg_name);
- status = -ENOMEM;
- bht->hash_desc[cpu].tfm = NULL;
- goto bad_hash_alg;
- }
- }
- bht->digest_size = crypto_hash_digestsize(bht->hash_desc[0].tfm);
- /* We expect to be able to pack >=2 hashes into a page */
- if (PAGE_SIZE / bht->digest_size < 2) {
- DMERR("too few hashes fit in a page");
- status = -EINVAL;
- goto bad_digest_len;
- }
+ /* Setup the hash first. Its length determines much of the bht layout */
+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
+ bht->hash_desc[cpu].tfm = crypto_alloc_hash(alg_name, 0, 0);
+ if (bht->hash_desc[cpu].tfm == NULL) {
+ DMERR("failed to allocate crypto hash '%s'", alg_name);
+ status = -ENOMEM;
+ bht->hash_desc[cpu].tfm = NULL;
+ goto bad_hash_alg;
+ }
+ }
+ bht->digest_size = crypto_hash_digestsize(bht->hash_desc[0].tfm);
+ /* We expect to be able to pack >=2 hashes into a page */
+ if (PAGE_SIZE / bht->digest_size < 2) {
+ DMERR("too few hashes fit in a page");
+ status = -EINVAL;
+ goto bad_digest_len;
+ }
- if (bht->digest_size > DM_BHT_MAX_DIGEST_SIZE) {
- DMERR("DM_BHT_MAX_DIGEST_SIZE too small for chosen digest");
- status = -EINVAL;
- goto bad_digest_len;
- }
+ if (bht->digest_size > DM_BHT_MAX_DIGEST_SIZE) {
+ DMERR("DM_BHT_MAX_DIGEST_SIZE too small for chosen digest");
+ status = -EINVAL;
+ goto bad_digest_len;
+ }
- /* Configure the tree */
- bht->block_count = block_count;
- DMDEBUG("Setting block_count %u", block_count);
- if (block_count == 0) {
- DMERR("block_count must be non-zero");
- status = -EINVAL;
- goto bad_block_count;
- }
+ /* Configure the tree */
+ bht->block_count = block_count;
+ DMDEBUG("Setting block_count %u", block_count);
+ if (block_count == 0) {
+ DMERR("block_count must be non-zero");
+ status = -EINVAL;
+ goto bad_block_count;
+ }
- /* Each dm_bht_entry->nodes is one page. The node code tracks
- * how many nodes fit into one entry where a node is a single
- * hash (message digest).
- */
- bht->node_count_shift = fls(PAGE_SIZE / bht->digest_size) - 1;
- /* Round down to the nearest power of two. This makes indexing
- * into the tree much less painful.
- */
- bht->node_count = 1 << bht->node_count_shift;
+ /* Each dm_bht_entry->nodes is one page. The node code tracks
+ * how many nodes fit into one entry where a node is a single
+ * hash (message digest).
+ */
+ bht->node_count_shift = fls(PAGE_SIZE / bht->digest_size) - 1;
+ /* Round down to the nearest power of two. This makes indexing
+ * into the tree much less painful.
+ */
+ bht->node_count = 1 << bht->node_count_shift;
- /* This is unlikely to happen, but with 64k pages, who knows. */
- if (bht->node_count > UINT_MAX / bht->digest_size) {
- DMERR("node_count * hash_len exceeds UINT_MAX!");
- status = -EINVAL;
- goto bad_node_count;
- }
+ /* This is unlikely to happen, but with 64k pages, who knows. */
+ if (bht->node_count > UINT_MAX / bht->digest_size) {
+ DMERR("node_count * hash_len exceeds UINT_MAX!");
+ status = -EINVAL;
+ goto bad_node_count;
+ }
- bht->depth = DIV_ROUND_UP(fls(block_count - 1), bht->node_count_shift);
- DMDEBUG("Setting depth to %d.", bht->depth);
+ bht->depth = DIV_ROUND_UP(fls(block_count - 1), bht->node_count_shift);
+ DMDEBUG("Setting depth to %d.", bht->depth);
- /* Ensure that we can safely shift by this value. */
- if (bht->depth * bht->node_count_shift >= sizeof(unsigned int) * 8) {
- DMERR("specified depth and node_count_shift is too large");
- status = -EINVAL;
- goto bad_node_count;
- }
+ /* Ensure that we can safely shift by this value. */
+ if (bht->depth * bht->node_count_shift >= sizeof(unsigned int) * 8) {
+ DMERR("specified depth and node_count_shift is too large");
+ status = -EINVAL;
+ goto bad_node_count;
+ }
- /* Allocate levels. Each level of the tree may have an arbitrary number
- * of dm_bht_entry structs. Each entry contains node_count nodes.
- * Each node in the tree is a cryptographic digest of either node_count
- * nodes on the subsequent level or of a specific block on disk.
- */
- bht->levels = (struct dm_bht_level *)
- calloc(bht->depth, sizeof(struct dm_bht_level));
- if (!bht->levels) {
- DMERR("failed to allocate tree levels");
- status = -ENOMEM;
- goto bad_level_alloc;
- }
+ /* Allocate levels. Each level of the tree may have an arbitrary number
+ * of dm_bht_entry structs. Each entry contains node_count nodes.
+ * Each node in the tree is a cryptographic digest of either node_count
+ * nodes on the subsequent level or of a specific block on disk.
+ */
+ bht->levels =
+ (struct dm_bht_level*)calloc(bht->depth, sizeof(struct dm_bht_level));
+ if (!bht->levels) {
+ DMERR("failed to allocate tree levels");
+ status = -ENOMEM;
+ goto bad_level_alloc;
+ }
- /* Setup read callback stub */
- bht->read_cb = &dm_bht_read_callback_stub;
+ /* Setup read callback stub */
+ bht->read_cb = &dm_bht_read_callback_stub;
- status = dm_bht_initialize_entries(bht);
- if (status)
- goto bad_entries_alloc;
+ status = dm_bht_initialize_entries(bht);
+ if (status)
+ goto bad_entries_alloc;
- /* We compute depth such that there is only be 1 block at level 0. */
- BUG_ON(bht->levels[0].count != 1);
+ /* We compute depth such that there is only be 1 block at level 0. */
+ BUG_ON(bht->levels[0].count != 1);
- return 0;
+ return 0;
bad_entries_alloc:
- while (bht->depth-- > 0)
- free(bht->levels[bht->depth].entries);
- free(bht->levels);
+ while (bht->depth-- > 0)
+ free(bht->levels[bht->depth].entries);
+ free(bht->levels);
bad_node_count:
bad_level_alloc:
bad_block_count:
bad_digest_len:
bad_hash_alg:
- for (cpu = 0; cpu < nr_cpu_ids; ++cpu)
- if (bht->hash_desc[cpu].tfm)
- crypto_free_hash(bht->hash_desc[cpu].tfm);
- return status;
+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu)
+ if (bht->hash_desc[cpu].tfm)
+ crypto_free_hash(bht->hash_desc[cpu].tfm);
+ return status;
}
-static int dm_bht_initialize_entries(struct dm_bht *bht)
-{
- /* The last_index represents the index into the last
- * block digest that will be stored in the tree. By walking the
- * tree with that index, it is possible to compute the total number
- * of entries needed at each level in the tree.
- *
- * Since each entry will contain up to |node_count| nodes of the tree,
- * it is possible that the last index may not be at the end of a given
- * entry->nodes. In that case, it is assumed the value is padded.
- *
- * Note, we treat both the tree root (1 hash) and the tree leaves
- * independently from the bht data structures. Logically, the root is
- * depth=-1 and the block layer level is depth=bht->depth
- */
- unsigned int last_index = ALIGN(bht->block_count, bht->node_count) - 1;
- unsigned int total_entries = 0;
- struct dm_bht_level *level = NULL;
- int depth;
+static int dm_bht_initialize_entries(struct dm_bht* bht) {
+ /* The last_index represents the index into the last
+ * block digest that will be stored in the tree. By walking the
+ * tree with that index, it is possible to compute the total number
+ * of entries needed at each level in the tree.
+ *
+ * Since each entry will contain up to |node_count| nodes of the tree,
+ * it is possible that the last index may not be at the end of a given
+ * entry->nodes. In that case, it is assumed the value is padded.
+ *
+ * Note, we treat both the tree root (1 hash) and the tree leaves
+ * independently from the bht data structures. Logically, the root is
+ * depth=-1 and the block layer level is depth=bht->depth
+ */
+ unsigned int last_index = ALIGN(bht->block_count, bht->node_count) - 1;
+ unsigned int total_entries = 0;
+ struct dm_bht_level* level = NULL;
+ int depth;
- /* check that the largest level->count can't result in an int overflow
- * on allocation or sector calculation.
- */
- if (((last_index >> bht->node_count_shift) + 1) >
- UINT_MAX / MAX((unsigned int)sizeof(struct dm_bht_entry),
- (unsigned int)to_sector(PAGE_SIZE))) {
- DMCRIT("required entries %u is too large",
- last_index + 1);
- return -EINVAL;
- }
+ /* check that the largest level->count can't result in an int overflow
+ * on allocation or sector calculation.
+ */
+ if (((last_index >> bht->node_count_shift) + 1) >
+ UINT_MAX / MAX((unsigned int)sizeof(struct dm_bht_entry),
+ (unsigned int)to_sector(PAGE_SIZE))) {
+ DMCRIT("required entries %u is too large", last_index + 1);
+ return -EINVAL;
+ }
- /* Track the current sector location for each level so we don't have to
- * compute it during traversals.
- */
- bht->sectors = 0;
- for (depth = 0; depth < bht->depth; ++depth) {
- level = dm_bht_get_level(bht, depth);
- level->count = dm_bht_index_at_level(bht, depth,
- last_index) + 1;
- DMDEBUG("depth: %d entries: %u", depth, level->count);
- /* TODO(wad) consider the case where the data stored for each
- * level is done with contiguous pages (instead of using
- * entry->nodes) and the level just contains two bitmaps:
- * (a) which pages have been loaded from disk
- * (b) which specific nodes have been verified.
- */
- level->entries = (struct dm_bht_entry *)
- calloc(level->count, sizeof(struct dm_bht_entry));
- if (!level->entries) {
- DMERR("failed to allocate entries for depth %d",
- bht->depth);
- /* let the caller clean up the mess */
- return -ENOMEM;
- }
- total_entries += level->count;
- level->sector = bht->sectors;
- /* number of sectors per entry * entries at this level */
- bht->sectors += level->count * to_sector(PAGE_SIZE);
- /* not ideal, but since unsigned overflow behavior is defined */
- if (bht->sectors < level->sector) {
- DMCRIT("level sector calculation overflowed");
- return -EINVAL;
- }
- }
+ /* Track the current sector location for each level so we don't have to
+ * compute it during traversals.
+ */
+ bht->sectors = 0;
+ for (depth = 0; depth < bht->depth; ++depth) {
+ level = dm_bht_get_level(bht, depth);
+ level->count = dm_bht_index_at_level(bht, depth, last_index) + 1;
+ DMDEBUG("depth: %d entries: %u", depth, level->count);
+ /* TODO(wad) consider the case where the data stored for each
+ * level is done with contiguous pages (instead of using
+ * entry->nodes) and the level just contains two bitmaps:
+ * (a) which pages have been loaded from disk
+ * (b) which specific nodes have been verified.
+ */
+ level->entries =
+ (struct dm_bht_entry*)calloc(level->count, sizeof(struct dm_bht_entry));
+ if (!level->entries) {
+ DMERR("failed to allocate entries for depth %d", bht->depth);
+ /* let the caller clean up the mess */
+ return -ENOMEM;
+ }
+ total_entries += level->count;
+ level->sector = bht->sectors;
+ /* number of sectors per entry * entries at this level */
+ bht->sectors += level->count * to_sector(PAGE_SIZE);
+ /* not ideal, but since unsigned overflow behavior is defined */
+ if (bht->sectors < level->sector) {
+ DMCRIT("level sector calculation overflowed");
+ return -EINVAL;
+ }
+ }
- return 0;
+ return 0;
}
-static int dm_bht_read_callback_stub(void *ctx, sector_t start, u8 *dst,
- sector_t count, struct dm_bht_entry *entry)
-{
- DMCRIT("dm_bht_read_callback_stub called!");
- dm_bht_read_completed(entry, -EIO);
- return -EIO;
+static int dm_bht_read_callback_stub(void* ctx,
+ sector_t start,
+ u8* dst,
+ sector_t count,
+ struct dm_bht_entry* entry) {
+ DMCRIT("dm_bht_read_callback_stub called!");
+ dm_bht_read_completed(entry, -EIO);
+ return -EIO;
}
/**
@@ -344,76 +339,74 @@
* @status: I/O status. Non-zero is failure.
* MUST always be called after a read_cb completes.
*/
-void dm_bht_read_completed(struct dm_bht_entry *entry, int status)
-{
- if (status) {
- /* TODO(wad) add retry support */
- DMCRIT("an I/O error occurred while reading entry");
- entry->state = DM_BHT_ENTRY_ERROR_IO;
- /* entry->nodes will be freed later */
- return;
- }
- BUG_ON(entry->state != DM_BHT_ENTRY_PENDING);
- entry->state = DM_BHT_ENTRY_READY;
+void dm_bht_read_completed(struct dm_bht_entry* entry, int status) {
+ if (status) {
+ /* TODO(wad) add retry support */
+ DMCRIT("an I/O error occurred while reading entry");
+ entry->state = DM_BHT_ENTRY_ERROR_IO;
+ /* entry->nodes will be freed later */
+ return;
+ }
+ BUG_ON(entry->state != DM_BHT_ENTRY_PENDING);
+ entry->state = DM_BHT_ENTRY_READY;
}
/* dm_bht_verify_path
* Verifies the path. Returns 0 on ok.
*/
-static int dm_bht_verify_path(struct dm_bht *bht, unsigned int block,
- const u8 *buffer)
-{
- int depth = bht->depth;
- u8 digest[DM_BHT_MAX_DIGEST_SIZE];
- struct dm_bht_entry *entry;
- u8 *node;
- int state;
+static int dm_bht_verify_path(struct dm_bht* bht,
+ unsigned int block,
+ const u8* buffer) {
+ int depth = bht->depth;
+ u8 digest[DM_BHT_MAX_DIGEST_SIZE];
+ struct dm_bht_entry* entry;
+ u8* node;
+ int state;
- do {
- /* Need to check that the hash of the current block is accurate
- * in its parent.
- */
- entry = dm_bht_get_entry(bht, depth - 1, block);
- state = entry->state;
- /* This call is only safe if all nodes along the path
- * are already populated (i.e. READY) via dm_bht_populate.
- */
- BUG_ON(state < DM_BHT_ENTRY_READY);
- node = dm_bht_get_node(bht, entry, depth, block);
+ do {
+ /* Need to check that the hash of the current block is accurate
+ * in its parent.
+ */
+ entry = dm_bht_get_entry(bht, depth - 1, block);
+ state = entry->state;
+ /* This call is only safe if all nodes along the path
+ * are already populated (i.e. READY) via dm_bht_populate.
+ */
+ BUG_ON(state < DM_BHT_ENTRY_READY);
+ node = dm_bht_get_node(bht, entry, depth, block);
- if (dm_bht_compute_hash(bht, buffer, digest) ||
- memcmp(digest, node, bht->digest_size))
- goto mismatch;
+ if (dm_bht_compute_hash(bht, buffer, digest) ||
+ memcmp(digest, node, bht->digest_size))
+ goto mismatch;
- /* Keep the containing block of hashes to be verified in the
- * next pass.
- */
- buffer = entry->nodes;
- } while (--depth > 0 && state != DM_BHT_ENTRY_VERIFIED);
+ /* Keep the containing block of hashes to be verified in the
+ * next pass.
+ */
+ buffer = entry->nodes;
+ } while (--depth > 0 && state != DM_BHT_ENTRY_VERIFIED);
- if (depth == 0 && state != DM_BHT_ENTRY_VERIFIED) {
- if (dm_bht_compute_hash(bht, buffer, digest) ||
- memcmp(digest, bht->root_digest, bht->digest_size))
- goto mismatch;
- entry->state = DM_BHT_ENTRY_VERIFIED;
- }
+ if (depth == 0 && state != DM_BHT_ENTRY_VERIFIED) {
+ if (dm_bht_compute_hash(bht, buffer, digest) ||
+ memcmp(digest, bht->root_digest, bht->digest_size))
+ goto mismatch;
+ entry->state = DM_BHT_ENTRY_VERIFIED;
+ }
- /* Mark path to leaf as verified. */
- for (depth++; depth < bht->depth; depth++) {
- entry = dm_bht_get_entry(bht, depth, block);
- /* At this point, entry can only be in VERIFIED or READY state.
- */
- entry->state = DM_BHT_ENTRY_VERIFIED;
- }
+ /* Mark path to leaf as verified. */
+ for (depth++; depth < bht->depth; depth++) {
+ entry = dm_bht_get_entry(bht, depth, block);
+ /* At this point, entry can only be in VERIFIED or READY state.
+ */
+ entry->state = DM_BHT_ENTRY_VERIFIED;
+ }
- DMDEBUG("verify_path: node %u is verified to root", block);
- return 0;
+ DMDEBUG("verify_path: node %u is verified to root", block);
+ return 0;
mismatch:
- DMERR_LIMIT("verify_path: failed to verify hash (d=%d,bi=%u)",
- depth, block);
- dm_bht_log_mismatch(bht, node, digest);
- return DM_BHT_ENTRY_ERROR_MISMATCH;
+ DMERR_LIMIT("verify_path: failed to verify hash (d=%d,bi=%u)", depth, block);
+ dm_bht_log_mismatch(bht, node, digest);
+ return DM_BHT_ENTRY_ERROR_MISMATCH;
}
/**
@@ -430,12 +423,14 @@
* Meant for use by dm_compute() callers. It allows dm_populate to
* be used to pre-fill a tree with zeroed out entry nodes.
*/
-int dm_bht_zeroread_callback(void *ctx, sector_t start, u8 *dst,
- sector_t count, struct dm_bht_entry *entry)
-{
- memset(dst, 0, verity_to_bytes(count));
- dm_bht_read_completed(entry, 0);
- return 0;
+int dm_bht_zeroread_callback(void* ctx,
+ sector_t start,
+ u8* dst,
+ sector_t count,
+ struct dm_bht_entry* entry) {
+ memset(dst, 0, verity_to_bytes(count));
+ dm_bht_read_completed(entry, 0);
+ return 0;
}
/**
@@ -447,18 +442,16 @@
* Callers may wish to call dm_bht_is_populated() when checking an io
* for which entries were already pending.
*/
-bool dm_bht_is_populated(struct dm_bht *bht, unsigned int block)
-{
- int depth;
+bool dm_bht_is_populated(struct dm_bht* bht, unsigned int block) {
+ int depth;
- for (depth = bht->depth - 1; depth >= 0; depth--) {
- struct dm_bht_entry *entry = dm_bht_get_entry(bht, depth,
- block);
- if (entry->state < DM_BHT_ENTRY_READY)
- return false;
- }
+ for (depth = bht->depth - 1; depth >= 0; depth--) {
+ struct dm_bht_entry* entry = dm_bht_get_entry(bht, depth, block);
+ if (entry->state < DM_BHT_ENTRY_READY)
+ return false;
+ }
- return true;
+ return true;
}
/**
@@ -469,62 +462,59 @@
*
* Returns negative value on error. Returns 0 on success.
*/
-int dm_bht_populate(struct dm_bht *bht, void *ctx,
- unsigned int block)
-{
- int depth;
- int state = 0;
+int dm_bht_populate(struct dm_bht* bht, void* ctx, unsigned int block) {
+ int depth;
+ int state = 0;
- BUG_ON(block >= bht->block_count);
+ BUG_ON(block >= bht->block_count);
- DMDEBUG("dm_bht_populate(%u)", block);
+ DMDEBUG("dm_bht_populate(%u)", block);
- for (depth = bht->depth - 1; depth >= 0; --depth) {
- struct dm_bht_level *level;
- struct dm_bht_entry *entry;
- unsigned int index;
- u8 *buffer;
+ for (depth = bht->depth - 1; depth >= 0; --depth) {
+ struct dm_bht_level* level;
+ struct dm_bht_entry* entry;
+ unsigned int index;
+ u8* buffer;
- entry = dm_bht_get_entry(bht, depth, block);
- state = entry->state;
- if (state == DM_BHT_ENTRY_UNALLOCATED)
- entry->state = DM_BHT_ENTRY_PENDING;
+ entry = dm_bht_get_entry(bht, depth, block);
+ state = entry->state;
+ if (state == DM_BHT_ENTRY_UNALLOCATED)
+ entry->state = DM_BHT_ENTRY_PENDING;
- if (state == DM_BHT_ENTRY_VERIFIED)
- break;
- if (state <= DM_BHT_ENTRY_ERROR)
- goto error_state;
- if (state != DM_BHT_ENTRY_UNALLOCATED)
- continue;
+ if (state == DM_BHT_ENTRY_VERIFIED)
+ break;
+ if (state <= DM_BHT_ENTRY_ERROR)
+ goto error_state;
+ if (state != DM_BHT_ENTRY_UNALLOCATED)
+ continue;
- /* Current entry is claimed for allocation and loading */
- buffer = (u8 *)alloc_page();
- if (!buffer)
- goto nomem;
+ /* Current entry is claimed for allocation and loading */
+ buffer = (u8*)alloc_page();
+ if (!buffer)
+ goto nomem;
- /* dm-bht guarantees page-aligned memory for callbacks. */
- entry->nodes = buffer;
+ /* dm-bht guarantees page-aligned memory for callbacks. */
+ entry->nodes = buffer;
- /* TODO(wad) error check callback here too */
+ /* TODO(wad) error check callback here too */
- level = &bht->levels[depth];
- index = dm_bht_index_at_level(bht, depth, block);
- bht->read_cb(ctx, level->sector + to_sector(index * PAGE_SIZE),
- entry->nodes, to_sector(PAGE_SIZE), entry);
- }
+ level = &bht->levels[depth];
+ index = dm_bht_index_at_level(bht, depth, block);
+ bht->read_cb(ctx, level->sector + to_sector(index * PAGE_SIZE),
+ entry->nodes, to_sector(PAGE_SIZE), entry);
+ }
- return 0;
+ return 0;
error_state:
- DMCRIT("block %u at depth %d is in an error state", block, depth);
- return state;
+ DMCRIT("block %u at depth %d is in an error state", block, depth);
+ return state;
nomem:
- DMCRIT("failed to allocate memory for entry->nodes");
- return -ENOMEM;
+ DMCRIT("failed to allocate memory for entry->nodes");
+ return -ENOMEM;
}
-
/**
* dm_bht_verify_block - checks that all nodes in the path for @block are valid
* @bht: pointer to a dm_bht_create()d bht
@@ -536,12 +526,13 @@
* code on verification failure. All supporting functions called
* should return similarly.
*/
-int dm_bht_verify_block(struct dm_bht *bht, unsigned int block,
- const u8 *buffer, unsigned int offset)
-{
- BUG_ON(offset != 0);
+int dm_bht_verify_block(struct dm_bht* bht,
+ unsigned int block,
+ const u8* buffer,
+ unsigned int offset) {
+ BUG_ON(offset != 0);
- return dm_bht_verify_path(bht, block, buffer);
+ return dm_bht_verify_path(bht, block, buffer);
}
/**
@@ -550,39 +541,37 @@
*
* Returns 0 on success. Does not free @bht itself.
*/
-int dm_bht_destroy(struct dm_bht *bht)
-{
- int depth;
- int cpu = 0;
+int dm_bht_destroy(struct dm_bht* bht) {
+ int depth;
+ int cpu = 0;
- depth = bht->depth;
- while (depth-- != 0) {
- struct dm_bht_entry *entry = bht->levels[depth].entries;
- struct dm_bht_entry *entry_end = entry +
- bht->levels[depth].count;
- for (; entry < entry_end; ++entry) {
- switch (entry->state) {
- /* At present, no other states free memory,
- * but that will change.
- */
- case DM_BHT_ENTRY_UNALLOCATED:
- /* Allocated with improper state */
- BUG_ON(entry->nodes);
- continue;
- default:
- BUG_ON(!entry->nodes);
- free(entry->nodes);
- break;
- }
- }
- free(bht->levels[depth].entries);
- bht->levels[depth].entries = NULL;
- }
- free(bht->levels);
- for (cpu = 0; cpu < nr_cpu_ids; ++cpu)
- if (bht->hash_desc[cpu].tfm)
- crypto_free_hash(bht->hash_desc[cpu].tfm);
- return 0;
+ depth = bht->depth;
+ while (depth-- != 0) {
+ struct dm_bht_entry* entry = bht->levels[depth].entries;
+ struct dm_bht_entry* entry_end = entry + bht->levels[depth].count;
+ for (; entry < entry_end; ++entry) {
+ switch (entry->state) {
+ /* At present, no other states free memory,
+ * but that will change.
+ */
+ case DM_BHT_ENTRY_UNALLOCATED:
+ /* Allocated with improper state */
+ BUG_ON(entry->nodes);
+ continue;
+ default:
+ BUG_ON(!entry->nodes);
+ free(entry->nodes);
+ break;
+ }
+ }
+ free(bht->levels[depth].entries);
+ bht->levels[depth].entries = NULL;
+ }
+ free(bht->levels);
+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu)
+ if (bht->hash_desc[cpu].tfm)
+ crypto_free_hash(bht->hash_desc[cpu].tfm);
+ return 0;
}
/*-----------------------------------------------
@@ -593,9 +582,8 @@
* dm_bht_sectors - return the sectors required on disk
* @bht: pointer to a dm_bht_create()d bht
*/
-sector_t dm_bht_sectors(const struct dm_bht *bht)
-{
- return bht->sectors;
+sector_t dm_bht_sectors(const struct dm_bht* bht) {
+ return bht->sectors;
}
/**
@@ -603,9 +591,8 @@
* @bht: pointer to a dm_bht_create()d bht
* @read_cb: callback function used for all read requests by @bht
*/
-void dm_bht_set_read_cb(struct dm_bht *bht, dm_bht_callback read_cb)
-{
- bht->read_cb = read_cb;
+void dm_bht_set_read_cb(struct dm_bht* bht, dm_bht_callback read_cb) {
+ bht->read_cb = read_cb;
}
/**
@@ -614,20 +601,18 @@
* @hexdigest: array of u8s containing the new digest in binary
* Returns non-zero on error. hexdigest should be NUL terminated.
*/
-int dm_bht_set_root_hexdigest(struct dm_bht *bht, const u8 *hexdigest)
-{
- /* Make sure we have at least the bytes expected */
- if (strnlen((char *)hexdigest, bht->digest_size * 2) !=
- bht->digest_size * 2) {
- DMERR("root digest length does not match hash algorithm");
- return -1;
- }
- dm_bht_hex_to_bin(bht->root_digest, hexdigest, bht->digest_size);
+int dm_bht_set_root_hexdigest(struct dm_bht* bht, const u8* hexdigest) {
+ /* Make sure we have at least the bytes expected */
+ if (strnlen((char*)hexdigest, bht->digest_size * 2) != bht->digest_size * 2) {
+ DMERR("root digest length does not match hash algorithm");
+ return -1;
+ }
+ dm_bht_hex_to_bin(bht->root_digest, hexdigest, bht->digest_size);
#ifdef CONFIG_DM_DEBUG
- DMINFO("Set root digest to %s. Parsed as -> ", hexdigest);
- dm_bht_log_mismatch(bht, bht->root_digest, bht->root_digest);
+ DMINFO("Set root digest to %s. Parsed as -> ", hexdigest);
+ dm_bht_log_mismatch(bht, bht->root_digest, bht->root_digest);
#endif
- return 0;
+ return 0;
}
/**
@@ -636,15 +621,13 @@
* @hexdigest: u8 array of size @available
* @available: must be bht->digest_size * 2 + 1
*/
-int dm_bht_root_hexdigest(struct dm_bht *bht, u8 *hexdigest, int available)
-{
- if (available < 0 ||
- ((unsigned int) available) < bht->digest_size * 2 + 1) {
- DMERR("hexdigest has too few bytes available");
- return -EINVAL;
- }
- dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size);
- return 0;
+int dm_bht_root_hexdigest(struct dm_bht* bht, u8* hexdigest, int available) {
+ if (available < 0 || ((unsigned int)available) < bht->digest_size * 2 + 1) {
+ DMERR("hexdigest has too few bytes available");
+ return -EINVAL;
+ }
+ dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size);
+ return 0;
}
/**
@@ -653,12 +636,11 @@
* @hexsalt: salt string, as hex; will be zero-padded or truncated to
* DM_BHT_SALT_SIZE * 2 hex digits.
*/
-void dm_bht_set_salt(struct dm_bht *bht, const char *hexsalt)
-{
- size_t saltlen = MIN(strlen(hexsalt) / 2, sizeof(bht->salt));
- bht->have_salt = true;
- memset(bht->salt, 0, sizeof(bht->salt));
- dm_bht_hex_to_bin(bht->salt, (const u8 *)hexsalt, saltlen);
+void dm_bht_set_salt(struct dm_bht* bht, const char* hexsalt) {
+ size_t saltlen = MIN(strlen(hexsalt) / 2, sizeof(bht->salt));
+ bht->have_salt = true;
+ memset(bht->salt, 0, sizeof(bht->salt));
+ dm_bht_hex_to_bin(bht->salt, (const u8*)hexsalt, saltlen);
}
/**
@@ -666,10 +648,9 @@
* @bht: pointer to a dm_bht_create()d bht
* @hexsalt: buffer to put salt into, of length DM_BHT_SALT_SIZE * 2 + 1.
*/
-int dm_bht_salt(struct dm_bht *bht, char *hexsalt)
-{
- if (!bht->have_salt)
- return -EINVAL;
- dm_bht_bin_to_hex(bht->salt, (u8 *)hexsalt, sizeof(bht->salt));
- return 0;
+int dm_bht_salt(struct dm_bht* bht, char* hexsalt) {
+ if (!bht->have_salt)
+ return -EINVAL;
+ dm_bht_bin_to_hex(bht->salt, (u8*)hexsalt, sizeof(bht->salt));
+ return 0;
}
diff --git a/dm-bht.h b/dm-bht.h
index 1e3fa16..19a2d4a 100644
--- a/dm-bht.h
+++ b/dm-bht.h
@@ -19,15 +19,15 @@
/* To avoid allocating memory for digest tests, we just setup a
* max to use for now.
*/
-#define DM_BHT_MAX_DIGEST_SIZE 128 /* 1k hashes are unlikely for now */
-#define DM_BHT_SALT_SIZE 32 /* 256 bits of salt is a lot */
+#define DM_BHT_MAX_DIGEST_SIZE 128 /* 1k hashes are unlikely for now */
+#define DM_BHT_SALT_SIZE 32 /* 256 bits of salt is a lot */
/* UNALLOCATED, PENDING, READY, and VERIFIED are valid states. All other
* values are entry-related return codes.
*/
-#define DM_BHT_ENTRY_VERIFIED 8 /* 'nodes' has been checked against parent */
-#define DM_BHT_ENTRY_READY 4 /* 'nodes' is loaded and available */
-#define DM_BHT_ENTRY_PENDING 2 /* 'nodes' is being loaded */
+#define DM_BHT_ENTRY_VERIFIED 8 /* 'nodes' has been checked against parent */
+#define DM_BHT_ENTRY_READY 4 /* 'nodes' is loaded and available */
+#define DM_BHT_ENTRY_PENDING 2 /* 'nodes' is being loaded */
#define DM_BHT_ENTRY_UNALLOCATED 0 /* untouched */
#define DM_BHT_ENTRY_ERROR -1 /* entry is unsuitable for use */
#define DM_BHT_ENTRY_ERROR_IO -2 /* I/O error on load */
@@ -42,15 +42,15 @@
* level, we need to load in the data for on-demand verification.
*/
struct dm_bht_entry {
- volatile int state; /* see defines */
- /* Keeping an extra pointer per entry wastes up to ~33k of
- * memory if a 1m blocks are used (or 66 on 64-bit arch)
- */
- void *io_context; /* Reserve a pointer for use during io */
- /* data should only be non-NULL if fully populated. */
- u8 *nodes; /* The hash data used to verify the children.
- * Guaranteed to be page-aligned.
- */
+ volatile int state; /* see defines */
+ /* Keeping an extra pointer per entry wastes up to ~33k of
+ * memory if a 1m blocks are used (or 66 on 64-bit arch)
+ */
+ void* io_context; /* Reserve a pointer for use during io */
+ /* data should only be non-NULL if fully populated. */
+ u8* nodes; /* The hash data used to verify the children.
+ * Guaranteed to be page-aligned.
+ */
};
/* dm_bht_level
@@ -58,17 +58,17 @@
* each hash is a node in the tree at the given tree depth/level.
*/
struct dm_bht_level {
- struct dm_bht_entry *entries; /* array of entries of tree nodes */
- unsigned int count; /* number of entries at this level */
- sector_t sector; /* starting sector for this level */
+ struct dm_bht_entry* entries; /* array of entries of tree nodes */
+ unsigned int count; /* number of entries at this level */
+ sector_t sector; /* starting sector for this level */
};
/* opaque context, start, databuf, sector_count */
-typedef int(*dm_bht_callback)(void *, /* external context */
- sector_t, /* start sector */
- u8 *, /* destination page */
- sector_t, /* num sectors */
- struct dm_bht_entry *);
+typedef int (*dm_bht_callback)(void*, /* external context */
+ sector_t, /* start sector */
+ u8*, /* destination page */
+ sector_t, /* num sectors */
+ struct dm_bht_entry*);
/* dm_bht - Device mapper block hash tree
* dm_bht provides a fixed interface for comparing data blocks
* against a cryptographic hashes stored in a hash tree. It
@@ -84,111 +84,109 @@
* entire branch has been verified.
*/
struct dm_bht {
- /* Configured values */
- int depth; /* Depth of the tree including the root */
- unsigned int block_count; /* Number of blocks hashed */
- char hash_alg[CRYPTO_MAX_ALG_NAME];
- unsigned char salt[DM_BHT_SALT_SIZE];
+ /* Configured values */
+ int depth; /* Depth of the tree including the root */
+ unsigned int block_count; /* Number of blocks hashed */
+ char hash_alg[CRYPTO_MAX_ALG_NAME];
+ unsigned char salt[DM_BHT_SALT_SIZE];
- /* This is a temporary hack to ease the transition to salting. It will
- * be removed once salting is supported both in kernel and userspace,
- * and the salt will default to all zeroes instead. */
- bool have_salt;
+ /* This is a temporary hack to ease the transition to salting. It will
+ * be removed once salting is supported both in kernel and userspace,
+ * and the salt will default to all zeroes instead. */
+ bool have_salt;
- /* Computed values */
- unsigned int node_count; /* Data size (in hashes) for each entry */
- unsigned int node_count_shift; /* first bit set - 1 */
- /* There is one per CPU so that verified can be simultaneous. */
- /* We assume we only have one CPU in userland. */
- struct hash_desc hash_desc[1]; /* Container for the hash alg */
- unsigned int digest_size;
- sector_t sectors; /* Number of disk sectors used */
+ /* Computed values */
+ unsigned int node_count; /* Data size (in hashes) for each entry */
+ unsigned int node_count_shift; /* first bit set - 1 */
+ /* There is one per CPU so that verified can be simultaneous. */
+ /* We assume we only have one CPU in userland. */
+ struct hash_desc hash_desc[1]; /* Container for the hash alg */
+ unsigned int digest_size;
+ sector_t sectors; /* Number of disk sectors used */
- /* bool verified; Full tree is verified */
- u8 root_digest[DM_BHT_MAX_DIGEST_SIZE];
- struct dm_bht_level *levels; /* in reverse order */
- /* Callback for reading from the hash device */
- dm_bht_callback read_cb;
+ /* bool verified; Full tree is verified */
+ u8 root_digest[DM_BHT_MAX_DIGEST_SIZE];
+ struct dm_bht_level* levels; /* in reverse order */
+ /* Callback for reading from the hash device */
+ dm_bht_callback read_cb;
};
/* Constructor for struct dm_bht instances. */
-int dm_bht_create(struct dm_bht *bht,
- unsigned int block_count,
- const char *alg_name);
+int dm_bht_create(struct dm_bht* bht,
+ unsigned int block_count,
+ const char* alg_name);
/* Destructor for struct dm_bht instances. Does not free @bht */
-int dm_bht_destroy(struct dm_bht *bht);
+int dm_bht_destroy(struct dm_bht* bht);
/* Basic accessors for struct dm_bht */
-sector_t dm_bht_sectors(const struct dm_bht *bht);
-void dm_bht_set_read_cb(struct dm_bht *bht, dm_bht_callback read_cb);
-int dm_bht_set_root_hexdigest(struct dm_bht *bht, const u8 *hexdigest);
-int dm_bht_root_hexdigest(struct dm_bht *bht, u8 *hexdigest, int available);
-void dm_bht_set_salt(struct dm_bht *bht, const char *hexsalt);
-int dm_bht_salt(struct dm_bht *bht, char *hexsalt);
+sector_t dm_bht_sectors(const struct dm_bht* bht);
+void dm_bht_set_read_cb(struct dm_bht* bht, dm_bht_callback read_cb);
+int dm_bht_set_root_hexdigest(struct dm_bht* bht, const u8* hexdigest);
+int dm_bht_root_hexdigest(struct dm_bht* bht, u8* hexdigest, int available);
+void dm_bht_set_salt(struct dm_bht* bht, const char* hexsalt);
+int dm_bht_salt(struct dm_bht* bht, char* hexsalt);
/* Functions for loading in data from disk for verification */
-bool dm_bht_is_populated(struct dm_bht *bht, unsigned int block);
-int dm_bht_populate(struct dm_bht *bht, void *read_cb_ctx,
- unsigned int block);
-int dm_bht_verify_block(struct dm_bht *bht, unsigned int block,
- const u8 *buffer, unsigned int offset);
-int dm_bht_zeroread_callback(void *ctx, sector_t start, u8 *dst, sector_t count,
- struct dm_bht_entry *entry);
-void dm_bht_read_completed(struct dm_bht_entry *entry, int status);
+bool dm_bht_is_populated(struct dm_bht* bht, unsigned int block);
+int dm_bht_populate(struct dm_bht* bht, void* read_cb_ctx, unsigned int block);
+int dm_bht_verify_block(struct dm_bht* bht,
+ unsigned int block,
+ const u8* buffer,
+ unsigned int offset);
+int dm_bht_zeroread_callback(void* ctx,
+ sector_t start,
+ u8* dst,
+ sector_t count,
+ struct dm_bht_entry* entry);
+void dm_bht_read_completed(struct dm_bht_entry* entry, int status);
/* Functions for converting indices to nodes. */
-static inline struct dm_bht_level *dm_bht_get_level(struct dm_bht *bht,
- int depth)
-{
- return &bht->levels[depth];
+static inline struct dm_bht_level* dm_bht_get_level(struct dm_bht* bht,
+ int depth) {
+ return &bht->levels[depth];
}
-static inline unsigned int dm_bht_get_level_shift(struct dm_bht *bht,
- int depth)
-{
- return (bht->depth - depth) * bht->node_count_shift;
+static inline unsigned int dm_bht_get_level_shift(struct dm_bht* bht,
+ int depth) {
+ return (bht->depth - depth) * bht->node_count_shift;
}
/* For the given depth, this is the entry index. At depth+1 it is the node
* index for depth.
*/
-static inline unsigned int dm_bht_index_at_level(struct dm_bht *bht,
- int depth,
- unsigned int leaf)
-{
- return leaf >> dm_bht_get_level_shift(bht, depth);
+static inline unsigned int dm_bht_index_at_level(struct dm_bht* bht,
+ int depth,
+ unsigned int leaf) {
+ return leaf >> dm_bht_get_level_shift(bht, depth);
}
-static inline u8 *dm_bht_node(struct dm_bht *bht,
- struct dm_bht_entry *entry,
- unsigned int node_index)
-{
- return &entry->nodes[node_index * bht->digest_size];
+static inline u8* dm_bht_node(struct dm_bht* bht,
+ struct dm_bht_entry* entry,
+ unsigned int node_index) {
+ return &entry->nodes[node_index * bht->digest_size];
}
-static inline struct dm_bht_entry *dm_bht_get_entry(struct dm_bht *bht,
- int depth,
- unsigned int block)
-{
- unsigned int index = dm_bht_index_at_level(bht, depth, block);
- struct dm_bht_level *level = dm_bht_get_level(bht, depth);
+static inline struct dm_bht_entry* dm_bht_get_entry(struct dm_bht* bht,
+ int depth,
+ unsigned int block) {
+ unsigned int index = dm_bht_index_at_level(bht, depth, block);
+ struct dm_bht_level* level = dm_bht_get_level(bht, depth);
- return &level->entries[index];
+ return &level->entries[index];
}
-static inline u8 *dm_bht_get_node(struct dm_bht *bht,
- struct dm_bht_entry *entry,
- int depth,
- unsigned int block)
-{
- unsigned int index = dm_bht_index_at_level(bht, depth, block);
+static inline u8* dm_bht_get_node(struct dm_bht* bht,
+ struct dm_bht_entry* entry,
+ int depth,
+ unsigned int block) {
+ unsigned int index = dm_bht_index_at_level(bht, depth, block);
- return dm_bht_node(bht, entry, index % bht->node_count);
+ return dm_bht_node(bht, entry, index % bht->node_count);
}
#ifdef __cplusplus
}
#endif
-#endif /* __LINUX_DM_BHT_H */
+#endif /* __LINUX_DM_BHT_H */
diff --git a/dm-bht_unittest.cc b/dm-bht_unittest.cc
index 10b9c2e..1f4a46e 100644
--- a/dm-bht_unittest.cc
+++ b/dm-bht_unittest.cc
@@ -16,17 +16,17 @@
// But disable verbose logging.
extern "C" {
#ifndef NDEBUG
-# undef NDEBUG
-# include "dm-bht.c"
-# define NDEBUG 1
+#undef NDEBUG
+#include "dm-bht.c"
+#define NDEBUG 1
#else
-# include "dm-bht.c"
+#include "dm-bht.c"
#endif
}
#include "dm-bht-userspace.h"
-void *my_memalign(size_t boundary, size_t size) {
- void * memptr;
+void* my_memalign(size_t boundary, size_t size) {
+ void* memptr;
if (posix_memalign(&memptr, boundary, size))
return NULL;
return memptr;
@@ -44,13 +44,13 @@
sector_t sectors;
// This should fail.
unsigned int blocks, total_blocks = 16384;
- u8 *data = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
- u8 *hash_data;
+ u8* data = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* hash_data;
blocks = total_blocks;
// Store all the block hashes of blocks of 0.
- memset(reinterpret_cast<void *>(data), 0, sizeof(data));
+ memset(reinterpret_cast<void*>(data), 0, sizeof(data));
EXPECT_EQ(0, dm_bht_create(&bht, blocks, "sha256"));
dm_bht_set_read_cb(&bht, dm_bht_zeroread_callback);
sectors = dm_bht_sectors(&bht);
@@ -62,10 +62,7 @@
} while (--blocks > 0);
// Load the tree from the pre-populated hash data
for (blocks = 0; blocks < total_blocks; blocks += bht.node_count)
- EXPECT_GE(dm_bht_populate(&bht,
- reinterpret_cast<void *>(this),
- blocks),
- 0);
+ EXPECT_GE(dm_bht_populate(&bht, reinterpret_cast<void*>(this), blocks), 0);
EXPECT_EQ(0, dm_bht_compute(&bht));
EXPECT_EQ(0, dm_bht_destroy(&bht));
delete hash_data;
@@ -74,9 +71,7 @@
class MemoryBhtTest : public ::testing::Test {
public:
- void SetUp() {
- bht_ = NULL;
- }
+ void SetUp() { bht_ = NULL; }
void TearDown() {
hash_data_.clear();
@@ -85,20 +80,20 @@
bht_ = NULL;
}
- int Read(sector_t start, u8 *dst, sector_t count) {
+ int Read(sector_t start, u8* dst, sector_t count) {
EXPECT_LT(start, sectors_);
EXPECT_EQ(verity_to_bytes(count), PAGE_SIZE);
- u8 *src = &hash_data_[verity_to_bytes(start)];
+ u8* src = &hash_data_[verity_to_bytes(start)];
memcpy(dst, src, verity_to_bytes(count));
return 0;
}
- static int ReadCallback(void *mbht_instance,
+ static int ReadCallback(void* mbht_instance,
sector_t start,
- u8 *dst,
+ u8* dst,
sector_t count,
- struct dm_bht_entry *entry) {
- MemoryBhtTest *mbht = reinterpret_cast<MemoryBhtTest *>(mbht_instance);
+ struct dm_bht_entry* entry) {
+ MemoryBhtTest* mbht = reinterpret_cast<MemoryBhtTest*>(mbht_instance);
mbht->Read(start, dst, count);
dm_bht_read_completed(entry, 0);
return 0;
@@ -107,11 +102,11 @@
protected:
// Creates a new dm_bht and sets it in the existing MemoryBht.
void SetupHash(const unsigned int total_blocks,
- const char *digest_algorithm,
- const char *salt,
- void *hash_data) {
+ const char* digest_algorithm,
+ const char* salt,
+ void* hash_data) {
struct dm_bht bht;
- u8 *data = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* data = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(data, 0, PAGE_SIZE);
@@ -134,8 +129,8 @@
free(data);
}
void SetupBht(const unsigned int total_blocks,
- const char *digest_algorithm,
- const char *salt) {
+ const char* digest_algorithm,
+ const char* salt) {
if (bht_)
delete bht_;
bht_ = new dm_bht;
@@ -153,13 +148,11 @@
// Load the tree from the pre-populated hash data
unsigned int blocks;
for (blocks = 0; blocks < total_blocks; blocks += bht_->node_count)
- EXPECT_GE(dm_bht_populate(bht_,
- reinterpret_cast<void *>(this),
- blocks),
+ EXPECT_GE(dm_bht_populate(bht_, reinterpret_cast<void*>(this), blocks),
0);
}
- struct dm_bht *bht_;
+ struct dm_bht* bht_;
std::vector<u8> hash_data_;
sector_t sectors_;
};
@@ -168,15 +161,14 @@
static const unsigned int total_blocks = 16384;
// Set the root hash for a 0-filled image
static const char kRootDigest[] =
- "45d65d6f9e5a962f4d80b5f1bd7a918152251c27bdad8c5f52b590c129833372";
+ "45d65d6f9e5a962f4d80b5f1bd7a918152251c27bdad8c5f52b590c129833372";
// A page of all zeros
- u8 *zero_page = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* zero_page = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(zero_page, 0, PAGE_SIZE);
SetupBht(total_blocks, "sha256", NULL);
- dm_bht_set_root_hexdigest(bht_,
- reinterpret_cast<const u8 *>(kRootDigest));
+ dm_bht_set_root_hexdigest(bht_, reinterpret_cast<const u8*>(kRootDigest));
for (unsigned int blocks = 0; blocks < total_blocks; ++blocks) {
DLOG(INFO) << "verifying block: " << blocks;
@@ -191,15 +183,14 @@
static const unsigned int total_blocks = 32;
// Set the root hash for a 0-filled image
static const char kRootDigest[] =
- "2d3a43008286f56536fa24dcdbf14d342f0548827e374210415c7be0b610d2ba";
+ "2d3a43008286f56536fa24dcdbf14d342f0548827e374210415c7be0b610d2ba";
// A page of all zeros
- u8 *zero_page = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* zero_page = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(zero_page, 0, PAGE_SIZE);
SetupBht(total_blocks, "sha256", NULL);
- dm_bht_set_root_hexdigest(bht_,
- reinterpret_cast<const u8 *>(kRootDigest));
+ dm_bht_set_root_hexdigest(bht_, reinterpret_cast<const u8*>(kRootDigest));
for (unsigned int blocks = 0; blocks < total_blocks; ++blocks) {
DLOG(INFO) << "verifying block: " << blocks;
@@ -214,15 +205,14 @@
static const unsigned int total_blocks = 217600;
// Set the root hash for a 0-filled image
static const char kRootDigest[] =
- "15d5a180b5080a1d43e3fbd1f2cd021d0fc3ea91a8e330bad468b980c2fd4d8b";
+ "15d5a180b5080a1d43e3fbd1f2cd021d0fc3ea91a8e330bad468b980c2fd4d8b";
// A page of all zeros
- u8 *zero_page = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* zero_page = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(zero_page, 0, PAGE_SIZE);
SetupBht(total_blocks, "sha256", NULL);
- dm_bht_set_root_hexdigest(bht_,
- reinterpret_cast<const u8 *>(kRootDigest));
+ dm_bht_set_root_hexdigest(bht_, reinterpret_cast<const u8*>(kRootDigest));
for (unsigned int blocks = 0; blocks < total_blocks; ++blocks) {
DLOG(INFO) << "verifying block: " << blocks;
@@ -237,15 +227,14 @@
static const unsigned int total_blocks = 16383;
// Set the root hash for a 0-filled image
static const char kRootDigest[] =
- "dc8cec4220d388b05ba75c853f858bb8cc25edfb1d5d2f3be6bdf9edfa66dc6a";
+ "dc8cec4220d388b05ba75c853f858bb8cc25edfb1d5d2f3be6bdf9edfa66dc6a";
// A page of all zeros
- u8 *zero_page = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* zero_page = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(zero_page, 0, PAGE_SIZE);
SetupBht(total_blocks, "sha256", NULL);
- dm_bht_set_root_hexdigest(bht_,
- reinterpret_cast<const u8 *>(kRootDigest));
+ dm_bht_set_root_hexdigest(bht_, reinterpret_cast<const u8*>(kRootDigest));
for (unsigned int blocks = 0; blocks < total_blocks; ++blocks) {
DLOG(INFO) << "verifying block: " << blocks;
@@ -260,15 +249,14 @@
static const unsigned int total_blocks = 16000;
// Set the root hash for a 0-filled image
static const char kRootDigest[] =
- "10832dd62c427bcf68c56c8de0d1f9c32b61d9e5ddf43c77c56a97b372ad4b07";
+ "10832dd62c427bcf68c56c8de0d1f9c32b61d9e5ddf43c77c56a97b372ad4b07";
// A page of all zeros
- u8 *zero_page = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* zero_page = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(zero_page, 0, PAGE_SIZE);
SetupBht(total_blocks, "sha256", NULL);
- dm_bht_set_root_hexdigest(bht_,
- reinterpret_cast<const u8 *>(kRootDigest));
+ dm_bht_set_root_hexdigest(bht_, reinterpret_cast<const u8*>(kRootDigest));
for (unsigned int blocks = 0; blocks < total_blocks; ++blocks) {
DLOG(INFO) << "verifying block: " << blocks;
@@ -283,22 +271,21 @@
static const unsigned int total_blocks = 16384;
// Set the root hash for a 0-filled image
static const char kRootDigest[] =
- "45d65d6f9e5a962f4d80b5f1bd7a918152251c27bdad8c5f52b590c129833372";
+ "45d65d6f9e5a962f4d80b5f1bd7a918152251c27bdad8c5f52b590c129833372";
// A page of all zeros
- u8 *zero_page = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* zero_page = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(zero_page, 0, PAGE_SIZE);
SetupBht(total_blocks, "sha256", NULL);
- dm_bht_set_root_hexdigest(bht_,
- reinterpret_cast<const u8 *>(kRootDigest));
+ dm_bht_set_root_hexdigest(bht_, reinterpret_cast<const u8*>(kRootDigest));
// TODO(wad) add tests for partial tree validity/verification
// Corrupt one has hblock
static const unsigned int kBadBlock = 256;
- u8 *bad_hash_block= (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* bad_hash_block = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(bad_hash_block, 'A', PAGE_SIZE);
EXPECT_EQ(dm_bht_store_block(bht_, kBadBlock, bad_hash_block), 0);
@@ -306,15 +293,16 @@
EXPECT_LT(dm_bht_verify_block(bht_, kBadBlock + 1, zero_page, 0), 0);
EXPECT_LT(dm_bht_verify_block(bht_, kBadBlock + 2, zero_page, 0), 0);
EXPECT_LT(dm_bht_verify_block(bht_, kBadBlock + (bht_->node_count / 2),
- zero_page, 0), 0);
+ zero_page, 0),
+ 0);
EXPECT_LT(dm_bht_verify_block(bht_, kBadBlock, zero_page, 0), 0);
// Verify that the prior entry is untouched and still safe
EXPECT_EQ(dm_bht_verify_block(bht_, kBadBlock - 1, zero_page, 0), 0);
// Same for the next entry
- EXPECT_EQ(dm_bht_verify_block(bht_, kBadBlock + bht_->node_count,
- zero_page, 0), 0);
+ EXPECT_EQ(
+ dm_bht_verify_block(bht_, kBadBlock + bht_->node_count, zero_page, 0), 0);
EXPECT_EQ(0, dm_bht_destroy(bht_));
free(bad_hash_block);
@@ -326,11 +314,10 @@
SetupBht(total_blocks, "sha256", NULL);
// Set the root hash for a 0-filled image
static const char kRootDigest[] =
- "45d65d6f9e5a962f4d80b5f1bd7a918152251c27bdad8c5f52b590c129833372";
- dm_bht_set_root_hexdigest(bht_,
- reinterpret_cast<const u8 *>(kRootDigest));
+ "45d65d6f9e5a962f4d80b5f1bd7a918152251c27bdad8c5f52b590c129833372";
+ dm_bht_set_root_hexdigest(bht_, reinterpret_cast<const u8*>(kRootDigest));
// A corrupt page
- u8 *bad_page = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* bad_page = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(bad_page, 'A', PAGE_SIZE);
@@ -349,17 +336,16 @@
static const unsigned int total_blocks = 16384;
// Set the root hash for a 0-filled image
static const char kRootDigest[] =
- "8015fea349568f5135ecc833bbc79c9179377207382b53c68d93190b286b1256";
+ "8015fea349568f5135ecc833bbc79c9179377207382b53c68d93190b286b1256";
static const char salt[] =
- "01ad1f06255d452d91337bf037953053cc3e452541db4b8ca05811bf3e2b6027";
+ "01ad1f06255d452d91337bf037953053cc3e452541db4b8ca05811bf3e2b6027";
// A page of all zeros
- u8 *zero_page = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* zero_page = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(zero_page, 0, PAGE_SIZE);
SetupBht(total_blocks, "sha256", salt);
- dm_bht_set_root_hexdigest(bht_,
- reinterpret_cast<const u8 *>(kRootDigest));
+ dm_bht_set_root_hexdigest(bht_, reinterpret_cast<const u8*>(kRootDigest));
for (unsigned int blocks = 0; blocks < total_blocks; ++blocks) {
DLOG(INFO) << "verifying block: " << blocks;
@@ -374,17 +360,17 @@
static const unsigned int total_blocks = 16384;
// Set the root hash for a 0-filled image
static const char kRootDigest[] =
- "8015fea349568f5135ecc833bbc79c9179377207382b53c68d93190b286b1256";
+ "8015fea349568f5135ecc833bbc79c9179377207382b53c68d93190b286b1256";
static const char salt[] =
- "01ad1f06255d452d91337bf037953053cc3e452541db4b8ca05811bf3e2b6027b2188a1d";
+ "01ad1f06255d452d91337bf037953053cc3e452541db4b8ca05811bf3e2b6027b2188a1"
+ "d";
// A page of all zeros
- u8 *zero_page = (u8 *)my_memalign(PAGE_SIZE, PAGE_SIZE);
+ u8* zero_page = (u8*)my_memalign(PAGE_SIZE, PAGE_SIZE);
memset(zero_page, 0, PAGE_SIZE);
SetupBht(total_blocks, "sha256", salt);
- dm_bht_set_root_hexdigest(bht_,
- reinterpret_cast<const u8 *>(kRootDigest));
+ dm_bht_set_root_hexdigest(bht_, reinterpret_cast<const u8*>(kRootDigest));
for (unsigned int blocks = 0; blocks < total_blocks; ++blocks) {
DLOG(INFO) << "verifying block: " << blocks;
diff --git a/file_hasher.cc b/file_hasher.cc
index 68d5458..81c20b2 100644
--- a/file_hasher.cc
+++ b/file_hasher.cc
@@ -19,21 +19,23 @@
namespace verity {
// Simple helper for Initialize.
-template<typename T>
+template <typename T>
static inline bool power_of_two(T num) {
- if (num == 0) return false;
- if (!(num & (num - 1))) return true;
+ if (num == 0)
+ return false;
+ if (!(num & (num - 1)))
+ return true;
return false;
}
-bool FileHasher::Initialize(simple_file::File *source,
- simple_file::File *destination,
+bool FileHasher::Initialize(simple_file::File* source,
+ simple_file::File* destination,
unsigned int blocks,
- const char *alg) {
+ const char* alg) {
if (!alg || !source || !destination) {
- LOG(ERROR) << "Invalid arguments supplied to Initialize";
- LOG(INFO) << "s: " << source << " d: " << destination;
- return false;
+ LOG(ERROR) << "Invalid arguments supplied to Initialize";
+ LOG(INFO) << "s: " << source << " d: " << destination;
+ return false;
}
if (source_ || destination_) {
LOG(ERROR) << "Initialize called more than once";
@@ -47,7 +49,7 @@
if (source->Size() % PAGE_SIZE) {
LOG(ERROR) << "The source file size must be divisible by the block size";
LOG(ERROR) << "Size: " << source->Size();
- LOG(INFO) << "Suggested size: " << ALIGN(source->Size(),PAGE_SIZE);
+ LOG(INFO) << "Suggested size: " << ALIGN(source->Size(), PAGE_SIZE);
return false;
}
}
@@ -94,15 +96,15 @@
return !dm_bht_compute(&tree_);
}
-const char *FileHasher::RandomSalt() {
+const char* FileHasher::RandomSalt() {
uint8_t buf[DM_BHT_SALT_SIZE];
const char urandom_path[] = "/dev/urandom";
simple_file::File source;
LOG_IF(FATAL, !source.Initialize(urandom_path, O_RDONLY, NULL))
- << "Failed to open the random source: " << urandom_path;
+ << "Failed to open the random source: " << urandom_path;
PLOG_IF(FATAL, !source.Read(sizeof(buf), buf))
- << "Failed to read the random source";
+ << "Failed to read the random source";
for (size_t i = 0; i < sizeof(buf); ++i)
sprintf(&random_salt_[i * 2], "%02x", buf[i]);
@@ -123,9 +125,12 @@
// TODO(wad) later support sizes that need 64-bit sectors.
unsigned int hash_start = 0;
unsigned int root_end = to_sector(block_limit_ << PAGE_SHIFT);
- if (colocated) hash_start = root_end;
- printf("0 %u verity payload=ROOT_DEV hashtree=HASH_DEV hashstart=%u alg=%s "
- "root_hexdigest=%s", root_end, hash_start, alg_, digest);
+ if (colocated)
+ hash_start = root_end;
+ printf(
+ "0 %u verity payload=ROOT_DEV hashtree=HASH_DEV hashstart=%u alg=%s "
+ "root_hexdigest=%s",
+ root_end, hash_start, alg_, digest);
if (have_salt)
printf(" salt=%s", hexsalt);
printf("\n");
diff --git a/file_hasher.h b/file_hasher.h
index c5fe51c..5d9e40a 100644
--- a/file_hasher.h
+++ b/file_hasher.h
@@ -19,47 +19,46 @@
// This class may not be used by multiple threads at once.
class FileHasher {
public:
- FileHasher() : source_(NULL),
- destination_(NULL),
- block_limit_(0),
- alg_(NULL) { }
+ FileHasher()
+ : source_(NULL), destination_(NULL), block_limit_(0), alg_(NULL) {}
// TODO(wad) add initialized_ variable to check.
- virtual bool Initialize(simple_file::File *source,
- simple_file::File *destination,
+ virtual bool Initialize(simple_file::File* source,
+ simple_file::File* destination,
unsigned int blocks,
- const char *alg);
+ const char* alg);
virtual bool Hash();
virtual bool Store();
// Print a table to stdout which contains a dmsetup compatible format
virtual void PrintTable(bool colocated);
- virtual const char *RandomSalt();
- virtual void set_salt(const char *salt) {
+ virtual const char* RandomSalt();
+ virtual void set_salt(const char* salt) {
if (!strcmp(salt, "random"))
salt = RandomSalt();
dm_bht_set_salt(&tree_, salt);
salt_ = salt;
}
- virtual const char *salt(void) { return salt_; }
+ virtual const char* salt(void) { return salt_; }
- virtual ~FileHasher() {};
- static int WriteCallback(void *file,
+ virtual ~FileHasher(){};
+ static int WriteCallback(void* file,
sector_t start,
- u8 *dst,
+ u8* dst,
sector_t count,
- struct dm_bht_entry *entry);
+ struct dm_bht_entry* entry);
+
private:
- simple_file::File *source_;
- simple_file::File *destination_;
+ simple_file::File* source_;
+ simple_file::File* destination_;
unsigned int block_limit_;
- const char *alg_;
- const char *salt_;
+ const char* alg_;
+ const char* salt_;
char random_salt_[DM_BHT_SALT_SIZE * 2 + 1];
- u8 *hash_data_;
+ u8* hash_data_;
struct dm_bht tree_;
sector_t sectors_;
};
} // namespace verity
-#endif // VERITY_FILE_HASHER_H__
+#endif // VERITY_FILE_HASHER_H__
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
index c911875..c809753 100644
--- a/include/asm-generic/bitops/fls.h
+++ b/include/asm-generic/bitops/fls.h
@@ -12,33 +12,32 @@
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int fls(int x)
-{
- int r = 32;
+static __always_inline int fls(int x) {
+ int r = 32;
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
}
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
diff --git a/include/asm/page.h b/include/asm/page.h
index 5ebe34d..af8e175 100644
--- a/include/asm/page.h
+++ b/include/asm/page.h
@@ -11,4 +11,4 @@
#define PAGE_SIZE 4096
#define PAGE_SHIFT 12
-#endif /* VERITY_INCLUDE_ASM_PAGE_H_ */
+#endif /* VERITY_INCLUDE_ASM_PAGE_H_ */
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 531321f..7500395 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -16,39 +16,41 @@
#include <linux/crypto.h>
struct shash_desc {
- u32 flags;
+ u32 flags;
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
+ void* __ctx[] CRYPTO_MINALIGN_ATTR;
};
struct shash_alg {
- int (*init)(struct shash_desc *desc);
- int (*update)(struct shash_desc *desc, const u8 *data,
- unsigned int len);
- int (*final)(struct shash_desc *desc, u8 *out);
- int (*finup)(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
- int (*digest)(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
- int (*export)(struct shash_desc *desc, void *out);
- int (*import)(struct shash_desc *desc, const void *in);
+ int (*init)(struct shash_desc* desc);
+ int (*update)(struct shash_desc* desc, const u8* data, unsigned int len);
+ int (*final)(struct shash_desc* desc, u8* out);
+ int (*finup)(struct shash_desc* desc,
+ const u8* data,
+ unsigned int len,
+ u8* out);
+ int (*digest)(struct shash_desc* desc,
+ const u8* data,
+ unsigned int len,
+ u8* out);
+ int (*export)(struct shash_desc* desc, void* out);
+ int (*import)(struct shash_desc* desc, const void* in);
- unsigned int descsize;
+ unsigned int descsize;
- unsigned int digestsize;
- unsigned int statesize;
+ unsigned int digestsize;
+ unsigned int statesize;
- struct crypto_alg base;
+ struct crypto_alg base;
};
struct hash_tfm {
- const struct shash_alg *alg;
- struct shash_desc desc;
+ const struct shash_alg* alg;
+ struct shash_desc desc;
};
-static inline void *shash_desc_ctx(struct shash_desc *desc)
-{
- return desc->__ctx;
+static inline void* shash_desc_ctx(struct shash_desc* desc) {
+ return desc->__ctx;
}
-#endif /* _CRYPTO_HASH_H */
+#endif /* _CRYPTO_HASH_H */
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 985580e..5635640 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -10,10 +10,9 @@
#include <crypto/hash.h>
-int crypto_register_shash(struct shash_alg *alg);
-static inline int crypto_unregister_shash(struct shash_alg *alg)
-{
- return 0;
+int crypto_register_shash(struct shash_alg* alg);
+static inline int crypto_unregister_shash(struct shash_alg* alg) {
+ return 0;
}
#endif
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
index 65f299b..f131f88 100644
--- a/include/crypto/md5.h
+++ b/include/crypto/md5.h
@@ -3,15 +3,15 @@
#include <linux/types.h>
-#define MD5_DIGEST_SIZE 16
-#define MD5_HMAC_BLOCK_SIZE 64
-#define MD5_BLOCK_WORDS 16
-#define MD5_HASH_WORDS 4
+#define MD5_DIGEST_SIZE 16
+#define MD5_HMAC_BLOCK_SIZE 64
+#define MD5_BLOCK_WORDS 16
+#define MD5_HASH_WORDS 4
struct md5_state {
- u32 hash[MD5_HASH_WORDS];
- u32 block[MD5_BLOCK_WORDS];
- u64 byte_count;
+ u32 hash[MD5_HASH_WORDS];
+ u32 block[MD5_BLOCK_WORDS];
+ u64 byte_count;
};
#endif
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 069e85b..65ddde1 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -7,79 +7,79 @@
#include <linux/types.h>
-#define SHA1_DIGEST_SIZE 20
-#define SHA1_BLOCK_SIZE 64
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
-#define SHA224_DIGEST_SIZE 28
-#define SHA224_BLOCK_SIZE 64
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
-#define SHA256_DIGEST_SIZE 32
-#define SHA256_BLOCK_SIZE 64
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
-#define SHA384_DIGEST_SIZE 48
-#define SHA384_BLOCK_SIZE 128
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
-#define SHA512_DIGEST_SIZE 64
-#define SHA512_BLOCK_SIZE 128
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
-#define SHA1_H0 0x67452301UL
-#define SHA1_H1 0xefcdab89UL
-#define SHA1_H2 0x98badcfeUL
-#define SHA1_H3 0x10325476UL
-#define SHA1_H4 0xc3d2e1f0UL
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
-#define SHA224_H0 0xc1059ed8UL
-#define SHA224_H1 0x367cd507UL
-#define SHA224_H2 0x3070dd17UL
-#define SHA224_H3 0xf70e5939UL
-#define SHA224_H4 0xffc00b31UL
-#define SHA224_H5 0x68581511UL
-#define SHA224_H6 0x64f98fa7UL
-#define SHA224_H7 0xbefa4fa4UL
+#define SHA224_H0 0xc1059ed8UL
+#define SHA224_H1 0x367cd507UL
+#define SHA224_H2 0x3070dd17UL
+#define SHA224_H3 0xf70e5939UL
+#define SHA224_H4 0xffc00b31UL
+#define SHA224_H5 0x68581511UL
+#define SHA224_H6 0x64f98fa7UL
+#define SHA224_H7 0xbefa4fa4UL
-#define SHA256_H0 0x6a09e667UL
-#define SHA256_H1 0xbb67ae85UL
-#define SHA256_H2 0x3c6ef372UL
-#define SHA256_H3 0xa54ff53aUL
-#define SHA256_H4 0x510e527fUL
-#define SHA256_H5 0x9b05688cUL
-#define SHA256_H6 0x1f83d9abUL
-#define SHA256_H7 0x5be0cd19UL
+#define SHA256_H0 0x6a09e667UL
+#define SHA256_H1 0xbb67ae85UL
+#define SHA256_H2 0x3c6ef372UL
+#define SHA256_H3 0xa54ff53aUL
+#define SHA256_H4 0x510e527fUL
+#define SHA256_H5 0x9b05688cUL
+#define SHA256_H6 0x1f83d9abUL
+#define SHA256_H7 0x5be0cd19UL
-#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
-#define SHA384_H1 0x629a292a367cd507ULL
-#define SHA384_H2 0x9159015a3070dd17ULL
-#define SHA384_H3 0x152fecd8f70e5939ULL
-#define SHA384_H4 0x67332667ffc00b31ULL
-#define SHA384_H5 0x8eb44a8768581511ULL
-#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
-#define SHA384_H7 0x47b5481dbefa4fa4ULL
+#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1 0x629a292a367cd507ULL
+#define SHA384_H2 0x9159015a3070dd17ULL
+#define SHA384_H3 0x152fecd8f70e5939ULL
+#define SHA384_H4 0x67332667ffc00b31ULL
+#define SHA384_H5 0x8eb44a8768581511ULL
+#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7 0x47b5481dbefa4fa4ULL
-#define SHA512_H0 0x6a09e667f3bcc908ULL
-#define SHA512_H1 0xbb67ae8584caa73bULL
-#define SHA512_H2 0x3c6ef372fe94f82bULL
-#define SHA512_H3 0xa54ff53a5f1d36f1ULL
-#define SHA512_H4 0x510e527fade682d1ULL
-#define SHA512_H5 0x9b05688c2b3e6c1fULL
-#define SHA512_H6 0x1f83d9abfb41bd6bULL
-#define SHA512_H7 0x5be0cd19137e2179ULL
+#define SHA512_H0 0x6a09e667f3bcc908ULL
+#define SHA512_H1 0xbb67ae8584caa73bULL
+#define SHA512_H2 0x3c6ef372fe94f82bULL
+#define SHA512_H3 0xa54ff53a5f1d36f1ULL
+#define SHA512_H4 0x510e527fade682d1ULL
+#define SHA512_H5 0x9b05688c2b3e6c1fULL
+#define SHA512_H6 0x1f83d9abfb41bd6bULL
+#define SHA512_H7 0x5be0cd19137e2179ULL
struct sha1_state {
- u64 count;
- u32 state[SHA1_DIGEST_SIZE / 4];
- u8 buffer[SHA1_BLOCK_SIZE];
+ u64 count;
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u8 buffer[SHA1_BLOCK_SIZE];
};
struct sha256_state {
- u64 count;
- u32 state[SHA256_DIGEST_SIZE / 4];
- u8 buf[SHA256_BLOCK_SIZE];
+ u64 count;
+ u32 state[SHA256_DIGEST_SIZE / 4];
+ u8 buf[SHA256_BLOCK_SIZE];
};
struct sha512_state {
- u64 count[2];
- u64 state[SHA512_DIGEST_SIZE / 8];
- u8 buf[SHA512_BLOCK_SIZE];
+ u64 count[2];
+ u64 state[SHA512_DIGEST_SIZE / 8];
+ u8 buf[SHA512_BLOCK_SIZE];
};
#endif
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 0428e03..33f9ded 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -8,16 +8,15 @@
#ifndef VERITY_INCLUDE_LINUX_BITOPS_H_
#define VERITY_INCLUDE_LINUX_BITOPS_H_
-
-#define BITS_PER_BYTE 8
+#define BITS_PER_BYTE 8
/* For verity, this is based on the compilation target and not
* CONFIG_64BIT. */
-#define BITS_PER_LONG (sizeof(long) * BITS_PER_BYTE)
+#define BITS_PER_LONG (sizeof(long) * BITS_PER_BYTE)
-#define BIT(nr) (1UL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BIT(nr) (1UL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#include <asm-generic/bitops/fls.h>
#include <strings.h>
@@ -28,9 +27,8 @@
* @word: value to rotate
* @shift: bits to roll
*/
-static inline __u32 rol32(__u32 word, unsigned int shift)
-{
- return (word << shift) | (word >> (32 - shift));
+static inline __u32 rol32(__u32 word, unsigned int shift) {
+ return (word << shift) | (word >> (32 - shift));
}
/**
@@ -38,9 +36,8 @@
* @word: value to rotate
* @shift: bits to roll
*/
-static inline __u32 ror32(__u32 word, unsigned int shift)
-{
- return (word >> shift) | (word << (32 - shift));
+static inline __u32 ror32(__u32 word, unsigned int shift) {
+ return (word >> shift) | (word << (32 - shift));
}
-#endif /* VERITY_INCLUDE_LINUX_BITOPS_H_ */
+#endif /* VERITY_INCLUDE_LINUX_BITOPS_H_ */
diff --git a/include/linux/bug.h b/include/linux/bug.h
index cc73612..f55709a 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -11,12 +11,13 @@
#include <stdlib.h>
#define BUG() abort()
-#define BUG_ON(val) { \
- if (val) { \
- fprintf(stderr, "!! %s:%s:%i: BUG_ON: %s\n\n", \
- __FILE__, __func__, __LINE__, #val); \
- abort(); \
- } \
-}
+#define BUG_ON(val) \
+ { \
+ if (val) { \
+ fprintf(stderr, "!! %s:%s:%i: BUG_ON: %s\n\n", __FILE__, __func__, \
+ __LINE__, #val); \
+ abort(); \
+ } \
+ }
-#endif /* VERITY_INCLUDE_LINUX_BUG_ */
+#endif /* VERITY_INCLUDE_LINUX_BUG_ */
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 04a2e10..61e9a6c 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -13,33 +13,34 @@
#define CRYPTO_ALG_TYPE_SHASH 0
-#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(32)))
+#define CRYPTO_MINALIGN_ATTR __attribute__((__aligned__(32)))
#define CRYPTO_MAX_ALG_NAME 64
#include <linux/types.h>
struct crypto_alg {
- u32 cra_flags;
- unsigned int cra_blocksize;
+ u32 cra_flags;
+ unsigned int cra_blocksize;
- char cra_name[CRYPTO_MAX_ALG_NAME];
- char cra_driver_name[CRYPTO_MAX_ALG_NAME];
+ char cra_name[CRYPTO_MAX_ALG_NAME];
+ char cra_driver_name[CRYPTO_MAX_ALG_NAME];
- void *cra_module;
+ void* cra_module;
};
struct hash_tfm;
struct hash_desc {
- struct hash_tfm *tfm;
+ struct hash_tfm* tfm;
};
-struct hash_tfm *crypto_alloc_hash(const char *alg_name, int a, int b);
-void crypto_free_hash(struct hash_tfm *tfm);
-unsigned int crypto_hash_digestsize(struct hash_tfm *tfm);
-int crypto_hash_init(struct hash_desc *h);
-int crypto_hash_update(struct hash_desc *h, const u8 *buffer,
- unsigned int size);
-int crypto_hash_final(struct hash_desc *h, u8 *dst);
+struct hash_tfm* crypto_alloc_hash(const char* alg_name, int a, int b);
+void crypto_free_hash(struct hash_tfm* tfm);
+unsigned int crypto_hash_digestsize(struct hash_tfm* tfm);
+int crypto_hash_init(struct hash_desc* h);
+int crypto_hash_update(struct hash_desc* h,
+ const u8* buffer,
+ unsigned int size);
+int crypto_hash_final(struct hash_desc* h, u8* dst);
-#endif /* VERITY_INCLUDE_LINUX_CRYPTO_H_ */
+#endif /* VERITY_INCLUDE_LINUX_CRYPTO_H_ */
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index ecc6cc9..61043f7 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -5,12 +5,12 @@
#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
#define SHA_WORKSPACE_WORDS 16
-void sha_init(__u32 *buf);
-void sha_transform(__u32 *digest, const u8 *data, __u32 *W);
+void sha_init(__u32* buf);
+void sha_transform(__u32* digest, const u8* data, __u32* W);
#define MD5_DIGEST_WORDS 4
#define MD5_MESSAGE_BYTES 64
-void md5_transform(__u32 *hash, __u32 const *in);
+void md5_transform(__u32* hash, __u32 const* in);
#endif
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8e19b55..57eec28 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -14,11 +14,12 @@
#endif
#define DMLOG(fmt, args...) \
- fprintf(stderr, "dm:" DM_MSG_PREFIX fmt "\n", ##args)
+ fprintf(stderr, "dm:" DM_MSG_PREFIX fmt "\n", ##args)
#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, args...) DMLOG("[DEBUG] " fmt, ##args)
#else
-#define DMDEBUG(fmt, args...) { }
+#define DMDEBUG(fmt, args...) \
+ {}
#endif
#define DMINFO(fmt, args...) DMLOG("[INFO] " fmt, ##args)
#define DMERR(fmt, args...) DMLOG("[ERR] " fmt, ##args)
@@ -30,4 +31,4 @@
#define to_sector(x) ((x) >> SECTOR_SHIFT)
#define verity_to_bytes(x) ((x) << SECTOR_SHIFT)
-#endif /* VERITY_INCLUDE_LINUX_DEVICE_MAPPER_H_ */
+#endif /* VERITY_INCLUDE_LINUX_DEVICE_MAPPER_H_ */
diff --git a/include/linux/init.h b/include/linux/init.h
index d1527bb..ed31bfc 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -11,13 +11,14 @@
#define __init __attribute__((constructor))
#define __exit __attribute__((unused))
-#define module_init(x) int mod_init_##x(void) { return x(); }
+#define module_init(x) \
+ int mod_init_##x(void) { return x(); }
#define module_exit(x)
-#define CALL_MODULE_INIT(x) \
- do { \
- extern int mod_init_##x(void); \
- mod_init_##x(); \
- } while (0)
+#define CALL_MODULE_INIT(x) \
+ do { \
+ extern int mod_init_##x(void); \
+ mod_init_##x(); \
+ } while (0)
-#endif /* VERITY_INCLUDE_LINUX_INIT_H_ */
+#endif /* VERITY_INCLUDE_LINUX_INIT_H_ */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f21a941..c415b4a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -10,24 +10,28 @@
#include_next <linux/kernel.h>
-#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
-#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a)-1)) == 0)
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
-#define MIN(x, y) ({ \
- typeof(x) _min1 = (x); \
- typeof(y) _min2 = (y); \
- (void) (&_min1 == &_min2); \
- _min1 < _min2 ? _min1 : _min2; })
+#define MIN(x, y) \
+ ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void)(&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; \
+ })
-#define MAX(x, y) ({ \
- typeof(x) _max1 = (x); \
- typeof(y) _max2 = (y); \
- (void) (&_max1 == &_max2); \
- _max1 > _max2 ? _max1 : _max2; })
+#define MAX(x, y) \
+ ({ \
+ typeof(x) _max1 = (x); \
+ typeof(y) _max2 = (y); \
+ (void)(&_max1 == &_max2); \
+ _max1 > _max2 ? _max1 : _max2; \
+ })
#include <linux/bitops.h>
-#endif /* VERITY_INCLUDE_LINUX_KERNEL_H_ */
+#endif /* VERITY_INCLUDE_LINUX_KERNEL_H_ */
diff --git a/include/linux/types.h b/include/linux/types.h
index 541386b..1cddebf 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -20,4 +20,4 @@
/* Assume CONFIG_LBDAF */
typedef __u64 sector_t;
-#endif /* VERITY_INCLUDE_LINUX_TYPES_H_ */
+#endif /* VERITY_INCLUDE_LINUX_TYPES_H_ */
diff --git a/kernel/crypto.c b/kernel/crypto.c
index 495d39b..466444d 100644
--- a/kernel/crypto.c
+++ b/kernel/crypto.c
@@ -12,71 +12,66 @@
#include <crypto/hash.h>
static size_t num_hashes = 0;
-static struct shash_alg **hashes = NULL;
+static struct shash_alg** hashes = NULL;
-int crypto_register_shash(struct shash_alg *alg)
-{
- hashes = realloc(hashes, sizeof(*hashes) * (num_hashes + 1));
- hashes[num_hashes++] = alg;
- return 0;
+int crypto_register_shash(struct shash_alg* alg) {
+ hashes = realloc(hashes, sizeof(*hashes) * (num_hashes + 1));
+ hashes[num_hashes++] = alg;
+ return 0;
}
-struct hash_tfm *crypto_alloc_hash(const char *alg_name, int a, int b)
-{
- size_t i;
- struct hash_tfm *tfm;
+struct hash_tfm* crypto_alloc_hash(const char* alg_name, int a, int b) {
+ size_t i;
+ struct hash_tfm* tfm;
- if (!hashes) {
- /* need to initialize our world */
- CALL_MODULE_INIT(md5_mod_init);
- CALL_MODULE_INIT(sha1_generic_mod_init);
- CALL_MODULE_INIT(sha256_generic_mod_init);
- }
- BUG_ON(!hashes);
+ if (!hashes) {
+ /* need to initialize our world */
+ CALL_MODULE_INIT(md5_mod_init);
+ CALL_MODULE_INIT(sha1_generic_mod_init);
+ CALL_MODULE_INIT(sha256_generic_mod_init);
+ }
+ BUG_ON(!hashes);
- for (i = 0; i < num_hashes; ++i)
- if (!strcasecmp(alg_name, hashes[i]->base.cra_name))
- break;
- BUG_ON(i == num_hashes);
+ for (i = 0; i < num_hashes; ++i)
+ if (!strcasecmp(alg_name, hashes[i]->base.cra_name))
+ break;
+ BUG_ON(i == num_hashes);
- tfm = calloc(sizeof(*tfm) + hashes[i]->statesize, 1);
- BUG_ON(!tfm);
- tfm->alg = hashes[i];
+ tfm = calloc(sizeof(*tfm) + hashes[i]->statesize, 1);
+ BUG_ON(!tfm);
+ tfm->alg = hashes[i];
- return tfm;
+ return tfm;
}
-void crypto_free_hash(struct hash_tfm *tfm)
-{
- free(tfm);
+void crypto_free_hash(struct hash_tfm* tfm) {
+ free(tfm);
}
-unsigned int crypto_hash_digestsize(struct hash_tfm *tfm)
-{
- BUG_ON(!tfm);
- return tfm->alg->digestsize;
+unsigned int crypto_hash_digestsize(struct hash_tfm* tfm) {
+ BUG_ON(!tfm);
+ return tfm->alg->digestsize;
}
-int crypto_hash_init(struct hash_desc *h)
-{
- const struct shash_alg *alg = h->tfm->alg;
- struct shash_desc *desc = &h->tfm->desc;
+int crypto_hash_init(struct hash_desc* h) {
+ const struct shash_alg* alg = h->tfm->alg;
+ struct shash_desc* desc = &h->tfm->desc;
- return alg->init(desc);
+ return alg->init(desc);
}
-int crypto_hash_update(struct hash_desc *h, const u8 *buffer, unsigned int size)
-{
- const struct shash_alg *alg = h->tfm->alg;
- struct shash_desc *desc = &h->tfm->desc;
+int crypto_hash_update(struct hash_desc* h,
+ const u8* buffer,
+ unsigned int size) {
+ const struct shash_alg* alg = h->tfm->alg;
+ struct shash_desc* desc = &h->tfm->desc;
- return alg->update(desc, buffer, size);
+ return alg->update(desc, buffer, size);
}
-int crypto_hash_final(struct hash_desc *h, u8 *dst)
-{
- const struct shash_alg *alg = h->tfm->alg;
- struct shash_desc *desc = &h->tfm->desc;
+int crypto_hash_final(struct hash_desc* h, u8* dst) {
+ const struct shash_alg* alg = h->tfm->alg;
+ struct shash_desc* desc = &h->tfm->desc;
- return alg->final(desc, dst);
+ return alg->final(desc, dst);
}
diff --git a/kernel/md5.c b/kernel/md5.c
index f5a4693..287c2af 100644
--- a/kernel/md5.c
+++ b/kernel/md5.c
@@ -1,93 +1,92 @@
#include <linux/kernel.h>
#include <linux/cryptohash.h>
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
#define MD5STEP(f, w, x, y, z, in, s) \
- (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
+ (w += f(x, y, z) + in, w = (w << s | w >> (32 - s)) + x)
-void md5_transform(__u32 *hash, __u32 const *in)
-{
- u32 a, b, c, d;
+void md5_transform(__u32* hash, __u32 const* in) {
+ u32 a, b, c, d;
- a = hash[0];
- b = hash[1];
- c = hash[2];
- d = hash[3];
+ a = hash[0];
+ b = hash[1];
+ c = hash[2];
+ d = hash[3];
- MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
- MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
- MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
- MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
- MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
- MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
- MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
- MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
- MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
- MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
- MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
- MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
- MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
- MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
- MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
- MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+ MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+ MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+ MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+ MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+ MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+ MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+ MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+ MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+ MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+ MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+ MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+ MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+ MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+ MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+ MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+ MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
- MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
- MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
- MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
- MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
- MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
- MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
- MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
- MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
- MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
- MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
- MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
- MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
- MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
- MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
- MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
- MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+ MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+ MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+ MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+ MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+ MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+ MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+ MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+ MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+ MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+ MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+ MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+ MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+ MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+ MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+ MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+ MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
- MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
- MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
- MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
- MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
- MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
- MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
- MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
- MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
- MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
- MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
- MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
- MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
- MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
- MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
- MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
- MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+ MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+ MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+ MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+ MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+ MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+ MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+ MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+ MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+ MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+ MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+ MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+ MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+ MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+ MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+ MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+ MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
- MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
- MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
- MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
- MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
- MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
- MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
- MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
- MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
- MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
- MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
- MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
- MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
- MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
- MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
- MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
- MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+ MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+ MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+ MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+ MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+ MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+ MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+ MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+ MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+ MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+ MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+ MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+ MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+ MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+ MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+ MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+ MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
- hash[0] += a;
- hash[1] += b;
- hash[2] += c;
- hash[3] += d;
+ hash[0] += a;
+ hash[1] += b;
+ hash[2] += c;
+ hash[3] += d;
}
diff --git a/kernel/md5_generic.c b/kernel/md5_generic.c
index 1a9a966..5449d9e 100644
--- a/kernel/md5_generic.c
+++ b/kernel/md5_generic.c
@@ -24,142 +24,130 @@
#include <asm/byteorder.h>
/* XXX: this stuff can be optimized */
-static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __le32_to_cpus(buf);
- buf++;
- }
+static inline void le32_to_cpu_array(u32* buf, unsigned int words) {
+ while (words--) {
+ __le32_to_cpus(buf);
+ buf++;
+ }
}
-static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __cpu_to_le32s(buf);
- buf++;
- }
+static inline void cpu_to_le32_array(u32* buf, unsigned int words) {
+ while (words--) {
+ __cpu_to_le32s(buf);
+ buf++;
+ }
}
-static inline void md5_transform_helper(struct md5_state *ctx)
-{
- le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
- md5_transform(ctx->hash, ctx->block);
+static inline void md5_transform_helper(struct md5_state* ctx) {
+ le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
+ md5_transform(ctx->hash, ctx->block);
}
-static int md5_init(struct shash_desc *desc)
-{
- struct md5_state *mctx = shash_desc_ctx(desc);
+static int md5_init(struct shash_desc* desc) {
+ struct md5_state* mctx = shash_desc_ctx(desc);
- mctx->hash[0] = 0x67452301;
- mctx->hash[1] = 0xefcdab89;
- mctx->hash[2] = 0x98badcfe;
- mctx->hash[3] = 0x10325476;
- mctx->byte_count = 0;
+ mctx->hash[0] = 0x67452301;
+ mctx->hash[1] = 0xefcdab89;
+ mctx->hash[2] = 0x98badcfe;
+ mctx->hash[3] = 0x10325476;
+ mctx->byte_count = 0;
- return 0;
+ return 0;
}
-static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
-{
- struct md5_state *mctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+static int md5_update(struct shash_desc* desc,
+ const u8* data,
+ unsigned int len) {
+ struct md5_state* mctx = shash_desc_ctx(desc);
+ const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
- mctx->byte_count += len;
+ mctx->byte_count += len;
- if (avail > len) {
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, len);
- return 0;
- }
+ if (avail > len) {
+ memcpy((char*)mctx->block + (sizeof(mctx->block) - avail), data, len);
+ return 0;
+ }
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, avail);
+ memcpy((char*)mctx->block + (sizeof(mctx->block) - avail), data, avail);
- md5_transform_helper(mctx);
- data += avail;
- len -= avail;
+ md5_transform_helper(mctx);
+ data += avail;
+ len -= avail;
- while (len >= sizeof(mctx->block)) {
- memcpy(mctx->block, data, sizeof(mctx->block));
- md5_transform_helper(mctx);
- data += sizeof(mctx->block);
- len -= sizeof(mctx->block);
- }
+ while (len >= sizeof(mctx->block)) {
+ memcpy(mctx->block, data, sizeof(mctx->block));
+ md5_transform_helper(mctx);
+ data += sizeof(mctx->block);
+ len -= sizeof(mctx->block);
+ }
- memcpy(mctx->block, data, len);
+ memcpy(mctx->block, data, len);
- return 0;
+ return 0;
}
-static int md5_final(struct shash_desc *desc, u8 *out)
-{
- struct md5_state *mctx = shash_desc_ctx(desc);
- const unsigned int offset = mctx->byte_count & 0x3f;
- char *p = (char *)mctx->block + offset;
- int padding = 56 - (offset + 1);
+static int md5_final(struct shash_desc* desc, u8* out) {
+ struct md5_state* mctx = shash_desc_ctx(desc);
+ const unsigned int offset = mctx->byte_count & 0x3f;
+ char* p = (char*)mctx->block + offset;
+ int padding = 56 - (offset + 1);
- *p++ = 0x80;
- if (padding < 0) {
- memset(p, 0x00, padding + sizeof (u64));
- md5_transform_helper(mctx);
- p = (char *)mctx->block;
- padding = 56;
- }
+ *p++ = 0x80;
+ if (padding < 0) {
+ memset(p, 0x00, padding + sizeof(u64));
+ md5_transform_helper(mctx);
+ p = (char*)mctx->block;
+ padding = 56;
+ }
- memset(p, 0, padding);
- mctx->block[14] = mctx->byte_count << 3;
- mctx->block[15] = mctx->byte_count >> 29;
- le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
- sizeof(u64)) / sizeof(u32));
- md5_transform(mctx->hash, mctx->block);
- cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
- memcpy(out, mctx->hash, sizeof(mctx->hash));
- memset(mctx, 0, sizeof(*mctx));
+ memset(p, 0, padding);
+ mctx->block[14] = mctx->byte_count << 3;
+ mctx->block[15] = mctx->byte_count >> 29;
+ le32_to_cpu_array(mctx->block,
+ (sizeof(mctx->block) - sizeof(u64)) / sizeof(u32));
+ md5_transform(mctx->hash, mctx->block);
+ cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
+ memcpy(out, mctx->hash, sizeof(mctx->hash));
+ memset(mctx, 0, sizeof(*mctx));
- return 0;
+ return 0;
}
-static int md5_export(struct shash_desc *desc, void *out)
-{
- struct md5_state *ctx = shash_desc_ctx(desc);
+static int md5_export(struct shash_desc* desc, void* out) {
+ struct md5_state* ctx = shash_desc_ctx(desc);
- memcpy(out, ctx, sizeof(*ctx));
- return 0;
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
}
-static int md5_import(struct shash_desc *desc, const void *in)
-{
- struct md5_state *ctx = shash_desc_ctx(desc);
+static int md5_import(struct shash_desc* desc, const void* in) {
+ struct md5_state* ctx = shash_desc_ctx(desc);
- memcpy(ctx, in, sizeof(*ctx));
- return 0;
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
}
-static struct shash_alg alg = {
- .digestsize = MD5_DIGEST_SIZE,
- .init = md5_init,
- .update = md5_update,
- .final = md5_final,
- .export = md5_export,
- .import = md5_import,
- .descsize = sizeof(struct md5_state),
- .statesize = sizeof(struct md5_state),
- .base = {
- .cra_name = "md5",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_module = NULL,
- }
-};
+static struct shash_alg alg = {.digestsize = MD5_DIGEST_SIZE,
+ .init = md5_init,
+ .update = md5_update,
+ .final = md5_final,
+ .export = md5_export,
+ .import = md5_import,
+ .descsize = sizeof(struct md5_state),
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_module = NULL,
+ }};
-static int __init md5_mod_init(void)
-{
- return crypto_register_shash(&alg);
+static int __init md5_mod_init(void) {
+ return crypto_register_shash(&alg);
}
-static void __exit md5_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
+static void __exit md5_mod_fini(void) {
+ crypto_unregister_shash(&alg);
}
module_init(md5_mod_init);
diff --git a/kernel/sha1.c b/kernel/sha1.c
index 33d9440..b7d2a2b 100644
--- a/kernel/sha1.c
+++ b/kernel/sha1.c
@@ -31,43 +31,53 @@
*/
#ifdef CONFIG_X86
- #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
+#define setW(x, val) (*(volatile __u32*)&W(x) = (val))
#elif defined(CONFIG_ARM)
- #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
+#define setW(x, val) \
+ do { \
+ W(x) = (val); \
+ __asm__("" ::: "memory"); \
+ } while (0)
#else
- #define setW(x, val) (W(x) = (val))
+#define setW(x, val) (W(x) = (val))
#endif
/* This "rolls" over the 512-bit array */
#define W(x) (array[(x)&15])
-static inline u32 __get_unaligned_be32(const u8 *p)
-{
- return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+static inline u32 __get_unaligned_be32(const u8* p) {
+ return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_be32((const u8 *)p);
+static inline u32 get_unaligned_be32(const void* p) {
+ return __get_unaligned_be32((const u8*)p);
}
/*
* Where do we get the source from? The first 16 iterations get it from
* the input data, the next mix it from the 512-bit array.
*/
-#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
-#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
+#define SHA_SRC(t) get_unaligned_be32((__u32*)data + t)
+#define SHA_MIX(t) rol32(W(t + 13) ^ W(t + 8) ^ W(t + 2) ^ W(t), 1)
-#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
- __u32 TEMP = input(t); setW(t, TEMP); \
- E += TEMP + rol32(A,5) + (fn) + (constant); \
- B = ror32(B, 2); } while (0)
+#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) \
+ do { \
+ __u32 TEMP = input(t); \
+ setW(t, TEMP); \
+ E += TEMP + rol32(A, 5) + (fn) + (constant); \
+ B = ror32(B, 2); \
+ } while (0)
-#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
-#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
-#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
-#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
-#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
+#define T_0_15(t, A, B, C, D, E) \
+ SHA_ROUND(t, SHA_SRC, (((C ^ D) & B) ^ D), 0x5a827999, A, B, C, D, E)
+#define T_16_19(t, A, B, C, D, E) \
+ SHA_ROUND(t, SHA_MIX, (((C ^ D) & B) ^ D), 0x5a827999, A, B, C, D, E)
+#define T_20_39(t, A, B, C, D, E) \
+ SHA_ROUND(t, SHA_MIX, (B ^ C ^ D), 0x6ed9eba1, A, B, C, D, E)
+#define T_40_59(t, A, B, C, D, E) \
+ SHA_ROUND(t, SHA_MIX, ((B & C) + (D & (B ^ C))), 0x8f1bbcdc, A, B, C, D, E)
+#define T_60_79(t, A, B, C, D, E) \
+ SHA_ROUND(t, SHA_MIX, (B ^ C ^ D), 0xca62c1d6, A, B, C, D, E)
/**
* sha_transform - single block SHA1 transform
@@ -85,122 +95,120 @@
* to clear the workspace. This is left to the caller to avoid
* unnecessary clears between chained hashing operations.
*/
-void sha_transform(__u32 *digest, const char *data, __u32 *array)
-{
- __u32 A, B, C, D, E;
+void sha_transform(__u32* digest, const char* data, __u32* array) {
+ __u32 A, B, C, D, E;
- A = digest[0];
- B = digest[1];
- C = digest[2];
- D = digest[3];
- E = digest[4];
+ A = digest[0];
+ B = digest[1];
+ C = digest[2];
+ D = digest[3];
+ E = digest[4];
- /* Round 1 - iterations 0-16 take their input from 'data' */
- T_0_15( 0, A, B, C, D, E);
- T_0_15( 1, E, A, B, C, D);
- T_0_15( 2, D, E, A, B, C);
- T_0_15( 3, C, D, E, A, B);
- T_0_15( 4, B, C, D, E, A);
- T_0_15( 5, A, B, C, D, E);
- T_0_15( 6, E, A, B, C, D);
- T_0_15( 7, D, E, A, B, C);
- T_0_15( 8, C, D, E, A, B);
- T_0_15( 9, B, C, D, E, A);
- T_0_15(10, A, B, C, D, E);
- T_0_15(11, E, A, B, C, D);
- T_0_15(12, D, E, A, B, C);
- T_0_15(13, C, D, E, A, B);
- T_0_15(14, B, C, D, E, A);
- T_0_15(15, A, B, C, D, E);
+ /* Round 1 - iterations 0-16 take their input from 'data' */
+ T_0_15(0, A, B, C, D, E);
+ T_0_15(1, E, A, B, C, D);
+ T_0_15(2, D, E, A, B, C);
+ T_0_15(3, C, D, E, A, B);
+ T_0_15(4, B, C, D, E, A);
+ T_0_15(5, A, B, C, D, E);
+ T_0_15(6, E, A, B, C, D);
+ T_0_15(7, D, E, A, B, C);
+ T_0_15(8, C, D, E, A, B);
+ T_0_15(9, B, C, D, E, A);
+ T_0_15(10, A, B, C, D, E);
+ T_0_15(11, E, A, B, C, D);
+ T_0_15(12, D, E, A, B, C);
+ T_0_15(13, C, D, E, A, B);
+ T_0_15(14, B, C, D, E, A);
+ T_0_15(15, A, B, C, D, E);
- /* Round 1 - tail. Input from 512-bit mixing array */
- T_16_19(16, E, A, B, C, D);
- T_16_19(17, D, E, A, B, C);
- T_16_19(18, C, D, E, A, B);
- T_16_19(19, B, C, D, E, A);
+ /* Round 1 - tail. Input from 512-bit mixing array */
+ T_16_19(16, E, A, B, C, D);
+ T_16_19(17, D, E, A, B, C);
+ T_16_19(18, C, D, E, A, B);
+ T_16_19(19, B, C, D, E, A);
- /* Round 2 */
- T_20_39(20, A, B, C, D, E);
- T_20_39(21, E, A, B, C, D);
- T_20_39(22, D, E, A, B, C);
- T_20_39(23, C, D, E, A, B);
- T_20_39(24, B, C, D, E, A);
- T_20_39(25, A, B, C, D, E);
- T_20_39(26, E, A, B, C, D);
- T_20_39(27, D, E, A, B, C);
- T_20_39(28, C, D, E, A, B);
- T_20_39(29, B, C, D, E, A);
- T_20_39(30, A, B, C, D, E);
- T_20_39(31, E, A, B, C, D);
- T_20_39(32, D, E, A, B, C);
- T_20_39(33, C, D, E, A, B);
- T_20_39(34, B, C, D, E, A);
- T_20_39(35, A, B, C, D, E);
- T_20_39(36, E, A, B, C, D);
- T_20_39(37, D, E, A, B, C);
- T_20_39(38, C, D, E, A, B);
- T_20_39(39, B, C, D, E, A);
+ /* Round 2 */
+ T_20_39(20, A, B, C, D, E);
+ T_20_39(21, E, A, B, C, D);
+ T_20_39(22, D, E, A, B, C);
+ T_20_39(23, C, D, E, A, B);
+ T_20_39(24, B, C, D, E, A);
+ T_20_39(25, A, B, C, D, E);
+ T_20_39(26, E, A, B, C, D);
+ T_20_39(27, D, E, A, B, C);
+ T_20_39(28, C, D, E, A, B);
+ T_20_39(29, B, C, D, E, A);
+ T_20_39(30, A, B, C, D, E);
+ T_20_39(31, E, A, B, C, D);
+ T_20_39(32, D, E, A, B, C);
+ T_20_39(33, C, D, E, A, B);
+ T_20_39(34, B, C, D, E, A);
+ T_20_39(35, A, B, C, D, E);
+ T_20_39(36, E, A, B, C, D);
+ T_20_39(37, D, E, A, B, C);
+ T_20_39(38, C, D, E, A, B);
+ T_20_39(39, B, C, D, E, A);
- /* Round 3 */
- T_40_59(40, A, B, C, D, E);
- T_40_59(41, E, A, B, C, D);
- T_40_59(42, D, E, A, B, C);
- T_40_59(43, C, D, E, A, B);
- T_40_59(44, B, C, D, E, A);
- T_40_59(45, A, B, C, D, E);
- T_40_59(46, E, A, B, C, D);
- T_40_59(47, D, E, A, B, C);
- T_40_59(48, C, D, E, A, B);
- T_40_59(49, B, C, D, E, A);
- T_40_59(50, A, B, C, D, E);
- T_40_59(51, E, A, B, C, D);
- T_40_59(52, D, E, A, B, C);
- T_40_59(53, C, D, E, A, B);
- T_40_59(54, B, C, D, E, A);
- T_40_59(55, A, B, C, D, E);
- T_40_59(56, E, A, B, C, D);
- T_40_59(57, D, E, A, B, C);
- T_40_59(58, C, D, E, A, B);
- T_40_59(59, B, C, D, E, A);
+ /* Round 3 */
+ T_40_59(40, A, B, C, D, E);
+ T_40_59(41, E, A, B, C, D);
+ T_40_59(42, D, E, A, B, C);
+ T_40_59(43, C, D, E, A, B);
+ T_40_59(44, B, C, D, E, A);
+ T_40_59(45, A, B, C, D, E);
+ T_40_59(46, E, A, B, C, D);
+ T_40_59(47, D, E, A, B, C);
+ T_40_59(48, C, D, E, A, B);
+ T_40_59(49, B, C, D, E, A);
+ T_40_59(50, A, B, C, D, E);
+ T_40_59(51, E, A, B, C, D);
+ T_40_59(52, D, E, A, B, C);
+ T_40_59(53, C, D, E, A, B);
+ T_40_59(54, B, C, D, E, A);
+ T_40_59(55, A, B, C, D, E);
+ T_40_59(56, E, A, B, C, D);
+ T_40_59(57, D, E, A, B, C);
+ T_40_59(58, C, D, E, A, B);
+ T_40_59(59, B, C, D, E, A);
- /* Round 4 */
- T_60_79(60, A, B, C, D, E);
- T_60_79(61, E, A, B, C, D);
- T_60_79(62, D, E, A, B, C);
- T_60_79(63, C, D, E, A, B);
- T_60_79(64, B, C, D, E, A);
- T_60_79(65, A, B, C, D, E);
- T_60_79(66, E, A, B, C, D);
- T_60_79(67, D, E, A, B, C);
- T_60_79(68, C, D, E, A, B);
- T_60_79(69, B, C, D, E, A);
- T_60_79(70, A, B, C, D, E);
- T_60_79(71, E, A, B, C, D);
- T_60_79(72, D, E, A, B, C);
- T_60_79(73, C, D, E, A, B);
- T_60_79(74, B, C, D, E, A);
- T_60_79(75, A, B, C, D, E);
- T_60_79(76, E, A, B, C, D);
- T_60_79(77, D, E, A, B, C);
- T_60_79(78, C, D, E, A, B);
- T_60_79(79, B, C, D, E, A);
+ /* Round 4 */
+ T_60_79(60, A, B, C, D, E);
+ T_60_79(61, E, A, B, C, D);
+ T_60_79(62, D, E, A, B, C);
+ T_60_79(63, C, D, E, A, B);
+ T_60_79(64, B, C, D, E, A);
+ T_60_79(65, A, B, C, D, E);
+ T_60_79(66, E, A, B, C, D);
+ T_60_79(67, D, E, A, B, C);
+ T_60_79(68, C, D, E, A, B);
+ T_60_79(69, B, C, D, E, A);
+ T_60_79(70, A, B, C, D, E);
+ T_60_79(71, E, A, B, C, D);
+ T_60_79(72, D, E, A, B, C);
+ T_60_79(73, C, D, E, A, B);
+ T_60_79(74, B, C, D, E, A);
+ T_60_79(75, A, B, C, D, E);
+ T_60_79(76, E, A, B, C, D);
+ T_60_79(77, D, E, A, B, C);
+ T_60_79(78, C, D, E, A, B);
+ T_60_79(79, B, C, D, E, A);
- digest[0] += A;
- digest[1] += B;
- digest[2] += C;
- digest[3] += D;
- digest[4] += E;
+ digest[0] += A;
+ digest[1] += B;
+ digest[2] += C;
+ digest[3] += D;
+ digest[4] += E;
}
/**
* sha_init - initialize the vectors for a SHA1 digest
* @buf: vector to initialize
*/
-void sha_init(__u32 *buf)
-{
- buf[0] = 0x67452301;
- buf[1] = 0xefcdab89;
- buf[2] = 0x98badcfe;
- buf[3] = 0x10325476;
- buf[4] = 0xc3d2e1f0;
+void sha_init(__u32* buf) {
+ buf[0] = 0x67452301;
+ buf[1] = 0xefcdab89;
+ buf[2] = 0x98badcfe;
+ buf[3] = 0x10325476;
+ buf[4] = 0xc3d2e1f0;
}
diff --git a/kernel/sha1_generic.c b/kernel/sha1_generic.c
index 1b4ac01..c7ca927 100644
--- a/kernel/sha1_generic.c
+++ b/kernel/sha1_generic.c
@@ -23,124 +23,117 @@
#include <crypto/sha.h>
#include <asm/byteorder.h>
-static int sha1_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
+static int sha1_init(struct shash_desc* desc) {
+ struct sha1_state* sctx = shash_desc_ctx(desc);
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
+ *sctx = (struct sha1_state){
+ .state = {SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4},
+ };
- return 0;
+ return 0;
}
-static int sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
+static int sha1_update(struct shash_desc* desc,
+ const u8* data,
+ unsigned int len) {
+ struct sha1_state* sctx = shash_desc_ctx(desc);
+ unsigned int partial, done;
+ const u8* src;
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
+ done = 0;
+ src = data;
- if ((partial + len) > 63) {
- u32 temp[SHA_WORKSPACE_WORDS];
+ if ((partial + len) > 63) {
+ u32 temp[SHA_WORKSPACE_WORDS];
- if (partial) {
- done = -partial;
- memcpy(sctx->buffer + partial, data, done + 64);
- src = sctx->buffer;
- }
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buffer + partial, data, done + 64);
+ src = sctx->buffer;
+ }
- do {
- sha_transform(sctx->state, src, temp);
- done += 64;
- src = data + done;
- } while (done + 63 < len);
+ do {
+ sha_transform(sctx->state, src, temp);
+ done += 64;
+ src = data + done;
+ } while (done + 63 < len);
- memset(temp, 0, sizeof(temp));
- partial = 0;
- }
- memcpy(sctx->buffer + partial, src, len - done);
+ memset(temp, 0, sizeof(temp));
+ partial = 0;
+ }
+ memcpy(sctx->buffer + partial, src, len - done);
- return 0;
+ return 0;
}
-
/* Add padding and return the message digest. */
-static int sha1_final(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- u32 i, index, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
+static int sha1_final(struct shash_desc* desc, u8* out) {
+ struct sha1_state* sctx = shash_desc_ctx(desc);
+ __be32* dst = (__be32*)out;
+ u32 i, index, padlen;
+ __be64 bits;
+ static const u8 padding[64] = {
+ 0x80,
+ };
- bits = __cpu_to_be64(sctx->count << 3);
+ bits = __cpu_to_be64(sctx->count << 3);
- /* Pad out to 56 mod 64 */
- index = sctx->count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- sha1_update(desc, padding, padlen);
+ /* Pad out to 56 mod 64 */
+ index = sctx->count & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ sha1_update(desc, padding, padlen);
- /* Append length */
- sha1_update(desc, (const u8 *)&bits, sizeof(bits));
+ /* Append length */
+ sha1_update(desc, (const u8*)&bits, sizeof(bits));
- /* Store state in digest */
- for (i = 0; i < 5; i++)
- dst[i] = __cpu_to_be32(sctx->state[i]);
+ /* Store state in digest */
+ for (i = 0; i < 5; i++)
+ dst[i] = __cpu_to_be32(sctx->state[i]);
- /* Wipe context */
- memset(sctx, 0, sizeof *sctx);
+ /* Wipe context */
+ memset(sctx, 0, sizeof *sctx);
- return 0;
+ return 0;
}
-static int sha1_export(struct shash_desc *desc, void *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
+static int sha1_export(struct shash_desc* desc, void* out) {
+ struct sha1_state* sctx = shash_desc_ctx(desc);
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
}
-static int sha1_import(struct shash_desc *desc, const void *in)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
+static int sha1_import(struct shash_desc* desc, const void* in) {
+ struct sha1_state* sctx = shash_desc_ctx(desc);
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
}
-static struct shash_alg alg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_init,
- .update = sha1_update,
- .final = sha1_final,
- .export = sha1_export,
- .import = sha1_import,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name= "sha1-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = NULL,
- }
-};
+static struct shash_alg alg = {.digestsize = SHA1_DIGEST_SIZE,
+ .init = sha1_init,
+ .update = sha1_update,
+ .final = sha1_final,
+ .export = sha1_export,
+ .import = sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-generic",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = NULL,
+ }};
-static int __init sha1_generic_mod_init(void)
-{
- return crypto_register_shash(&alg);
+static int __init sha1_generic_mod_init(void) {
+ return crypto_register_shash(&alg);
}
-static void __exit sha1_generic_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
+static void __exit sha1_generic_mod_fini(void) {
+ crypto_unregister_shash(&alg);
}
module_init(sha1_generic_mod_init);
diff --git a/kernel/sha256_generic.c b/kernel/sha256_generic.c
index b759766..45faa15 100644
--- a/kernel/sha256_generic.c
+++ b/kernel/sha256_generic.c
@@ -23,371 +23,495 @@
#include <crypto/sha.h>
#include <asm/byteorder.h>
-static inline u32 Ch(u32 x, u32 y, u32 z)
-{
- return z ^ (x & (y ^ z));
+static inline u32 Ch(u32 x, u32 y, u32 z) {
+ return z ^ (x & (y ^ z));
}
-static inline u32 Maj(u32 x, u32 y, u32 z)
-{
- return (x & y) | (z & (x | y));
+static inline u32 Maj(u32 x, u32 y, u32 z) {
+ return (x & y) | (z & (x | y));
}
-#define e0(x) (ror32(x, 2) ^ ror32(x,13) ^ ror32(x,22))
-#define e1(x) (ror32(x, 6) ^ ror32(x,11) ^ ror32(x,25))
-#define s0(x) (ror32(x, 7) ^ ror32(x,18) ^ (x >> 3))
-#define s1(x) (ror32(x,17) ^ ror32(x,19) ^ (x >> 10))
+#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
+#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
+#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
+#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
-static inline void LOAD_OP(int I, u32 *W, const u8 *input)
-{
- W[I] = __be32_to_cpu( ((__be32*)(input))[I] );
+static inline void LOAD_OP(int I, u32* W, const u8* input) {
+ W[I] = __be32_to_cpu(((__be32*)(input))[I]);
}
-static inline void BLEND_OP(int I, u32 *W)
-{
- W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+static inline void BLEND_OP(int I, u32* W) {
+ W[I] = s1(W[I - 2]) + W[I - 7] + s0(W[I - 15]) + W[I - 16];
}
-static void sha256_transform(u32 *state, const u8 *input)
-{
- u32 a, b, c, d, e, f, g, h, t1, t2;
- u32 W[64];
- int i;
+static void sha256_transform(u32* state, const u8* input) {
+ u32 a, b, c, d, e, f, g, h, t1, t2;
+ u32 W[64];
+ int i;
- /* load the input */
- for (i = 0; i < 16; i++)
- LOAD_OP(i, W, input);
+ /* load the input */
+ for (i = 0; i < 16; i++)
+ LOAD_OP(i, W, input);
- /* now blend */
- for (i = 16; i < 64; i++)
- BLEND_OP(i, W);
+ /* now blend */
+ for (i = 16; i < 64; i++)
+ BLEND_OP(i, W);
- /* load the state into our registers */
- a=state[0]; b=state[1]; c=state[2]; d=state[3];
- e=state[4]; f=state[5]; g=state[6]; h=state[7];
+ /* load the state into our registers */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ f = state[5];
+ g = state[6];
+ h = state[7];
- /* now iterate */
- t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0];
- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
- t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1];
- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
- t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2];
- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
- t1 = e + e1(b) + Ch(b,c,d) + 0xe9b5dba5 + W[ 3];
- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
- t1 = d + e1(a) + Ch(a,b,c) + 0x3956c25b + W[ 4];
- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
- t1 = c + e1(h) + Ch(h,a,b) + 0x59f111f1 + W[ 5];
- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
- t1 = b + e1(g) + Ch(g,h,a) + 0x923f82a4 + W[ 6];
- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
- t1 = a + e1(f) + Ch(f,g,h) + 0xab1c5ed5 + W[ 7];
- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+ /* now iterate */
+ t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
+ t2 = e0(a) + Maj(a, b, c);
+ d += t1;
+ h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
+ t2 = e0(h) + Maj(h, a, b);
+ c += t1;
+ g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
+ t2 = e0(g) + Maj(g, h, a);
+ b += t1;
+ f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
+ t2 = e0(f) + Maj(f, g, h);
+ a += t1;
+ e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];
+ t2 = e0(e) + Maj(e, f, g);
+ h += t1;
+ d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5];
+ t2 = e0(d) + Maj(d, e, f);
+ g += t1;
+ c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6];
+ t2 = e0(c) + Maj(c, d, e);
+ f += t1;
+ b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7];
+ t2 = e0(b) + Maj(b, c, d);
+ e += t1;
+ a = t1 + t2;
- t1 = h + e1(e) + Ch(e,f,g) + 0xd807aa98 + W[ 8];
- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
- t1 = g + e1(d) + Ch(d,e,f) + 0x12835b01 + W[ 9];
- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
- t1 = f + e1(c) + Ch(c,d,e) + 0x243185be + W[10];
- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
- t1 = e + e1(b) + Ch(b,c,d) + 0x550c7dc3 + W[11];
- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
- t1 = d + e1(a) + Ch(a,b,c) + 0x72be5d74 + W[12];
- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
- t1 = c + e1(h) + Ch(h,a,b) + 0x80deb1fe + W[13];
- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
- t1 = b + e1(g) + Ch(g,h,a) + 0x9bdc06a7 + W[14];
- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
- t1 = a + e1(f) + Ch(f,g,h) + 0xc19bf174 + W[15];
- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+ t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
+ t2 = e0(a) + Maj(a, b, c);
+ d += t1;
+ h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
+ t2 = e0(h) + Maj(h, a, b);
+ c += t1;
+ g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
+ t2 = e0(g) + Maj(g, h, a);
+ b += t1;
+ f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
+ t2 = e0(f) + Maj(f, g, h);
+ a += t1;
+ e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12];
+ t2 = e0(e) + Maj(e, f, g);
+ h += t1;
+ d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13];
+ t2 = e0(d) + Maj(d, e, f);
+ g += t1;
+ c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
+ t2 = e0(c) + Maj(c, d, e);
+ f += t1;
+ b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
+ t2 = e0(b) + Maj(b, c, d);
+ e += t1;
+ a = t1 + t2;
- t1 = h + e1(e) + Ch(e,f,g) + 0xe49b69c1 + W[16];
- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
- t1 = g + e1(d) + Ch(d,e,f) + 0xefbe4786 + W[17];
- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
- t1 = f + e1(c) + Ch(c,d,e) + 0x0fc19dc6 + W[18];
- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
- t1 = e + e1(b) + Ch(b,c,d) + 0x240ca1cc + W[19];
- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
- t1 = d + e1(a) + Ch(a,b,c) + 0x2de92c6f + W[20];
- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
- t1 = c + e1(h) + Ch(h,a,b) + 0x4a7484aa + W[21];
- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
- t1 = b + e1(g) + Ch(g,h,a) + 0x5cb0a9dc + W[22];
- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
- t1 = a + e1(f) + Ch(f,g,h) + 0x76f988da + W[23];
- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+ t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
+ t2 = e0(a) + Maj(a, b, c);
+ d += t1;
+ h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
+ t2 = e0(h) + Maj(h, a, b);
+ c += t1;
+ g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
+ t2 = e0(g) + Maj(g, h, a);
+ b += t1;
+ f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
+ t2 = e0(f) + Maj(f, g, h);
+ a += t1;
+ e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
+ t2 = e0(e) + Maj(e, f, g);
+ h += t1;
+ d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
+ t2 = e0(d) + Maj(d, e, f);
+ g += t1;
+ c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
+ t2 = e0(c) + Maj(c, d, e);
+ f += t1;
+ b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
+ t2 = e0(b) + Maj(b, c, d);
+ e += t1;
+ a = t1 + t2;
- t1 = h + e1(e) + Ch(e,f,g) + 0x983e5152 + W[24];
- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
- t1 = g + e1(d) + Ch(d,e,f) + 0xa831c66d + W[25];
- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
- t1 = f + e1(c) + Ch(c,d,e) + 0xb00327c8 + W[26];
- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
- t1 = e + e1(b) + Ch(b,c,d) + 0xbf597fc7 + W[27];
- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
- t1 = d + e1(a) + Ch(a,b,c) + 0xc6e00bf3 + W[28];
- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
- t1 = c + e1(h) + Ch(h,a,b) + 0xd5a79147 + W[29];
- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
- t1 = b + e1(g) + Ch(g,h,a) + 0x06ca6351 + W[30];
- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
- t1 = a + e1(f) + Ch(f,g,h) + 0x14292967 + W[31];
- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+ t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
+ t2 = e0(a) + Maj(a, b, c);
+ d += t1;
+ h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
+ t2 = e0(h) + Maj(h, a, b);
+ c += t1;
+ g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
+ t2 = e0(g) + Maj(g, h, a);
+ b += t1;
+ f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
+ t2 = e0(f) + Maj(f, g, h);
+ a += t1;
+ e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
+ t2 = e0(e) + Maj(e, f, g);
+ h += t1;
+ d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
+ t2 = e0(d) + Maj(d, e, f);
+ g += t1;
+ c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
+ t2 = e0(c) + Maj(c, d, e);
+ f += t1;
+ b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
+ t2 = e0(b) + Maj(b, c, d);
+ e += t1;
+ a = t1 + t2;
- t1 = h + e1(e) + Ch(e,f,g) + 0x27b70a85 + W[32];
- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
- t1 = g + e1(d) + Ch(d,e,f) + 0x2e1b2138 + W[33];
- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
- t1 = f + e1(c) + Ch(c,d,e) + 0x4d2c6dfc + W[34];
- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
- t1 = e + e1(b) + Ch(b,c,d) + 0x53380d13 + W[35];
- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
- t1 = d + e1(a) + Ch(a,b,c) + 0x650a7354 + W[36];
- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
- t1 = c + e1(h) + Ch(h,a,b) + 0x766a0abb + W[37];
- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
- t1 = b + e1(g) + Ch(g,h,a) + 0x81c2c92e + W[38];
- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
- t1 = a + e1(f) + Ch(f,g,h) + 0x92722c85 + W[39];
- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+ t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
+ t2 = e0(a) + Maj(a, b, c);
+ d += t1;
+ h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
+ t2 = e0(h) + Maj(h, a, b);
+ c += t1;
+ g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
+ t2 = e0(g) + Maj(g, h, a);
+ b += t1;
+ f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
+ t2 = e0(f) + Maj(f, g, h);
+ a += t1;
+ e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
+ t2 = e0(e) + Maj(e, f, g);
+ h += t1;
+ d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
+ t2 = e0(d) + Maj(d, e, f);
+ g += t1;
+ c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
+ t2 = e0(c) + Maj(c, d, e);
+ f += t1;
+ b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
+ t2 = e0(b) + Maj(b, c, d);
+ e += t1;
+ a = t1 + t2;
- t1 = h + e1(e) + Ch(e,f,g) + 0xa2bfe8a1 + W[40];
- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
- t1 = g + e1(d) + Ch(d,e,f) + 0xa81a664b + W[41];
- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
- t1 = f + e1(c) + Ch(c,d,e) + 0xc24b8b70 + W[42];
- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
- t1 = e + e1(b) + Ch(b,c,d) + 0xc76c51a3 + W[43];
- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
- t1 = d + e1(a) + Ch(a,b,c) + 0xd192e819 + W[44];
- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
- t1 = c + e1(h) + Ch(h,a,b) + 0xd6990624 + W[45];
- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
- t1 = b + e1(g) + Ch(g,h,a) + 0xf40e3585 + W[46];
- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
- t1 = a + e1(f) + Ch(f,g,h) + 0x106aa070 + W[47];
- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+ t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
+ t2 = e0(a) + Maj(a, b, c);
+ d += t1;
+ h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
+ t2 = e0(h) + Maj(h, a, b);
+ c += t1;
+ g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
+ t2 = e0(g) + Maj(g, h, a);
+ b += t1;
+ f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
+ t2 = e0(f) + Maj(f, g, h);
+ a += t1;
+ e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
+ t2 = e0(e) + Maj(e, f, g);
+ h += t1;
+ d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
+ t2 = e0(d) + Maj(d, e, f);
+ g += t1;
+ c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
+ t2 = e0(c) + Maj(c, d, e);
+ f += t1;
+ b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
+ t2 = e0(b) + Maj(b, c, d);
+ e += t1;
+ a = t1 + t2;
- t1 = h + e1(e) + Ch(e,f,g) + 0x19a4c116 + W[48];
- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
- t1 = g + e1(d) + Ch(d,e,f) + 0x1e376c08 + W[49];
- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
- t1 = f + e1(c) + Ch(c,d,e) + 0x2748774c + W[50];
- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
- t1 = e + e1(b) + Ch(b,c,d) + 0x34b0bcb5 + W[51];
- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
- t1 = d + e1(a) + Ch(a,b,c) + 0x391c0cb3 + W[52];
- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
- t1 = c + e1(h) + Ch(h,a,b) + 0x4ed8aa4a + W[53];
- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
- t1 = b + e1(g) + Ch(g,h,a) + 0x5b9cca4f + W[54];
- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
- t1 = a + e1(f) + Ch(f,g,h) + 0x682e6ff3 + W[55];
- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+ t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
+ t2 = e0(a) + Maj(a, b, c);
+ d += t1;
+ h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
+ t2 = e0(h) + Maj(h, a, b);
+ c += t1;
+ g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
+ t2 = e0(g) + Maj(g, h, a);
+ b += t1;
+ f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
+ t2 = e0(f) + Maj(f, g, h);
+ a += t1;
+ e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
+ t2 = e0(e) + Maj(e, f, g);
+ h += t1;
+ d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
+ t2 = e0(d) + Maj(d, e, f);
+ g += t1;
+ c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
+ t2 = e0(c) + Maj(c, d, e);
+ f += t1;
+ b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
+ t2 = e0(b) + Maj(b, c, d);
+ e += t1;
+ a = t1 + t2;
- t1 = h + e1(e) + Ch(e,f,g) + 0x748f82ee + W[56];
- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
- t1 = g + e1(d) + Ch(d,e,f) + 0x78a5636f + W[57];
- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
- t1 = f + e1(c) + Ch(c,d,e) + 0x84c87814 + W[58];
- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
- t1 = e + e1(b) + Ch(b,c,d) + 0x8cc70208 + W[59];
- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
- t1 = d + e1(a) + Ch(a,b,c) + 0x90befffa + W[60];
- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
- t1 = c + e1(h) + Ch(h,a,b) + 0xa4506ceb + W[61];
- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
- t1 = b + e1(g) + Ch(g,h,a) + 0xbef9a3f7 + W[62];
- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
- t1 = a + e1(f) + Ch(f,g,h) + 0xc67178f2 + W[63];
- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+ t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
+ t2 = e0(a) + Maj(a, b, c);
+ d += t1;
+ h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
+ t2 = e0(h) + Maj(h, a, b);
+ c += t1;
+ g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
+ t2 = e0(g) + Maj(g, h, a);
+ b += t1;
+ f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
+ t2 = e0(f) + Maj(f, g, h);
+ a += t1;
+ e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
+ t2 = e0(e) + Maj(e, f, g);
+ h += t1;
+ d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
+ t2 = e0(d) + Maj(d, e, f);
+ g += t1;
+ c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
+ t2 = e0(c) + Maj(c, d, e);
+ f += t1;
+ b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
+ t2 = e0(b) + Maj(b, c, d);
+ e += t1;
+ a = t1 + t2;
- state[0] += a; state[1] += b; state[2] += c; state[3] += d;
- state[4] += e; state[5] += f; state[6] += g; state[7] += h;
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ state[5] += f;
+ state[6] += g;
+ state[7] += h;
- /* clear any sensitive info... */
- a = b = c = d = e = f = g = h = t1 = t2 = 0;
- memset(W, 0, 64 * sizeof(u32));
+ /* clear any sensitive info... */
+ a = b = c = d = e = f = g = h = t1 = t2 = 0;
+ memset(W, 0, 64 * sizeof(u32));
}
+static int sha224_init(struct shash_desc* desc) {
+ struct sha256_state* sctx = shash_desc_ctx(desc);
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
-static int sha224_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- sctx->state[0] = SHA224_H0;
- sctx->state[1] = SHA224_H1;
- sctx->state[2] = SHA224_H2;
- sctx->state[3] = SHA224_H3;
- sctx->state[4] = SHA224_H4;
- sctx->state[5] = SHA224_H5;
- sctx->state[6] = SHA224_H6;
- sctx->state[7] = SHA224_H7;
- sctx->count = 0;
-
- return 0;
+ return 0;
}
-static int sha256_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- sctx->state[0] = SHA256_H0;
- sctx->state[1] = SHA256_H1;
- sctx->state[2] = SHA256_H2;
- sctx->state[3] = SHA256_H3;
- sctx->state[4] = SHA256_H4;
- sctx->state[5] = SHA256_H5;
- sctx->state[6] = SHA256_H6;
- sctx->state[7] = SHA256_H7;
- sctx->count = 0;
+static int sha256_init(struct shash_desc* desc) {
+ struct sha256_state* sctx = shash_desc_ctx(desc);
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
- return 0;
+ return 0;
}
-static int sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
+static int sha256_update(struct shash_desc* desc,
+ const u8* data,
+ unsigned int len) {
+ struct sha256_state* sctx = shash_desc_ctx(desc);
+ unsigned int partial, done;
+ const u8* src;
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
+ done = 0;
+ src = data;
- if ((partial + len) > 63) {
- if (partial) {
- done = -partial;
- memcpy(sctx->buf + partial, data, done + 64);
- src = sctx->buf;
- }
+ if ((partial + len) > 63) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buf + partial, data, done + 64);
+ src = sctx->buf;
+ }
- do {
- sha256_transform(sctx->state, src);
- done += 64;
- src = data + done;
- } while (done + 63 < len);
+ do {
+ sha256_transform(sctx->state, src);
+ done += 64;
+ src = data + done;
+ } while (done + 63 < len);
- partial = 0;
- }
- memcpy(sctx->buf + partial, src, len - done);
+ partial = 0;
+ }
+ memcpy(sctx->buf + partial, src, len - done);
- return 0;
+ return 0;
}
-static int sha256_final(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- unsigned int index, pad_len;
- int i;
- static const u8 padding[64] = { 0x80, };
+static int sha256_final(struct shash_desc* desc, u8* out) {
+ struct sha256_state* sctx = shash_desc_ctx(desc);
+ __be32* dst = (__be32*)out;
+ __be64 bits;
+ unsigned int index, pad_len;
+ int i;
+ static const u8 padding[64] = {
+ 0x80,
+ };
- /* Save number of bits */
- bits = __cpu_to_be64(sctx->count << 3);
+ /* Save number of bits */
+ bits = __cpu_to_be64(sctx->count << 3);
- /* Pad out to 56 mod 64. */
- index = sctx->count & 0x3f;
- pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
- sha256_update(desc, padding, pad_len);
+ /* Pad out to 56 mod 64. */
+ index = sctx->count & 0x3f;
+ pad_len = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ sha256_update(desc, padding, pad_len);
- /* Append length (before padding) */
- sha256_update(desc, (const u8 *)&bits, sizeof(bits));
+ /* Append length (before padding) */
+ sha256_update(desc, (const u8*)&bits, sizeof(bits));
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = __cpu_to_be32(sctx->state[i]);
+ /* Store state in digest */
+ for (i = 0; i < 8; i++)
+ dst[i] = __cpu_to_be32(sctx->state[i]);
- /* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(*sctx));
+ /* Zeroize sensitive information. */
+ memset(sctx, 0, sizeof(*sctx));
- return 0;
+ return 0;
}
-static int sha224_final(struct shash_desc *desc, u8 *hash)
-{
- u8 D[SHA256_DIGEST_SIZE];
+static int sha224_final(struct shash_desc* desc, u8* hash) {
+ u8 D[SHA256_DIGEST_SIZE];
- sha256_final(desc, D);
+ sha256_final(desc, D);
- memcpy(hash, D, SHA224_DIGEST_SIZE);
- memset(D, 0, SHA256_DIGEST_SIZE);
+ memcpy(hash, D, SHA224_DIGEST_SIZE);
+ memset(D, 0, SHA256_DIGEST_SIZE);
- return 0;
+ return 0;
}
-static int sha256_export(struct shash_desc *desc, void *out)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+static int sha256_export(struct shash_desc* desc, void* out) {
+ struct sha256_state* sctx = shash_desc_ctx(desc);
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
}
-static int sha256_import(struct shash_desc *desc, const void *in)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+static int sha256_import(struct shash_desc* desc, const void* in) {
+ struct sha256_state* sctx = shash_desc_ctx(desc);
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
}
-struct shash_alg sha256 = {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_init,
- .update = sha256_update,
- .final = sha256_final,
- .export = sha256_export,
- .import = sha256_import,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "sha256-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = NULL,
- }
-};
+struct shash_alg sha256 = {.digestsize = SHA256_DIGEST_SIZE,
+ .init = sha256_init,
+ .update = sha256_update,
+ .final = sha256_final,
+ .export = sha256_export,
+ .import = sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-generic",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = NULL,
+ }};
-struct shash_alg sha224 = {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_init,
- .update = sha256_update,
- .final = sha224_final,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "sha224-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = NULL,
- }
-};
+struct shash_alg sha224 = {.digestsize = SHA224_DIGEST_SIZE,
+ .init = sha224_init,
+ .update = sha256_update,
+ .final = sha224_final,
+ .descsize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-generic",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = NULL,
+ }};
-static int __init sha256_generic_mod_init(void)
-{
- int ret = 0;
+static int __init sha256_generic_mod_init(void) {
+ int ret = 0;
- ret = crypto_register_shash(&sha224);
+ ret = crypto_register_shash(&sha224);
- if (ret < 0)
- return ret;
+ if (ret < 0)
+ return ret;
- ret = crypto_register_shash(&sha256);
+ ret = crypto_register_shash(&sha256);
- if (ret < 0)
- crypto_unregister_shash(&sha224);
+ if (ret < 0)
+ crypto_unregister_shash(&sha224);
- return ret;
+ return ret;
}
-static void __exit sha256_generic_mod_fini(void)
-{
- crypto_unregister_shash(&sha224);
- crypto_unregister_shash(&sha256);
+static void __exit sha256_generic_mod_fini(void) {
+ crypto_unregister_shash(&sha224);
+ crypto_unregister_shash(&sha256);
}
module_init(sha256_generic_mod_init);
diff --git a/logging.h b/logging.h
index 89035d3..fd128db 100644
--- a/logging.h
+++ b/logging.h
@@ -7,6 +7,7 @@
#define VERITY_LOGGING_H_
#include "verity/logging/logging.h"
-#define INIT_LOGGING(name, flags...) { }
+#define INIT_LOGGING(name, flags...) \
+ {}
-#endif // VERITY_LOGGING_H_
+#endif // VERITY_LOGGING_H_
diff --git a/logging/logging.h b/logging/logging.h
index 7656a8b..7ffe7c8 100644
--- a/logging/logging.h
+++ b/logging/logging.h
@@ -23,31 +23,28 @@
static MessageLevel min_level = LEVEL_INFO;
-class Message {
+class Message {
public:
Message(MessageLevel level,
MessageType type,
int errn,
- const char *file,
- unsigned long line) : type_(type),
- log_errno_(errn),
- file_(file),
- line_(line) {
+ const char* file,
+ unsigned long line)
+ : type_(type), log_errno_(errn), file_(file), line_(line) {
#ifdef NDEBUG
- if (type == TYPE_DEBUG)
- return;
+ if (type == TYPE_DEBUG)
+ return;
#endif
- if (level > LEVEL_FATAL)
- level = LEVEL_FATAL;
- level_ = level;
+ if (level > LEVEL_FATAL)
+ level = LEVEL_FATAL;
+ level_ = level;
- if (type == TYPE_NULL || level < min_level)
- return;
+ if (type == TYPE_NULL || level < min_level)
+ return;
- static const char *kLevels[] = { "INFO", "WARNING", "ERROR", "FATAL" };
- std::cerr << "[" << kLevels[level]
- << ":" << file << ":" << line << "] ";
- }
+ static const char* kLevels[] = {"INFO", "WARNING", "ERROR", "FATAL"};
+ std::cerr << "[" << kLevels[level] << ":" << file << ":" << line << "] ";
+ }
// The Message object is expected to be destroyed at the end of the line.
// So far this works reliably with the macros below, which is good enough
@@ -70,9 +67,9 @@
std::cerr.flush();
if (level_ == LEVEL_FATAL)
exit(1);
- }
+ }
- template<typename T>
+ template <typename T>
const Message& operator<<(const T& t) const {
#ifdef NDEBUG
if (type() == TYPE_DEBUG)
@@ -89,7 +86,7 @@
MessageLevel level() const { return level_; }
MessageType type() const { return type_; }
int log_errno() const { return log_errno_; }
- const char *file() const { return file_; }
+ const char* file() const { return file_; }
unsigned long line() const { return line_; }
void set_level(MessageLevel l) { level_ = l; }
@@ -98,36 +95,24 @@
MessageLevel level_;
MessageType type_;
int log_errno_;
- const char *file_;
+ const char* file_;
unsigned long line_;
};
} // namespace logging
// Interface macros
-#define LOG(_level) \
- logging::Message(logging::LEVEL_##_level, \
- logging::TYPE_NORMAL, \
- 0, \
- __FILE__, \
- __LINE__)
-#define PLOG(_level) \
- logging::Message(logging::LEVEL_##_level, \
- logging::TYPE_ERRNO,\
- errno, \
- __FILE__, \
- __LINE__)
-#define DLOG(_level) \
- logging::Message(logging::LEVEL_##_level,\
- logging::TYPE_DEBUG, \
- 0, \
- __FILE__, \
- __LINE__)
-#define LOG_NULL \
- logging::Message(logging::LEVEL_INFO, \
- logging::TYPE_NULL, \
- 0, \
- __FILE__, \
- __LINE__)
+#define LOG(_level) \
+ logging::Message(logging::LEVEL_##_level, logging::TYPE_NORMAL, 0, __FILE__, \
+ __LINE__)
+#define PLOG(_level) \
+ logging::Message(logging::LEVEL_##_level, logging::TYPE_ERRNO, errno, \
+ __FILE__, __LINE__)
+#define DLOG(_level) \
+ logging::Message(logging::LEVEL_##_level, logging::TYPE_DEBUG, 0, __FILE__, \
+ __LINE__)
+#define LOG_NULL \
+ logging::Message(logging::LEVEL_INFO, logging::TYPE_NULL, 0, __FILE__, \
+ __LINE__)
#define LOG_IF(_level, cond) ((cond) ? LOG(_level) : LOG_NULL)
#define PLOG_IF(_level, cond) ((cond) ? PLOG(_level) : LOG_NULL)
diff --git a/simple_file/env.h b/simple_file/env.h
index aa576a7..38dc115 100644
--- a/simple_file/env.h
+++ b/simple_file/env.h
@@ -23,34 +23,38 @@
Env();
virtual ~Env() {}
// Wraps open(2). Use umask(2) (Env::Umask) to set the mode for file creation.
- virtual int Open(const char *pathname, int flags) const
- { return open(pathname, flags); }
- virtual int Create(const char *pathname, int flags, mode_t mode) const
- { return open(pathname, flags, mode); }
+ virtual int Open(const char* pathname, int flags) const {
+ return open(pathname, flags);
+ }
+ virtual int Create(const char* pathname, int flags, mode_t mode) const {
+ return open(pathname, flags, mode);
+ }
virtual mode_t Umask(mode_t mask) const { return umask(mask); }
- virtual int Close(int fd) const
- { return close(fd); }
- virtual int Fstat(int fd, struct stat *buf) const
- { return fstat(fd, buf); }
- virtual off_t Lseek(int fd, off_t offset, int whence) const
- { return lseek(fd, offset, whence); }
- virtual ssize_t Read(int fd, void *buf, size_t count) const
- { return read(fd, buf, count); }
- virtual ssize_t Pread(int fd, void *buf, size_t count, off_t offset) const
- { return pread(fd, buf, count, offset); }
+ virtual int Close(int fd) const { return close(fd); }
+ virtual int Fstat(int fd, struct stat* buf) const { return fstat(fd, buf); }
+ virtual off_t Lseek(int fd, off_t offset, int whence) const {
+ return lseek(fd, offset, whence);
+ }
+ virtual ssize_t Read(int fd, void* buf, size_t count) const {
+ return read(fd, buf, count);
+ }
+ virtual ssize_t Pread(int fd, void* buf, size_t count, off_t offset) const {
+ return pread(fd, buf, count, offset);
+ }
virtual ssize_t Pwrite(int fd,
- const void *buf,
+ const void* buf,
size_t count,
- off_t offset) const
- { return pwrite(fd, buf, count, offset); }
- virtual ssize_t Write(int fd, const void *buf, size_t count) const
- { return write(fd, buf, count); }
-
- // Wrap less defined behavior.
- virtual int BlockDevSize(int fd, int64_t *size) const {
- return ioctl(fd, BLKGETSIZE64, size);
+ off_t offset) const {
+ return pwrite(fd, buf, count, offset);
+ }
+ virtual ssize_t Write(int fd, const void* buf, size_t count) const {
+ return write(fd, buf, count);
}
+ // Wrap less defined behavior.
+ virtual int BlockDevSize(int fd, int64_t* size) const {
+ return ioctl(fd, BLKGETSIZE64, size);
+ }
};
} // namespace simple_file
diff --git a/simple_file/file.h b/simple_file/file.h
index d2d8078..f9174ea 100644
--- a/simple_file/file.h
+++ b/simple_file/file.h
@@ -23,19 +23,24 @@
// This class may not be used by multiple threads at once.
class File {
public:
- File() : default_env_(new Env), env_(NULL), fd_(-1), offset_(0) { }
- virtual ~File() { if (fd_ >= 0) { env()->Close(fd_); } delete default_env_; }
- virtual const Env *env() const;
+ File() : default_env_(new Env), env_(NULL), fd_(-1), offset_(0) {}
+ virtual ~File() {
+ if (fd_ >= 0) {
+ env()->Close(fd_);
+ }
+ delete default_env_;
+ }
+ virtual const Env* env() const;
// Specify the file and the open(2) flags for using it
- virtual bool Initialize(const char *path, int flags, const Env *env);
+ virtual bool Initialize(const char* path, int flags, const Env* env);
// Read |bytes| into |buf|. |buf| may be altered even on failure.
- virtual bool Read(int bytes, uint8_t *buf);
- virtual bool ReadAt(int bytes, uint8_t *buf, off_t at);
+ virtual bool Read(int bytes, uint8_t* buf);
+ virtual bool ReadAt(int bytes, uint8_t* buf, off_t at);
// Write |bytes| from |buf|
- virtual bool Write(int bytes, const uint8_t *buf);
+ virtual bool Write(int bytes, const uint8_t* buf);
// WriteAt |bytes| from |buf|
- virtual bool WriteAt(int bytes, const uint8_t *buf, off_t at);
+ virtual bool WriteAt(int bytes, const uint8_t* buf, off_t at);
// Size returns the total File size.
virtual int64_t Size() const;
@@ -45,8 +50,8 @@
virtual void Reset();
private:
- Env *default_env_;
- const Env *env_;
+ Env* default_env_;
+ const Env* env_;
int fd_;
off_t offset_;
};
diff --git a/simple_file/mock_file.h b/simple_file/mock_file.h
index 6c2c7e8..b8a566c 100644
--- a/simple_file/mock_file.h
+++ b/simple_file/mock_file.h
@@ -15,14 +15,14 @@
class MockFile : public File {
public:
- MockFile() { }
- ~MockFile() { }
- MOCK_CONST_METHOD0(env, const Env *());
- MOCK_METHOD3(Initialize, bool(const char *, int, const Env *));
- MOCK_METHOD2(Read, bool(int, uint8_t *));
- MOCK_METHOD3(ReadAt, bool(int, uint8_t *, off_t));
- MOCK_METHOD2(Write, bool(int, const uint8_t *));
- MOCK_METHOD3(WriteAt, bool(int, const uint8_t *, off_t));
+ MockFile() {}
+ ~MockFile() {}
+ MOCK_CONST_METHOD0(env, const Env*());
+ MOCK_METHOD3(Initialize, bool(const char*, int, const Env*));
+ MOCK_METHOD2(Read, bool(int, uint8_t*));
+ MOCK_METHOD3(ReadAt, bool(int, uint8_t*, off_t));
+ MOCK_METHOD2(Write, bool(int, const uint8_t*));
+ MOCK_METHOD3(WriteAt, bool(int, const uint8_t*, off_t));
MOCK_CONST_METHOD0(Size, int64_t());
MOCK_CONST_METHOD0(Whence, off_t());
MOCK_METHOD2(Seek, bool(off_t, bool));
diff --git a/utils.h b/utils.h
index 1e5c905..1acb19e 100644
--- a/utils.h
+++ b/utils.h
@@ -13,7 +13,7 @@
// A shabby function to convert an arbitrarily long digest to hex.
// This needs to be replace FOR SPEED.
// Note: hexdigest must be 2*digest_length+i long.
-void to_hex(char *hexdigest, const uint8_t *digest, unsigned int digest_length);
+void to_hex(char* hexdigest, const uint8_t* digest, unsigned int digest_length);
} // namespace verity_utils
diff --git a/verity_testrunner.cc b/verity_testrunner.cc
index 52e7bc9..1db284f 100644
--- a/verity_testrunner.cc
+++ b/verity_testrunner.cc
@@ -5,7 +5,7 @@
#include <gmock/gmock.h>
#include <gtest/gtest.h>
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
::testing::GTEST_FLAG(throw_on_failure) = true;
::testing::InitGoogleMock(&argc, argv);