[U-Boot] [PATCH][v5] crypto/fsl - Add progressive hashing support using hardware acceleration.
Gaurav Rana
gaurav.rana at freescale.com
Thu Feb 12 10:43:54 CET 2015
Currently only normal (one-shot) hashing is supported using hardware acceleration.
Add support for progressive hashing using hardware.
Signed-off-by: Ruchika Gupta <ruchika.gupta at freescale.com>
Signed-off-by: Gaurav Rana <gaurav.rana at freescale.com>
CC: Simon Glass <sjg at chromium.org>
---
Changes in v5:
Modify descriptions for CONFIG_SHA1 and CONFIG_SHA256.
Changes in v4:
Add CONFIG_SHA1, CONFIG_SHA256, CONFIG_SHA_HW_ACCEL, CONFIG_SHA_PROG_HW_ACCEL to Kconfig.
Modify README for these configs descriptions.
Changes in v3:
Remove code duplication by creating the function get_hash_type.
Rename MAX_SG to MAX_SG_32.
Changes in v2:
Merge SHA1 and SHA256 handling into common functions.
Replace malloc and memset with calloc.
Remove cast conversions for void * pointers.
Replace hardcoded error values with macros.
Correct comments.
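
For reviewers, here is a rough caller-side sketch of how the new progressive hooks are reached through the generic hash API. This is illustrative only: it assumes the hash_progressive_lookup_algo() helper and the hash_init/hash_update/hash_finish members of struct hash_algo declared in include/hash.h, and the region/length names are made up.

	#include <common.h>
	#include <hash.h>

	/* Hash two separate regions with one SHA256 context (illustrative). */
	static int hash_two_regions(const void *reg1, unsigned int len1,
				    const void *reg2, unsigned int len2,
				    u8 *digest)
	{
		struct hash_algo *algo;
		void *ctx;
		int ret;

		/* With CONFIG_SHA_PROG_HW_ACCEL this resolves to the CAAM hooks */
		ret = hash_progressive_lookup_algo("sha256", &algo);
		if (ret)
			return ret;

		ret = algo->hash_init(algo, &ctx);
		if (ret)
			return ret;

		/* is_last = 0 for intermediate updates, 1 for the final one */
		ret = algo->hash_update(algo, ctx, reg1, len1, 0);
		if (!ret)
			ret = algo->hash_update(algo, ctx, reg2, len2, 1);
		if (ret)
			return ret;	/* the driver frees ctx on update error */

		/* Runs the job descriptor and copies the digest out */
		return algo->hash_finish(algo, ctx, digest, algo->digest_size);
	}

Note that, as documented in the driver below, the context is freed by the driver itself both on error paths and at the end of the finish step, so the caller must not free it.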
Kconfig | 4 +-
README | 23 +++++--
common/hash.c | 10 +++
drivers/crypto/fsl/fsl_hash.c | 138 +++++++++++++++++++++++++++++++++++++++++-
drivers/crypto/fsl/fsl_hash.h | 34 +++++++++++
include/fsl_sec.h | 26 ++++++++
include/hw_sha.h | 41 ++++++++++++-
lib/Kconfig | 38 ++++++++++++
8 files changed, 307 insertions(+), 7 deletions(-)
create mode 100644 drivers/crypto/fsl/fsl_hash.h
diff --git a/Kconfig b/Kconfig
index fed488f..c4afb82 100644
--- a/Kconfig
+++ b/Kconfig
@@ -121,7 +121,9 @@ config FIT_SIGNATURE
select RSA
help
This option enables signature verification of FIT uImages,
- using a hash signed and verified using RSA.
+ using a hash signed and verified using RSA. If
+ CONFIG_SHA_PROG_HW_ACCEL is defined, i.e. progressive hashing
+ support is available in hardware, the RSA library will use it.
See doc/uImage.FIT/signature.txt for more details.
config SYS_EXTRA_OPTIONS
diff --git a/README b/README
index cac7978..7a3f1e7 100644
--- a/README
+++ b/README
@@ -3149,8 +3149,21 @@ CBFS (Coreboot Filesystem) support
Enable the hash verify command (hash -v). This adds to code
size a little.
- CONFIG_SHA1 - support SHA1 hashing
- CONFIG_SHA256 - support SHA256 hashing
+ CONFIG_SHA1 - This option enables support for hashing using the
+ SHA1 algorithm. The hash is calculated in software.
+ CONFIG_SHA256 - This option enables support for hashing using the
+ SHA256 algorithm. The hash is calculated in software.
+ CONFIG_SHA_HW_ACCEL - This option enables hash calculation using
+ the SHA1/SHA256 algorithms in hardware. Callers of the
+ hash_lookup_algo function get a pointer to a structure with
+ hardware-accelerated SHA support. The hash command automatically
+ uses hardware support if this option is enabled.
+ CONFIG_SHA_PROG_HW_ACCEL - This option enables SHA1 or SHA256
+ progressive hashing using hardware acceleration. The
+ hash_progressive_lookup_algo function returns a pointer to a
+ structure with support for progressive hashing in hardware.
+ FIT_SIGNATURE, which uses this function, automatically uses
+ hardware support if this option is enabled.
Note: There is also a sha1sum command, which should perhaps
be deprecated in favour of 'hash sha1'.
@@ -3444,8 +3457,10 @@ FIT uImage format:
CONFIG_FIT_SIGNATURE
This option enables signature verification of FIT uImages,
- using a hash signed and verified using RSA. See
- doc/uImage.FIT/signature.txt for more details.
+ using a hash signed and verified using RSA. If
+ CONFIG_SHA_PROG_HW_ACCEL is defined, i.e. progressive hashing
+ support is available in hardware, the RSA library will use it.
+ See doc/uImage.FIT/signature.txt for more details.
WARNING: When relying on signed FIT images with required
signature check the legacy image format is default
diff --git a/common/hash.c b/common/hash.c
index d154d02..9e9f84b 100644
--- a/common/hash.c
+++ b/common/hash.c
@@ -127,11 +127,21 @@ static struct hash_algo hash_algo[] = {
SHA1_SUM_LEN,
hw_sha1,
CHUNKSZ_SHA1,
+#ifdef CONFIG_SHA_PROG_HW_ACCEL
+ hw_sha_init,
+ hw_sha_update,
+ hw_sha_finish,
+#endif
}, {
"sha256",
SHA256_SUM_LEN,
hw_sha256,
CHUNKSZ_SHA256,
+#ifdef CONFIG_SHA_PROG_HW_ACCEL
+ hw_sha_init,
+ hw_sha_update,
+ hw_sha_finish,
+#endif
},
#endif
#ifdef CONFIG_SHA1
diff --git a/drivers/crypto/fsl/fsl_hash.c b/drivers/crypto/fsl/fsl_hash.c
index d77f257..c298404 100644
--- a/drivers/crypto/fsl/fsl_hash.c
+++ b/drivers/crypto/fsl/fsl_hash.c
@@ -10,6 +10,9 @@
#include "jobdesc.h"
#include "desc.h"
#include "jr.h"
+#include "fsl_hash.h"
+#include <hw_sha.h>
+#include <asm-generic/errno.h>
#define CRYPTO_MAX_ALG_NAME 80
#define SHA1_DIGEST_SIZE 20
@@ -39,6 +42,122 @@ static struct caam_hash_template driver_hash[] = {
},
};
+static enum caam_hash_algos get_hash_type(struct hash_algo *algo)
+{
+ if (!strcmp(algo->name, driver_hash[SHA1].name))
+ return SHA1;
+ else
+ return SHA256;
+}
+
+/* Create the context for progressive hashing using h/w acceleration.
+ *
+ * @ctxp: Pointer to the pointer of the context for hashing
+ * @caam_algo: Enum for SHA1 or SHA256
+ * @return 0 if ok, -ENOMEM on error
+ */
+static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo)
+{
+ *ctxp = calloc(1, sizeof(struct sha_ctx));
+ if (*ctxp == NULL) {
+ debug("Cannot allocate memory for context\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * Update sg table for progressive hashing using h/w acceleration
+ *
+ * The context is freed by this function if an error occurs.
+ * We support at most 32 Scatter/Gather Entries.
+ *
+ * @hash_ctx: Pointer to the context for hashing
+ * @buf: Pointer to the buffer being hashed
+ * @size: Size of the buffer being hashed
+ * @is_last: 1 if this is the last update; 0 otherwise
+ * @caam_algo: Enum for SHA1 or SHA256
+ * @return 0 if ok, -EINVAL on error
+ */
+static int caam_hash_update(void *hash_ctx, const void *buf,
+ unsigned int size, int is_last,
+ enum caam_hash_algos caam_algo)
+{
+ uint32_t final = 0;
+ dma_addr_t addr = virt_to_phys((void *)buf);
+ struct sha_ctx *ctx = hash_ctx;
+
+ if (ctx->sg_num >= MAX_SG_32) {
+ free(ctx);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_PHYS_64BIT
+ ctx->sg_tbl[ctx->sg_num].addr_hi = addr >> 32;
+#else
+ ctx->sg_tbl[ctx->sg_num].addr_hi = 0x0;
+#endif
+ ctx->sg_tbl[ctx->sg_num].addr_lo = addr;
+
+ sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
+ (size & SG_ENTRY_LENGTH_MASK));
+
+ ctx->sg_num++;
+
+ if (is_last) {
+ final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
+ SG_ENTRY_FINAL_BIT;
+ sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
+ }
+
+ return 0;
+}
+
+/*
+ * Perform progressive hashing on the given buffer and copy the hash
+ * to the destination buffer
+ *
+ * The context is freed after completion of the hash operation.
+ *
+ * @hash_ctx: Pointer to the context for hashing
+ * @dest_buf: Pointer to the destination buffer where the hash is to be copied
+ * @size: Size of the destination buffer
+ * @caam_algo: Enum for SHA1 or SHA256
+ * @return 0 if ok, -EINVAL on error
+ */
+static int caam_hash_finish(void *hash_ctx, void *dest_buf,
+ int size, enum caam_hash_algos caam_algo)
+{
+ uint32_t len = 0;
+ struct sha_ctx *ctx = hash_ctx;
+ int i = 0, ret = 0;
+
+ if (size < driver_hash[caam_algo].digestsize) {
+ free(ctx);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ctx->sg_num; i++)
+ len += (sec_in32(&ctx->sg_tbl[i].len_flag) &
+ SG_ENTRY_LENGTH_MASK);
+
+ inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
+ ctx->hash,
+ driver_hash[caam_algo].alg_type,
+ driver_hash[caam_algo].digestsize,
+ 1);
+
+ ret = run_descriptor_jr(ctx->sha_desc);
+
+ if (ret)
+ debug("Error %x\n", ret);
+ else
+ memcpy(dest_buf, ctx->hash, sizeof(ctx->hash));
+
+ free(ctx);
+ return ret;
+}
+
int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
unsigned char *pout, enum caam_hash_algos algo)
{
@@ -48,7 +167,7 @@ int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
desc = malloc(sizeof(int) * MAX_CAAM_DESCSIZE);
if (!desc) {
debug("Not enough memory for descriptor allocation\n");
- return -1;
+ return -ENOMEM;
}
inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout,
@@ -75,3 +194,20 @@ void hw_sha1(const unsigned char *pbuf, unsigned int buf_len,
if (caam_hash(pbuf, buf_len, pout, SHA1))
printf("CAAM was not setup properly or it is faulty\n");
}
+
+int hw_sha_init(struct hash_algo *algo, void **ctxp)
+{
+ return caam_hash_init(ctxp, get_hash_type(algo));
+}
+
+int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
+ unsigned int size, int is_last)
+{
+ return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo));
+}
+
+int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
+ int size)
+{
+ return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo));
+}
diff --git a/drivers/crypto/fsl/fsl_hash.h b/drivers/crypto/fsl/fsl_hash.h
new file mode 100644
index 0000000..f5be651
--- /dev/null
+++ b/drivers/crypto/fsl/fsl_hash.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ *
+ */
+
+#ifndef _SHA_H
+#define _SHA_H
+
+#include <fsl_sec.h>
+#include <hash.h>
+#include "jr.h"
+
+/* We support at most 32 Scatter/Gather Entries. */
+#define MAX_SG_32 32
+
+/*
+ * Hash context contains the following fields
+ * @sha_desc: SHA job descriptor
+ * @sg_num: number of entries in the sg table
+ * @len: total length of the buffer
+ * @sg_tbl: sg entry table
+ * @hash: buffer for the calculated hash
+ */
+struct sha_ctx {
+ uint32_t sha_desc[64];
+ uint32_t sg_num;
+ uint32_t len;
+ struct sg_entry sg_tbl[MAX_SG_32];
+ u8 hash[HASH_MAX_DIGEST_SIZE];
+};
+
+#endif
diff --git a/include/fsl_sec.h b/include/fsl_sec.h
index aa850a3..b6e6f04 100644
--- a/include/fsl_sec.h
+++ b/include/fsl_sec.h
@@ -175,6 +175,32 @@ struct jr_regs {
u32 jrcr;
};
+/*
+ * Scatter Gather Entry - Specifies the Scatter Gather Format
+ * related information
+ */
+struct sg_entry {
+#ifdef CONFIG_SYS_FSL_SEC_LE
+ uint32_t addr_lo; /* Memory Address - lo */
+ uint16_t addr_hi; /* Memory Address of start of buffer - hi */
+ uint16_t reserved_zero;
+#else
+ uint16_t reserved_zero;
+ uint16_t addr_hi; /* Memory Address of start of buffer - hi */
+ uint32_t addr_lo; /* Memory Address - lo */
+#endif
+
+ uint32_t len_flag; /* Length of the data in the frame */
+#define SG_ENTRY_LENGTH_MASK 0x3FFFFFFF
+#define SG_ENTRY_EXTENSION_BIT 0x80000000
+#define SG_ENTRY_FINAL_BIT 0x40000000
+ uint32_t bpid_offset;
+#define SG_ENTRY_BPID_MASK 0x00FF0000
+#define SG_ENTRY_BPID_SHIFT 16
+#define SG_ENTRY_OFFSET_MASK 0x00001FFF
+#define SG_ENTRY_OFFSET_SHIFT 0
+};
+
int sec_init(void);
#endif
diff --git a/include/hw_sha.h b/include/hw_sha.h
index 783350d..ab19a99 100644
--- a/include/hw_sha.h
+++ b/include/hw_sha.h
@@ -7,7 +7,7 @@
*/
#ifndef __HW_SHA_H
#define __HW_SHA_H
-
+#include <hash.h>
/**
* Computes hash value of input pbuf using h/w acceleration
@@ -34,4 +34,43 @@ void hw_sha256(const uchar * in_addr, uint buflen,
*/
void hw_sha1(const uchar * in_addr, uint buflen,
uchar * out_addr, uint chunk_size);
+
+/*
+ * Create the context for sha progressive hashing using h/w acceleration
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctxp: Pointer to the pointer of the context for hashing
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_init(struct hash_algo *algo, void **ctxp);
+
+/*
+ * Update buffer for sha progressive hashing using h/w acceleration
+ *
+ * The context is freed by this function if an error occurs.
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctx: Pointer to the context for hashing
+ * @buf: Pointer to the buffer being hashed
+ * @size: Size of the buffer being hashed
+ * @is_last: 1 if this is the last update; 0 otherwise
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
+ unsigned int size, int is_last);
+
+/*
+ * Copy the SHA hash result to the destination location
+ *
+ * The context is freed after completion of the hash operation or on error.
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctx: Pointer to the context for hashing
+ * @dest_buf: Pointer to the destination buffer where the hash is to be copied
+ * @size: Size of the destination buffer
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
+ int size);
+
#endif
diff --git a/lib/Kconfig b/lib/Kconfig
index a1f30a2..e3dd7ad 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -29,4 +29,42 @@ config SYS_HZ
source lib/rsa/Kconfig
+menu "Hashing Support"
+
+config SHA1
+ bool "Enable SHA1 support"
+ help
+ This option enables support for hashing using the SHA1 algorithm.
+ The hash is calculated in software.
+ The SHA1 algorithm produces a 160-bit (20-byte) hash value,
+ also termed the digest.
+
+config SHA256
+ bool "Enable SHA256 support"
+ help
+ This option enables support for hashing using the SHA256 algorithm.
+ The hash is calculated in software.
+ The SHA256 algorithm produces a 256-bit (32-byte) hash value,
+ also termed the digest.
+
+config SHA_HW_ACCEL
+ bool "Enable hashing using hardware"
+ help
+ This option enables hash calculation using the SHA1/SHA256
+ algorithms in hardware. Callers of the hash_lookup_algo function
+ get a pointer to a structure with hardware-accelerated SHA
+ support. The hash command automatically uses hardware support
+ if this option is enabled.
+
+config SHA_PROG_HW_ACCEL
+ bool "Enable Progressive hashing support using hardware"
+ depends on SHA_HW_ACCEL
+ help
+ This option enables SHA1 or SHA256 progressive hashing using
+ hardware acceleration. The hash_progressive_lookup_algo function
+ returns a pointer to a structure with support for progressive
+ hashing in hardware. FIT_SIGNATURE, which uses this function,
+ automatically uses hardware support if this option is enabled.
+endmenu
+
endmenu
--
1.8.1.4
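
Usage note (not part of the patch): once these Kconfig symbols exist, a board with the CAAM block would typically enable the feature through its defconfig, roughly as below. The board name is a placeholder; only symbols added or referenced by this patch are shown.

	# configs/<board>_defconfig (placeholder)
	CONFIG_FIT_SIGNATURE=y
	CONFIG_SHA_HW_ACCEL=y
	CONFIG_SHA_PROG_HW_ACCEL=y

With these set, the hash command and FIT signature verification pick up the hardware-backed implementations automatically, as described in the README text above.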