This patch implements the AES cipher algorithm which is executed on the SPU using the crypto async interface. Please consider this as a sample implementation of KSPU API. This patch is not 100% done: - lowmem_page_address() will be replaced with kmap() (what does exact the same job but looks better) - the src+dst argument is one scatter list element. Currently I assume that it will not be greater than one PAGE_SIZE what mus not always be the case. - setkey() is using VMX code instead of the SPU. The reason is that there is no callback for setkey(). In order to do it on the SPU I have copy the key in my private struct (the key will disappear on return from setkey()), fill the work struct and queue it on the SPU. This can be racy: - setkey(a); - encrypt() - setkey(b) - encrypt(); On a busy SPU, setkey(b) will overwrite setkey(a) data before the first encrypt() can occur. If most people think this is made up and will never happen IRL I would get rid of VMX then :) - Use multiple buffer for some speed. - Add CBC block mode. Currently, testing is possible with the crypto/tcrypt.c testing module (what is currently the only async crypto user). Signed-off-by: Sebastian Siewior Index: b/arch/powerpc/platforms/cell/Makefile =================================================================== --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile @@ -22,4 +22,5 @@ obj-$(CONFIG_SPU_BASE) += spu_callback $(spufs-modular-m) \ $(spu-priv1-y) \ $(spu-manage-y) \ + crypto/ \ spufs/ Index: b/arch/powerpc/platforms/cell/crypto/Kconfig =================================================================== --- /dev/null +++ b/arch/powerpc/platforms/cell/crypto/Kconfig @@ -0,0 +1,12 @@ +config CRYPTO_AES_SPU + tristate "AES cipher algorithm (SPU support)" + select CRYPTO_ABLKCIPHER + depends on SPU_KERNEL_SUPPORT + default m + help + AES cipher algorithms (FIPS-197). AES uses the Rijndael + algorithm. + The AES specifies three key sizes: 128, 192 and 256 bits. + See for more information. + + This version of AES performs its work on a SPU core. Index: b/arch/powerpc/platforms/cell/crypto/Makefile =================================================================== --- /dev/null +++ b/arch/powerpc/platforms/cell/crypto/Makefile @@ -0,0 +1,6 @@ +# +# Crypto, arch specific +# +CFLAGS_aes_vmx_addon.o += -O3 -maltivec +aes_spu-objs := aes_spu_wrap.o aes_vmx_addon.o +obj-$(CONFIG_CRYPTO_AES_SPU) += aes_spu.o Index: b/arch/powerpc/platforms/cell/crypto/aes_spu_wrap.c =================================================================== --- /dev/null +++ b/arch/powerpc/platforms/cell/crypto/aes_spu_wrap.c @@ -0,0 +1,473 @@ +/* + * AES interface module for the async crypto API. + * + * Author: Sebastian Siewior + * License: GPLv2 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "aes_vmx_addon.h" + +struct map_key_spu { + struct list_head list; + unsigned int spu_slot; + struct aes_ctx *slot_content; +}; + +struct aes_ctx { + /* the key used for enc|dec purpose */ + struct aes_key_struct key; + /* identify the slot on the SPU */ + struct map_key_spu *key_mapping; + /* identify the SPU that is used */ + struct async_aes *spe_ctx; +}; + +struct async_d_request { + enum SPU_FUNCTIONS crypto_operation; + /* + * If src|dst or iv is not properly aligned, we keep here a copy of + * it that is properly aligned. 
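+ * The aligned copy is only a bounce buffer: cleanup_requests() copies it
+ * back into the caller's unaligned destination/iv and frees it again once
+ * the SPU has finished with the request.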
+ */ + struct kspu_work_item kspu_work; + unsigned char *aligned_data; + unsigned char *aligned_iv; + unsigned int offset_data; +}; + +struct async_aes { + struct kspu_context *ctx; /* may be shared */ + struct map_key_spu mapping_key_spu[SPU_KEY_SLOTS]; /* aes private */ + struct list_head key_ring; /* aes private */ +}; + +static struct async_aes async_spu; + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 + +#define AES_BLOCK_SIZE 16 +#if 0 +static void dump_vec(const char *s, const char v[], unsigned int l) +{ + unsigned int i; + + printk("%s: ", s); + for (i=0; ialigned_data) { + + dst_addr = lowmem_page_address(req->dst->page) + req->dst->offset; + if ((unsigned long) dst_addr & ALIGN_MASK) { + memcpy(dst_addr, a_d_ctx->aligned_data, req->nbytes); + } + free_page((unsigned long) a_d_ctx->aligned_data); + } + if (a_d_ctx->aligned_iv) { + memcpy(req->info, a_d_ctx->aligned_iv, 16); + kfree(a_d_ctx->aligned_iv); + } + + printk("memory cleaned up, calling crypto user\n"); + + local_bh_disable(); + req->base.complete(&req->base, 0); + local_bh_enable(); + printk("leaving\n"); +} + +static void aes_finish_callback(struct kspu_work_item *kspu_work) +{ + struct async_d_request *a_d_ctx = container_of(kspu_work, struct async_d_request, kspu_work); + struct ablkcipher_request *ablk_req = ablkcipher_ctx_cast(a_d_ctx); + + printk("in finish callback for %p\n", kspu_work); + + a_d_ctx = ablkcipher_request_ctx(ablk_req); + cleanup_requests(ablk_req, a_d_ctx); + return; +} + +static void update_key_on_spu(struct work_item *work_item, struct aes_ctx *aes_ctx) +{ + struct list_head *tail; + struct map_key_spu *entry; + struct aes_update_key *aes_update_key; + + /* XXX */ + tail = async_spu.key_ring.prev; + entry = list_entry(tail, struct map_key_spu, list); + list_move(tail, &async_spu.key_ring); + + entry->slot_content = aes_ctx; + aes_ctx->key_mapping = entry; + + printk("%s(): key seems to be deleted. new slot: %d\n", __FUNCTION__, entry->spu_slot); + + work_item->operation = SPU_FUNC_aes_update_key; + aes_update_key = &work_item->aes_update_key; + + aes_update_key->new_key = (unsigned long long) &aes_ctx->key; + aes_update_key->keyid = entry->spu_slot; + + printk("key addr: %p, slot: %u \n", (void*) aes_update_key->new_key, aes_update_key->keyid); + work_item_ready(aes_ctx->spe_ctx->ctx, NULL); +} + +static int queue_request_on_spu(struct ablkcipher_request *req, struct async_d_request *a_d_ctx, + struct aes_ctx *aes_ctx) +{ + struct aes_crypt *aes_crypt; + struct work_item *work_item; + char *src_addr, *dst_addr; + + BUG_ON(req->nbytes & ALIGN_MASK); + + work_item = get_spu_queue_slot(aes_ctx->spe_ctx->ctx); + + if (!aes_ctx->key_mapping || aes_ctx != aes_ctx->key_mapping->slot_content) { + + printk("key not available on the SPU. need to update\n"); + update_key_on_spu(work_item, aes_ctx); + /* + * get a new free slot, for the real job. We might not get + * one, because only one is assured. In that we queue it, and + * try again on next call. On next calling if *this* request + * we have the key allready on the other side so can process + * the request then. + */ + work_item = get_spu_queue_slot(aes_ctx->spe_ctx->ctx); + printk("update done. 
next queue slot is %p\n", work_item); + if (!work_item) + return -EBUSY; + + } else { + printk("Key is on the SPU allready\n"); + list_move(&aes_ctx->key_mapping->list, &async_spu.key_ring); + } + + src_addr = lowmem_page_address(req->src->page) + req->src->offset; + dst_addr = lowmem_page_address(req->dst->page) + req->dst->offset; + + if ((unsigned long) src_addr & ALIGN_MASK || (unsigned long) dst_addr & ALIGN_MASK) { + + printk("not properly aligned: %p | %p\n", src_addr, dst_addr); + /* req->nbytes should be req->src|dst->length */ + WARN_ON(req->nbytes != req->src->length); + WARN_ON(req->nbytes != req->dst->length); + /* XXX */ + BUG_ON(PAGE_SIZE < (req->src->offset + req->nbytes)); + BUG_ON(PAGE_SIZE < (req->dst->offset + req->nbytes)); + + /* replace with kmalloc() ? */ + a_d_ctx->aligned_data = (char*) __get_free_page(GFP_KERNEL); + if (!a_d_ctx->aligned_data) { + printk("unaligned OOM\n"); + return -EBUSY; + } + + printk("replace with %p\n", a_d_ctx->aligned_data); + if ((unsigned long) src_addr & ALIGN_MASK) { + memcpy(a_d_ctx->aligned_data, src_addr, req->nbytes); + src_addr = a_d_ctx->aligned_data; + } + + if ((unsigned long) dst_addr & ALIGN_MASK) { + dst_addr = a_d_ctx->aligned_data; + } + } else + a_d_ctx->aligned_data = NULL; + + printk("aligned_IV: %p\n", a_d_ctx->aligned_iv); + + if ((unsigned long) req->info & ALIGN_MASK) { + /* + * XXX + * copy the IV + * for non iv users: a_d_ctx is not zeroed before usage. + */ + a_d_ctx->aligned_iv = NULL; + } else + a_d_ctx->aligned_iv = NULL; + + work_item->operation = a_d_ctx->crypto_operation; + + aes_crypt = &work_item->aes_crypt; + aes_crypt->in = (unsigned long int) src_addr; + aes_crypt->out = (unsigned long int) dst_addr; + aes_crypt->data_size = req->nbytes; + aes_crypt->iv = (unsigned long int) a_d_ctx->aligned_iv; + aes_crypt->keyid = aes_ctx->key_mapping->spu_slot; + printk("in: %p, out %p, data_size: %u\n", + (void*) aes_crypt->in, (void*) aes_crypt->out, + aes_crypt->data_size); + printk("iv: %p, %d\n", (void*) aes_crypt->iv, aes_crypt->keyid); + printk("spu's work order ready. notify with %p\n", req); + a_d_ctx->kspu_work.notify = aes_finish_callback; + work_item_ready(aes_ctx->spe_ctx->ctx, &a_d_ctx->kspu_work); + return 0; +} + +/* + * aes_queue_work_items() is called by kspu to queue the work item on the SPU. + * kspu ensures atleast one slot when calling. The function may return 0 if + * more slots were required but not available. In this case, kspu will call + * again with the same work item. The function has to notice that this work + * item has been started and continue. + * Other return values (!=0) will remove the work item from list. 
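+ * Here queue_request_on_spu() supplies that status: 0 means the request
+ * has been queued on the SPU, so 1 is returned and kspu drops the item;
+ * -EBUSY (no second slot free after a key update, or the bounce page
+ * allocation failed) is mapped to 0 so that kspu retries later.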
+ */ +static int aes_queue_work_items(struct kspu_work_item *kspu_work) +{ + struct async_d_request *a_d_ctx = container_of(kspu_work, struct async_d_request, kspu_work); + struct ablkcipher_request *ablk_req = ablkcipher_ctx_cast(a_d_ctx); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(ablk_req); + struct aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + unsigned int ret; + + printk("%s():%d\n", __FUNCTION__, __LINE__); + + ret = queue_request_on_spu(ablk_req, a_d_ctx, ctx); + if (!ret) + return 1; + + if (ret == -EBUSY) + return 0; + + BUG(); +} + +static int enqueue_request(struct ablkcipher_request *req, + enum SPU_FUNCTIONS op_type) +{ + struct async_d_request *asy_d_ctx = ablkcipher_request_ctx(req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct kspu_work_item *work = &asy_d_ctx->kspu_work; + + asy_d_ctx->crypto_operation = op_type; + work->enqueue = aes_queue_work_items; + printk("asy_d_ctx: %p\n", asy_d_ctx); + printk("kspu_work: %p\n", &asy_d_ctx->kspu_work); + + return enqueue_for_spu(ctx->spe_ctx->ctx, &asy_d_ctx->kspu_work); +} + +/* + * AltiVec and not SPU code is because the key may disappear after calling + * this func (for example if it is not properly aligned) + */ +static int aes_set_key_async(struct crypto_ablkcipher *parent, + const u8 *key, unsigned int keylen) +{ + struct aes_ctx *ctx = crypto_ablkcipher_ctx(parent); + int ret; + + ctx->spe_ctx = &async_spu; + ctx->key.len = keylen / 4; + ctx->key_mapping = NULL; + + preempt_disable(); + enable_kernel_altivec(); + ret = expand_key(key, keylen / 4, &ctx->key.enc[0], &ctx->key.dec[0]); + preempt_enable(); + + printk("aes_ctx: %p\n", ctx); + printk("spe: %p\n", ctx->spe_ctx); + if (ret == -EINVAL) + crypto_ablkcipher_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); + else + printk("Set key okey\n"); + + return ret; +} + +static int aes_encrypt_ecb_async(struct ablkcipher_request *req) { + + printk("Recevied req @%p\n", req); + req->info = NULL; + return enqueue_request(req, SPU_FUNC_aes_encrypt_ecb); +} + +static int aes_decrypt_ecb_async(struct ablkcipher_request *req) { + + req->info = NULL; + return enqueue_request(req, SPU_FUNC_aes_decrypt_ecb); +} +#if 0 +static int aes_encrypt_cbc_async(struct ablkcipher_request *req) { + + return enqueue_request(req, SPU_FUNC_aes_encrypt_cbc); +} + +static int aes_decrypt_cbc_async(struct ablkcipher_request *req) { + + return enqueue_request(req, SPU_FUNC_aes_decrypt_cbc); +} +#endif +static int async_d_init(struct crypto_tfm *tfm) +{ + tfm->crt_ablkcipher.reqsize = sizeof(struct async_d_request); + return 0; +} +#if 0 +static void async_d_exit(struct crypto_tfm *tfm) +{ + printk("currently in %s()\n", __FUNCTION__); +} + +static void async_d_destory(struct crypto_alg *alg) +{ + printk("currently in %s()\n", __FUNCTION__); +} +#endif +static struct crypto_alg aes_ecb_alg_async = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-spu-async", + .cra_priority = 125, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_alignmask = 15, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_ecb_alg_async.cra_list), + .cra_init = async_d_init, +// .cra_exit = async_d_exit, +// .cra_destroy = async_d_destory, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = 0, + .setkey = aes_set_key_async, + .encrypt 
= aes_encrypt_ecb_async, + .decrypt = aes_decrypt_ecb_async, + } + } +}; +#if 0 +static struct crypto_alg aes_cbc_alg_async = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-spu-async", + .cra_priority = 125, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_alignmask = 15, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_cbc_alg_async.cra_list), + .cra_init = async_d_init, +// .cra_exit = async_d_exit, +// .cra_destroy = async_d_destory, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key_async, + .encrypt = aes_encrypt_cbc_async, + .decrypt = aes_decrypt_cbc_async, + } + } +}; +#endif + +static void init_spu_key_mapping(struct async_aes *spe_ctx) +{ + unsigned int i; + + INIT_LIST_HEAD(&spe_ctx->key_ring); + + for (i=0; imapping_key_spu[i].list, &spe_ctx->key_ring); + spe_ctx->mapping_key_spu[i].spu_slot = i; + } +} + +static int init_async_ctx(struct async_aes *spe_ctx) +{ + int ret; + + spe_ctx->ctx = get_kspu_ctx(); + printk("|spe_ctx: %p\n", spe_ctx); + printk("|kspuctx: %p\n", spe_ctx->ctx); + + init_spu_key_mapping(spe_ctx); + + ret = crypto_register_alg(&aes_ecb_alg_async); + if (ret) { + printk("crypto_register_alg(ecb) failed: %d\n", ret); + goto err_kthread; + } +#if 0 + ret = crypto_register_alg(&aes_cbc_alg_async); + if (ret) { + printk("crypto_register_alg(cbc) failed: %d\n", ret); + goto fail_cbc; + } +#endif + return 0; +#if 0 +fail_cbc: + crypto_unregister_alg(&aes_ecb_alg_async); +#endif +err_kthread: + return ret; +} + +static void deinit_async_ctx(struct async_aes *async_aes) { + + crypto_unregister_alg(&aes_ecb_alg_async); +// crypto_unregister_alg(&aes_cbc_alg_async); +} + +static int __init aes_init(void) +{ + unsigned int ret; + + ret = init_async_ctx(&async_spu); + if (ret) { + printk("async_api_init() failed\n"); + return ret; + } + + printk("AES+SPU on board\n"); + return 0; +} + +static void __exit aes_fini(void) +{ + deinit_async_ctx(&async_spu); +} + +module_init(aes_init); +module_exit(aes_fini); + +MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with SPU support"); +MODULE_LICENSE("GPL"); Index: b/arch/powerpc/platforms/cell/crypto/aes_vmx_addon.c =================================================================== --- /dev/null +++ b/arch/powerpc/platforms/cell/crypto/aes_vmx_addon.c @@ -0,0 +1,279 @@ +/* + * Key expansion in VMX. + * This is a rip of my first AES implementation in VMX. Only key expansion is + * required, other parts are left behind. + * + * Author: Sebastian Siewior (bigeasy _at_ breakpoint.cc) + * License: GPL v2 + */ + +#include +#include +#include +#include "aes_vmx_addon.h" + +static const vector unsigned char imm_7Fh = { + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f +}; + +/* + * This values are either defined in AES standard or can be + * computed. 
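+ * Rcon[i] is the round constant x^(i-1) in GF(2^8) (reduction polynomial
+ * 0x11b), kept in the most significant byte of the word; expand_key()
+ * never uses index 0.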
+ */ +static const unsigned int Rcon[] = { + 0x00000000, 0x01000000, 0x02000000, 0x04000000, 0x08000000, + 0x10000000, 0x20000000, 0x40000000, 0x80000000, 0x1b000000, + 0x36000000 +}; + +static const vector unsigned char sbox_enc[16] = { + { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, + 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 }, + { 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, + 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 }, + { 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, + 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 }, + { 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, + 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 }, + { 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, + 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 }, + { 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, + 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf }, + { 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, + 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 }, + { 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, + 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 }, + { 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, + 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 }, + { 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, + 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb }, + { 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, + 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 }, + { 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, + 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 }, + { 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, + 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a }, + { 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, + 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e }, + { 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, + 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf }, + { 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, + 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 } +}; + +static const vector unsigned char inv_select_0e = { + 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f +}; + +static const vector unsigned char inv_select_0b = { + 0x01, 0x02, 0x03, 0x00, + 0x05, 0x06, 0x07, 0x04, + 0x09, 0x0a, 0x0b, 0x08, + 0x0d, 0x0e, 0x0f, 0x0c +}; + +static const vector unsigned char inv_select_0d = { + 0x02, 0x03, 0x00, 0x01, + 0x06, 0x07, 0x04, 0x05, + 0x0a, 0x0b, 0x08, 0x09, + 0x0e, 0x0f, 0x0c, 0x0d +}; + +static const vector unsigned char inv_select_09 = { + 0x03, 0x00, 0x01, 0x02, + 0x07, 0x04, 0x05, 0x06, + 0x0b, 0x08, 0x09, 0x0a, + 0x0f, 0x0c, 0x0d, 0x0e +}; + +static vector unsigned char ByteSub(vector unsigned char state) +{ + /* line of the s-box */ + vector unsigned char line_01, line_23, line_45, line_67, + line_89, line_AB, line_CD, line_EF; + /* selector */ + vector unsigned char sel1, sel2, sel7; + /* correct lines */ + vector unsigned char cor_0123, cor_4567, cor_89AB, cor_CDEF, + cor_0to7, cor_8toF; + vector unsigned char ret_state; + vector unsigned char state_shift2, state_shift1; + + line_01 = vec_perm(sbox_enc[0], sbox_enc[1], state); + line_23 = vec_perm(sbox_enc[2], sbox_enc[3], state); + line_45 = vec_perm(sbox_enc[4], sbox_enc[5], state); + line_67 = vec_perm(sbox_enc[6], sbox_enc[7], state); + line_89 = vec_perm(sbox_enc[8], sbox_enc[9], state); + line_AB = vec_perm(sbox_enc[10], sbox_enc[11], state); + line_CD = vec_perm(sbox_enc[12], sbox_enc[13], state); + line_EF = vec_perm(sbox_enc[14], sbox_enc[15], state); + + state_shift2 = vec_vslb(state, vec_splat_u8(2)); + sel2 = (typeof (sel2)) vec_vcmpgtub(state_shift2, 
imm_7Fh); + cor_0123 = vec_sel(line_01, line_23, sel2); + cor_4567 = vec_sel(line_45, line_67, sel2); + cor_89AB = vec_sel(line_89, line_AB, sel2); + cor_CDEF = vec_sel(line_CD, line_EF, sel2); + + state_shift1 = vec_vslb(state, vec_splat_u8(1)); + sel1 = (typeof (sel1))vec_vcmpgtub(state_shift1, imm_7Fh); + cor_0to7 = vec_sel(cor_0123, cor_4567, sel1); + cor_8toF = vec_sel(cor_89AB, cor_CDEF, sel1); + + sel7 = (typeof (sel7))vec_vcmpgtub(state, imm_7Fh); + ret_state = vec_sel(cor_0to7, cor_8toF, sel7); + + return ret_state; +} + +static vector unsigned char InvMixColumn(vector unsigned char state) +{ + vector unsigned char op0, op1, op2, op3, op4, op5; + vector unsigned char mul_0e, mul_09, mul_0d, mul_0b; + vector unsigned char ret; + vector unsigned char imm_00h, imm_01h; + vector unsigned char need_add; + vector unsigned char shifted_vec, modul; + vector unsigned char toadd; + vector unsigned char mul_2, mul_4, mul_8; + vector unsigned char mul_2_4; + + /* compute 0e, 0b, 0d, 09 in GF */ + imm_00h = vec_splat_u8(0x00); + imm_01h = vec_splat_u8(0x01); + + modul = vec_splat( vec_lvsr(0, (unsigned char *) 0), 0x0b); // 0x1b + + need_add = (vector unsigned char)vec_vcmpgtub(state, imm_7Fh); + shifted_vec = vec_vslb(state, imm_01h); + toadd = vec_sel(imm_00h, modul, need_add); + mul_2 = vec_xor(toadd, shifted_vec); + + need_add = (vector unsigned char)vec_vcmpgtub(mul_2, imm_7Fh); + shifted_vec = vec_vslb(mul_2, imm_01h); + toadd = vec_sel(imm_00h, modul, need_add); + mul_4 = vec_xor(toadd, shifted_vec); + + need_add = (vector unsigned char)vec_vcmpgtub(mul_4, imm_7Fh); + shifted_vec = vec_vslb(mul_4, imm_01h); + toadd = vec_sel(imm_00h, modul, need_add); + mul_8 = vec_xor(toadd, shifted_vec); + + mul_2_4 = vec_xor(mul_2, mul_4); + /* 09 = 8 * 1 */ + mul_09 = vec_xor(mul_8, state); + + /* 0e = 2 * 4 * 8 */ + mul_0e = vec_xor(mul_2_4, mul_8); + + /* 0b = 2 * 8 * 1 */ + mul_0b = vec_xor(mul_2, mul_09); + + /* 0d = 4 * 8 * 1 */ + mul_0d = vec_xor(mul_4, mul_09); + + /* prepare vectors for add */ + + op0 = vec_perm(mul_0e, mul_0e, inv_select_0e); + op1 = vec_perm(mul_0b, mul_0b, inv_select_0b); + op2 = vec_perm(mul_0d, mul_0d, inv_select_0d); + op3 = vec_perm(mul_09, mul_09, inv_select_09); + + op4 = vec_xor(op0, op1); + op5 = vec_xor(op2, op3); + ret = vec_xor(op4, op5); + return ret; +} + +static unsigned int SubWord(unsigned int in) +{ + unsigned char buff[16] __attribute__ ((aligned (16))); + vector unsigned char vec_buf; + + buff[0] = in >> 24; + buff[1] = (in >> 16) & 0xff; + buff[2] = (in >> 8) & 0xff; + buff[3] = in & 0xff; + + vec_buf = vec_ld(0, buff); + vec_buf = ByteSub(vec_buf); + vec_st(vec_buf, 0, buff); + return buff[0] << 24 | buff[1] << 16 | buff[2] << 8 | buff[3]; +} + +static unsigned int RotWord(unsigned int word) +{ + return (word << 8 | word >> 24); +} + +int expand_key(const unsigned char *key, unsigned int keylen, + unsigned char exp_enc_key[15 *4*4], unsigned char exp_dec_key[15*4*4]) +{ + unsigned int tmp, i, rounds; + unsigned int expanded_key[15 *4] __attribute__ ((aligned (16))); + vector unsigned char expanded_dec_key[15]; + vector unsigned char mixed_key; + vector unsigned char *cur_key; + + switch (keylen) { + case 4: + rounds = 10; + break; + + case 6: + rounds = 12; + break; + + case 8: + rounds = 14; + break; + + default: + /* wrong key size */ + return -EINVAL; + } + + memcpy(expanded_key, key, keylen*4); + + i = keylen; + + /* setup enc key */ + + for (; i< 4 * (rounds+1); i++) { + tmp = expanded_key[i-1]; + + if (!(i % keylen)) { + tmp = RotWord(tmp); + 
tmp = SubWord(tmp); + tmp ^= Rcon[i / keylen ]; + } else if (keylen > 6 && (i % keylen == 4)) + tmp = SubWord(tmp); + + expanded_key[i] = expanded_key[i-keylen] ^ tmp; + } + + memcpy(exp_enc_key, expanded_key, 15*4*4); + + /* setup dec key: the key is turned arround and prepared for the + * "alternative decryption" mode + */ + + cur_key = (vector unsigned char*) expanded_key; + + memcpy(&expanded_dec_key[rounds], &expanded_key[0], 4*4); + memcpy(&expanded_dec_key[0], &expanded_key[rounds *4], 4*4); + + cur_key++; + for (i = (rounds-1); i> 0; i--) { + + mixed_key = InvMixColumn(*cur_key++); + expanded_dec_key[i] = mixed_key; + } + + memcpy(exp_dec_key, expanded_dec_key, 15*4*4); + return 0; +} Index: b/arch/powerpc/platforms/cell/crypto/aes_vmx_addon.h =================================================================== --- /dev/null +++ b/arch/powerpc/platforms/cell/crypto/aes_vmx_addon.h @@ -0,0 +1,7 @@ +#ifndef __aes_vmx_addon_h__ +#define __aes_vmx_addon_h__ + +int expand_key(const unsigned char *key, unsigned int keylen, + unsigned char exp_enc_key[15 *4*4], unsigned char exp_dec_key[15*4*4]); + +#endif Index: b/arch/powerpc/platforms/cell/spufs/Makefile =================================================================== --- a/arch/powerpc/platforms/cell/spufs/Makefile +++ b/arch/powerpc/platforms/cell/spufs/Makefile @@ -24,6 +24,7 @@ $(obj)/switch.o: $(obj)/spu_save_dump.h clean-files := spu_save_dump.h spu_restore_dump.h spu_kspu_dump.h spu_kspu_code_obj-y += $(obj)/spu_main.o +spu_kspu_code_obj-$(CONFIG_CRYPTO_AES_SPU) += $(obj)/spu_aes.o spu_kspu_code_obj-y += $(spu_kspu_code_obj-m) $(obj)/spu_kspu.o: $(spu_kspu_code_obj-y) Index: b/arch/powerpc/platforms/cell/spufs/spu_aes.c =================================================================== --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_aes.c @@ -0,0 +1,995 @@ +/* + * AES implementation with spu support. + * v.02 + * + * Author: + * Sebastian Siewior (bigeasy _at_ breakpoint.cc) + * Arnd Bergmann (arnd _at_ arndb.de) + * + * License: GPL v2 + * + * Code based on ideas from "Effincient Galois Field Arithmetic on SIMD Architectures" by + * Raghav Bhaskar, Prapdeep K. Dubey, Vijay Kumar, Atri Rudra and Animesh Sharma. + * + * This implementation makes use of spu and asumes therefore big endian (on the other + * hand only Intel makes it (still) wrong (well it made porting to 64bit probably a lot of + * easier)). + * Tables for MixColumn() and InvMixColumn() are adjusted in order to omit ShiftRow in all but + * last round. + */ +#include +#include +#include + +#include +#include + +//#define BUG() { printf("BUG() at %s:%d\n", __FUNCTION__, __LINE__); while(1) ; } +#define BUG() ; +/* + * This values are either defined in AES standard or can be + * computed. 
+ */ +static const unsigned int Rcon[] = { + 0x00000000, 0x01000000, 0x02000000, 0x04000000, 0x08000000, + 0x10000000, 0x20000000, 0x40000000, 0x80000000, 0x1b000000, + 0x36000000 +}; + +static const vector unsigned char sbox_enc[16] = { + { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, + 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 }, + { 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, + 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 }, + { 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, + 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 }, + { 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, + 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 }, + { 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, + 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 }, + { 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, + 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf }, + { 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, + 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 }, + { 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, + 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 }, + { 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, + 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 }, + { 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, + 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb }, + { 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, + 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 }, + { 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, + 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 }, + { 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, + 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a }, + { 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, + 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e }, + { 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, + 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf }, + { 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, + 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 } +}; + +static const vector unsigned char shift_round = { + 0x00, 0x05, 0x0a, 0x0f, + 0x04, 0x09, 0x0e, 0x03, + 0x08, 0x0d, 0x02, 0x07, + 0x0c, 0x01, 0x06, 0x0b +}; + +static const vector unsigned char pre_xor_s0 = { + 0x10, 0x00, 0x00, 0x10, + 0x14, 0x04, 0x04, 0x14, + 0x18, 0x08, 0x08, 0x18, + 0x1c, 0x0c, 0x0c, 0x1c +}; + +static const vector unsigned char pre_xor_s1 = { + 0x15, 0x15, 0x05, 0x00, + 0x19, 0x19, 0x09, 0x04, + 0x1d, 0x1d, 0x0d, 0x08, + 0x11, 0x11, 0x01, 0x0c +}; + +static const vector unsigned char pre_xor_s2 = { + 0x05, 0x1a, 0x1a, 0x05, + 0x09, 0x1e, 0x1e, 0x09, + 0x0d, 0x12, 0x12, 0x0d, + 0x01, 0x16, 0x16, 0x01 +}; + +static const vector unsigned char pre_xor_s3 = { + 0x0a, 0x0a, 0x1f, 0x0a, + 0x0e, 0x0e, 0x13, 0x0e, + 0x02, 0x02, 0x17, 0x02, + 0x06, 0x06, 0x1b, 0x06 +}; + +static const vector unsigned char pre_xor_s4 = { + 0x0f, 0x0f, 0x0f, 0x1f, + 0x03, 0x03, 0x03, 0x13, + 0x07, 0x07, 0x07, 0x17, + 0x0b, 0x0b, 0x0b, 0x1b +}; + +static const vector unsigned char sbox_dec[16] = { + { 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, + 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb }, + { 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, + 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb }, + { 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, + 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e }, + { 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, + 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 }, + { 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, + 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 }, + { 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, + 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 }, + { 0x90, 0xd8, 
0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, + 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 }, + { 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, + 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b }, + { 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, + 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 }, + { 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, + 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e }, + { 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, + 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b }, + { 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, + 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 }, + { 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, + 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f }, + { 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, + 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef }, + { 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, + 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 }, + { 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, + 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d } +}; + +static const vector unsigned char inv_shift_round = { + 0x00, 0x0d, 0x0a, 0x07, + 0x04, 0x01, 0x0e, 0x0B, + 0x08, 0x05, 0x02, 0x0f, + 0x0c, 0x09, 0x06, 0x03 +}; + +static const vector unsigned char inv_select_0e_shifted = { + 0x00, 0x0d, 0x0a, 0x07, + 0x04, 0x01, 0x0e, 0x0B, + 0x08, 0x05, 0x02, 0x0f, + 0x0c, 0x09, 0x06, 0x03 +}; + +static const vector unsigned char inv_select_0b_shifted = { + 0x0d, 0x0a, 0x07, 0x00, + 0x01, 0x0e, 0x0b, 0x04, + 0x05, 0x02, 0x0f, 0x08, + 0x09, 0x06, 0x03, 0x0c +}; + +static const vector unsigned char inv_select_0d_shifted = { + 0x0a, 0x07, 0x00, 0x0d, + 0x0e, 0x0b, 0x04, 0x01, + 0x02, 0x0f, 0x08, 0x05, + 0x06, 0x03, 0x0c, 0x09 +}; + +static const vector unsigned char inv_select_09_shifted = { + 0x07, 0x00, 0x0d, 0x0a, + 0x0b, 0x04, 0x01, 0x0e, + 0x0f, 0x08, 0x05, 0x02, + 0x03, 0x0c, 0x09, 0x06 +}; + +static const vector unsigned char inv_select_0e_norm = { + 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f +}; + +static const vector unsigned char inv_select_0b_norm = { + 0x01, 0x02, 0x03, 0x00, + 0x05, 0x06, 0x07, 0x04, + 0x09, 0x0a, 0x0b, 0x08, + 0x0d, 0x0e, 0x0f, 0x0c +}; + +static const vector unsigned char inv_select_0d_norm = { + 0x02, 0x03, 0x00, 0x01, + 0x06, 0x07, 0x04, 0x05, + 0x0a, 0x0b, 0x08, 0x09, + 0x0e, 0x0f, 0x0c, 0x0d +}; + +static const vector unsigned char inv_select_09_norm = { + 0x03, 0x00, 0x01, 0x02, + 0x07, 0x04, 0x05, 0x06, + 0x0b, 0x08, 0x09, 0x0a, + 0x0f, 0x0c, 0x0d, 0x0e +}; + +#ifdef CONFIG_CRYPTO_AES_ALTIspu_TABLE +/* small GF lookup table */ +static const vector unsigned char gf_mul_9_high = { + 0x00, 0x90, 0x3b, 0xab, 0x76, 0xe6, 0x4d, 0xdd, + 0xec, 0x7c, 0xd7, 0x47, 0x9a, 0x0a, 0xa1, 0x31 +}; +static const vector unsigned char gf_mul_b_high = { + 0x00, 0xb0, 0x7b, 0xcb, 0xf6, 0x46, 0x8d, 0x3d, + 0xf7, 0x47, 0x8c, 0x3c, 0x01, 0xb1, 0x7a, 0xca +}; +static const vector unsigned char gf_mul_d_high = { + 0x00, 0xd0, 0xbb, 0x6b, 0x6d, 0xbd, 0xd6, 0x06, + 0xda, 0x0a, 0x61, 0xb1, 0xb7, 0x67, 0x0c, 0xdc +}; +static const vector unsigned char gf_mul_e_high = { + 0x00, 0xe0, 0xdb, 0x3b, 0xad, 0x4d, 0x76, 0x96, + 0x41, 0xa1, 0x9a, 0x7a, 0xec, 0x0c, 0x37, 0xd7 +}; +static const vector unsigned char gf_mul_9_low = { + 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, + 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77 +}; +static const vector unsigned char gf_mul_b_low = { + 0x00, 0x0b, 0x16, 0x1d, 0x2c, 0x27, 0x3a, 0x31, + 0x58, 0x53, 0x4e, 0x45, 0x74, 0x7f, 0x62, 0x69 +}; +static 
const vector unsigned char gf_mul_d_low = { + 0x00, 0x0d, 0x1a, 0x17, 0x34, 0x39, 0x2e, 0x23, + 0x68, 0x65, 0x72, 0x7f, 0x5c, 0x51, 0x46, 0x4b +}; +static const vector unsigned char gf_mul_e_low = { + 0x00, 0x0e, 0x1c, 0x12, 0x38, 0x36, 0x24, 0x2a, + 0x70, 0x7e, 0x6c, 0x62, 0x48, 0x46, 0x54, 0x5a +}; +#endif +/* encryption code */ + +static vector unsigned char ByteSub(vector unsigned char state) +{ + /* line of the s-box */ + vector unsigned char line_01, line_23, line_45, line_67, + line_89, line_AB, line_CD, line_EF; + /* selector */ + vector unsigned char sel1, sel2, sel7; + /* correct lines */ + vector unsigned char cor_0123, cor_4567, cor_89AB, cor_CDEF, + cor_0to7, cor_8toF; + vector unsigned char ret_state, lower_state; + vector unsigned char state_shift2, state_shift1; + + lower_state = spu_and(state, (unsigned char) 0x1f); + line_01 = spu_shuffle(sbox_enc[0], sbox_enc[1], lower_state); + line_23 = spu_shuffle(sbox_enc[2], sbox_enc[3], lower_state); + line_45 = spu_shuffle(sbox_enc[4], sbox_enc[5], lower_state); + line_67 = spu_shuffle(sbox_enc[6], sbox_enc[7], lower_state); + line_89 = spu_shuffle(sbox_enc[8], sbox_enc[9], lower_state); + line_AB = spu_shuffle(sbox_enc[10], sbox_enc[11], lower_state); + line_CD = spu_shuffle(sbox_enc[12], sbox_enc[13], lower_state); + line_EF = spu_shuffle(sbox_enc[14], sbox_enc[15], lower_state); + + /* FIXME: INSTEAD OF state << 2 + cmpgt 0x7f => cmpeq && and but check pipe first */ + state_shift2 = spu_slqw(state, 2); + sel2 = (typeof (sel2)) spu_cmpgt(state_shift2, 0x7f); + cor_0123 = spu_sel(line_01, line_23, sel2); + cor_4567 = spu_sel(line_45, line_67, sel2); + cor_89AB = spu_sel(line_89, line_AB, sel2); + cor_CDEF = spu_sel(line_CD, line_EF, sel2); + + state_shift1 = spu_slqw(state, 1); + sel1 = (typeof (sel1))spu_cmpgt(state_shift1, 0x7f); + cor_0to7 = spu_sel(cor_0123, cor_4567, sel1); + cor_8toF = spu_sel(cor_89AB, cor_CDEF, sel1); + + sel7 = (typeof (sel7))spu_cmpgt(state, 0x7f); + ret_state = spu_sel(cor_0to7, cor_8toF, sel7); + /* TODO: Once you done here, goto the InvByteSub and FIXME as well */ + + return ret_state; +} + +static vector unsigned char ShiftRow(vector unsigned char state) +{ + + return spu_shuffle(state, state, shift_round); +} + +static vector unsigned char MixColumn(vector unsigned char state) +{ + vector unsigned char imm_00h; + vector unsigned char need_add, lower_state; + vector unsigned char shifted_vec, modul; + vector unsigned char toadd, xtimed; + vector unsigned char op1, op2, op3, op4, op5; + vector unsigned char xor_12, xor_34, xor_1234, ret; + + imm_00h = spu_splats((unsigned char) 0x00); + modul = spu_splats((unsigned char) 0x1b); + + need_add = (vector unsigned char)spu_cmpgt(state, 0x7f); + lower_state = spu_and(state, 0x7f); + shifted_vec = spu_slqw(lower_state, 0x01); + + toadd = spu_sel(imm_00h, modul, need_add); + + xtimed = spu_xor(toadd, shifted_vec); + + op1 = spu_shuffle(state, xtimed, pre_xor_s0); + op2 = spu_shuffle(state, xtimed, pre_xor_s1); + op3 = spu_shuffle(state, xtimed, pre_xor_s2); + op4 = spu_shuffle(state, xtimed, pre_xor_s3); + op5 = spu_shuffle(state, xtimed, pre_xor_s4); + + xor_12 = spu_xor(op1, op2); + xor_34 = spu_xor(op3, op4); + xor_1234 = spu_xor(xor_12, xor_34); + ret = spu_xor(xor_1234, op5); + + return ret; +} + +static vector unsigned char AddRoundKey(vector unsigned char state, + vector unsigned char key) +{ + return spu_xor(state,key); +} + +static vector unsigned char normalRound(vector unsigned char state, vector unsigned char key) +{ + vector unsigned char 
pstate; + + pstate = ByteSub(state); + pstate = MixColumn(pstate); + pstate = AddRoundKey(pstate, key); + return pstate; +} + +static vector unsigned char finalRound(vector unsigned char state, vector unsigned char key) +{ + vector unsigned char pstate; + + pstate = ByteSub(state); + pstate = ShiftRow(pstate); + pstate = AddRoundKey(pstate, key); + return pstate; +} + +static vector unsigned char aes_encrypt_block(vector unsigned char in, + const vector unsigned char *key, unsigned char key_len) +{ + unsigned char i; + vector unsigned char pstate; + + pstate = spu_xor(in, *key++); + switch (key_len) { + + case 8: /* 14 rounds */ + pstate = normalRound(pstate, *key++); + pstate = normalRound(pstate, *key++); + + case 6: /* 12 rounds */ + pstate = normalRound(pstate, *key++); + pstate = normalRound(pstate, *key++); + + case 4: /* 10 rounds */ + for (i=0; i<9; i++) + pstate = normalRound(pstate, *key++); + + break; + + default: + /* unsupported */ +// printf("key_len is %d\n", key_len); + BUG(); + ; + } + + pstate = finalRound(pstate, *key); + return pstate; +} + +static int aes_encrypt_spu_block_char(unsigned char *buffer, + const unsigned char *kp, unsigned int key_len) +{ + vector unsigned char pstate; + + pstate = (*((vector unsigned char *)(buffer))); + pstate = aes_encrypt_block(pstate, (const vector unsigned char*) kp, key_len); + + *((vec_uchar16 *)(buffer)) = pstate; + return 0; +} + +/* decryption code, alternative version */ + +static vector unsigned char InvByteSub(vector unsigned char state) +{ + /* line of the s-box */ + vector unsigned char line_01, line_23, line_45, line_67, + line_89, line_AB, line_CD, line_EF; + /* selector */ + vector unsigned char sel1, sel2, sel7; + /* correct lines */ + vector unsigned char cor_0123, cor_4567, cor_89AB, cor_CDEF, + cor_0to7, cor_8toF; + vector unsigned char ret_state, lower_state; + vector unsigned char state_shift2, state_shift1; + + lower_state = spu_and(state, 0x1f); + line_01 = spu_shuffle(sbox_dec[0], sbox_dec[1], lower_state); + line_23 = spu_shuffle(sbox_dec[2], sbox_dec[3], lower_state); + line_45 = spu_shuffle(sbox_dec[4], sbox_dec[5], lower_state); + line_67 = spu_shuffle(sbox_dec[6], sbox_dec[7], lower_state); + line_89 = spu_shuffle(sbox_dec[8], sbox_dec[9], lower_state); + line_AB = spu_shuffle(sbox_dec[10], sbox_dec[11], lower_state); + line_CD = spu_shuffle(sbox_dec[12], sbox_dec[13], lower_state); + line_EF = spu_shuffle(sbox_dec[14], sbox_dec[15], lower_state); + + state_shift2 = spu_slqw(state, 2); + sel2 = (typeof (sel2)) spu_cmpgt(state_shift2, 0x7f); + cor_0123 = spu_sel(line_01, line_23, sel2); + cor_4567 = spu_sel(line_45, line_67, sel2); + cor_89AB = spu_sel(line_89, line_AB, sel2); + cor_CDEF = spu_sel(line_CD, line_EF, sel2); + + state_shift1 = spu_slqw(state, 1); + sel1 = (typeof (sel1))spu_cmpgt(state_shift1, 0x7f); + cor_0to7 = spu_sel(cor_0123, cor_4567, sel1); + cor_8toF = spu_sel(cor_89AB, cor_CDEF, sel1); + + sel7 = (typeof (sel7))spu_cmpgt(state, 0x7f); + ret_state = spu_sel(cor_0to7, cor_8toF, sel7); + + return ret_state; +} + +static vector unsigned char InvShiftRow(vector unsigned char state) +{ + + return spu_shuffle(state, state, inv_shift_round); +} + +static vector unsigned char InvMixColumn(vector unsigned char state, + vector unsigned char inv_select_0e, vector unsigned char inv_select_0b, + vector unsigned char inv_select_0d, vector unsigned char inv_select_09 ) +{ + vector unsigned char op0, op1, op2, op3, op4, op5; + vector unsigned char mul_0e, mul_09, mul_0d, mul_0b; + vector unsigned char 
ret; + +#ifdef CONFIG_CRYPTO_AES_spu_TABLE + /* 14 operations, 1x 8 memory loads */ + + vector unsigned char state_high; + vector unsigned char imm_04h; + vector unsigned char mul_09_hi, mul_09_lo, mul_0b_hi, mul_0b_lo, mul_0d_hi, + mul_0d_lo, mul_0e_hi, mul_0e_lo; + + imm_04h = spu_splat_u8(0x04); + + state_high = spu_sr(state, imm_04h); + + mul_09_hi = spu_perm(gf_mul_9_high, gf_mul_9_high, state_high); + mul_0b_hi = spu_perm(gf_mul_b_high, gf_mul_b_high, state_high); + mul_0d_hi = spu_perm(gf_mul_d_high, gf_mul_d_high, state_high); + mul_0e_hi = spu_perm(gf_mul_e_high, gf_mul_e_high, state_high); + + mul_09_lo = spu_perm(gf_mul_9_low, gf_mul_9_low, state); + mul_0b_lo = spu_perm(gf_mul_b_low, gf_mul_b_low, state); + mul_0d_lo = spu_perm(gf_mul_d_low, gf_mul_d_low, state); + mul_0e_lo = spu_perm(gf_mul_e_low, gf_mul_e_low, state); + + mul_09 = spu_xor(mul_09_hi, mul_09_lo); + mul_0b = spu_xor(mul_0b_hi, mul_0b_lo); + mul_0d = spu_xor(mul_0d_hi, mul_0d_lo); + mul_0e = spu_xor(mul_0e_hi, mul_0e_lo); + +#else + /* 21 operations, 3x 1 memory loads */ + + vector unsigned char imm_00h; + vector unsigned char need_add, statef_shift; + vector unsigned char shifted_vec, modul; + vector unsigned char toadd; + vector unsigned char mul_2, mul_4, mul_8; + vector unsigned char mul_2_4; + + /* compute 0e, 0b, 0d, 09 in GF */ + imm_00h = spu_splats((unsigned char) 0x00); + modul = spu_splats((unsigned char) 0x1b); + + need_add = (vector unsigned char)spu_cmpgt(state, 0x7f); + toadd = spu_sel(imm_00h, modul, need_add); + statef_shift = spu_and(state, 0x7f); + shifted_vec = spu_slqw(statef_shift, 0x01); + mul_2 = spu_xor(toadd, shifted_vec); + + need_add = (vector unsigned char)spu_cmpgt(mul_2, 0x7f); + toadd = spu_sel(imm_00h, modul, need_add); + statef_shift = spu_and(mul_2, 0x7f); + shifted_vec = spu_slqw(statef_shift, 0x01); + mul_4 = spu_xor(toadd, shifted_vec); + + need_add = (vector unsigned char)spu_cmpgt(mul_4, 0x7f); + statef_shift = spu_and(mul_4, 0x7f); + shifted_vec = spu_slqw(statef_shift, 0x01); + toadd = spu_sel(imm_00h, modul, need_add); + mul_8 = spu_xor(toadd, shifted_vec); + + mul_2_4 = spu_xor(mul_2, mul_4); + /* 09 = 8 * 1 */ + mul_09 = spu_xor(mul_8, state); + + /* 0e = 2 * 4 * 8 */ + mul_0e = spu_xor(mul_2_4, mul_8); + + /* 0b = 2 * 8 * 1 */ + mul_0b = spu_xor(mul_2, mul_09); + + /* 0d = 4 * 8 * 1 */ + mul_0d = spu_xor(mul_4, mul_09); +#endif + + /* prepare vectors for add */ + + op0 = spu_shuffle(mul_0e, mul_0e, inv_select_0e); + op1 = spu_shuffle(mul_0b, mul_0b, inv_select_0b); + op2 = spu_shuffle(mul_0d, mul_0d, inv_select_0d); + op3 = spu_shuffle(mul_09, mul_09, inv_select_09); + + op4 = spu_xor(op0, op1); + op5 = spu_xor(op2, op3); + ret = spu_xor(op4, op5); + return ret; +} + +static vector unsigned char InvNormalRound(vector unsigned char state, + vector unsigned char key) +{ + vector unsigned char pstate; + + pstate = InvByteSub(state); + pstate = InvMixColumn(pstate, inv_select_0e_shifted, inv_select_0b_shifted, + inv_select_0d_shifted, inv_select_09_shifted); + pstate = AddRoundKey(pstate, key); + return pstate; +} + +static vector unsigned char InvfinalRound(vector unsigned char state, + vector unsigned char key) +{ + vector unsigned char pstate; + + pstate = InvByteSub(state); + pstate = InvShiftRow(pstate); + pstate = AddRoundKey(pstate, key); + return pstate; +} + + +static vector unsigned char aes_decrypt_block(vector unsigned char in, + const vector unsigned char *key, unsigned int key_len) +{ + vector unsigned char pstate; + unsigned int i; + + pstate = spu_xor(in, 
*key++); + + switch (key_len) { + case 8: /* 14 rounds */ + pstate = InvNormalRound(pstate, *key++); + pstate = InvNormalRound(pstate, *key++); + + case 6: /* 12 rounds */ + pstate = InvNormalRound(pstate, *key++); + pstate = InvNormalRound(pstate, *key++); + + case 4: /* 10 rounds */ + for (i=0; i<9; i++) + pstate = InvNormalRound(pstate, *key++); + + break; + + default: +// printf("key_len is %d\n", key_len); + BUG(); + } + + pstate = InvfinalRound(pstate, *key); + return pstate; +} + +static int aes_decrypt_block_char(unsigned char *buffer, + const unsigned char *kp, unsigned int key_len) +{ + vector unsigned char pstate; + + pstate = (*((vector unsigned char *)(buffer))); + pstate = aes_decrypt_block(pstate, (const vector unsigned char*) kp, key_len); + *((vec_uchar16 *)(buffer)) = pstate; + return 0; +} + +/* expand key */ +static unsigned int SubWord(unsigned int in) +{ + unsigned char buff[16] __attribute__ ((aligned (16))); + vector unsigned char spu_buf; + + buff[0] = in >> 24; + buff[1] = (in >> 16) & 0xff; + buff[2] = (in >> 8) & 0xff; + buff[3] = in & 0xff; + + spu_buf = (*((vector unsigned char *)(buff))); + spu_buf = ByteSub(spu_buf); + *((vec_uchar16 *)(buff)) = spu_buf; + return buff[0] << 24 | buff[1] << 16 | buff[2] << 8 | buff[3]; +} + +static unsigned int RotWord(unsigned int word) +{ + return (word << 8 | word >> 24); +} + +union key { + struct { + unsigned char data[11 * 16]; + } __attribute__((aligned(16))) out_key_128; + struct { + unsigned char data[13 * 16]; + } __attribute__((aligned(16))) out_key_192; + struct { + unsigned char data[15 * 16]; + } __attribute__((aligned(16))) out_key_256; + struct { + unsigned char data[16]; + } __attribute__((packed)) in_key_128; + struct { + unsigned char data[24]; + } __attribute__((packed)) in_key_192; + struct { + unsigned char data[32]; + } __attribute__((packed)) in_key_256; + unsigned int data[15 * 4]; + vector unsigned char vec[15]; +} __attribute__((aligned(16))); + +static int expand_key(const unsigned char *key, unsigned int keylen, + union key *exp_enc_key, union key *exp_dec_key) +{ + unsigned int tmp, i, rounds; + union key expanded_key, *key_in; + union key expanded_dec_key; + vector unsigned char mixed_key; + vector unsigned char *cur_key; + + key_in = (void*) key; + switch (keylen) { + case 4: + rounds = 10; + expanded_key.in_key_128 = key_in->in_key_128; + break; + + case 6: + rounds = 12; + expanded_key.in_key_192 = key_in->in_key_192; + break; + + case 8: + rounds = 14; + expanded_key.in_key_256 = key_in->in_key_256; + break; + + default: + /* wrong key size */ + return -1; + } + + i = keylen; + + /* setup enc key */ + + for (; i< 4 * (rounds+1); i++) { + tmp = expanded_key.data[i-1]; + + if (!(i % keylen)) { + tmp = RotWord(tmp); + tmp = SubWord(tmp); + tmp ^= Rcon[i / keylen ]; + } else if (keylen > 6 && (i % keylen == 4)) + tmp = SubWord(tmp); + + expanded_key.data[i] = expanded_key.data[i-keylen] ^ tmp; + } + + exp_enc_key->out_key_256 = expanded_key.out_key_256; + + /* setup dec key: the key is turned arround and prepared for the + * "alternative decryption" mode + */ + + cur_key = (vector unsigned char*) expanded_key.data; + + expanded_dec_key.vec[rounds] = expanded_key.vec[0]; + expanded_dec_key.vec[0] = expanded_key.vec[rounds]; + + cur_key++; + for (i = (rounds-1); i> 0; i--) { + + mixed_key = InvMixColumn(*cur_key++, inv_select_0e_norm, inv_select_0b_norm, + inv_select_0d_norm, inv_select_09_norm); + expanded_dec_key.vec[i] = mixed_key; + } + + exp_dec_key->out_key_256 = 
expanded_dec_key.out_key_256; + + return 0; +} + +static int aes_encrypt_ecb(unsigned char *buffer, + const unsigned char *kp, unsigned int key_len, unsigned int len, + unsigned char *iv_) +{ + unsigned int left = len; +#if 0 + while (left >= 32) { + aes_encrypt_spu_block_char(in, out, kp, key_len); + aes_encrypt_spu_block_char(in+16, out+16, kp, key_len); + left -= 32; + in += 32; + out += 32; + } +#endif + while (left >= 16) { + aes_encrypt_spu_block_char(buffer, kp, key_len); + left -= 16; + buffer += 16; + } + + return len; +} + +static int aes_decrypt_ecb(unsigned char *buffer, + const unsigned char *kp, unsigned int key_len, unsigned int len, + unsigned char *iv_) +{ + unsigned int left = len; + + while (left >= 16) { + aes_decrypt_block_char(buffer, kp, key_len); + left -= 16; + buffer += 16; + } + return len; +} +#if 0 +static int aes_encrypt_cbc(unsigned char *buffer, + const unsigned char *kp, unsigned int key_len, unsigned int len, + unsigned char *iv_) +{ + unsigned int i; + vector unsigned char iv, input; + + iv = (*((vector unsigned char *)(iv_))); + for (i=0; i< len; i += 16) { + input = (*((vector unsigned char *)(buffer))); + input = spu_xor(input, iv); + + iv = aes_encrypt_block(input, (const vector unsigned char*) kp, key_len); + + *((vec_uchar16 *)(buffer)) = iv; + + buffer += 16; + } + + *((vec_uchar16 *)(iv_)) = iv; + return len; +} + +static int aes_decrypt_cbc(unsigned char *buffer, + const unsigned char *kp, unsigned int key_len, unsigned int len, + unsigned char *iv_) +{ + unsigned int i; + vector unsigned char iv, input, vret, decrypted; + + iv = (*((vector unsigned char *)(iv_))); + for (i=0; i< len; i += 16) { + + input = (*((vector unsigned char *)(buffer))); + vret = aes_decrypt_block(input, (const vector unsigned char*) kp, key_len); + + decrypted = spu_xor(vret, iv); + iv = input; + + *((vec_uchar16 *)(buffer)) = decrypted; + + buffer += 16; + } + + *((vec_uchar16 *)(iv_)) = iv; + return len; +} +#endif +#define DMA_TAG 1 +static struct aes_key_struct keys[SPU_KEY_SLOTS]; +/* + * MFC supports: + * - 16kb/transfer + * - 16 transfers in queue + * + * 1024 => 64 16byte blocks + */ +//static unsigned char iv[16] __attribute__((aligned (16))); + +/* spu_mfcstat(MFC_TAG_UPDATE_ALL); */ + +static void consumed_inc(struct kernel_spu_data *spu_data) +{ + unsigned int cons = spu_data->kspu_ring_data.consumed; + + cons++; + + spu_data->kspu_ring_data.consumed = cons; + + if (spu_stat_out_mbox()) + spu_write_out_mbox(0x1234); +} + +int spu_aes_setkey(unsigned int current_read) +{ + struct kernel_spu_data *spu_data = (struct kernel_spu_data*) KERNEL_SPU_DATA_OFFSET; + struct aes_set_key *aes_set_key = &spu_data->work_item[current_read].aes_set_key; + unsigned char plain_key[32] __attribute__((aligned (16))); + unsigned int cur_key; + int ret; +#if 1 + /* key size can by 16, 24, 32. 
MFC accepts only 16 & 32 */ + mfc_getb(plain_key, aes_set_key->plain, 32, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); + + cur_key = aes_set_key->keyid; + keys[cur_key].len = aes_set_key->len; + + ret = expand_key(plain_key, keys[cur_key].len, (union key*) keys[cur_key].enc, + (union key*) keys[cur_key].dec); + + mfc_putb(&keys[cur_key], aes_set_key->keys, sizeof(struct aes_key_struct), + DMA_TAG, 0, 0); + + spu_mfcstat(MFC_TAG_UPDATE_ALL); +#endif + consumed_inc(spu_data); +// spu_stop(0x01); + return ret; +} + +int spu_aes_update_key(unsigned int current_read) +{ + struct kernel_spu_data *spu_data = (struct kernel_spu_data*) KERNEL_SPU_DATA_OFFSET; + struct aes_update_key *aes_update_key = &spu_data->work_item[current_read].aes_update_key; +#if 1 + spu_mfcstat(MFC_TAG_UPDATE_ALL); + mfc_getb(&keys[aes_update_key->keyid], aes_update_key->new_key, + sizeof(struct aes_key_struct), DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); +#endif + consumed_inc(spu_data); +// spu_stop(0x02); + return 0; +} + +unsigned char buffer_data[16 * 1024]; + +int spu_aes_encrypt_ecb(unsigned int current_read) { + struct kernel_spu_data *spu_data = (struct kernel_spu_data*) KERNEL_SPU_DATA_OFFSET; + struct aes_crypt *aes_crypt = &spu_data->work_item[current_read].aes_crypt; + unsigned int cur_key; + unsigned long data_len; + int ret; +#if 1 + data_len = aes_crypt->data_size; + mfc_getb(&buffer_data[0], aes_crypt->in, data_len, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); + + cur_key = aes_crypt->keyid; + ret = aes_encrypt_ecb(&buffer_data[0], keys[cur_key].enc, keys[cur_key].len, + data_len, NULL); + + mfc_putb(&buffer_data[0], aes_crypt->out, data_len, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); +#endif + consumed_inc(spu_data); + +// spu_stop(0x03); + return ret; +} + +int spu_aes_decrypt_ecb(unsigned int current_read) { + struct kernel_spu_data *spu_data = (struct kernel_spu_data*) KERNEL_SPU_DATA_OFFSET; + struct aes_crypt *aes_crypt = &spu_data->work_item[current_read].aes_crypt; + unsigned int cur_key; + unsigned long data_len; + int ret; + +#if 1 + data_len = aes_crypt->data_size; + mfc_getb(&buffer_data[0], aes_crypt->in, data_len, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); + + cur_key = aes_crypt->keyid; + ret = aes_decrypt_ecb(&buffer_data[0], keys[cur_key].dec, keys[cur_key].len, + data_len, NULL); + + mfc_putb(&buffer_data[0], aes_crypt->out, data_len, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); +#endif + consumed_inc(spu_data); + +// spu_stop(0x04); + return ret; +} + +#if 0 +int spu_aes_encrypt_cbc(union possible_arguments *pa) { + struct aes_crypt *aes_crypt = (struct aes_crypt*) pa; + unsigned int cur_key; + unsigned long data_len; + int ret; +#if 1 + spu_mfcstat(MFC_TAG_UPDATE_ALL); + data_len = aes_crypt->data_size; + + mfc_getb(&iv, aes_crypt->iv, 16, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); + + mfc_getb(&buffer_data[0][0], aes_crypt->in, data_len, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); + + cur_key = aes_crypt->keyid; + ret = aes_encrypt_cbc(&buffer_data[0][0], keys[cur_key].enc, keys[cur_key].len, + data_len, iv); + + mfc_putb(&iv, aes_crypt->iv, 16, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); + + mfc_putb(&buffer_data[0][0], aes_crypt->out, data_len, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); +#endif + spu_stop(0x16); + return ret; +} + +int spu_aes_decrypt_cbc(union possible_arguments *pa) { + struct aes_crypt *aes_crypt = (struct aes_crypt*) pa; + unsigned int cur_key; + unsigned long data_len; + int ret; +#if 1 + 
spu_mfcstat(MFC_TAG_UPDATE_ALL); + data_len = aes_crypt->data_size; + + mfc_getb(&iv, aes_crypt->iv, 16, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); + + mfc_getb(&buffer_data[0][0], aes_crypt->in, data_len, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); + + cur_key = aes_crypt->keyid; + ret = aes_decrypt_cbc(&buffer_data[0][0], keys[cur_key].dec, keys[cur_key].len, + data_len, iv); + + mfc_putb(&iv, aes_crypt->iv, 16, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); + + mfc_putb(&buffer_data[0][0], aes_crypt->out, data_len, DMA_TAG, 0, 0); + spu_mfcstat(MFC_TAG_UPDATE_ALL); +#endif + spu_stop(0x17); + return ret; +} +#endif Index: b/arch/powerpc/platforms/cell/spufs/spu_main.c =================================================================== --- a/arch/powerpc/platforms/cell/spufs/spu_main.c +++ b/arch/powerpc/platforms/cell/spufs/spu_main.c @@ -9,6 +9,14 @@ spu_operation spu_ops[TOTAL_SPU_FUNCS] __attribute__((aligned(16))) = { + [SPU_FUNC_aes_setkey] = spu_aes_setkey, + [SPU_FUNC_aes_update_key] = spu_aes_update_key, + [SPU_FUNC_aes_encrypt_ecb] = spu_aes_encrypt_ecb, + [SPU_FUNC_aes_decrypt_ecb] = spu_aes_decrypt_ecb, +#if 0 + [SPU_FUNC_aes_encrypt_cbc] = spu_aes_encrypt_cbc, + [SPU_FUNC_aes_decrypt_cbc] = spu_aes_decrypt_cbc, +#endif }; #define DMA_TAG 1 Index: b/drivers/crypto/Kconfig =================================================================== --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -78,4 +78,5 @@ config ZCRYPT_MONOLITHIC that contains all parts of the crypto device driver (ap bus, request router and all the card drivers). +source "arch/powerpc/platforms/cell/crypto/Kconfig" endmenu Index: b/include/asm-powerpc/kspu/aes.h =================================================================== --- /dev/null +++ b/include/asm-powerpc/kspu/aes.h @@ -0,0 +1,52 @@ +#ifndef __SPU_AES_H__ +#define __SPU_AES_H__ + +#define MAX_AES_ROUNDS 15 +#define MAX_AES_KEYSIZE_INT (MAX_AES_ROUNDS *4) +#define MAX_AES_KEYSIZE_BYTE (MAX_AES_KEYSIZE_INT *4) +#define SPU_KEY_SLOTS 5 + +struct aes_key_struct { + unsigned char enc[MAX_AES_KEYSIZE_BYTE] __attribute__((aligned(16))); + unsigned char dec[MAX_AES_KEYSIZE_BYTE] __attribute__((aligned(16))); + unsigned int len __attribute__((aligned(16))); +}; + +struct aes_set_key { + /* in */ + unsigned long long plain __attribute__((aligned(16))); + unsigned int len __attribute__((aligned(16))); + unsigned int keyid __attribute__((aligned(16))); + + /* out */ + unsigned long long keys __attribute__((aligned(16))); +}; + +struct aes_update_key { + /* copy key from ea to ls into a specific slot */ + unsigned long long new_key __attribute__((aligned(16))); + unsigned int keyid __attribute__((aligned(16))); +}; + +struct aes_crypt { + /* in */ + unsigned long long in __attribute__((aligned(16))); + unsigned int keyid __attribute__((aligned(16))); + unsigned int data_size __attribute__((aligned(16))); + + /* out */ + unsigned long long iv __attribute__((aligned(16))); /* as well as in */ + unsigned long long out __attribute__((aligned(16))); +}; + +/* exported calls */ +#if 0 +int spu_aes_encrypt_cbc(union possible_arguments *pa); +int spu_aes_decrypt_cbc(union possible_arguments *pa); +#endif + +int spu_aes_setkey(unsigned int cur); +int spu_aes_update_key(unsigned int cur); +int spu_aes_encrypt_ecb(unsigned int cur); +int spu_aes_decrypt_ecb(unsigned int cur); +#endif Index: b/include/asm-powerpc/kspu/merged_code.h =================================================================== --- a/include/asm-powerpc/kspu/merged_code.h 
+++ b/include/asm-powerpc/kspu/merged_code.h @@ -1,6 +1,7 @@ #ifndef KSPU_MERGED_CODE_H #define KSPU_MERGED_CODE_H #include +#include #define KSPU_LS_SIZE 0x40000 @@ -21,6 +22,12 @@ typedef int (*spu_operation)(unsigned int cur); enum SPU_FUNCTIONS { + SPU_FUNC_aes_setkey, + SPU_FUNC_aes_update_key, + SPU_FUNC_aes_encrypt_ecb, + SPU_FUNC_aes_decrypt_ecb, + SPU_FUNC_aes_encrypt_cbc, + SPU_FUNC_aes_decrypt_cbc, TOTAL_SPU_FUNCS, }; @@ -28,6 +35,9 @@ enum SPU_FUNCTIONS { struct work_item { enum SPU_FUNCTIONS operation __attribute__((aligned(16))); union { + struct aes_set_key aes_set_key; + struct aes_update_key aes_update_key; + struct aes_crypt aes_crypt; } __attribute__((aligned(16))); }; --
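For reference, here is a minimal sketch of how a kernel-side caller could drive the "ecb(aes)" instance registered above through the async ablkcipher interface; it mirrors what the crypto/tcrypt.c module mentioned in the description does. The names spu_aes_demo() and demo_done(), the all-zero key and the single 16-byte buffer are made up for illustration only (one aligned block also stays within the one-scatterlist-element/PAGE_SIZE assumption noted above); this is not part of the patch.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/err.h>

/* completion callback, invoked once the SPU has processed the request */
static void demo_done(struct crypto_async_request *req, int err)
{
	if (err == -EINPROGRESS)
		return;
	complete(req->data);
}

static int spu_aes_demo(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	static u8 key[16];					/* example 128 bit key */
	static u8 buf[16] __attribute__((aligned(16)));	/* one AES block */
	int ret;

	tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
			demo_done, &done);
	sg_init_one(&sg, buf, sizeof(buf));
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), NULL);

	/* returns -EINPROGRESS while the work item sits in the SPU queue */
	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}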