-rw-r--r--Documentation/crypto/msm/qce.txt228
-rw-r--r--Documentation/crypto/msm/qce40.txt241
-rw-r--r--Documentation/crypto/msm/qcedev.txt232
-rw-r--r--Documentation/crypto/msm/qcrypto.txt144
-rw-r--r--Documentation/devicetree/bindings/crypto/msm/qcedev.txt43
-rw-r--r--Documentation/devicetree/bindings/crypto/msm/qcota.txt42
-rw-r--r--Documentation/devicetree/bindings/crypto/msm/qcrypto.txt61
-rw-r--r--drivers/crypto/Kconfig58
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/msm/Makefile12
-rw-r--r--drivers/crypto/msm/compat_qcedev.c432
-rw-r--r--drivers/crypto/msm/ota_crypto.c977
-rw-r--r--drivers/crypto/msm/qce.c2644
-rw-r--r--drivers/crypto/msm/qce.h190
-rw-r--r--drivers/crypto/msm/qce50.c6066
-rw-r--r--drivers/crypto/msm/qce50.h240
-rw-r--r--drivers/crypto/msm/qce_ota.h30
-rw-r--r--drivers/crypto/msm/qcedev.c2125
-rw-r--r--drivers/crypto/msm/qcedevi.h124
-rw-r--r--drivers/crypto/msm/qcrypto.c5332
-rw-r--r--drivers/crypto/msm/qcryptohw_30.h308
-rw-r--r--drivers/crypto/msm/qcryptohw_40.h316
-rw-r--r--drivers/crypto/msm/qcryptohw_50.h528
-rw-r--r--include/linux/platform_data/qcom_crypto_device.h24
-rw-r--r--include/linux/qcrypto.h65
-rw-r--r--include/uapi/linux/Kbuild3
-rw-r--r--include/uapi/linux/compat_qcedev.h165
-rw-r--r--include/uapi/linux/fips_status.h33
-rw-r--r--include/uapi/linux/qcedev.h259
-rw-r--r--include/uapi/linux/qcota.h210
30 files changed, 21133 insertions, 0 deletions
diff --git a/Documentation/crypto/msm/qce.txt b/Documentation/crypto/msm/qce.txt
new file mode 100644
index 000000000000..18435d170e19
--- /dev/null
+++ b/Documentation/crypto/msm/qce.txt
@@ -0,0 +1,228 @@
+Introduction:
+=============
+
+The Qualcomm crypto engine (qce) driver is a module that
+provides common services for accessing the Qualcomm crypto device.
+Currently, the two main clients of qce are
+-qcrypto driver (module provided for accessing CE HW by kernel space apps)
+-qcedev driver (module provided for accessing CE HW by user space apps)
+
+
+The crypto engine (qce) driver is a client to the DMA driver for the Qualcomm
+DMA device - Application Data Mover (ADM). ADM is used to provide the DMA
+transfer capability between Qualcomm crypto device hardware and DDR memory
+for crypto operations.
+
+ Figure 1.
+ ---------
+
+ Linux kernel
+ (ex:IPSec)<--*Qualcomm crypto driver----+
+ (qcrypto) |
+ (for kernel space app) |
+ |
+ +-->|
+ |
+ | *qce <----> Qualcomm
+ | driver ADM driver <---> ADM HW
+ +-->| | |
+ | | |
+ | | |
+ | | |
+ Linux kernel | | |
+ misc device <--- *QCEDEV Driver-------+ | |
+ interface (qcedev) (Reg interface) (DMA interface)
+ (for user space app) \ /
+ \ /
+ \ /
+ \ /
+ \ /
+ \ /
+ \ /
+ Qualcomm crypto CE3 HW
+
+
+ The entities marked with (*) in Figure 1 are the software components of
+ the Linux Qualcomm crypto modules.
+
+===============
+IMPORTANT NOTE:
+===============
+(1) The CE hardware can be accessed either from user space OR from kernel
+    space, but only one at a time. User space and kernel space clients
+    cannot access the qce driver (and the CE hardware) at the same time.
+    - If your device has user space apps that need to access the crypto
+      hardware, make sure the qcrypto module is disabled/unloaded.
+      This will result in the kernel space apps using the registered
+      software implementation of the crypto algorithms.
+    - If your device has kernel space apps that need to access the
+      crypto hardware, make sure the qcedev module is disabled/unloaded
+      and implement your user space application to use the software
+      implementation (ex: openssl/crypto) of the crypto algorithms.
+
+(2) If your device has the Playready (Windows Media DRM) application enabled
+    and uses the qcedev module to access the crypto hardware accelerator,
+    please be informed that for performance reasons, the CE hardware will need
+    to be dedicated to the Playready application. Any other user space
+    application should be implemented to use the software implementation
+    (ex: openssl/crypto) of the crypto algorithms.
+
+
+Hardware description:
+=====================
+
+Qualcomm Crypto HW device family provides a series of algorithms implemented
+in the device hardware.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES, AES
+algorithms, and concurrent operations of hashing, and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 HW provides
+fast AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+HMAC-SHA1 hashing algorithm, and Over The Air (OTA) f8/f9 algorithms as
+defined by the 3GPP forum.
+
+
+Software description
+====================
+
+The crypto device is defined as a platform device. The driver is
+independent of the platform. The driver supports multiple instances of
+crypto HW.
+All the platform specific parameters are defined in the board init
+file, eg. arch/arm/mach-msm/board-msm7x30.c for MSM7x30.
+
+The qce driver provides the common services of HW crypto
+access to the two drivers listed above (qcedev, qcrypto). It sets up
+the crypto HW device for the operation, then requests the ADM driver to
+perform the DMA of the crypto operation.
+
+Two ADM channels and two command lists (one command list for each
+channel) are involved in an operation.
+
+The setting up of the command lists and the procedure of the operation
+of the crypto device are described in the following sections.
+
+The command list for the first DMA channel is set up as follows:
+
+  The 1st command of the list is for the DMA transfer from DDR memory to the
+  crypto device, to feed input data to the crypto device. The dst crci of the
+  command is set to crci-in for this crypto device.
+
+  The 2nd command is for the DMA transfer from the crypto device to DDR memory
+ the authentication result. The src crci is set as crci-hash-done of the
+ crypto device. If authentication is not required in the operation,
+ the 2nd command is not used.
+
+The command list for the second DMA channel is set up as follows:
+
+ One command to DMA data from crypto device to DDR memory for encryption or
+ decryption output from crypto device.
+
+To accomplish ciphering and authentication concurrent operations, the driver
+performs the following steps:
+ (a). set up HW crypto device
+ (b). hit the crypto go register.
+ (c). issue the DMA command of first channel to the ADM driver,
+ (d). issue the DMA command of 2nd channel to the ADM driver.
+
+SHA1/SHA256 is an authentication/integrity hash algorithm. To accomplish a
+hash operation (or any authentication-only algorithm), the 2nd DMA channel is
+not required. Only steps (a) to (c) are performed.
+
+At the completion of the DMA operation (for (c) and (d)), the ADM driver
+invokes the callback registered with the DMA driver. This signifies the end of
+the DMA operation(s). The driver reads the status and other information from
+the CE hardware registers and then invokes the callback to the qce driver client.
+This signals the completion and the results of the DMA, along with the status of
+the CE hardware, to the qce driver client. This completes a crypto operation.
+
+During qce driver initialization, memory for the two command lists and the
+descriptor lists of each crypto device is allocated out of coherent memory,
+using the Linux DMA API. The driver pre-configures most of the two ADM command
+lists at initialization, so during each crypto operation only minimal set up is
+required. The src_dscr and/or dst_dscr descriptor lists of the ADM command are
+populated from the information obtained from the corresponding data structure,
+e.g. for an AEAD request, the following data structure provides the information:
+
+ struct aead_request *req
+ ......
+ req->assoc
+ req->src
+ req->dst
+
+The DMA address of a scatter list will be retrieved and set up in the
+descriptor list of an ADM command.
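+
+Below is a minimal sketch (not driver code) of how such a mapping might be
+done with the standard Linux DMA API; struct example_dscr is an illustrative
+stand-in for the real ADM descriptor format.
+
+	#include <linux/dma-mapping.h>
+	#include <linux/scatterlist.h>
+
+	/* illustrative descriptor layout, not the real ADM format */
+	struct example_dscr {
+		dma_addr_t addr;
+		unsigned int len;
+	};
+
+	static int map_src_for_adm(struct device *dev, struct scatterlist *src,
+				   struct example_dscr *dscr)
+	{
+		struct scatterlist *sg;
+		int nents, i;
+
+		/* obtain bus addresses for every entry of the source list */
+		nents = dma_map_sg(dev, src, sg_nents(src), DMA_TO_DEVICE);
+		if (!nents)
+			return -ENOMEM;
+
+		/* each address/length pair populates one descriptor entry */
+		for_each_sg(src, sg, nents, i) {
+			dscr[i].addr = sg_dma_address(sg);
+			dscr[i].len = sg_dma_len(sg);
+		}
+
+		return nents;
+	}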
+
+Power Management
+================
+ none
+
+
+Interface:
+==========
+
+The interface is defined in kernel/drivers/crypto/msm/inc/qce.h
+
+The qcrypto and qcedev drivers are the clients using
+these interfaces.
+
+The following services are provided by the qce driver -
+
+ qce_open(), qce_close(), qce_ablk_cipher_req(),
+ qce_hw_support(), qce_process_sha_req()
+
+  qce_open() is the first request from the client, e.g. the Qualcomm crypto
+  driver (qcedev, qcrypto), to open a crypto engine. It is normally
+  called in the probe function of the client for a device. During the
+  probe,
+  - the ADM command list structure will be set up,
+  - the crypto device will be initialized,
+  - resources associated with the crypto engine are retrieved by doing
+    platform_get_resource() or platform_get_resource_byname().
+
+ The resources for a device are
+ - crci-in, crci-out, crci-hash-done
+ - two DMA channel IDs, one for encryption and decryption input, one for
+ output.
+ - base address of the HW crypto device.
+
+ qce_close() is the last request from the client. Normally, it is
+ called from the remove function of the client.
+
+ qce_hw_support() allows the client to query what is supported
+ by the crypto engine hardware.
+
+  qce_ablk_cipher_req() provides the ciphering service to the client.
+  qce_process_sha_req() provides the hashing service to the client.
+  qce_aead_req() provides the AEAD service to the client. A sketch of the
+  expected call sequence from a hypothetical client follows below.
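+
+  The argument lists in the sketch below are assumptions for illustration
+  only; the authoritative prototypes are in qce.h.
+
+	/* probe: open an engine handle and query its capabilities */
+	handle = qce_open(pdev, &rc);			/* assumed prototype */
+	if (handle == NULL)
+		return rc;
+	rc = qce_hw_support(handle, &ce_support);	/* assumed prototype */
+
+	/* runtime: submit cipher or hash work; completion is reported
+	 * asynchronously through the callback carried in each request
+	 */
+	rc = qce_ablk_cipher_req(handle, &cipher_req);
+	rc = qce_process_sha_req(handle, &sha_req);
+
+	/* remove: release the engine */
+	qce_close(handle);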
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file.
+-CE hardware base register address
+-Data mover channel used for transfer to/from CE hardware
+These parameters differ in each platform.
+
+
+Dependencies:
+=============
+
+Existing DMA driver.
+The transfers are DMA'ed between the crypto hardware and DDR memory via the
+data mover, ADM. The data transfers are set up to use the existing DMA driver.
+
+User space utilities:
+=====================
+ n/a
+
+Known issues:
+=============
+ n/a
+
+To do:
+======
+ n/a
diff --git a/Documentation/crypto/msm/qce40.txt b/Documentation/crypto/msm/qce40.txt
new file mode 100644
index 000000000000..e99f7d7ef6cf
--- /dev/null
+++ b/Documentation/crypto/msm/qce40.txt
@@ -0,0 +1,241 @@
+Introduction:
+=============
+
+The Qualcomm crypto engine (qce40) driver is a module that
+provides common services for accessing the Qualcomm crypto device.
+Currently, the two main clients of qce40 are
+-qcrypto driver (module provided for accessing CE HW by kernel space apps)
+-qcedev driver (module provided for accessing CE HW by user space apps)
+This module provides the same interface to the clients as qce.c does and is
+based on qce.c. The following are the updates relative to qce.c:
+- Add support for AES XTS mode
+- Add support for CMAC mode
+- Add support for AES CCM mode
+- Add support for SHA1/SHA256 HMAC
+- Read HASH/MAC information directly from CE hardware registers instead of
+  using the data mover.
+
+The crypto engine (qce40) module is a client to the DMA driver for the Qualcomm
+DMA device - Application Data Mover (ADM). ADM is used to provide the DMA
+transfer capability between Qualcomm crypto device hardware and DDR memory
+for crypto operations.
+
+ Figure 1.
+ ---------
+
+ Linux kernel
+ (ex:IPSec)<--*Qualcomm crypto driver----+
+ (qcrypto) |
+ (for kernel space app) |
+ |
+ +-->|
+ |
+ | *qce40 <----> Qualcomm
+ | driver ADM driver <---> ADM HW
+ +-->| | |
+ | | |
+ | | |
+ | | |
+ Linux kernel | | |
+ misc device <--- *QCEDEV Driver-------+ | |
+ interface (qcedev) (Reg interface) (DMA interface)
+ (for user space app) \ /
+ \ /
+ \ /
+ \ /
+ \ /
+ \ /
+ \ /
+ Qualcomm crypto CE3 HW
+
+
+ The entities marked with (*) in Figure 1 are the software components of
+ the Linux Qualcomm crypto modules.
+
+===============
+IMPORTANT NOTE:
+===============
+(1) The CE hardware can be accessed either from user space OR from kernel
+    space, but only one at a time. User space and kernel space clients
+    cannot access the qce driver (and the CE hardware) at the same time.
+    - If your device has user space apps that need to access the crypto
+      hardware, make sure the qcrypto module is disabled/unloaded.
+      This will result in the kernel space apps using the registered
+      software implementation of the crypto algorithms.
+    - If your device has kernel space apps that need to access the
+      crypto hardware, make sure the qcedev module is disabled/unloaded
+      and implement your user space application to use the software
+      implementation (ex: openssl/crypto) of the crypto algorithms.
+
+(2) If your device has the Playready (Windows Media DRM) application enabled
+    and uses the qcedev module to access the crypto hardware accelerator,
+    please be informed that for performance reasons, the CE hardware will need
+    to be dedicated to the Playready application. Any other user space
+    application should be implemented to use the software implementation
+    (ex: openssl/crypto) of the crypto algorithms.
+
+
+Hardware description:
+=====================
+
+Qualcomm Crypto HW device family provides a series of algorithms implemented
+in the device hardware.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES, AES
+algorithms, and concurrent operations of hashing and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 HW provides
+fast AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+HMAC-SHA1 hashing algorithm, and Over The Air (OTA) f8/f9 algorithms as
+defined by the 3GPP forum.
+
+
+Software description
+====================
+
+The crypto device is defined as a platform device. The driver is
+independent of the platform. The driver supports multiple instances of
+crypto HW.
+All the platform specific parameters are defined in the board init
+file, eg. arch/arm/mach-msm/board-msm8960.c for MSM8960.
+
+The qce40 driver provides the common services of HW crypto
+access to the two drivers listed above (qcedev, qcrypto). It sets up
+the crypto HW device for the operation, then requests the ADM driver to
+perform the DMA of the crypto operation.
+
+Two ADM channels and two command lists (one command list for each
+channel) are involved in an operation.
+
+The setting up of the command lists and the procedure of the operation
+of the crypto device are described in the following sections.
+
+Each command list contains a single command. For the first DMA channel it
+is set up as follows:
+
+  The command is for the DMA transfer from DDR memory to the
+  crypto device, to feed input data to the crypto device. The dst crci of the
+  command is set to crci-in for this crypto device.
+
+The command list for the second DMA channel is set up as follows:
+
+ One command to DMA data from crypto device to DDR memory for encryption or
+ decryption output from crypto device.
+
+To accomplish ciphering and authentication concurrent operations, the driver
+performs the following steps:
+ (a). set up HW crypto device
+ (b). hit the crypto go register.
+ (c). issue the DMA command of first channel to the ADM driver,
+ (d). issue the DMA command of 2nd channel to the ADM driver.
+
+SHA1/SHA256 is an authentication/integrity hash algorithm. To accomplish a
+hash operation (or any authentication-only algorithm), the 2nd DMA channel is
+not required. Only steps (a) to (c) are performed.
+
+At the completion of the DMA operation (for (c) and (d)), the ADM driver
+invokes the callback registered with the DMA driver. This signifies the end of
+the DMA operation(s). The driver reads the status and other information from
+the CE hardware registers. For HASH functions (SHA1/SHA256, HMAC, CMAC and
+CCM), the MAC/HASH information is read off the hardware registers.
+
+[ NOTE: This is different from what is done in the qce module that supports
+CE3.x hardware. In CE4.0 there is no CRCI_HASH and hence we cannot rely
+on the data mover to populate the HMAC/SHA information. This information
+is acquired from the hardware by reading directly from the registers that
+hold this information. ]
+
+The driver then invokes the callback to the qce40 driver client.
+This signals the completion and the results of the DMA, along with the status
+of the CE hardware, to the qce40 driver client. This completes a crypto
+operation.
+
+During qce40 driver initialization, memory for the two command lists and the
+descriptor lists of each crypto device is allocated out of coherent memory,
+using the Linux DMA API. The driver pre-configures most of the two ADM command
+lists at initialization, so during each crypto operation only minimal set up is
+required. The src_dscr and/or dst_dscr descriptor lists of the ADM command are
+populated from the information obtained from the corresponding data structure,
+e.g. for an AEAD request, the following data structure provides the information:
+
+ struct aead_request *req
+ ......
+ req->assoc
+ req->src
+ req->dst
+
+The DMA address of a scatter list will be retrieved and set up in the
+descriptor list of an ADM command.
+
+Power Management
+================
+ none
+
+
+Interface:
+==========
+
+The interface is defined in kernel/drivers/crypto/msm/inc/qce.h
+
+The qcrypto and qcedev drivers are the clients using
+these interfaces.
+
+The following services are provided by the qce driver -
+
+ qce_open(), qce_close(), qce_ablk_cipher_req(),
+ qce_hw_support(), qce_process_sha_req()
+
+  qce_open() is the first request from the client, e.g. the Qualcomm crypto
+  driver (qcedev, qcrypto), to open a crypto engine. It is normally
+  called in the probe function of the client for a device. During the
+  probe,
+  - the ADM command list structure will be set up,
+  - the crypto device will be initialized,
+  - resources associated with the crypto engine are retrieved by doing
+    platform_get_resource() or platform_get_resource_byname().
+
+ The resources for a device are
+ - crci-in, crci-out, crci-hash-done
+ - two DMA channel IDs, one for encryption and decryption input, one for
+ output.
+ - base address of the HW crypto device.
+
+ qce_close() is the last request from the client. Normally, it is
+ called from the remove function of the client.
+
+ qce_hw_support() allows the client to query what is supported
+ by the crypto engine hardware.
+
+ qce_ablk_cipher_req() provides ciphering service to the client.
+ qce_process_sha_req() provides hashing service to the client.
+ qce_aead_req() provides aead service to the client.
+
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file.
+-CE hardware base register address
+-Data mover channel used for transfer to/from CE hardware
+These parameters differ in each platform.
+
+
+Dependencies:
+=============
+
+Existing DMA driver.
+The transfers are DMA'ed between the crypto hardware and DDR memory via the
+data mover, ADM. The data transfers are set up to use the existing DMA driver.
+
+User space utilities:
+=====================
+ n/a
+
+Known issues:
+=============
+ n/a
+
+To do:
+======
+ n/a
diff --git a/Documentation/crypto/msm/qcedev.txt b/Documentation/crypto/msm/qcedev.txt
new file mode 100644
index 000000000000..fde69bbed7c0
--- /dev/null
+++ b/Documentation/crypto/msm/qcedev.txt
@@ -0,0 +1,232 @@
+Introduction:
+=============
+
+This driver provides IOCTLs for user space applications to access the crypto
+engine hardware for the qcedev crypto services. The driver supports the
+following crypto algorithms:
+- AES-128, AES-256 (ECB, CBC and CTR mode)
+- AES-192, (ECB, CBC and CTR mode)
+ (support exists on platform supporting CE 3.x hardware)
+- SHA1/SHA256
+- AES-128, AES-256 (XTS), AES CMAC, SHA1/SHA256 HMAC
+ (support exists on platform supporting CE 4.x hardware)
+
+Hardware description:
+=====================
+Crypto 3E provides cipher and hash algorithms as defined in the
+3GPP forum specifications.
+
+
+Software description
+====================
+
+The driver is a Linux platform device driver. For an msm target,
+there can be multiple crypto devices assigned for QCEDEV.
+
+The driver is a misc device driver as well.
+The following operations are registered in the driver,
+-qcedev_ioctl()
+-qcedev_open()
+-qcedev_release()
+
+The following IOCTLs are available to the user space application(s)-
+
+ Cipher IOCTLs:
+ --------------
+ QCEDEV_IOCTL_ENC_REQ is for encrypting data.
+ QCEDEV_IOCTL_DEC_REQ is for decrypting data.
+
+ Hashing/HMAC IOCTLs
+ -------------------
+
+ QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+ QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+ QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+ QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for data
+ packet of known size.
+ QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using AES CMAC
+ algorithm) for data packet of known size.
+
+The requests are synchronous. The driver will put the process to
+sleep, waiting for the completion of the requests using wait_for_completion().
+
+Since the requests come from user space applications, before handing
+the requests to the low level qce driver, the ioctl requests and the
+associated input/output buffers have to be sanity checked and copied
+to/from kernel space.
+
+The extra copying of requests/buffers can affect performance. The issue
+with copying the data buffer is resolved by having the client use PMEM
+allocated buffers.
+
+NOTE: Using memory allocated via PMEM is supported only for in place
+ operations where source and destination buffers point to the same
+ location. Support for different source and destination buffers
+ is not supported currently.
+ Furthermore, when using PMEM, and in AES CTR mode, when issuing an
+ encryption or decryption request, a non-zero byteoffset is not
+ supported.
+
+The design of the driver is to allow multiple opens, and multiple requests
+to be issued from application(s). Therefore, the driver will internally queue
+the requests, and serialize them to the low level qce (or qce40) driver.
+
+On an IOCTL request from an application, if there is no outstanding
+request, the driver will issue a "qce" request; otherwise,
+the request is queued in the driver queue. The process is suspended
+waiting for completion.
+
+On completion of a request by the low level qce driver, the internal
+tasklet (done_tasklet) is scheduled. The sole purpose of done_tasklet is
+to call the completion of the current active request (complete()), and
+issue more requests to the qce, if any.
+When the process wakes up from wait_for_completion(), it will collect the
+return code and return from the ioctl.
+
+A spin lock is used to protect the critical section of the internal queue,
+which may be accessed from multiple tasks (SMP) and from the completion
+callback of qce.
+
+The driver maintains a set of statistics using debugfs. The files are
+in /debug/qcedev/stats1, /debug/qcedev/stats2, /debug/qcedev/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
+
+
+Power Management
+================
+n/a
+
+
+Interface:
+==========
+
+Linux user space applications will need to open a handle
+(file descriptor) to the qcedev device. This is achieved by doing
+the following to retrieve a file descriptor to the device.
+
+ fd = open("/dev/qce", O_RDWR);
+ ..
+ ioctl(fd, ...);
+
+Once a valid fd is retrieved, the user can call the following ioctls with
+the fd as the first parameter and a pointer to an appropriate data
+structure, qcedev_cipher_op_req or qcedev_sha_op_req (depending on
+cipher/hash functionality) as the second parameter.
+
+The following IOCTLs are available to the user space application(s)-
+
+ Cipher IOCTLs:
+ --------------
+ QCEDEV_IOCTL_ENC_REQ is for encrypting data.
+ QCEDEV_IOCTL_DEC_REQ is for decrypting data.
+
+ The caller of the IOCTL passes a pointer to the structure shown
+ below, as the second parameter.
+
+ struct qcedev_cipher_op_req {
+ int use_pmem;
+ union{
+ struct qcedev_pmem_info pmem;
+ struct qcedev_vbuf_info vbuf;
+ };
+ uint32_t entries;
+ uint32_t data_len;
+ uint8_t in_place_op;
+ uint8_t enckey[QCEDEV_MAX_KEY_SIZE];
+ uint32_t encklen;
+ uint8_t iv[QCEDEV_MAX_IV_SIZE];
+ uint32_t ivlen;
+ uint32_t byteoffset;
+ enum qcedev_cipher_alg_enum alg;
+ enum qcedev_cipher_mode_enum mode;
+ enum qcedev_oper_enum op;
+ };
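+
+ A hedged user space example of an in-place AES-128-CBC encryption using
+ QCEDEV_IOCTL_ENC_REQ is sketched below. The field and constant names
+ (QCEDEV_ALG_AES, QCEDEV_AES_MODE_CBC, QCEDEV_OPER_ENC) follow the qcedev.h
+ UAPI header added by this change; the key, IV and buffer handling are
+ illustrative only.
+
+	#include <stdint.h>
+	#include <string.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/qcedev.h>
+
+	int encrypt_in_place(uint8_t *buf, uint32_t len,
+			     const uint8_t key[16], const uint8_t iv[16])
+	{
+		struct qcedev_cipher_op_req req;
+		int rc, fd = open("/dev/qce", O_RDWR);
+
+		if (fd < 0)
+			return -1;
+
+		memset(&req, 0, sizeof(req));
+		req.use_pmem = 0;			/* virtual buffers (vbuf) */
+		req.in_place_op = 1;			/* source == destination */
+		req.entries = 1;
+		req.vbuf.src[0].vaddr = buf;
+		req.vbuf.src[0].len = len;
+		req.vbuf.dst[0].vaddr = buf;
+		req.vbuf.dst[0].len = len;
+		req.data_len = len;
+		memcpy(req.enckey, key, 16);
+		req.encklen = 16;
+		memcpy(req.iv, iv, 16);
+		req.ivlen = 16;
+		req.alg = QCEDEV_ALG_AES;
+		req.mode = QCEDEV_AES_MODE_CBC;
+		req.op = QCEDEV_OPER_ENC;
+
+		rc = ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
+		close(fd);
+		return rc;
+	}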
+
+ Hashing/HMAC IOCTLs
+ -------------------
+
+ QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+ QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+ QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+ QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for data
+ packet of known size.
+ QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using AES CMAC
+ algorithm) for data packet of known size.
+
+ The caller of the IOCTL passes a pointer to the structure shown
+ below, as the second parameter.
+
+ struct qcedev_sha_op_req {
+ struct buf_info data[QCEDEV_MAX_BUFFERS];
+ uint32_t entries;
+ uint32_t data_len;
+ uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
+ uint32_t diglen;
+ uint8_t *authkey;
+ uint32_t authklen;
+ enum qcedev_sha_alg_enum alg;
+ struct qcedev_sha_ctxt ctxt;
+ };
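+
+ For completeness, a hedged sketch of a one-shot SHA-256 digest over a single
+ buffer via QCEDEV_IOCTL_GET_SHA_REQ follows, where fd, buf and len are as in
+ the cipher example above and QCEDEV_ALG_SHA256 is taken from the qcedev.h
+ UAPI header.
+
+	struct qcedev_sha_op_req sreq;
+	uint8_t out[QCEDEV_MAX_SHA_DIGEST];
+
+	memset(&sreq, 0, sizeof(sreq));
+	sreq.alg = QCEDEV_ALG_SHA256;
+	sreq.entries = 1;
+	sreq.data[0].vaddr = buf;
+	sreq.data[0].len = len;
+	sreq.data_len = len;
+
+	if (ioctl(fd, QCEDEV_IOCTL_GET_SHA_REQ, &sreq) == 0)
+		/* the digest and its length are returned in-place */
+		memcpy(out, sreq.digest, sreq.diglen);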
+
+The IOCTLs and associated request data structures are defined in
+ kernel/drivers/crypto/msm/inc/qcedev.h.
+
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file.
+-CE hardware base register address
+-Data mover channel used for transfer to/from CE hardware
+These parameters differ in each platform.
+
+
+
+Dependencies:
+=============
+qce driver. Please see Documentation/arm/msm/qce.txt.
+
+
+User space utilities:
+=====================
+
+none
+
+Known issues:
+=============
+
+none.
+
+
+To do:
+======
+ Enhance cipher functionality:
+ (1) Add support for handling > 32KB for the ciphering functionality when
+     the operation is not an "in place" operation (source != destination)
+     and PMEM allocated memory is used.
+
+Limitations:
+============
+ (1) In the case of cipher functionality, the driver does not support
+     a combination of different memory sources for source/destination.
+     In other words, the memory pointed to by src and dst
+     must BOTH (src/dst) be "pmem" or BOTH (src/dst) be "vbuf".
+
+ (2) In the case of hash functionality, the driver does not support handling
+     data buffers allocated via PMEM.
+
+ (3) Do not load this driver if your device already has kernel space apps
+ that need to access the crypto hardware.
+ Make sure to have qcedev module disabled/unloaded and implement your user
+     space application to use the software implementation (ex: openssl/crypto)
+ of the crypto algorithms.
+ (NOTE: Please refer to details on the limitations listed in qce.txt)
+
+ (4) If your device has the Playready (Windows Media DRM) application enabled
+     and uses the qcedev module to access the crypto hardware accelerator,
+     please be informed that for performance reasons, the CE hardware will
+     need to be dedicated to the Playready application. Any other user space
+     application should be implemented to use the software implementation
+     (ex: openssl/crypto) of the crypto algorithms.
diff --git a/Documentation/crypto/msm/qcrypto.txt b/Documentation/crypto/msm/qcrypto.txt
new file mode 100644
index 000000000000..81aa1941e157
--- /dev/null
+++ b/Documentation/crypto/msm/qcrypto.txt
@@ -0,0 +1,144 @@
+Introduction:
+=============
+
+Qualcomm Crypto (qcrypto) driver is a Linux crypto driver which interfaces
+with the Linux kernel crypto API layer to provide the HW crypto functions.
+This driver is accessed by kernel space apps via the kernel crypto API layer.
+At present there is no means for user space apps to access this module.
+
+Hardware description:
+=====================
+
+Qualcomm Crypto HW device family provides a series of algorithms implemented
+in the device.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES, AES
+algorithms, and concurrent operations of hashing, and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 provides fast
+AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+HMAC-SHA1 hashing algorithm.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 4.0 provides
+HMAC-SHA1/SHA256, AES CBC-MAC hashing algorithm and AES XTS/CCM cipher
+algorithms.
+
+
+Software description
+====================
+
+The module init function, _qcrypto_init(), does a platform_driver_register()
+to register the driver. As a result, the driver probe function,
+_qcrypto_probe(), will be invoked for each registered device.
+
+In the probe function, driver opens the low level CE (qce_open), and
+registers the supported algorithms to the kernel crypto API layer.
+Currently, qcrypto supports the following algorithms.
+
+ ablkcipher -
+ cbc(aes),ecb(aes),ctr(aes)
+ ahash -
+ sha1, sha256
+ aead -
+ authenc(hmac(sha1),cbc(aes))
+
+  The hmac(sha1), hmac(sha256), authenc(hmac(sha1),cbc(aes)), ccm(aes)
+  and xts(aes) algorithms are registered for some platforms that
+  support these in the CE hardware.
+
+The HW device can support various algorithms. However, the most important
+algorithms for gaining performance from a HW crypto accelerator are
+AEAD and ABLKCIPHER.
+
+AEAD stands for "authenticated encryption with associated data".
+ABLKCIPHER stands for "asynchronous block cipher".
+
+The AEAD structure is described in the following header file
+ LINUX/opensource/kernel/include/crypto/aead.h
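+
+A hedged sketch of how a kernel client might reach the HW-backed "cbc(aes)"
+transform through the standard crypto API (the ablkcipher interface of this
+kernel generation) is shown below; if qcrypto has registered a higher
+priority implementation, the allocation resolves to it transparently.
+
+	#include <linux/crypto.h>
+	#include <linux/err.h>
+	#include <linux/slab.h>
+	#include <linux/scatterlist.h>
+
+	static int cbc_aes_example(struct scatterlist *src,
+				   struct scatterlist *dst,
+				   unsigned int nbytes, u8 *key, u8 *iv)
+	{
+		struct crypto_ablkcipher *tfm;
+		struct ablkcipher_request *req;
+		int rc;
+
+		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+		if (IS_ERR(tfm))
+			return PTR_ERR(tfm);
+
+		rc = crypto_ablkcipher_setkey(tfm, key, 16);
+		if (rc)
+			goto out;
+
+		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+		if (!req) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
+		/* a real caller registers a completion callback and handles
+		 * -EINPROGRESS; omitted here to keep the sketch short
+		 */
+		rc = crypto_ablkcipher_encrypt(req);
+
+		ablkcipher_request_free(req);
+	out:
+		crypto_free_ablkcipher(tfm);
+		return rc;
+	}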
+
+The design of the driver is to allow multiple requests to be
+issued from kernel client SW (e.g. IPSec).
+Therefore, the driver will have to internally queue the requests, and
+serialize the requests to the low level qce driver.
+
+When a request is received from the client, if there is no outstanding
+request, a qce (or qce40) request is issued, otherwise, the request is
+queued in the driver queue.
+
+On completion of a request, the qce (or qce40) invokes the registered
+callback from the qcrypto. The internal tasklet (done_tasklet) is scheduled
+in this callback function. The sole purpose of done_tasklet is
+to call the completion of the current active request, and
+issue more requests to the qce (or qce40), if any exists.
+
+A spin lock is used to protect the critical section of the internal queue,
+which may be accessed from multiple tasks (SMP) and from the completion
+callback of qce.
+
+The driver maintains a set of statistics using debugfs. The files are
+in /debug/qcrypto/stats1, /debug/qcrypto/stats2, /debug/qcrypto/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
+
+Test vectors for authenc(hmac(sha1),cbc(aes)) algorithm are
+developed offline, and imported to crypto/testmgr.c, and crypto/testmgr.h.
+
+
+Power Management
+================
+ none
+
+
+Interface:
+==========
+The kernel interface is defined in
+ LINUX/opensource/kernel/include/linux/crypto.h.
+
+
+Module parameters:
+==================
+
+All the platform specific parameters are defined in the board init
+file, e.g. arch/arm/mach-msm/board-msm7x30.c for MSM7x30.
+
+Dependencies:
+=============
+qce driver.
+
+
+User space utilities:
+=====================
+ n/a
+
+Known issues:
+=============
+ n/a
+
+To do:
+======
+ Add Hashing algorithms.
+
+
+Limitations:
+===============
+(1) Each packet transfer size (for cipher and hash) is limited to a maximum of
+    32KB. This is a limitation in the crypto engine hardware. Clients will
+ have to break packets larger than 32KB into multiple requests of smaller
+ size data packets.
+
+(2) Do not load this driver if your device has user space apps that need to
+    access the crypto hardware. Please make sure to have the qcrypto module
+    disabled/unloaded.
+    Not having the driver loaded will result in the kernel space apps using
+    the registered software implementation of the crypto algorithms.
+
+(3) If your device has the Playready application enabled and uses the qcedev
+    module to access the crypto hardware accelerator, please be informed that
+    for performance reasons, the CE hardware will need to be dedicated to the
+    Playready application. Any other user space or kernel application should
+    be implemented to use the software implementation of the crypto algorithms.
+
+ (NOTE: Please refer to details on the limitations listed in qce/40.txt)
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
new file mode 100644
index 000000000000..1585d06da3f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -0,0 +1,43 @@
+* QCEDEV (Qualcomm Crypto Engine Device)
+
+Required properties:
+ - compatible : should be "qcom,qcedev"
+ - reg : should contain crypto, BAM register map.
+ - reg-names : should contain the crypto and bam base register names.
+ - interrupts : should contain crypto BAM interrupt.
+ - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+ - qcom,ce-hw-instance : should contain crypto HW instance.
+ - qcom,msm-bus,name: Should be "qcedev-noc"
+ - qcom,msm-bus,num-cases: Depends on the use cases for bus scaling
+ - qcom,msm-bus,active-only: Boolean flag for context of request (active/dual)
+ - qcom,msm-bus,num-paths: The paths for source and destination ports
+ - qcom,msm-bus,vectors-KBps: Vectors for bus topology.
+ - qcom,ce-device: Device number.
+ - qcom,ce-opp-freq: indicates the CE operating frequency in Hz; changes from target to target.
+
+Optional properties:
+ - qcom,ce-hw-shared : optional, indicates if the hardware is shared between EE.
+ - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
+ - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+ - qcom,bsm-ee : optional, indicates the BAM EE value; changes from target to target. Default value is 1 if not specified.
+
+Example:
+
+ qcom,qcedev@fd440000 {
+ compatible = "qcom,qcedev";
+ reg = <0xfd440000 0x20000>,
+ <0xfd444000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 235 0>;
+ qcom,bam-pipe-pair = <0>;
+ qcom,ce-hw-instance = <1>;
+ qcom,ce-device = <0>;
+ qcom,ce-hw-shared;
+ qcom,msm-bus,name = "qcedev-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <56 512 0 0>,
+			<56 512 3936000 393600>;
+ qcom,ce-opp-freq = <100000000>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcota.txt b/Documentation/devicetree/bindings/crypto/msm/qcota.txt
new file mode 100644
index 000000000000..3ce63af7d4e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcota.txt
@@ -0,0 +1,42 @@
+* QCOTA (Over The Air Crypto Device)
+
+Required properties:
+ - compatible : should be "qcom,qcota"
+ - reg : should contain crypto, BAM register map.
+ - reg-names : should contain the crypto and bam base register names.
+ - interrupts : should contain crypto BAM interrupt.
+ - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+ - qcom,ce-hw-instance : should contain crypto HW instance.
+ - qcom,ce-device: Unique QCOTA device identifier. 0 for first
+ instance, 1 for second instance, n-1 for n-th instance.
+ - qcom,ce-opp-freq: indicates the CE operating frequency in Hz, changes from target to target.
+
+Optional properties:
+ - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+ - qcom,bsm-ee : optional, indicates the BAM EE value; changes from target to target. Default value is 1 if not specified.
+
+Example:
+
+ qcom,qcota@fe140000 {
+ compatible = "qcom,qcota";
+ reg = <0xfe140000 0x20000>,
+ <0xfe144000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 111 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <2>;
+ qcom,ce-device = <0>;
+ qcom,ce-opp-freq = <100000000>;
+ };
+
+ qcom,qcota@fe0c0000 {
+ compatible = "qcom,qcota";
+ reg = <0xfe0c0000 0x20000>,
+ <0xfe0c4000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 113 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <4>;
+ qcom,ce-device = <1>;
+ qcom,ce-opp-freq = <100000000>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
new file mode 100644
index 000000000000..46e01578a1f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -0,0 +1,61 @@
+* QCRYPTO (Qualcomm Crypto)
+
+Required properties:
+ - compatible : should be "qcom,qcrypto"
+ - reg : should contain crypto, BAM register map.
+ - reg-names : should contain the crypto and bam base register names.
+ - interrupts : should contain crypto BAM interrupt.
+ - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+ - qcom,ce-hw-instance : should contain crypto HW instance.
+ - qcom,msm-bus,name: Should be "qcrypto-noc"
+ - qcom,msm-bus,num-cases: Depends on the use cases for bus scaling
+ - qcom,msm-bus,active-only: Boolean flag for context of request (active/dual)
+ - qcom,msm-bus,num-paths: The paths for source and destination ports
+ - qcom,ce-device: Device number. The device number is encoded as follows:
+		bits 3-0	device type:	0 for full disk encryption (fde)
+					1 for per file encryption (pfe)
+		bits 7-4	unit number within the device type.
+
+
+Optional properties:
+ - qcom,ce-hw-shared : optional, indicates if the hardware is shared between EE.
+ - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
+ - qcom,use-sw-aes-cbc-ecb-ctr-algo : optional, indicates whether to use the SW aes-cbc/ecb/ctr algorithm.
+ - qcom,use-sw-aes-xts-algo : optional, indicates whether to use the SW aes-xts algorithm.
+ - qcom,use-sw-aead-algo : optional, indicates whether to use the SW aead algorithm.
+ - qcom,use-sw-ahash-algo : optional, indicates whether to use the SW hash algorithm.
+ - qcom,use-sw-hmac-algo : optional, indicates whether to use the SW hmac algorithm.
+ - qcom,use-sw-aes-ccm-algo : optional, indicates whether to use the SW aes-ccm algorithm.
+ - qcom,clk-mgmt-sus-res : optional, indicates if the CE clocks need to be disabled/enabled in the suspend/resume functions.
+ - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+ - qcom,bsm-ee : optional, indicates the BAM EE value; changes from target to target. Default value is 1 if not specified.
+
+ - qcom,ce-opp-freq: optional, indicates the CE operating frequency in Hz,
+ changes from target to target. If not specified, by default the
+ frequency is set as 100MHZ.
+
+ - qcom,msm-bus,vectors-KBps: optional, indicates vectors for bus topology.
+	This attribute is required for msm targets where bus scaling is
+	required. Other targets, such as fsm, do not perform bus scaling,
+	so it is not required for those targets.
+
+Example:
+
+ qcom,qcrypto@fd444000 {
+ compatible = "qcom,qcrypto";
+ reg = <0xfd440000 0x20000>,
+ <0xfd444000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 235 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <1>;
+ qcom,ce-device = <0>;
+ qcom,ce-hw-shared;
+ qcom,msm-bus,name = "qcrypto-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <56 512 0 0>,
+			<56 512 3936000 393600>;
+ qcom,ce-opp-freq = <100000000>;
+ };
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 2569e043317e..6763346be33e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -346,6 +346,64 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
algorithms execution.
+config CRYPTO_DEV_QCE50
+ bool
+
+config FIPS_ENABLE
+ bool "FIPS140-2 compliant build"
+ default n
+ help
+	  This flag is used to make the current build FIPS140-2
+	  compliant. It enables the code that performs this task.
+	  Please select Y here to enable.
+
+config CRYPTO_DEV_QCRYPTO
+ tristate "Qualcomm Crypto accelerator"
+ select CRYPTO_DES
+ select CRYPTO_ALGAPI
+ select CRYPTO_AUTHENC
+ select CRYPTO_BLKCIPHER
+ default n
+ help
+ This driver supports Qualcomm crypto acceleration.
+ To compile this driver as a module, choose M here: the
+ module will be called qcrypto.
+
+config CRYPTO_DEV_QCOM_MSM_QCE
+ tristate "Qualcomm Crypto Engine (QCE) module"
+ select CRYPTO_DEV_QCE50 if ARCH_APQ8084 || ARCH_MSM8916 || ARCH_MSM8994 || ARCH_MSM8996 || ARCH_MSM8992 || ARCH_MSMTITANIUM
+ default n
+ help
+	  This driver supports the Qualcomm Crypto Engine in MSM7x30, MSM8660,
+	  MSM8x55, MSM8960, MSM9615, MSM8916, MSM8994, MSM8996, FSM9900,
+	  MSMTITANIUM and APQ8084.
+
+	  To compile this driver as a module, choose M here.
+	  For MSM7x30, MSM8660 and MSM8x55 the module is called qce.
+	  For MSM8960, APQ8064 and MSM9615 the module is called qce40.
+	  For MSM8974, MSM8916, MSM8994, MSM8996, MSM8992, MSMTITANIUM
+	  and APQ8084 the module is called qce50.
+
+config CRYPTO_DEV_QCEDEV
+ tristate "QCEDEV Interface to CE module"
+ default n
+ help
+ This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660,
+ MSM8960, MSM9615, APQ8064, MSM8974, MSM8916, MSM8994, MSM8996
+ and APQ8084. This exposes the interface to the QCE hardware
+ accelerator via IOCTLs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qcedev.
+
+config CRYPTO_DEV_OTA_CRYPTO
+ tristate "OTA Crypto module"
+ help
+ This driver supports Qualcomm OTA Crypto in the FSM9xxx.
+ To compile this driver as a module, choose M here: the
+ module will be called ota_crypto.
+
config CRYPTO_DEV_NX
bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
depends on PPC64
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c3ced6fbd1b8..1098d5e643ae 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -27,5 +27,6 @@ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
new file mode 100644
index 000000000000..c4bbc2c2622b
--- /dev/null
+++ b/drivers/crypto/msm/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
+ifeq ($(CONFIG_CRYPTO_DEV_QCE50), y)
+ obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qce50.o
+else
+ obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qce.o
+endif
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += compat_qcedev.o
+endif
+obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
+obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
+
diff --git a/drivers/crypto/msm/compat_qcedev.c b/drivers/crypto/msm/compat_qcedev.c
new file mode 100644
index 000000000000..97ae990b5378
--- /dev/null
+++ b/drivers/crypto/msm/compat_qcedev.c
@@ -0,0 +1,432 @@
+/*
+ * QTI CE 32-bit compatibility syscall for 64-bit systems
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "COMPAT-QCEDEV: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/qcedev.h>
+#include <linux/compat.h>
+#include <linux/compat_qcedev.h>
+
+static int compat_get_qcedev_pmem_info(
+ struct compat_qcedev_pmem_info __user *pmem32,
+ struct qcedev_pmem_info __user *pmem)
+{
+ compat_ulong_t offset;
+ compat_int_t fd_src;
+ compat_int_t fd_dst;
+ int err = 0, i = 0;
+ uint32_t len;
+
+ err |= get_user(fd_src, &pmem32->fd_src);
+ err |= put_user(fd_src, &pmem->fd_src);
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(offset, &pmem32->src[i].offset);
+ err |= put_user(offset, &pmem->src[i].offset);
+ err |= get_user(len, &pmem32->src[i].len);
+ err |= put_user(len, &pmem->src[i].len);
+ }
+
+ err |= get_user(fd_dst, &pmem32->fd_dst);
+ err |= put_user(fd_dst, &pmem->fd_dst);
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(offset, &pmem32->dst[i].offset);
+ err |= put_user(offset, &pmem->dst[i].offset);
+ err |= get_user(len, &pmem32->dst[i].len);
+ err |= put_user(len, &pmem->dst[i].len);
+ }
+
+ return err;
+}
+
+static int compat_put_qcedev_pmem_info(
+ struct compat_qcedev_pmem_info __user *pmem32,
+ struct qcedev_pmem_info __user *pmem)
+{
+ compat_ulong_t offset;
+ compat_int_t fd_src;
+ compat_int_t fd_dst;
+ int err = 0, i = 0;
+ uint32_t len;
+
+ err |= get_user(fd_src, &pmem->fd_src);
+ err |= put_user(fd_src, &pmem32->fd_src);
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(offset, &pmem->src[i].offset);
+ err |= put_user(offset, &pmem32->src[i].offset);
+ err |= get_user(len, &pmem->src[i].len);
+ err |= put_user(len, &pmem32->src[i].len);
+ }
+
+ err |= get_user(fd_dst, &pmem->fd_dst);
+ err |= put_user(fd_dst, &pmem32->fd_dst);
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(offset, &pmem->dst[i].offset);
+ err |= put_user(offset, &pmem32->dst[i].offset);
+ err |= get_user(len, &pmem->dst[i].len);
+ err |= put_user(len, &pmem32->dst[i].len);
+ }
+
+ return err;
+}
+
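+/*
+ * The vbuf layouts differ between the 32-bit and native ABI because each
+ * source/destination entry carries a user pointer: the helpers below widen
+ * the 32-bit compat_uptr_t values into native pointers (and narrow them
+ * back in the _put_ variant), copying the lengths along with them.
+ */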
+static int compat_get_qcedev_vbuf_info(
+ struct compat_qcedev_vbuf_info __user *vbuf32,
+ struct qcedev_vbuf_info __user *vbuf)
+{
+ compat_uptr_t vaddr;
+ int err = 0, i = 0;
+ uint32_t len;
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, &vbuf32->src[i].vaddr);
+ vbuf->src[i].vaddr = NULL;
+ err |= put_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+ err |= get_user(len, &vbuf32->src[i].len);
+ err |= put_user(len, &vbuf->src[i].len);
+ }
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, &vbuf32->dst[i].vaddr);
+ vbuf->dst[i].vaddr = NULL;
+ err |= put_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+ err |= get_user(len, &vbuf32->dst[i].len);
+ err |= put_user(len, &vbuf->dst[i].len);
+ }
+ return err;
+}
+
+static int compat_put_qcedev_vbuf_info(
+ struct compat_qcedev_vbuf_info __user *vbuf32,
+ struct qcedev_vbuf_info __user *vbuf)
+{
+ compat_uptr_t vaddr;
+ int err = 0, i = 0;
+ uint32_t len;
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+ vbuf32->src[i].vaddr = 0;
+ err |= put_user(vaddr, &vbuf32->src[i].vaddr);
+ err |= get_user(len, &vbuf->src[i].len);
+ err |= put_user(len, &vbuf32->src[i].len);
+ }
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+ vbuf32->dst[i].vaddr = 0;
+ err |= put_user(vaddr, &vbuf32->dst[i].vaddr);
+ err |= get_user(len, &vbuf->dst[i].len);
+ err |= put_user(len, &vbuf32->dst[i].len);
+ }
+ return err;
+}
+
+static int compat_get_qcedev_cipher_op_req(
+ struct compat_qcedev_cipher_op_req __user *data32,
+ struct qcedev_cipher_op_req __user *data)
+{
+ enum qcedev_cipher_mode_enum mode;
+ enum qcedev_cipher_alg_enum alg;
+ compat_ulong_t byteoffset;
+ enum qcedev_oper_enum op;
+ compat_ulong_t data_len;
+ compat_ulong_t encklen;
+ compat_ulong_t entries;
+ compat_ulong_t ivlen;
+ uint8_t in_place_op;
+ int err = 0, i = 0;
+ uint8_t use_pmem;
+ uint8_t enckey;
+ uint8_t iv;
+
+ err |= get_user(use_pmem, &data32->use_pmem);
+ err |= put_user(use_pmem, &data->use_pmem);
+
+ if (use_pmem)
+ err |= compat_get_qcedev_pmem_info(&data32->pmem, &data->pmem);
+ else
+ err |= compat_get_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+ err |= get_user(entries, &data32->entries);
+ err |= put_user(entries, &data->entries);
+ err |= get_user(data_len, &data32->data_len);
+ err |= put_user(data_len, &data->data_len);
+ err |= get_user(in_place_op, &data32->in_place_op);
+ err |= put_user(in_place_op, &data->in_place_op);
+
+ for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+ err |= get_user(enckey, &(data32->enckey[i]));
+ err |= put_user(enckey, &(data->enckey[i]));
+ }
+
+ err |= get_user(encklen, &data32->encklen);
+ err |= put_user(encklen, &data->encklen);
+
+ for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+ err |= get_user(iv, &(data32->iv[i]));
+ err |= put_user(iv, &(data->iv[i]));
+ }
+
+ err |= get_user(ivlen, &data32->ivlen);
+ err |= put_user(ivlen, &data->ivlen);
+ err |= get_user(byteoffset, &data32->byteoffset);
+ err |= put_user(byteoffset, &data->byteoffset);
+ err |= get_user(alg, &data32->alg);
+ err |= put_user(alg, &data->alg);
+ err |= get_user(mode, &data32->mode);
+ err |= put_user(mode, &data->mode);
+ err |= get_user(op, &data32->op);
+ err |= put_user(op, &data->op);
+
+ return err;
+}
+
+static int compat_put_qcedev_cipher_op_req(
+ struct compat_qcedev_cipher_op_req __user *data32,
+ struct qcedev_cipher_op_req __user *data)
+{
+ enum qcedev_cipher_mode_enum mode;
+ enum qcedev_cipher_alg_enum alg;
+ compat_ulong_t byteoffset;
+ enum qcedev_oper_enum op;
+ compat_ulong_t data_len;
+ compat_ulong_t encklen;
+ compat_ulong_t entries;
+ compat_ulong_t ivlen;
+ uint8_t in_place_op;
+ int err = 0, i = 0;
+ uint8_t use_pmem;
+ uint8_t enckey;
+ uint8_t iv;
+
+ err |= get_user(use_pmem, &data->use_pmem);
+ err |= put_user(use_pmem, &data32->use_pmem);
+
+ if (use_pmem)
+ err |= compat_put_qcedev_pmem_info(&data32->pmem, &data->pmem);
+ else
+ err |= compat_put_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+ err |= get_user(entries, &data->entries);
+ err |= put_user(entries, &data32->entries);
+ err |= get_user(data_len, &data->data_len);
+ err |= put_user(data_len, &data32->data_len);
+ err |= get_user(in_place_op, &data->in_place_op);
+ err |= put_user(in_place_op, &data32->in_place_op);
+
+ for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+ err |= get_user(enckey, &(data->enckey[i]));
+ err |= put_user(enckey, &(data32->enckey[i]));
+ }
+
+ err |= get_user(encklen, &data->encklen);
+ err |= put_user(encklen, &data32->encklen);
+
+ for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+ err |= get_user(iv, &(data->iv[i]));
+ err |= put_user(iv, &(data32->iv[i]));
+ }
+
+ err |= get_user(ivlen, &data->ivlen);
+ err |= put_user(ivlen, &data32->ivlen);
+ err |= get_user(byteoffset, &data->byteoffset);
+ err |= put_user(byteoffset, &data32->byteoffset);
+ err |= get_user(alg, &data->alg);
+ err |= put_user(alg, &data32->alg);
+ err |= get_user(mode, &data->mode);
+ err |= put_user(mode, &data32->mode);
+ err |= get_user(op, &data->op);
+ err |= put_user(op, &data32->op);
+
+ return err;
+}
+
+static int compat_get_qcedev_sha_op_req(
+ struct compat_qcedev_sha_op_req __user *data32,
+ struct qcedev_sha_op_req __user *data)
+{
+ enum qcedev_sha_alg_enum alg;
+ compat_ulong_t authklen;
+ compat_ulong_t data_len;
+ compat_ulong_t entries;
+ compat_ulong_t diglen;
+ compat_uptr_t authkey;
+ compat_uptr_t vaddr;
+ int err = 0, i = 0;
+ uint8_t digest;
+ uint32_t len;
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, &data32->data[i].vaddr);
+ data->data[i].vaddr = 0;
+ err |= put_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+ err |= get_user(len, &data32->data[i].len);
+ err |= put_user(len, &data->data[i].len);
+ }
+
+ err |= get_user(entries, &data32->entries);
+ err |= put_user(entries, &data->entries);
+ err |= get_user(data_len, &data32->data_len);
+ err |= put_user(data_len, &data->data_len);
+
+ for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+ err |= get_user(digest, &(data32->digest[i]));
+ err |= put_user(digest, &(data->digest[i]));
+ }
+
+ err |= get_user(diglen, &data32->diglen);
+ err |= put_user(diglen, &data->diglen);
+ err |= get_user(authkey, &data32->authkey);
+ data->authkey = NULL;
+ err |= put_user(authkey, (compat_uptr_t *)&data->authkey);
+ err |= get_user(authklen, &data32->authklen);
+ err |= put_user(authklen, &data->authklen);
+ err |= get_user(alg, &data32->alg);
+ err |= put_user(alg, &data->alg);
+
+ return err;
+}
+
+static int compat_put_qcedev_sha_op_req(
+ struct compat_qcedev_sha_op_req __user *data32,
+ struct qcedev_sha_op_req __user *data)
+{
+ enum qcedev_sha_alg_enum alg;
+ compat_ulong_t authklen;
+ compat_ulong_t data_len;
+ compat_ulong_t entries;
+ compat_ulong_t diglen;
+ compat_uptr_t authkey;
+ compat_uptr_t vaddr;
+ int err = 0, i = 0;
+ uint8_t digest;
+ uint32_t len;
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+ data32->data[i].vaddr = 0;
+ err |= put_user(vaddr, &data32->data[i].vaddr);
+ err |= get_user(len, &data->data[i].len);
+ err |= put_user(len, &data32->data[i].len);
+ }
+
+ err |= get_user(entries, &data->entries);
+ err |= put_user(entries, &data32->entries);
+ err |= get_user(data_len, &data->data_len);
+ err |= put_user(data_len, &data32->data_len);
+
+ for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+ err |= get_user(digest, &(data->digest[i]));
+ err |= put_user(digest, &(data32->digest[i]));
+ }
+
+ err |= get_user(diglen, &data->diglen);
+ err |= put_user(diglen, &data32->diglen);
+ err |= get_user(authkey, (compat_uptr_t *)&data->authkey);
+ data32->authkey = 0;
+ err |= put_user(authkey, &data32->authkey);
+ err |= get_user(authklen, &data->authklen);
+ err |= put_user(authklen, &data32->authklen);
+ err |= get_user(alg, &data->alg);
+ err |= put_user(alg, &data32->alg);
+
+ return err;
+}
+
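+/*
+ * Map a 32-bit ioctl command code onto its native counterpart; the codes
+ * differ only in the encoded argument size. Commands that are not known
+ * here are returned unchanged.
+ */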
+static unsigned int convert_cmd(unsigned int cmd)
+{
+ switch (cmd) {
+ case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+ return QCEDEV_IOCTL_ENC_REQ;
+ case COMPAT_QCEDEV_IOCTL_DEC_REQ:
+ return QCEDEV_IOCTL_DEC_REQ;
+ case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+ return QCEDEV_IOCTL_SHA_INIT_REQ;
+ case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+ return QCEDEV_IOCTL_SHA_UPDATE_REQ;
+ case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+ return QCEDEV_IOCTL_SHA_FINAL_REQ;
+ case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ:
+ return QCEDEV_IOCTL_GET_SHA_REQ;
+ case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+ return QCEDEV_IOCTL_GET_CMAC_REQ;
+ default:
+ return cmd;
+ }
+
+}
+
+long compat_qcedev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+ case COMPAT_QCEDEV_IOCTL_DEC_REQ: {
+ struct compat_qcedev_cipher_op_req __user *data32;
+ struct qcedev_cipher_op_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!data)
+ return -EFAULT;
+
+ err = compat_get_qcedev_cipher_op_req(data32, data);
+ if (err)
+ return err;
+
+ ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+ err = compat_put_qcedev_cipher_op_req(data32, data);
+ return ret ? ret : err;
+ }
+ case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+ case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+ case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+ case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+ case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ: {
+ struct compat_qcedev_sha_op_req __user *data32;
+ struct qcedev_sha_op_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!data)
+ return -EFAULT;
+
+ err = compat_get_qcedev_sha_op_req(data32, data);
+ if (err)
+ return err;
+
+ ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+ err = compat_put_qcedev_sha_op_req(data32, data);
+ return ret ? ret : err;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(compat_qcedev_ioctl);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI 32-64 Compatibility for Crypto driver");
diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c
new file mode 100644
index 000000000000..9b4a001bec95
--- /dev/null
+++ b/drivers/crypto/msm/ota_crypto.c
@@ -0,0 +1,977 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Qualcomm Over the Air (OTA) Crypto driver */
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cache.h>
+
+
+#include <linux/qcota.h>
+#include "qce.h"
+#include "qce_ota.h"
+
+enum qce_ota_oper_enum {
+ QCE_OTA_F8_OPER = 0,
+ QCE_OTA_MPKT_F8_OPER = 1,
+ QCE_OTA_F9_OPER = 2,
+ QCE_OTA_VAR_MPKT_F8_OPER = 3,
+ QCE_OTA_OPER_LAST
+};
+
+struct ota_dev_control;
+
+struct ota_async_req {
+ struct list_head rlist;
+ struct completion complete;
+ int err;
+ enum qce_ota_oper_enum op;
+ union {
+ struct qce_f9_req f9_req;
+ struct qce_f8_req f8_req;
+ struct qce_f8_multi_pkt_req f8_mp_req;
+ struct qce_f8_varible_multi_pkt_req f8_v_mp_req;
+ } req;
+ unsigned int steps;
+ struct ota_qce_dev *pqce;
+};
+
+/*
+ * Register ourselves as a misc device so that the OTA crypto engine
+ * can be accessed from userspace.
+ */
+
+
+#define QCOTA_DEV "qcota"
+
+
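+/*
+ * Driver-wide control block: one shared misc device, the queue of
+ * requests waiting for an idle engine (ready_commands), and the list
+ * of registered crypto engines (qce_dev).
+ */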
+struct ota_dev_control {
+
+ /* misc device */
+ struct miscdevice miscdevice;
+ struct list_head ready_commands;
+ unsigned magic;
+ struct list_head qce_dev;
+ spinlock_t lock;
+ struct mutex register_lock;
+ bool registered;
+ uint32_t total_units;
+};
+
+struct ota_qce_dev {
+ struct list_head qlist;
+ /* qce handle */
+ void *qce;
+
+ /* platform device */
+ struct platform_device *pdev;
+
+ struct ota_async_req *active_command;
+ struct tasklet_struct done_tasklet;
+ struct ota_dev_control *podev;
+ uint32_t unit;
+ u64 total_req;
+ u64 err_req;
+};
+
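+/* Handle sanity marker: "OTAC" in ASCII */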
+#define OTA_MAGIC 0x4f544143
+
+static long qcota_ioctl(struct file *file,
+ unsigned cmd, unsigned long arg);
+static int qcota_open(struct inode *inode, struct file *file);
+static int qcota_release(struct inode *inode, struct file *file);
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);
+
+static const struct file_operations qcota_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = qcota_ioctl,
+ .open = qcota_open,
+ .release = qcota_release,
+};
+
+static struct ota_dev_control qcota_dev = {
+ .miscdevice = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "qcota0",
+ .fops = &qcota_fops,
+ },
+ .magic = OTA_MAGIC,
+};
+
+#define DEBUG_MAX_FNAME 16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcota_stat {
+ u64 f8_req;
+ u64 f8_mp_req;
+ u64 f8_v_mp_req;
+ u64 f9_req;
+ u64 f8_op_success;
+ u64 f8_op_fail;
+ u64 f8_mp_op_success;
+ u64 f8_mp_op_fail;
+ u64 f8_v_mp_op_success;
+ u64 f8_v_mp_op_fail;
+ u64 f9_op_success;
+ u64 f9_op_fail;
+};
+static struct qcota_stat _qcota_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcota;
+
+static struct ota_dev_control *qcota_control(void)
+{
+
+ return &qcota_dev;
+}
+
+static int qcota_open(struct inode *inode, struct file *file)
+{
+ struct ota_dev_control *podev;
+
+ podev = qcota_control();
+ if (podev == NULL) {
+ pr_err("%s: no such device %d\n", __func__,
+ MINOR(inode->i_rdev));
+ return -ENOENT;
+ }
+
+ file->private_data = podev;
+
+ return 0;
+}
+
+static int qcota_release(struct inode *inode, struct file *file)
+{
+ struct ota_dev_control *podev;
+
+ podev = file->private_data;
+
+ if (podev != NULL && podev->magic != OTA_MAGIC) {
+ pr_err("%s: invalid handle %p\n",
+ __func__, podev);
+ }
+
+ file->private_data = NULL;
+
+ return 0;
+}
+
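+/*
+ * Advance a variable multi-packet F8 request to its next packet.
+ * The packets are staged back to back in one kernel buffer, each
+ * starting on an L1 cache line boundary; count_c is bumped for every
+ * packet. Returns false when all packets are done or an error has
+ * been recorded.
+ */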
+static bool _next_v_mp_req(struct ota_async_req *areq)
+{
+ unsigned char *p;
+
+ if (areq->err)
+ return false;
+ if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
+ return false;
+
+ p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
+ p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
+ p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);
+
+ areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
+ areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
+ areq->req.f8_v_mp_req.qce_f8_req.data_len =
+ areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;
+
+ areq->req.f8_v_mp_req.qce_f8_req.count_c++;
+ return true;
+}
+
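+/*
+ * Tasklet run after an engine signals completion: either issue the
+ * next packet of a variable multi-packet F8 request, or start the
+ * next command from the ready queue, then complete the finished
+ * request.
+ */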
+static void req_done(unsigned long data)
+{
+ struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
+ struct ota_dev_control *podev = pqce->podev;
+ struct ota_async_req *areq;
+ unsigned long flags;
+ struct ota_async_req *new_req = NULL;
+ int ret = 0;
+ bool schedule = true;
+
+ spin_lock_irqsave(&podev->lock, flags);
+ areq = pqce->active_command;
+ if (unlikely(areq == NULL))
+ pr_err("ota_crypto: req_done, no active request\n");
+ else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
+ if (_next_v_mp_req(areq)) {
+ /* execute next subcommand */
+ spin_unlock_irqrestore(&podev->lock, flags);
+ ret = start_req(pqce, areq);
+ if (unlikely(ret)) {
+ areq->err = ret;
+ schedule = true;
+ spin_lock_irqsave(&podev->lock, flags);
+ } else {
+ areq = NULL;
+ schedule = false;
+ }
+ } else {
+ /* done with this variable mp req */
+ schedule = true;
+ }
+ }
+ while (schedule) {
+ if (!list_empty(&podev->ready_commands)) {
+ new_req = container_of(podev->ready_commands.next,
+ struct ota_async_req, rlist);
+ list_del(&new_req->rlist);
+ pqce->active_command = new_req;
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ new_req->err = 0;
+ /* start a new request */
+ ret = start_req(pqce, new_req);
+ if (unlikely(new_req && ret)) {
+ new_req->err = ret;
+ complete(&new_req->complete);
+ ret = 0;
+ new_req = NULL;
+ spin_lock_irqsave(&podev->lock, flags);
+ } else {
+ schedule = false;
+ }
+ } else {
+ pqce->active_command = NULL;
+ spin_unlock_irqrestore(&podev->lock, flags);
+ schedule = false;
+ };
+ }
+ if (areq)
+ complete(&areq->complete);
+ return;
+}
+
+static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+ int ret)
+{
+ struct ota_async_req *areq = (struct ota_async_req *) cookie;
+ struct ota_qce_dev *pqce;
+
+ pqce = areq->pqce;
+ areq->req.f9_req.mac_i = *((uint32_t *)icv);
+
+ if (ret) {
+ pqce->err_req++;
+ areq->err = -ENXIO;
+ } else
+ areq->err = 0;
+
+ tasklet_schedule(&pqce->done_tasklet);
+}
+
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+ int ret)
+{
+ struct ota_async_req *areq = (struct ota_async_req *) cookie;
+ struct ota_qce_dev *pqce;
+
+ pqce = areq->pqce;
+
+ if (ret) {
+ pqce->err_req++;
+ areq->err = -ENXIO;
+ } else {
+ areq->err = 0;
+ }
+
+ tasklet_schedule(&pqce->done_tasklet);
+}
+
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
+{
+ struct qce_f9_req *pf9;
+ struct qce_f8_multi_pkt_req *p_mp_f8;
+ struct qce_f8_req *pf8;
+ int ret = 0;
+
+ /* command should be on the podev->active_command */
+ areq->pqce = pqce;
+
+ switch (areq->op) {
+ case QCE_OTA_F8_OPER:
+ pf8 = &areq->req.f8_req;
+ ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+ break;
+ case QCE_OTA_MPKT_F8_OPER:
+ p_mp_f8 = &areq->req.f8_mp_req;
+ ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
+ break;
+
+ case QCE_OTA_F9_OPER:
+ pf9 = &areq->req.f9_req;
+ ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb);
+ break;
+
+ case QCE_OTA_VAR_MPKT_F8_OPER:
+ pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
+ ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+ break;
+
+ default:
+ ret = -ENOTSUPP;
+ break;
+ };
+ areq->err = ret;
+ pqce->total_req++;
+ if (ret)
+ pqce->err_req++;
+ return ret;
+}
+
+static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
+{
+ /* do this function with spinlock set */
+ struct ota_qce_dev *p;
+
+ if (unlikely(list_empty(&podev->qce_dev))) {
+ pr_err("%s: no valid qce to schedule\n", __func__);
+ return NULL;
+ }
+
+ list_for_each_entry(p, &podev->qce_dev, qlist) {
+ if (p->active_command == NULL)
+ return p;
+ }
+ return NULL;
+}
+
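+/*
+ * Hand the request to an idle engine, or queue it on ready_commands
+ * if all engines are busy, then block until the completion callback
+ * fires. Statistics are updated from the final error status.
+ */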
+static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct qcota_stat *pstat;
+ struct ota_qce_dev *pqce;
+
+ areq->err = 0;
+
+ spin_lock_irqsave(&podev->lock, flags);
+ pqce = schedule_qce(podev);
+ if (pqce) {
+ pqce->active_command = areq;
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ ret = start_req(pqce, areq);
+ if (ret != 0) {
+ spin_lock_irqsave(&podev->lock, flags);
+ pqce->active_command = NULL;
+ spin_unlock_irqrestore(&podev->lock, flags);
+ }
+
+ } else {
+ list_add_tail(&areq->rlist, &podev->ready_commands);
+ spin_unlock_irqrestore(&podev->lock, flags);
+ }
+
+ if (ret == 0)
+ wait_for_completion(&areq->complete);
+
+ pstat = &_qcota_stat;
+ switch (areq->op) {
+ case QCE_OTA_F8_OPER:
+ if (areq->err)
+ pstat->f8_op_fail++;
+ else
+ pstat->f8_op_success++;
+ break;
+
+ case QCE_OTA_MPKT_F8_OPER:
+
+ if (areq->err)
+ pstat->f8_mp_op_fail++;
+ else
+ pstat->f8_mp_op_success++;
+ break;
+
+ case QCE_OTA_F9_OPER:
+ if (areq->err)
+ pstat->f9_op_fail++;
+ else
+ pstat->f9_op_success++;
+ break;
+ case QCE_OTA_VAR_MPKT_F8_OPER:
+ default:
+ if (areq->err)
+ pstat->f8_v_mp_op_fail++;
+ else
+ pstat->f8_v_mp_op_success++;
+ break;
+ };
+
+ return areq->err;
+}
+
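+/*
+ * ioctl entry point. Each request structure is copied in from user
+ * space, its data is staged in a kmalloc bounce buffer (expected to
+ * be cache line aligned), the request is submitted synchronously, and
+ * the result is copied back. A user space client would typically open
+ * the misc device (e.g. /dev/qcota0) and issue QCOTA_F8_REQ,
+ * QCOTA_F8_MPKT_REQ, QCOTA_F8_V_MPKT_REQ or QCOTA_F9_REQ.
+ */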
+static long qcota_ioctl(struct file *file,
+ unsigned cmd, unsigned long arg)
+{
+ int err = 0;
+ struct ota_dev_control *podev;
+ uint8_t *user_src;
+ uint8_t *user_dst;
+ uint8_t *k_buf = NULL;
+ struct ota_async_req areq;
+ uint32_t total, temp;
+ struct qcota_stat *pstat;
+ int i;
+ uint8_t *p = NULL;
+
+ podev = file->private_data;
+ if (podev == NULL || podev->magic != OTA_MAGIC) {
+ pr_err("%s: invalid handle %p\n",
+ __func__, podev);
+ return -ENOENT;
+ }
+
+ /* Verify user arguments. */
+ if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
+ return -ENOTTY;
+
+ init_completion(&areq.complete);
+
+ pstat = &_qcota_stat;
+
+ switch (cmd) {
+ case QCOTA_F9_REQ:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qce_f9_req)))
+ return -EFAULT;
+ if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
+ sizeof(struct qce_f9_req)))
+ return -EFAULT;
+
+ user_src = areq.req.f9_req.message;
+ if (!access_ok(VERIFY_READ, (void __user *)user_src,
+ areq.req.f9_req.msize))
+ return -EFAULT;
+
+ if (areq.req.f9_req.msize == 0)
+ return 0;
+ k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
+ if (k_buf == NULL)
+ return -ENOMEM;
+
+ if (__copy_from_user(k_buf, (void __user *)user_src,
+ areq.req.f9_req.msize)) {
+ kfree(k_buf);
+ return -EFAULT;
+ }
+
+ areq.req.f9_req.message = k_buf;
+ areq.op = QCE_OTA_F9_OPER;
+
+ pstat->f9_req++;
+ err = submit_req(&areq, podev);
+
+ areq.req.f9_req.message = user_src;
+ if (err == 0 && __copy_to_user((void __user *)arg,
+ &areq.req.f9_req, sizeof(struct qce_f9_req))) {
+ err = -EFAULT;
+ }
+ kfree(k_buf);
+ break;
+
+ case QCOTA_F8_REQ:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qce_f8_req)))
+ return -EFAULT;
+ if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
+ sizeof(struct qce_f8_req)))
+ return -EFAULT;
+ total = areq.req.f8_req.data_len;
+ user_src = areq.req.f8_req.data_in;
+ if (user_src != NULL) {
+ if (!access_ok(VERIFY_READ, (void __user *)
+ user_src, total))
+ return -EFAULT;
+
+ };
+
+ user_dst = areq.req.f8_req.data_out;
+ if (!access_ok(VERIFY_WRITE, (void __user *)
+ user_dst, total))
+ return -EFAULT;
+
+ if (!total)
+ return 0;
+ k_buf = kmalloc(total, GFP_KERNEL);
+ if (k_buf == NULL)
+ return -ENOMEM;
+
+ /* k_buf returned from kmalloc should be cache line aligned */
+ if (user_src && __copy_from_user(k_buf,
+ (void __user *)user_src, total)) {
+ kfree(k_buf);
+ return -EFAULT;
+ }
+
+ if (user_src)
+ areq.req.f8_req.data_in = k_buf;
+ else
+ areq.req.f8_req.data_in = NULL;
+ areq.req.f8_req.data_out = k_buf;
+
+ areq.op = QCE_OTA_F8_OPER;
+
+ pstat->f8_req++;
+ err = submit_req(&areq, podev);
+
+ if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+ err = -EFAULT;
+ kfree(k_buf);
+
+ break;
+
+ case QCOTA_F8_MPKT_REQ:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qce_f8_multi_pkt_req)))
+ return -EFAULT;
+ if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
+ sizeof(struct qce_f8_multi_pkt_req)))
+ return -EFAULT;
+ temp = areq.req.f8_mp_req.qce_f8_req.data_len;
+ if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
+ areq.req.f8_mp_req.cipher_size)
+ return -EINVAL;
+ total = (uint32_t) areq.req.f8_mp_req.num_pkt *
+ areq.req.f8_mp_req.qce_f8_req.data_len;
+
+ user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
+ if (!access_ok(VERIFY_READ, (void __user *)
+ user_src, total))
+ return -EFAULT;
+
+ user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
+ if (!access_ok(VERIFY_WRITE, (void __user *)
+ user_dst, total))
+ return -EFAULT;
+
+ if (!total)
+ return 0;
+ k_buf = kmalloc(total, GFP_KERNEL);
+ if (k_buf == NULL)
+ return -ENOMEM;
+ /* k_buf returned from kmalloc should be cache line aligned */
+ if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
+ kfree(k_buf);
+
+ return -EFAULT;
+ }
+
+ areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
+ areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
+
+ areq.op = QCE_OTA_MPKT_F8_OPER;
+
+ pstat->f8_mp_req++;
+ err = submit_req(&areq, podev);
+
+ if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+ err = -EFAULT;
+ kfree(k_buf);
+ break;
+
+ case QCOTA_F8_V_MPKT_REQ:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qce_f8_varible_multi_pkt_req)))
+ return -EFAULT;
+ if (__copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
+ sizeof(struct qce_f8_varible_multi_pkt_req)))
+ return -EFAULT;
+
+ if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
+ return -EINVAL;
+
+ for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)
+ areq.req.f8_v_mp_req.cipher_iov[i].addr,
+ areq.req.f8_v_mp_req.cipher_iov[i].size))
+ return -EFAULT;
+ total += areq.req.f8_v_mp_req.cipher_iov[i].size;
+ total = ALIGN(total, L1_CACHE_BYTES);
+ }
+
+ if (!total)
+ return 0;
+ k_buf = kmalloc(total, GFP_KERNEL);
+ if (k_buf == NULL)
+ return -ENOMEM;
+
+ for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+ user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr;
+ if (__copy_from_user(p, (void __user *)user_src,
+ areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+ kfree(k_buf);
+ return -EFAULT;
+ }
+ p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+ p = (uint8_t *) ALIGN(((uintptr_t)p),
+ L1_CACHE_BYTES);
+ }
+
+ areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
+ areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
+ areq.req.f8_v_mp_req.qce_f8_req.data_len =
+ areq.req.f8_v_mp_req.cipher_iov[0].size;
+ areq.steps = 0;
+ areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
+
+ pstat->f8_v_mp_req++;
+ err = submit_req(&areq, podev);
+
+ if (err != 0) {
+ kfree(k_buf);
+ return err;
+ }
+
+ for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+ user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr;
+ if (__copy_to_user(user_dst, p,
+ areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+ kfree(k_buf);
+ return -EFAULT;
+ }
+ p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+ p = (uint8_t *) ALIGN(((uintptr_t)p),
+ L1_CACHE_BYTES);
+ }
+ kfree(k_buf);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ return err;
+}
+
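+/*
+ * Platform probe: open a qce handle for this engine, check that the
+ * hardware supports the OTA (F8/F9) algorithms, register the shared
+ * misc device for the first engine, and add the engine to the list.
+ */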
+static int qcota_probe(struct platform_device *pdev)
+{
+ void *handle = NULL;
+ int rc = 0;
+ struct ota_dev_control *podev;
+ struct ce_hw_support ce_support;
+ struct ota_qce_dev *pqce;
+ unsigned long flags;
+
+ podev = &qcota_dev;
+ pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
+ if (!pqce) {
+ pr_err("qcota_probe: Memory allocation FAIL\n");
+ return -ENOMEM;
+ }
+
+ pqce->podev = podev;
+ pqce->active_command = NULL;
+ tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
+
+ /* open qce */
+ handle = qce_open(pdev, &rc);
+ if (handle == NULL) {
+ pr_err("%s: device %s, can not open qce\n",
+ __func__, pdev->name);
+ goto err;
+ }
+ if (qce_hw_support(handle, &ce_support) < 0 ||
+ ce_support.ota == false) {
+ pr_err("%s: device %s, qce does not support ota capability\n",
+ __func__, pdev->name);
+ rc = -ENODEV;
+ goto err;
+ }
+ pqce->qce = handle;
+ pqce->pdev = pdev;
+ pqce->total_req = 0;
+ pqce->err_req = 0;
+ platform_set_drvdata(pdev, pqce);
+
+ mutex_lock(&podev->register_lock);
+ rc = 0;
+ if (podev->registered == false) {
+ rc = misc_register(&podev->miscdevice);
+ if (rc == 0) {
+ pqce->unit = podev->total_units;
+ podev->total_units++;
+ podev->registered = true;
+ };
+ } else {
+ pqce->unit = podev->total_units;
+ podev->total_units++;
+ }
+ mutex_unlock(&podev->register_lock);
+ if (rc) {
+ pr_err("ion: failed to register misc device.\n");
+ goto err;
+ }
+
+ spin_lock_irqsave(&podev->lock, flags);
+ list_add_tail(&pqce->qlist, &podev->qce_dev);
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ return 0;
+err:
+ if (handle)
+ qce_close(handle);
+
+ platform_set_drvdata(pdev, NULL);
+ tasklet_kill(&pqce->done_tasklet);
+ kfree(pqce);
+ return rc;
+}
+
+static int qcota_remove(struct platform_device *pdev)
+{
+ struct ota_dev_control *podev;
+ struct ota_qce_dev *pqce;
+ unsigned long flags;
+
+ pqce = platform_get_drvdata(pdev);
+ if (!pqce)
+ return 0;
+ if (pqce->qce)
+ qce_close(pqce->qce);
+
+ podev = pqce->podev;
+ if (!podev)
+ goto ret;
+
+ spin_lock_irqsave(&podev->lock, flags);
+ list_del(&pqce->qlist);
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ mutex_lock(&podev->register_lock);
+ if (--podev->total_units == 0) {
+ if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&podev->miscdevice);
+ podev->registered = false;
+ }
+ mutex_unlock(&podev->register_lock);
+ret:
+
+ tasklet_kill(&pqce->done_tasklet);
+ kfree(pqce);
+ return 0;
+}
+
+static struct of_device_id qcota_match[] = {
+ { .compatible = "qcom,qcota",
+ },
+ {}
+};
+
+static struct platform_driver qcota_plat_driver = {
+ .probe = qcota_probe,
+ .remove = qcota_remove,
+ .driver = {
+ .name = "qcota",
+ .owner = THIS_MODULE,
+ .of_match_table = qcota_match,
+ },
+};
+
+static int _disp_stats(void)
+{
+ struct qcota_stat *pstat;
+ int len = 0;
+ struct ota_dev_control *podev = &qcota_dev;
+ unsigned long flags;
+ struct ota_qce_dev *p;
+
+ pstat = &_qcota_stat;
+ len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+ "\nQualcomm OTA crypto accelerator Statistics:\n");
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 request : %llu\n",
+ pstat->f8_req);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 operation success : %llu\n",
+ pstat->f8_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 operation fail : %llu\n",
+ pstat->f8_op_fail);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 MP request : %llu\n",
+ pstat->f8_mp_req);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 MP operation success : %llu\n",
+ pstat->f8_mp_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 MP operation fail : %llu\n",
+ pstat->f8_mp_op_fail);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 Variable MP request : %llu\n",
+ pstat->f8_v_mp_req);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 Variable MP operation success: %llu\n",
+ pstat->f8_v_mp_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 Variable MP operation fail : %llu\n",
+ pstat->f8_v_mp_op_fail);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F9 request : %llu\n",
+ pstat->f9_req);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F9 operation success : %llu\n",
+ pstat->f9_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F9 operation fail : %llu\n",
+ pstat->f9_op_fail);
+
+ spin_lock_irqsave(&podev->lock, flags);
+
+ list_for_each_entry(p, &podev->qce_dev, qlist) {
+ len += scnprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %4d Req : %llu\n",
+ p->unit,
+ p->total_req
+ );
+ len += scnprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %4d Req Error : %llu\n",
+ p->unit,
+ p->err_req
+ );
+ }
+
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc = -EINVAL;
+ int len;
+
+ len = _disp_stats();
+
+ rc = simple_read_from_buffer((void __user *) buf, count,
+ ppos, (void *) _debug_read_buf, len);
+
+ return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ota_dev_control *podev = &qcota_dev;
+ unsigned long flags;
+ struct ota_qce_dev *p;
+
+ memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
+
+ spin_lock_irqsave(&podev->lock, flags);
+
+ list_for_each_entry(p, &podev->qce_dev, qlist) {
+ p->total_req = 0;
+ p->err_req = 0;
+ }
+
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+ .open = _debug_stats_open,
+ .read = _debug_stats_read,
+ .write = _debug_stats_write,
+};
+
+static int _qcota_debug_init(void)
+{
+ int rc;
+ char name[DEBUG_MAX_FNAME];
+ struct dentry *dent;
+
+ _debug_dent = debugfs_create_dir("qcota", NULL);
+ if (IS_ERR(_debug_dent)) {
+ pr_err("qcota debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(_debug_dent));
+ return PTR_ERR(_debug_dent);
+ }
+
+ snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
+ _debug_qcota = 0;
+ dent = debugfs_create_file(name, 0644, _debug_dent,
+ &_debug_qcota, &_debug_stats_ops);
+ if (IS_ERR_OR_NULL(dent)) {
+ pr_err("qcota debugfs_create_file fail, error %ld\n",
+ PTR_ERR(dent));
+ rc = dent ? PTR_ERR(dent) : -ENOMEM;
+ goto err;
+ }
+ return 0;
+err:
+ debugfs_remove_recursive(_debug_dent);
+ return rc;
+}
+
+static int __init qcota_init(void)
+{
+ int rc;
+ struct ota_dev_control *podev;
+
+ rc = _qcota_debug_init();
+ if (rc)
+ return rc;
+
+ podev = &qcota_dev;
+ INIT_LIST_HEAD(&podev->ready_commands);
+ INIT_LIST_HEAD(&podev->qce_dev);
+ spin_lock_init(&podev->lock);
+ mutex_init(&podev->register_lock);
+ podev->registered = false;
+ podev->total_units = 0;
+
+ return platform_driver_register(&qcota_plat_driver);
+}
+static void __exit qcota_exit(void)
+{
+ debugfs_remove_recursive(_debug_dent);
+ platform_driver_unregister(&qcota_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Ota Crypto driver");
+MODULE_VERSION("1.02");
+
+module_init(qcota_init);
+module_exit(qcota_exit);
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
new file mode 100644
index 000000000000..7ddbb1938400
--- /dev/null
+++ b/drivers/crypto/msm/qce.c
@@ -0,0 +1,2644 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <linux/qcedev.h>
+#include <linux/qcota.h>
+#include <mach/dma.h>
+
+#include "qce.h"
+#include "qcryptohw_30.h"
+#include "qce_ota.h"
+
+/* ADM definitions */
+#define LI_SG_CMD (1 << 31) /* last index in the scatter gather cmd */
+#define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
+#define DST_INDEX_SG_CMD(index) (index & 0x3fff)
+#define ADM_DESC_LAST (1 << 31)
+
+/* Data xfer between DM and CE in blocks of 16 bytes */
+#define ADM_CE_BLOCK_SIZE 16
+
+#define QCE_FIFO_SIZE 0x8000
+
+/* Data xfer between DM and CE in blocks of 64 bytes */
+#define ADM_SHA_BLOCK_SIZE 64
+
+#define ADM_DESC_LENGTH_MASK 0xffff
+#define ADM_DESC_LENGTH(x) (x & ADM_DESC_LENGTH_MASK)
+
+struct dmov_desc {
+ uint32_t addr;
+ uint32_t len;
+};
+
+#define ADM_STATUS_OK 0x80000002
+
+/* Misc definitions */
+
+/* QCE max number of descriptor in a descriptor list */
+#define QCE_MAX_NUM_DESC 128
+
+/* State of DM channel */
+enum qce_chan_st_enum {
+ QCE_CHAN_STATE_IDLE = 0,
+ QCE_CHAN_STATE_IN_PROG = 1,
+ QCE_CHAN_STATE_COMP = 2,
+ QCE_CHAN_STATE_LAST
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can handle only one crypto operation at a time. It is up to
+ * the software above to ensure single-threaded operation on an engine.
+ */
+struct qce_device {
+ struct device *pdev; /* Handle to platform_device structure */
+ unsigned char *coh_vmem; /* Allocated coherent virtual memory */
+ dma_addr_t coh_pmem; /* Allocated coherent physical memory */
+ void __iomem *iobase; /* Virtual io base of CE HW */
+ unsigned int phy_iobase; /* Physical io base of CE HW */
+ struct clk *ce_clk; /* Handle to CE clk */
+ unsigned int crci_in; /* CRCI for CE DM IN Channel */
+ unsigned int crci_out; /* CRCI for CE DM OUT Channel */
+ unsigned int crci_hash; /* CRCI for CE HASH */
+ unsigned int chan_ce_in; /* ADM channel used for CE input
+ * and auth result if authentication
+ * only operation. */
+ unsigned int chan_ce_out; /* ADM channel used for CE output,
+ and icv for esp */
+
+
+ unsigned int *cmd_pointer_list_ce_in;
+ dma_addr_t phy_cmd_pointer_list_ce_in;
+
+ unsigned int *cmd_pointer_list_ce_out;
+ dma_addr_t phy_cmd_pointer_list_ce_out;
+
+ unsigned char *cmd_list_ce_in;
+ dma_addr_t phy_cmd_list_ce_in;
+
+ unsigned char *cmd_list_ce_out;
+ dma_addr_t phy_cmd_list_ce_out;
+
+ struct dmov_desc *ce_out_src_desc;
+ dma_addr_t phy_ce_out_src_desc;
+
+ struct dmov_desc *ce_out_dst_desc;
+ dma_addr_t phy_ce_out_dst_desc;
+
+ struct dmov_desc *ce_in_src_desc;
+ dma_addr_t phy_ce_in_src_desc;
+
+ struct dmov_desc *ce_in_dst_desc;
+ dma_addr_t phy_ce_in_dst_desc;
+
+ unsigned char *ce_out_ignore;
+ dma_addr_t phy_ce_out_ignore;
+
+ unsigned char *ce_pad;
+ dma_addr_t phy_ce_pad;
+
+ struct msm_dmov_cmd *chan_ce_in_cmd;
+ struct msm_dmov_cmd *chan_ce_out_cmd;
+
+ uint32_t ce_out_ignore_size;
+
+ int ce_out_dst_desc_index;
+ int ce_in_dst_desc_index;
+
+ int ce_out_src_desc_index;
+ int ce_in_src_desc_index;
+
+ enum qce_chan_st_enum chan_ce_in_state; /* chan ce_in state */
+ enum qce_chan_st_enum chan_ce_out_state; /* chan ce_out state */
+
+ int chan_ce_in_status; /* chan ce_in status */
+ int chan_ce_out_status; /* chan ce_out status */
+
+
+ unsigned char *dig_result;
+ dma_addr_t phy_dig_result;
+
+ /* cached aes key */
+ uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)];
+
+ uint32_t aes_key_size; /* cached aes key size in bytes */
+ int fastaes; /* ce supports fast aes */
+ int hmac; /* ce support hmac-sha1 */
+ bool ota; /* ce support ota */
+
+ qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
+
+ int assoc_nents;
+ int src_nents;
+ int dst_nents;
+
+ void *areq;
+ enum qce_cipher_mode_enum mode;
+
+ dma_addr_t phy_iv_in;
+ dma_addr_t phy_ota_src;
+ dma_addr_t phy_ota_dst;
+ unsigned int ota_size;
+ int err;
+};
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha1[] = {
+ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+ 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
+};
+
+/* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
+static const uint32_t _s_box[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
+ 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
+ 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
+ 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
+ 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
+ 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
+ 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
+ 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
+ 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
+ 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
+ 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
+ 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };
+
+
+/*
+ * Source: FIPS 197, Sec 5.2 Key Expansion, Figure 11. Pseudo Code for Key
+ * Expansion.
+ */
+static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY,
+ uint32_t *AES_RND_KEY)
+{
+ uint32_t i;
+ uint32_t Nk;
+ uint32_t Nr, rot_data;
+ uint32_t Rcon = 0x01000000;
+ uint32_t temp;
+ uint32_t data_in;
+ uint32_t MSB_store;
+ uint32_t byte_for_sub;
+ uint32_t word_sub[4];
+
+ switch (keysize) {
+ case 192:
+ Nk = 6;
+ Nr = 12;
+ break;
+
+ case 256:
+ Nk = 8;
+ Nr = 14;
+ break;
+
+ case 128:
+ default: /* default to AES128 */
+ Nk = 4;
+ Nr = 10;
+ break;
+ }
+
+ /* key expansion */
+ i = 0;
+ while (i < Nk) {
+ AES_RND_KEY[i] = AES_KEY[i];
+ i = i + 1;
+ }
+
+ i = Nk;
+ while (i < (4 * (Nr + 1))) {
+ temp = AES_RND_KEY[i-1];
+ if (Nr == 14) {
+ switch (i) {
+ case 8:
+ Rcon = 0x01000000;
+ break;
+
+ case 16:
+ Rcon = 0x02000000;
+ break;
+
+ case 24:
+ Rcon = 0x04000000;
+ break;
+
+ case 32:
+ Rcon = 0x08000000;
+ break;
+
+ case 40:
+ Rcon = 0x10000000;
+ break;
+
+ case 48:
+ Rcon = 0x20000000;
+ break;
+
+ case 56:
+ Rcon = 0x40000000;
+ break;
+ }
+ } else if (Nr == 12) {
+ switch (i) {
+ case 6:
+ Rcon = 0x01000000;
+ break;
+
+ case 12:
+ Rcon = 0x02000000;
+ break;
+
+ case 18:
+ Rcon = 0x04000000;
+ break;
+
+ case 24:
+ Rcon = 0x08000000;
+ break;
+
+ case 30:
+ Rcon = 0x10000000;
+ break;
+
+ case 36:
+ Rcon = 0x20000000;
+ break;
+
+ case 42:
+ Rcon = 0x40000000;
+ break;
+
+ case 48:
+ Rcon = 0x80000000;
+ break;
+ }
+ } else if (Nr == 10) {
+ switch (i) {
+ case 4:
+ Rcon = 0x01000000;
+ break;
+
+ case 8:
+ Rcon = 0x02000000;
+ break;
+
+ case 12:
+ Rcon = 0x04000000;
+ break;
+
+ case 16:
+ Rcon = 0x08000000;
+ break;
+
+ case 20:
+ Rcon = 0x10000000;
+ break;
+
+ case 24:
+ Rcon = 0x20000000;
+ break;
+
+ case 28:
+ Rcon = 0x40000000;
+ break;
+
+ case 32:
+ Rcon = 0x80000000;
+ break;
+
+ case 36:
+ Rcon = 0x1b000000;
+ break;
+
+ case 40:
+ Rcon = 0x36000000;
+ break;
+ }
+ }
+
+ if ((i % Nk) == 0) {
+ data_in = temp;
+ MSB_store = (data_in >> 24 & 0xff);
+ rot_data = (data_in << 8) | MSB_store;
+ byte_for_sub = rot_data;
+ word_sub[0] = _s_box[(byte_for_sub & 0xff)];
+ word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
+ << 8);
+ word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
+ << 16);
+ word_sub[3] = (_s_box[((byte_for_sub & 0xff000000)
+ >> 24)] << 24);
+ word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] |
+ word_sub[3];
+ temp = word_sub[0] ^ Rcon;
+ } else if ((Nk > 6) && ((i % Nk) == 4)) {
+ byte_for_sub = temp;
+ word_sub[0] = _s_box[(byte_for_sub & 0xff)];
+ word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
+ << 8);
+ word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
+ << 16);
+ word_sub[3] = (_s_box[((byte_for_sub & 0xff000000) >>
+ 24)] << 24);
+ word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] |
+ word_sub[3];
+ temp = word_sub[0];
+ }
+
+ AES_RND_KEY[i] = AES_RND_KEY[i-Nk]^temp;
+ i = i+1;
+ }
+}
+
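+/*
+ * Pack a byte stream into 32-bit words in network (big-endian) byte
+ * order, handling a trailing partial word of one to three bytes.
+ */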
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned n;
+
+ n = len / sizeof(uint32_t);
+ for (; n > 0; n--) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00) |
+ (*(b+3) & 0xff);
+ b += sizeof(uint32_t);
+ iv++;
+ }
+
+ n = len % sizeof(uint32_t);
+ if (n == 3) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00);
+ } else if (n == 2) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000);
+ } else if (n == 1) {
+ *iv = ((*b << 24) & 0xff000000);
+ }
+}
+
+static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned n = len / sizeof(uint32_t);
+
+ for (; n > 0; n--) {
+ *b++ = (unsigned char) ((*iv >> 24) & 0xff);
+ *b++ = (unsigned char) ((*iv >> 16) & 0xff);
+ *b++ = (unsigned char) ((*iv >> 8) & 0xff);
+ *b++ = (unsigned char) (*iv & 0xff);
+ iv++;
+ }
+ n = len % sizeof(uint32_t);
+ if (n == 3) {
+ *b++ = (unsigned char) ((*iv >> 24) & 0xff);
+ *b++ = (unsigned char) ((*iv >> 16) & 0xff);
+ *b = (unsigned char) ((*iv >> 8) & 0xff);
+ } else if (n == 2) {
+ *b++ = (unsigned char) ((*iv >> 24) & 0xff);
+ *b = (unsigned char) ((*iv >> 16) & 0xff);
+ } else if (n == 1) {
+ *b = (unsigned char) ((*iv >> 24) & 0xff);
+ }
+}
+
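+/* Count the scatterlist entries needed to cover nbytes */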
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+ int i;
+
+ for (i = 0; nbytes > 0; i++, sg = scatterwalk_sg_next(sg))
+ nbytes -= sg->length;
+ return i;
+}
+
+static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; ++i) {
+ dma_map_sg(dev, sg, 1, direction);
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return nents;
+}
+
+static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; ++i) {
+ dma_unmap_sg(dev, sg, 1, direction);
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return nents;
+}
+
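+/*
+ * Identify the crypto core revision from its status register and read
+ * the engines-available register to determine fast AES, HMAC-SHA1 and
+ * OTA (F8/F9) support.
+ */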
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+ unsigned int val;
+ unsigned int rev;
+ unsigned int eng_availability; /* engine available functions */
+
+ val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+ if ((val & 0xfffffff) != 0x0200004) {
+ dev_err(pce_dev->pdev,
+ "unknown Qualcomm crypto device at 0x%x 0x%x\n",
+ pce_dev->phy_iobase, val);
+ return -EIO;
+ };
+ rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV;
+ if (rev == 0x2) {
+ dev_info(pce_dev->pdev,
+ "Qualcomm Crypto 3e device found at 0x%x\n",
+ pce_dev->phy_iobase);
+ } else if (rev == 0x1) {
+ dev_info(pce_dev->pdev,
+ "Qualcomm Crypto 3 device found at 0x%x\n",
+ pce_dev->phy_iobase);
+ } else if (rev == 0x0) {
+ dev_info(pce_dev->pdev,
+ "Qualcomm Crypto 2 device found at 0x%x\n",
+ pce_dev->phy_iobase);
+ } else {
+ dev_err(pce_dev->pdev,
+ "unknown Qualcomm crypto device at 0x%x\n",
+ pce_dev->phy_iobase);
+ return -EIO;
+ }
+
+ eng_availability = readl_relaxed(pce_dev->iobase +
+ CRYPTO_ENGINES_AVAIL);
+
+ if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL)
+ == CRYPTO_AES_SEL_FAST)
+ pce_dev->fastaes = 1;
+ else
+ pce_dev->fastaes = 0;
+
+ if (eng_availability & (1 << CRYPTO_HMAC_SEL))
+ pce_dev->hmac = 1;
+ else
+ pce_dev->hmac = 0;
+
+ if ((eng_availability & (1 << CRYPTO_F9_SEL)) &&
+ (eng_availability & (1 << CRYPTO_F8_SEL)))
+ pce_dev->ota = true;
+ else
+ pce_dev->ota = false;
+
+ pce_dev->aes_key_size = 0;
+
+ return 0;
+};
+
+static int _init_ce_engine(struct qce_device *pce_dev)
+{
+ unsigned int val;
+
+ /* reset qce */
+ writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase + CRYPTO_CONFIG_REG);
+
+ /* Ensure previous instruction (write to reset bit)
+ * was completed.
+ */
+ mb();
+ /* configure ce */
+ val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
+ (1 << CRYPTO_MASK_AUTH_DONE_INTR) |
+ (1 << CRYPTO_MASK_ERR_INTR);
+ writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
+
+ if (_probe_ce_engine(pce_dev) < 0)
+ return -EIO;
+ if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) {
+ dev_err(pce_dev->pdev,
+ "unknown Qualcomm crypto device at 0x%x\n",
+ pce_dev->phy_iobase);
+ return -EIO;
+ };
+ return 0;
+};
+
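+/*
+ * Program a SHA1/SHA256 hash segment: load the standard initial
+ * vector for the first block (or the running digest otherwise) and
+ * the byte counts, set the segment configuration and size, then kick
+ * the engine via the GO register.
+ */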
+static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq)
+{
+ uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+ uint32_t diglen;
+ int rc;
+ int i;
+ uint32_t cfg = 0;
+
+ /* if not the last, the size has to be on the block boundary */
+ if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+ return -EIO;
+
+ switch (sreq->alg) {
+ case QCE_HASH_SHA1:
+ diglen = SHA1_DIGEST_SIZE;
+ break;
+ case QCE_HASH_SHA256:
+ diglen = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /*
+ * write 20/32 bytes, 5/8 words into auth_iv
+ * for SHA1/SHA256
+ */
+
+ if (sreq->first_blk) {
+ if (sreq->alg == QCE_HASH_SHA1) {
+ for (i = 0; i < 5; i++)
+ auth32[i] = _std_init_vector_sha1[i];
+ } else {
+ for (i = 0; i < 8; i++)
+ auth32[i] = _std_init_vector_sha256[i];
+ }
+ } else
+ _byte_stream_to_net_words(auth32, sreq->digest, diglen);
+
+ rc = clk_enable(pce_dev->ce_clk);
+ if (rc)
+ return rc;
+
+ writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+ writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
+ writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
+ writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
+ writel_relaxed(auth32[4], pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
+
+ if (sreq->alg == QCE_HASH_SHA256) {
+ writel_relaxed(auth32[5], pce_dev->iobase +
+ CRYPTO_AUTH_IV5_REG);
+ writel_relaxed(auth32[6], pce_dev->iobase +
+ CRYPTO_AUTH_IV6_REG);
+ writel_relaxed(auth32[7], pce_dev->iobase +
+ CRYPTO_AUTH_IV7_REG);
+ }
+ /* write auth_bytecnt 0/1, start with 0 */
+ writel_relaxed(sreq->auth_data[0], pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG);
+ writel_relaxed(sreq->auth_data[1], pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT1_REG);
+
+ /* write auth_seg_cfg */
+ writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE,
+ pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+ /*
+ * write seg_cfg
+ */
+
+ if (sreq->alg == QCE_HASH_SHA1)
+ cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
+ else
+ cfg = (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
+
+ if (sreq->first_blk)
+ cfg |= 1 << CRYPTO_FIRST;
+ if (sreq->last_blk)
+ cfg |= 1 << CRYPTO_LAST;
+ cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
+ writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+
+ /* write seg_size */
+ writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+ /* issue go to crypto */
+ writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+ /* Ensure the previous instruction (setting the GO register)
+ * was completed before issuing a DMA transfer request
+ */
+ mb();
+
+ return 0;
+}
+
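+/*
+ * Program a cipher or AEAD operation: HMAC-SHA1 authentication setup
+ * for AEAD, cipher mode and IV, DES/3DES/AES key registers (with a
+ * cached-AES-key check and software key expansion when the fast AES
+ * engine is absent), then kick the engine via the GO register.
+ */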
+static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req,
+ uint32_t totallen, uint32_t coffset)
+{
+ uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0};
+ uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+ 0, 0, 0, 0};
+ uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t);
+ int aes_key_chg;
+ int i, rc;
+ uint32_t aes_round_key[CRYPTO_AES_RNDKEYS];
+ uint32_t cfg;
+ uint32_t ivsize = q_req->ivsize;
+
+ rc = clk_enable(pce_dev->ce_clk);
+ if (rc)
+ return rc;
+
+ cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST);
+ if (q_req->op == QCE_REQ_AEAD) {
+
+ /* do authentication setup */
+
+ cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE)|
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);
+
+ /* write sha1 init vector */
+ writel_relaxed(_std_init_vector_sha1[0],
+ pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+ writel_relaxed(_std_init_vector_sha1[1],
+ pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
+ writel_relaxed(_std_init_vector_sha1[2],
+ pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
+ writel_relaxed(_std_init_vector_sha1[3],
+ pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
+ writel_relaxed(_std_init_vector_sha1[4],
+ pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
+ /* write hmac key */
+ _byte_stream_to_net_words(hmackey, q_req->authkey,
+ q_req->authklen);
+ writel_relaxed(hmackey[0], pce_dev->iobase +
+ CRYPTO_AUTH_IV5_REG);
+ writel_relaxed(hmackey[1], pce_dev->iobase +
+ CRYPTO_AUTH_IV6_REG);
+ writel_relaxed(hmackey[2], pce_dev->iobase +
+ CRYPTO_AUTH_IV7_REG);
+ writel_relaxed(hmackey[3], pce_dev->iobase +
+ CRYPTO_AUTH_IV8_REG);
+ writel_relaxed(hmackey[4], pce_dev->iobase +
+ CRYPTO_AUTH_IV9_REG);
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
+
+ /* write auth_seg_cfg */
+ writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000,
+ pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+ }
+
+ _byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen);
+
+ switch (q_req->mode) {
+ case QCE_MODE_ECB:
+ cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+ break;
+
+ case QCE_MODE_CBC:
+ cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+ break;
+
+ case QCE_MODE_CTR:
+ default:
+ cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+ break;
+ }
+ pce_dev->mode = q_req->mode;
+
+ switch (q_req->alg) {
+ case CIPHER_ALG_DES:
+ if (q_req->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+ writel_relaxed(enciv32[0], pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ writel_relaxed(enciv32[1], pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ }
+ writel_relaxed(enckey32[0], pce_dev->iobase +
+ CRYPTO_DES_KEY0_REG);
+ writel_relaxed(enckey32[1], pce_dev->iobase +
+ CRYPTO_DES_KEY1_REG);
+ cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+ break;
+
+ case CIPHER_ALG_3DES:
+ if (q_req->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+ writel_relaxed(enciv32[0], pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ writel_relaxed(enciv32[1], pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ }
+ writel_relaxed(enckey32[0], pce_dev->iobase +
+ CRYPTO_DES_KEY0_REG);
+ writel_relaxed(enckey32[1], pce_dev->iobase +
+ CRYPTO_DES_KEY1_REG);
+ writel_relaxed(enckey32[2], pce_dev->iobase +
+ CRYPTO_DES_KEY2_REG);
+ writel_relaxed(enckey32[3], pce_dev->iobase +
+ CRYPTO_DES_KEY3_REG);
+ writel_relaxed(enckey32[4], pce_dev->iobase +
+ CRYPTO_DES_KEY4_REG);
+ writel_relaxed(enckey32[5], pce_dev->iobase +
+ CRYPTO_DES_KEY5_REG);
+ cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+ break;
+
+ case CIPHER_ALG_AES:
+ default:
+ if (q_req->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+ writel_relaxed(enciv32[0], pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ writel_relaxed(enciv32[1], pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ writel_relaxed(enciv32[2], pce_dev->iobase +
+ CRYPTO_CNTR2_IV2_REG);
+ writel_relaxed(enciv32[3], pce_dev->iobase +
+ CRYPTO_CNTR3_IV3_REG);
+ }
+ /* set number of counter bits */
+ writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
+
+ if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+ cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ);
+ cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+ } else {
+ switch (q_req->encklen) {
+ case AES128_KEY_SIZE:
+ cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ);
+ break;
+ case AES192_KEY_SIZE:
+ cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 <<
+ CRYPTO_ENCR_KEY_SZ);
+ break;
+ case AES256_KEY_SIZE:
+ default:
+ cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
+ CRYPTO_ENCR_KEY_SZ);
+
+ /* check for null key. If null, use hw key*/
+ for (i = 0; i < enck_size_in_word; i++) {
+ if (enckey32[i] != 0)
+ break;
+ }
+ if (i == enck_size_in_word)
+ cfg |= 1 << CRYPTO_USE_HW_KEY;
+ break;
+ } /* end of switch (q_req->encklen) */
+
+ cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+ if (pce_dev->aes_key_size != q_req->encklen)
+ aes_key_chg = 1;
+ else {
+ for (i = 0; i < enck_size_in_word; i++) {
+ if (enckey32[i] != pce_dev->aeskey[i])
+ break;
+ }
+ aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
+ }
+
+ if (aes_key_chg) {
+ if (pce_dev->fastaes) {
+ for (i = 0; i < enck_size_in_word;
+ i++) {
+ writel_relaxed(enckey32[i],
+ pce_dev->iobase +
+ CRYPTO_AES_RNDKEY0 +
+ (i * sizeof(uint32_t)));
+ }
+ } else {
+ /* size in bit */
+ _aes_expand_key_schedule(
+ q_req->encklen * 8,
+ enckey32, aes_round_key);
+
+ for (i = 0; i < CRYPTO_AES_RNDKEYS;
+ i++) {
+ writel_relaxed(aes_round_key[i],
+ pce_dev->iobase +
+ CRYPTO_AES_RNDKEY0 +
+ (i * sizeof(uint32_t)));
+ }
+ }
+
+ pce_dev->aes_key_size = q_req->encklen;
+ for (i = 0; i < enck_size_in_word; i++)
+ pce_dev->aeskey[i] = enckey32[i];
+ } /*if (aes_key_chg) { */
+ } /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+ break;
+ } /* end of switch (q_req->mode) */
+
+ if (q_req->dir == QCE_ENCRYPT)
+ cfg |= (1 << CRYPTO_AUTH_POS);
+ cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+
+ /* write encr seg cfg */
+ writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) |
+ (coffset & 0xffff), /* cipher offset */
+ pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write seg cfg and size */
+ writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+ writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+ /* issue go to crypto */
+ writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+ /* Ensure the previous instruction (setting the GO register)
+ * was completed before issuing a DMA transfer request
+ */
+ mb();
+ return 0;
+};
+
+static int _aead_complete(struct qce_device *pce_dev)
+{
+ struct aead_request *areq;
+ struct crypto_aead *aead;
+ uint32_t ivsize;
+ uint32_t iv_out[4];
+ unsigned char iv[4 * sizeof(uint32_t)];
+ uint32_t status;
+
+ areq = (struct aead_request *) pce_dev->areq;
+ aead = crypto_aead_reqtfm(areq);
+ ivsize = crypto_aead_ivsize(aead);
+
+ if (areq->src != areq->dst) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+ DMA_FROM_DEVICE);
+ }
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+ ivsize, DMA_TO_DEVICE);
+ qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+ DMA_TO_DEVICE);
+
+ /* check ce error status */
+ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+ if (status & (1 << CRYPTO_SW_ERR)) {
+ pce_dev->err++;
+ dev_err(pce_dev->pdev,
+ "Qualcomm Crypto Error at 0x%x, status%x\n",
+ pce_dev->phy_iobase, status);
+ _init_ce_engine(pce_dev);
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
+ return 0;
+ };
+
+ /* get iv out */
+ if (pce_dev->mode == QCE_MODE_ECB) {
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
+ pce_dev->chan_ce_in_status |
+ pce_dev->chan_ce_out_status);
+ } else {
+
+ iv_out[0] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ iv_out[1] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ iv_out[2] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR2_IV2_REG);
+ iv_out[3] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR3_IV3_REG);
+
+ _net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
+ pce_dev->chan_ce_in_status |
+ pce_dev->chan_ce_out_status);
+ };
+ return 0;
+};
+
+static void _sha_complete(struct qce_device *pce_dev)
+{
+
+ struct ahash_request *areq;
+ uint32_t auth_data[2];
+ uint32_t status;
+
+ areq = (struct ahash_request *) pce_dev->areq;
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ DMA_TO_DEVICE);
+
+ /* check ce error status */
+ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+ if (status & (1 << CRYPTO_SW_ERR)) {
+ pce_dev->err++;
+ dev_err(pce_dev->pdev,
+ "Qualcomm Crypto Error at 0x%x, status%x\n",
+ pce_dev->phy_iobase, status);
+ _init_ce_engine(pce_dev);
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
+ return;
+ };
+
+ auth_data[0] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG);
+ auth_data[1] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT1_REG);
+ /* Ensure previous instruction (retrieving byte count information)
+ * was completed before disabling the clk.
+ */
+ mb();
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data,
+ pce_dev->chan_ce_in_status);
+};
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev)
+{
+ struct ablkcipher_request *areq;
+ uint32_t iv_out[4];
+ unsigned char iv[4 * sizeof(uint32_t)];
+ uint32_t status;
+
+ areq = (struct ablkcipher_request *) pce_dev->areq;
+
+ if (areq->src != areq->dst) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+ pce_dev->dst_nents, DMA_FROM_DEVICE);
+ }
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+
+ /* check ce error status */
+ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+ if (status & (1 << CRYPTO_SW_ERR)) {
+ pce_dev->err++;
+ dev_err(pce_dev->pdev,
+ "Qualcomm Crypto Error at 0x%x, status%x\n",
+ pce_dev->phy_iobase, status);
+ _init_ce_engine(pce_dev);
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
+ return 0;
+ };
+
+ /* get iv out */
+ if (pce_dev->mode == QCE_MODE_ECB) {
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+ pce_dev->chan_ce_out_status);
+ } else {
+ iv_out[0] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ iv_out[1] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ iv_out[2] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR2_IV2_REG);
+ iv_out[3] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR3_IV3_REG);
+
+ _net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+ pce_dev->chan_ce_out_status);
+ }
+
+ return 0;
+};
+
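+/*
+ * Split a transfer larger than the CE FIFO (QCE_FIFO_SIZE bytes) into
+ * multiple ADM descriptors, advancing the caller's descriptor index.
+ */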
+static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
+ unsigned int plen, unsigned int paddr, int *index)
+{
+ while (plen > QCE_FIFO_SIZE) {
+ pdesc->len = QCE_FIFO_SIZE;
+ if (paddr > 0) {
+ pdesc->addr = paddr;
+ paddr += QCE_FIFO_SIZE;
+ }
+ plen -= pdesc->len;
+ if (plen > 0) {
+ *index = (*index) + 1;
+ if ((*index) >= QCE_MAX_NUM_DESC)
+ return -ENOMEM;
+ pdesc++;
+ }
+ }
+ if ((plen > 0) && (plen <= QCE_FIFO_SIZE)) {
+ pdesc->len = plen;
+ if (paddr > 0)
+ pdesc->addr = paddr;
+ }
+
+ return 0;
+}
+
+static int _chain_sg_buffer_in(struct qce_device *pce_dev,
+ struct scatterlist *sg, unsigned int nbytes)
+{
+ unsigned int len;
+ unsigned int dlen;
+ struct dmov_desc *pdesc;
+
+ pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+ /*
+ * Two consecutive, physically contiguous chunks may be coalesced
+ * into the same buffer descriptor.
+ */
+ while (nbytes > 0) {
+ len = min(nbytes, sg_dma_len(sg));
+ dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+ nbytes -= len;
+ if (dlen == 0) {
+ pdesc->addr = sg_dma_address(sg);
+ pdesc->len = len;
+ if (pdesc->len > QCE_FIFO_SIZE) {
+ if (qce_split_and_insert_dm_desc(pdesc,
+ pdesc->len, sg_dma_address(sg),
+ &pce_dev->ce_in_src_desc_index))
+ return -EIO;
+ }
+ } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+ pdesc->len = dlen + len;
+ if (pdesc->len > QCE_FIFO_SIZE) {
+ if (qce_split_and_insert_dm_desc(pdesc,
+ pdesc->len, pdesc->addr,
+ &pce_dev->ce_in_src_desc_index))
+ return -EIO;
+ }
+ } else {
+ pce_dev->ce_in_src_desc_index++;
+ if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+ return -ENOMEM;
+ pdesc++;
+ pdesc->len = len;
+ pdesc->addr = sg_dma_address(sg);
+ if (pdesc->len > QCE_FIFO_SIZE) {
+ if (qce_split_and_insert_dm_desc(pdesc,
+ pdesc->len, sg_dma_address(sg),
+ &pce_dev->ce_in_src_desc_index))
+ return -EIO;
+ }
+ }
+ if (nbytes > 0)
+ sg = scatterwalk_sg_next(sg);
+ }
+ return 0;
+}
+
+static int _chain_pm_buffer_in(struct qce_device *pce_dev,
+ unsigned int pmem, unsigned int nbytes)
+{
+ unsigned int dlen;
+ struct dmov_desc *pdesc;
+
+ pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+ dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+ if (dlen == 0) {
+ pdesc->addr = pmem;
+ pdesc->len = nbytes;
+ } else if (pmem == (pdesc->addr + dlen)) {
+ pdesc->len = dlen + nbytes;
+ } else {
+ pce_dev->ce_in_src_desc_index++;
+ if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+ return -ENOMEM;
+ pdesc++;
+ pdesc->len = nbytes;
+ pdesc->addr = pmem;
+ }
+ return 0;
+}
+
+static void _chain_buffer_in_init(struct qce_device *pce_dev)
+{
+ struct dmov_desc *pdesc;
+
+ pce_dev->ce_in_src_desc_index = 0;
+ pce_dev->ce_in_dst_desc_index = 0;
+ pdesc = pce_dev->ce_in_src_desc;
+ pdesc->len = 0;
+}
+
+static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total)
+{
+ struct dmov_desc *pdesc;
+ dmov_sg *pcmd;
+
+ pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+ pdesc->len |= ADM_DESC_LAST;
+
+ pdesc = pce_dev->ce_in_dst_desc;
+ if (total > QCE_FIFO_SIZE) {
+ qce_split_and_insert_dm_desc(pdesc, total, 0,
+ &pce_dev->ce_in_dst_desc_index);
+ pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
+ pdesc->len |= ADM_DESC_LAST;
+ } else
+ pdesc->len = ADM_DESC_LAST | total;
+
+ pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+ if (ncmd == 1)
+ pcmd->cmd |= CMD_LC;
+ else {
+ dmov_s *pscmd;
+
+ pcmd->cmd &= ~CMD_LC;
+ pcmd++;
+ pscmd = (dmov_s *)pcmd;
+ pscmd->cmd |= CMD_LC;
+ }
+
+#ifdef QCE_DEBUG
+ dev_info(pce_dev->pdev, "_ce_in_final %d\n",
+ pce_dev->ce_in_src_desc_index);
+#endif
+}
+
+#ifdef QCE_DEBUG
+static void _ce_in_dump(struct qce_device *pce_dev)
+{
+ int i;
+ struct dmov_desc *pdesc;
+
+ dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
+ for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
+ pdesc = pce_dev->ce_in_src_desc + i;
+ dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+ pdesc->len);
+ }
+ dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
+ for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
+ pdesc = pce_dev->ce_in_dst_desc + i;
+ dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+ pdesc->len);
+ }
+};
+
+static void _ce_out_dump(struct qce_device *pce_dev)
+{
+ int i;
+ struct dmov_desc *pdesc;
+
+ dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
+ for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
+ pdesc = pce_dev->ce_out_src_desc + i;
+ dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+ pdesc->len);
+ }
+
+ dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
+ for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
+ pdesc = pce_dev->ce_out_dst_desc + i;
+ dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+ pdesc->len);
+ }
+};
+#endif
+
+static int _chain_sg_buffer_out(struct qce_device *pce_dev,
+ struct scatterlist *sg, unsigned int nbytes)
+{
+ unsigned int len;
+ unsigned int dlen;
+ struct dmov_desc *pdesc;
+
+ pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+ /*
+ * Two consecutive, physically contiguous chunks may be coalesced
+ * into the same buffer descriptor.
+ */
+ while (nbytes > 0) {
+ len = min(nbytes, sg_dma_len(sg));
+ dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+ nbytes -= len;
+ if (dlen == 0) {
+ pdesc->addr = sg_dma_address(sg);
+ pdesc->len = len;
+ if (pdesc->len > QCE_FIFO_SIZE) {
+ if (qce_split_and_insert_dm_desc(pdesc,
+ pdesc->len, sg_dma_address(sg),
+ &pce_dev->ce_out_dst_desc_index))
+ return -EIO;
+ }
+ } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+ pdesc->len = dlen + len;
+ if (pdesc->len > QCE_FIFO_SIZE) {
+ if (qce_split_and_insert_dm_desc(pdesc,
+ pdesc->len, pdesc->addr,
+ &pce_dev->ce_out_dst_desc_index))
+ return -EIO;
+ }
+ } else {
+ pce_dev->ce_out_dst_desc_index++;
+ if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+ return -EIO;
+ pdesc++;
+ pdesc->len = len;
+ pdesc->addr = sg_dma_address(sg);
+ if (pdesc->len > QCE_FIFO_SIZE) {
+ if (qce_split_and_insert_dm_desc(pdesc,
+ pdesc->len, sg_dma_address(sg),
+ &pce_dev->ce_out_dst_desc_index))
+ return -EIO;
+ }
+ }
+ if (nbytes > 0)
+ sg = scatterwalk_sg_next(sg);
+ }
+ return 0;
+}
+
+static int _chain_pm_buffer_out(struct qce_device *pce_dev,
+ unsigned int pmem, unsigned int nbytes)
+{
+ unsigned int dlen;
+ struct dmov_desc *pdesc;
+
+ pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+ dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+
+ if (dlen == 0) {
+ pdesc->addr = pmem;
+ pdesc->len = nbytes;
+ } else if (pmem == (pdesc->addr + dlen)) {
+ pdesc->len = dlen + nbytes;
+ } else {
+ pce_dev->ce_out_dst_desc_index++;
+ if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+ return -EIO;
+ pdesc++;
+ pdesc->len = nbytes;
+ pdesc->addr = pmem;
+ }
+ return 0;
+};
+
+static void _chain_buffer_out_init(struct qce_device *pce_dev)
+{
+ struct dmov_desc *pdesc;
+
+ pce_dev->ce_out_dst_desc_index = 0;
+ pce_dev->ce_out_src_desc_index = 0;
+ pdesc = pce_dev->ce_out_dst_desc;
+ pdesc->len = 0;
+};
+
+static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total)
+{
+ struct dmov_desc *pdesc;
+ dmov_sg *pcmd;
+
+ pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+ pdesc->len |= ADM_DESC_LAST;
+
+ pdesc = pce_dev->ce_out_src_desc;
+ if (total > QCE_FIFO_SIZE) {
+ qce_split_and_insert_dm_desc(pdesc, total, 0,
+ &pce_dev->ce_out_src_desc_index);
+ pdesc = pce_dev->ce_out_src_desc +
+ pce_dev->ce_out_src_desc_index;
+ pdesc->len |= ADM_DESC_LAST;
+ } else
+ pdesc->len = ADM_DESC_LAST | total;
+
+ pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+ if (ncmd == 1)
+ pcmd->cmd |= CMD_LC;
+ else {
+ dmov_s *pscmd;
+
+ pcmd->cmd &= ~CMD_LC;
+ pcmd++;
+ pscmd = (dmov_s *)pcmd;
+ pscmd->cmd |= CMD_LC;
+ }
+#ifdef QCE_DEBUG
+ dev_info(pce_dev->pdev, "_ce_out_final %d\n",
+ pce_dev->ce_out_dst_desc_index);
+#endif
+
+};
+
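+/*
+ * ADM completion callbacks. For operations that use both the ce_in and
+ * ce_out channels, the request is completed only after both channels
+ * have reported completion.
+ */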
+static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_in_status = -1;
+ } else
+ pce_dev->chan_ce_in_status = 0;
+
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+ if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+ /* done */
+ _aead_complete(pce_dev);
+ }
+};
+
+static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_out_status = -1;
+ } else {
+ pce_dev->chan_ce_out_status = 0;
+ };
+
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+ if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+ /* done */
+ _aead_complete(pce_dev);
+ }
+
+};
+
+static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_in_status = -1;
+ } else
+ pce_dev->chan_ce_in_status = 0;
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ _sha_complete(pce_dev);
+};
+
+static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_in_status = -1;
+ } else
+ pce_dev->chan_ce_in_status = 0;
+
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+ if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+ /* done */
+ _ablk_cipher_complete(pce_dev);
+ }
+};
+
+static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_out_status = -1;
+ } else {
+ pce_dev->chan_ce_out_status = 0;
+ };
+
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+ if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+ /* done */
+ _ablk_cipher_complete(pce_dev);
+ }
+};
+
+
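+/*
+ * Carve up the coherent memory allocated in qce_open() and build the
+ * static ADM command lists, descriptors and command pointer lists that
+ * are reused, with per-request fields filled in, for every operation.
+ */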
+static int _setup_cmd_template(struct qce_device *pce_dev)
+{
+ dmov_sg *pcmd;
+ dmov_s *pscmd;
+ struct dmov_desc *pdesc;
+ unsigned char *vaddr;
+ int i = 0;
+
+	/* Divide up the coherent memory (2 * PAGE_SIZE) allocated in qce_open() */
+ /* 1. ce_in channel 1st command src descriptors, 128 entries */
+ vaddr = pce_dev->coh_vmem;
+ vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+ pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
+ pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
+ (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 2. ce_in channel 1st command dst descriptors, 128 entries */
+ vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+ pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
+ pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
+ (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+ /*
+ * 3. ce_in channel command list of one scatter gather command
+ * and one simple command.
+ */
+ pce_dev->cmd_list_ce_in = vaddr;
+ pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
+ + (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);
+
+ /* 4. authentication result. */
+ pce_dev->dig_result = vaddr;
+ pce_dev->phy_dig_result = pce_dev->coh_pmem +
+ (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + SHA256_DIGESTSIZE;
+
+ /*
+ * 5. ce_out channel command list of one scatter gather command
+ * and one simple command.
+ */
+ pce_dev->cmd_list_ce_out = vaddr;
+ pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
+ + (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);
+
+	/* 6. ce_out channel command src descriptors, 128 entries */
+ vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+ pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
+ pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
+ + (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+ /* 7. ce_out channel command dst descriptors, 128 entries. */
+ vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+ pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
+ pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
+ + (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+ /* 8. pad area. */
+ pce_dev->ce_pad = vaddr;
+ pce_dev->phy_ce_pad = pce_dev->coh_pmem +
+ (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + ADM_CE_BLOCK_SIZE;
+
+ /* 9. ce_in channel command pointer list. */
+ vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16);
+ pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
+ pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
+ (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + sizeof(unsigned char *);
+
+	/* 10. ce_out channel command pointer list. */
+ vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16);
+ pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
+ pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
+ (vaddr - pce_dev->coh_vmem);
+ vaddr = vaddr + sizeof(unsigned char *);
+
+ /* 11. throw away area to store by-pass data from ce_out. */
+ pce_dev->ce_out_ignore = (unsigned char *) vaddr;
+ pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem
+ + (vaddr - pce_dev->coh_vmem);
+ pce_dev->ce_out_ignore_size = (2 * PAGE_SIZE) - (vaddr -
+ pce_dev->coh_vmem); /* at least 1.5 K of space */
+ /*
+ * The first command of command list ce_in is for the input of
+ * concurrent operation of encrypt/decrypt or for the input
+ * of authentication.
+ */
+ pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+	/* swap byte and half word, dst crci, scatter gather */
+ pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
+ CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
+ pdesc = pce_dev->ce_in_src_desc;
+ pdesc->addr = 0; /* to be filled in each operation */
+ pdesc->len = 0; /* to be filled in each operation */
+ pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;
+
+ pdesc = pce_dev->ce_in_dst_desc;
+ for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
+ pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+ pdesc->len = 0; /* to be filled in each operation */
+ pdesc++;
+ }
+ pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
+ pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+ DST_INDEX_SG_CMD(0);
+ pcmd++;
+ /*
+ * The second command is for the digested data of
+ * hashing operation only. For others, this command is not used.
+ */
+ pscmd = (dmov_s *) pcmd;
+ /* last command, swap byte, half word, src crci, single */
+ pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+ CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
+ pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
+ pscmd->len = SHA256_DIGESTSIZE; /* to be filled. */
+ pscmd->dst = (unsigned) pce_dev->phy_dig_result;
+ /* setup command pointer list */
+ *(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
+ DMOV_CMD_ADDR((unsigned int)
+ pce_dev->phy_cmd_list_ce_in));
+ pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
+ pce_dev->chan_ce_in_cmd->exec_func = NULL;
+ pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
+ (unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
+ /*
+ * The first command in the command list ce_out.
+	 * It is for encrypt/decrypt output.
+ * If hashing only, ce_out is not used.
+ */
+ pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+ /* swap byte, half word, source crci, scatter gather */
+ pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+ CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;
+
+ pdesc = pce_dev->ce_out_src_desc;
+ for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
+ pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+ pdesc->len = 0; /* to be filled in each operation */
+ pdesc++;
+ }
+ pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;
+
+ pdesc = pce_dev->ce_out_dst_desc;
+ pdesc->addr = 0; /* to be filled in each operation */
+ pdesc->len = 0; /* to be filled in each operation */
+ pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
+ pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+ DST_INDEX_SG_CMD(0);
+
+ pcmd++;
+ /*
+ * The second command is for digested data of esp operation.
+ * For ciphering, this command is not used.
+ */
+ pscmd = (dmov_s *) pcmd;
+ /* last command, swap byte, half word, src crci, single */
+ pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+ CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
+ pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
+ pscmd->len = SHA1_DIGESTSIZE; /* we only support hmac(sha1) */
+ pscmd->dst = (unsigned) pce_dev->phy_dig_result;
+ /* setup command pointer list */
+ *(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
+ DMOV_CMD_ADDR((unsigned int)pce_dev->
+ phy_cmd_list_ce_out));
+
+ pce_dev->chan_ce_out_cmd->user = pce_dev;
+ pce_dev->chan_ce_out_cmd->exec_func = NULL;
+ pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
+ (unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
+
+
+ return 0;
+};
+
+static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
+{
+
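+	/*
+	 * A channel that is not used for this request is marked as
+	 * already complete, so the completion callbacks (which wait for
+	 * both channels to reach QCE_CHAN_STATE_COMP) can finish.
+	 */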
+ if (ce_in)
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
+ else
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+
+ if (ce_out)
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
+ else
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+
+ if (ce_in)
+ msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
+ pce_dev->chan_ce_in_cmd);
+ if (ce_out)
+ msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
+ pce_dev->chan_ce_out_cmd);
+
+ return 0;
+};
+
+static void _f9_complete(struct qce_device *pce_dev)
+{
+ uint32_t mac_i;
+ uint32_t status;
+
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+ pce_dev->ota_size, DMA_TO_DEVICE);
+
+ /* check ce error status */
+ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+ if (status & (1 << CRYPTO_SW_ERR)) {
+ pce_dev->err++;
+ dev_err(pce_dev->pdev,
+ "Qualcomm Crypto Error at 0x%x, status%x\n",
+ pce_dev->phy_iobase, status);
+ _init_ce_engine(pce_dev);
+ pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
+ return;
+ };
+
+ mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+ pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL,
+ pce_dev->chan_ce_in_status);
+};
+
+static void _f8_complete(struct qce_device *pce_dev)
+{
+ uint32_t status;
+
+ if (pce_dev->phy_ota_dst != 0)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
+ pce_dev->ota_size, DMA_FROM_DEVICE);
+ if (pce_dev->phy_ota_src != 0)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+ pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
+ DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+ /* check ce error status */
+ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+ if (status & (1 << CRYPTO_SW_ERR)) {
+ pce_dev->err++;
+ dev_err(pce_dev->pdev,
+ "Qualcomm Crypto Error at 0x%x, status%x\n",
+ pce_dev->phy_iobase, status);
+ _init_ce_engine(pce_dev);
+ pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
+ return;
+ };
+
+ pce_dev->qce_cb(pce_dev->areq, NULL, NULL,
+ pce_dev->chan_ce_in_status |
+ pce_dev->chan_ce_out_status);
+};
+
+
+static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_in_status = -1;
+ } else
+ pce_dev->chan_ce_in_status = 0;
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ _f9_complete(pce_dev);
+};
+
+static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_in_status = -1;
+ } else
+ pce_dev->chan_ce_in_status = 0;
+
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+ if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+ /* done */
+ _f8_complete(pce_dev);
+ }
+};
+
+static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_out_status = -1;
+ } else {
+ pce_dev->chan_ce_out_status = 0;
+ };
+
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+ if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+ /* done */
+ _f8_complete(pce_dev);
+ }
+};
+
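+/*
+ * F9 is the 3GPP OTA integrity (MAC) function and F8 the OTA ciphering
+ * function; UIA1/UEA1 are Kasumi based and UIA2/UEA2 are SNOW 3G based.
+ */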
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req)
+{
+ uint32_t cfg;
+ uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)];
+
+ _byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE);
+ writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+ writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
+ writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
+ writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
+ writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
+
+ writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+ writel_relaxed(req->count_i, pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT1_REG);
+
+ /* write auth_seg_cfg */
+ writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE,
+ pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+ /* write seg_cfg */
+ cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) |
+ (1 << CRYPTO_LAST);
+
+ if (req->algorithm == QCE_OTA_ALGO_KASUMI)
+ cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE);
+ else
+ cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE);
+
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ cfg |= 1 << CRYPTO_F9_DIRECTION;
+
+ writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+
+ /* write seg_size */
+ writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+ /* issue go to crypto */
+ writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+
+ /*
+ * barrier to ensure previous instructions
+ * (including GO) to CE finish before issue DMA transfer
+ * request.
+ */
+ mb();
+ return 0;
+};
+
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+ bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+ uint16_t cipher_size)
+{
+ uint32_t cfg;
+ uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)];
+
+ if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) ||
+ (req->bearer >= QCE_OTA_MAX_BEARER))
+ return -EINVAL;
+
+ /* write seg_cfg */
+ cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) |
+ (1 << CRYPTO_LAST);
+ if (req->algorithm == QCE_OTA_ALGO_KASUMI)
+ cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ);
+ else
+ cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ);
+ if (key_stream_mode)
+ cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE;
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ cfg |= 1 << CRYPTO_F8_DIRECTION;
+ writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+
+ /* write seg_size */
+ writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+ /* write 0 to auth_size, auth_offset */
+ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+ /* write encr_seg_cfg seg_size, seg_offset */
+ writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) |
+ (cipher_offset & 0xffff),
+ pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write keys */
+ _byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE);
+ writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG);
+ writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG);
+ writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG);
+ writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG);
+
+ /* write cntr0_iv0 for countC */
+ writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG);
+
+ /* write cntr1_iv1 for nPkts, and bearer */
+ if (npkts == 1)
+ npkts = 0;
+ writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+ npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+ pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
+
+ /* issue go to crypto */
+ writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+
+ /*
+ * barrier to ensure previous instructions
+ * (including GO) to CE finish before issue DMA transfer
+ * request.
+ */
+ mb();
+ return 0;
+};
+
+struct qce_pm_table qce_pm_table = {NULL, NULL};
+EXPORT_SYMBOL(qce_pm_table);
+
+void qce_get_driver_stats(void *handle)
+{
+}
+EXPORT_SYMBOL(qce_get_driver_stats);
+
+void qce_clear_driver_stats(void *handle)
+{
+}
+EXPORT_SYMBOL(qce_clear_driver_stats);
+
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ struct aead_request *areq = (struct aead_request *) q_req->areq;
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ uint32_t ivsize = crypto_aead_ivsize(aead);
+ uint32_t totallen;
+ uint32_t pad_len;
+ uint32_t authsize = crypto_aead_authsize(aead);
+ int rc = 0;
+
+ q_req->ivsize = ivsize;
+ if (q_req->dir == QCE_ENCRYPT)
+ q_req->cryptlen = areq->cryptlen;
+ else
+ q_req->cryptlen = areq->cryptlen - authsize;
+
+ if ((q_req->cryptlen > ULONG_MAX - ivsize) ||
+ (q_req->cryptlen + ivsize > ULONG_MAX - areq->assoclen)) {
+ pr_err("Integer overflow on total aead req length.\n");
+ return -EINVAL;
+ }
+
+ totallen = q_req->cryptlen + ivsize + areq->assoclen;
+ pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen;
+
+ _chain_buffer_in_init(pce_dev);
+ _chain_buffer_out_init(pce_dev);
+
+ pce_dev->assoc_nents = 0;
+ pce_dev->phy_iv_in = 0;
+ pce_dev->src_nents = 0;
+ pce_dev->dst_nents = 0;
+
+ pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+ qce_dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+ DMA_TO_DEVICE);
+ if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ /* cipher iv for input */
+ pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
+ ivsize, DMA_TO_DEVICE);
+ if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ /* for output, ignore associated data and cipher iv */
+ if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
+ ivsize + areq->assoclen) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ /* cipher input */
+ pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen);
+ qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ /* cipher output */
+ if (areq->src != areq->dst) {
+ pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen);
+ qce_dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+ DMA_FROM_DEVICE);
+ };
+ if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ /* pad data */
+ if (pad_len) {
+ if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+ pad_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+ pad_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ }
+
+ /* finalize the ce_in and ce_out channels command lists */
+ _ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
+ _ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
+
+ /* set up crypto device */
+ rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to adm */
+ pce_dev->areq = q_req->areq;
+ pce_dev->qce_cb = q_req->qce_cb;
+
+ pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
+ pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;
+
+ rc = _qce_start_dma(pce_dev, true, true);
+ if (rc == 0)
+ return 0;
+bad:
+ if (pce_dev->assoc_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->assoc,
+ pce_dev->assoc_nents, DMA_TO_DEVICE);
+ }
+ if (pce_dev->phy_iv_in) {
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+ ivsize, DMA_TO_DEVICE);
+ }
+ if (pce_dev->src_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ }
+ if (pce_dev->dst_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+ DMA_FROM_DEVICE);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+ int rc = 0;
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ struct ablkcipher_request *areq = (struct ablkcipher_request *)
+ c_req->areq;
+
+ uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
+ - areq->nbytes;
+
+ _chain_buffer_in_init(pce_dev);
+ _chain_buffer_out_init(pce_dev);
+
+ pce_dev->src_nents = 0;
+ pce_dev->dst_nents = 0;
+ /* cipher input */
+ pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
+
+ qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ /* cipher output */
+ if (areq->src != areq->dst) {
+ pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
+ qce_dma_map_sg(pce_dev->pdev, areq->dst,
+ pce_dev->dst_nents, DMA_FROM_DEVICE);
+ };
+ if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ /* pad data */
+ if (pad_len) {
+ if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+ pad_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+ pad_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ }
+
+ /* finalize the ce_in and ce_out channels command lists */
+ _ce_in_final(pce_dev, 1, areq->nbytes + pad_len);
+ _ce_out_final(pce_dev, 1, areq->nbytes + pad_len);
+
+#ifdef QCE_DEBUG
+ _ce_in_dump(pce_dev);
+ _ce_out_dump(pce_dev);
+#endif
+ /* set up crypto device */
+ rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to adm */
+ pce_dev->areq = areq;
+ pce_dev->qce_cb = c_req->qce_cb;
+ pce_dev->chan_ce_in_cmd->complete_func =
+ _ablk_cipher_ce_in_call_back;
+ pce_dev->chan_ce_out_cmd->complete_func =
+ _ablk_cipher_ce_out_call_back;
+ rc = _qce_start_dma(pce_dev, true, true);
+
+ if (rc == 0)
+ return 0;
+bad:
+ if (pce_dev->dst_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+ pce_dev->dst_nents, DMA_FROM_DEVICE);
+ }
+ if (pce_dev->src_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src,
+ pce_dev->src_nents,
+ (areq->src == areq->dst) ?
+ DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ int rc;
+ uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
+ struct ahash_request *areq = (struct ahash_request *)sreq->areq;
+
+ _chain_buffer_in_init(pce_dev);
+ pce_dev->src_nents = count_sg(sreq->src, sreq->size);
+ qce_dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
+ DMA_TO_DEVICE);
+
+ if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ if (pad_len) {
+ if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+ pad_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ }
+ _ce_in_final(pce_dev, 2, sreq->size + pad_len);
+
+#ifdef QCE_DEBUG
+ _ce_in_dump(pce_dev);
+#endif
+
+ rc = _sha_ce_setup(pce_dev, sreq);
+
+ if (rc < 0)
+ goto bad;
+
+ pce_dev->areq = areq;
+ pce_dev->qce_cb = sreq->qce_cb;
+ pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;
+
+ rc = _qce_start_dma(pce_dev, true, false);
+
+ if (rc == 0)
+ return 0;
+bad:
+ if (pce_dev->src_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
+ pce_dev->src_nents, DMA_TO_DEVICE);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
+int qce_enable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+ return 0;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
+/*
+ * crypto engine open function.
+ */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+ struct qce_device *pce_dev;
+ struct resource *resource;
+ struct clk *ce_clk;
+
+ pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+ if (!pce_dev) {
+ *rc = -ENOMEM;
+ dev_err(&pdev->dev, "Can not allocate memory\n");
+ return NULL;
+ }
+ pce_dev->pdev = &pdev->dev;
+ ce_clk = clk_get(pce_dev->pdev, "core_clk");
+ if (IS_ERR(ce_clk)) {
+ kfree(pce_dev);
+ *rc = PTR_ERR(ce_clk);
+ return NULL;
+ }
+ pce_dev->ce_clk = ce_clk;
+ *rc = clk_enable(pce_dev->ce_clk);
+ if (*rc) {
+ kfree(pce_dev);
+ return NULL;
+ }
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!resource) {
+ *rc = -ENXIO;
+ dev_err(pce_dev->pdev, "Missing MEM resource\n");
+ goto err;
+ };
+ pce_dev->phy_iobase = resource->start;
+ pce_dev->iobase = ioremap_nocache(resource->start,
+ resource->end - resource->start + 1);
+ if (!pce_dev->iobase) {
+ *rc = -ENOMEM;
+ dev_err(pce_dev->pdev, "Can not map io memory\n");
+ goto err;
+ }
+
+ pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+ GFP_KERNEL);
+ pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+ GFP_KERNEL);
+ if (pce_dev->chan_ce_in_cmd == NULL ||
+ pce_dev->chan_ce_out_cmd == NULL) {
+ dev_err(pce_dev->pdev, "Can not allocate memory\n");
+ *rc = -ENOMEM;
+ goto err;
+ }
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ "crypto_channels");
+ if (!resource) {
+ *rc = -ENXIO;
+ dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
+ goto err;
+ };
+ pce_dev->chan_ce_in = resource->start;
+ pce_dev->chan_ce_out = resource->end;
+ resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ "crypto_crci_in");
+ if (!resource) {
+ *rc = -ENXIO;
+ dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
+ goto err;
+ };
+ pce_dev->crci_in = resource->start;
+ resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ "crypto_crci_out");
+ if (!resource) {
+ *rc = -ENXIO;
+ dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
+ goto err;
+ };
+ pce_dev->crci_out = resource->start;
+ resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ "crypto_crci_hash");
+ if (!resource) {
+ *rc = -ENXIO;
+ dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n");
+ goto err;
+ };
+ pce_dev->crci_hash = resource->start;
+ pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+ 2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
+
+ if (pce_dev->coh_vmem == NULL) {
+ *rc = -ENOMEM;
+ dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
+ goto err;
+ }
+ _setup_cmd_template(pce_dev);
+
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+ if (_init_ce_engine(pce_dev)) {
+ *rc = -ENXIO;
+ clk_disable(pce_dev->ce_clk);
+ goto err;
+ }
+ *rc = 0;
+ clk_disable(pce_dev->ce_clk);
+
+ pce_dev->err = 0;
+
+ return pce_dev;
+err:
+ if (pce_dev)
+ qce_close(pce_dev);
+ return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/*
+ * crypto engine close function.
+ */
+int qce_close(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+
+ if (handle == NULL)
+ return -ENODEV;
+ if (pce_dev->iobase)
+ iounmap(pce_dev->iobase);
+
+ if (pce_dev->coh_vmem)
+ dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE, pce_dev->coh_vmem,
+ pce_dev->coh_pmem);
+ kfree(pce_dev->chan_ce_in_cmd);
+ kfree(pce_dev->chan_ce_out_cmd);
+
+ clk_put(pce_dev->ce_clk);
+ kfree(handle);
+ return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+
+ if (ce_support == NULL)
+ return -EINVAL;
+
+ if (pce_dev->hmac == 1)
+ ce_support->sha1_hmac_20 = true;
+ else
+ ce_support->sha1_hmac_20 = false;
+ ce_support->sha1_hmac = false;
+ ce_support->sha256_hmac = false;
+ ce_support->sha_hmac = false;
+ ce_support->cmac = false;
+ ce_support->aes_key_192 = true;
+ ce_support->aes_xts = false;
+ ce_support->aes_ccm = false;
+ ce_support->ota = pce_dev->ota;
+ ce_support->aligned_only = false;
+ ce_support->is_shared = false;
+ ce_support->bam = false;
+ ce_support->max_request = 1;
+ return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ bool key_stream_mode;
+ dma_addr_t dst;
+ int rc;
+ uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) -
+ req->data_len;
+
+ _chain_buffer_in_init(pce_dev);
+ _chain_buffer_out_init(pce_dev);
+
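+	/*
+	 * A null input buffer requests key stream generation only: no
+	 * ce_in data is chained and the F8 key stream enable bit is set
+	 * in _ce_f8_setup().
+	 */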
+ key_stream_mode = (req->data_in == NULL);
+
+ /* F8 cipher input */
+ if (key_stream_mode)
+ pce_dev->phy_ota_src = 0;
+ else {
+ pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
+ req->data_in, req->data_len,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
+ req->data_len) < 0) {
+ pce_dev->phy_ota_dst = 0;
+ rc = -ENOMEM;
+ goto bad;
+ }
+ }
+
+ /* F8 cipher output */
+ if (req->data_in != req->data_out) {
+ dst = dma_map_single(pce_dev->pdev, req->data_out,
+ req->data_len, DMA_FROM_DEVICE);
+ pce_dev->phy_ota_dst = dst;
+ } else {
+ dst = pce_dev->phy_ota_src;
+ pce_dev->phy_ota_dst = 0;
+ }
+ if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ pce_dev->ota_size = req->data_len;
+
+ /* pad data */
+ if (pad_len) {
+ if (!key_stream_mode && _chain_pm_buffer_in(pce_dev,
+ pce_dev->phy_ce_pad, pad_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+ pad_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ }
+
+ /* finalize the ce_in and ce_out channels command lists */
+ if (!key_stream_mode)
+ _ce_in_final(pce_dev, 1, req->data_len + pad_len);
+ _ce_out_final(pce_dev, 1, req->data_len + pad_len);
+
+ /* set up crypto device */
+ rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to adm */
+ pce_dev->areq = cookie;
+ pce_dev->qce_cb = qce_cb;
+
+ if (!key_stream_mode)
+ pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
+
+ pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;
+
+ rc = _qce_start_dma(pce_dev, !(key_stream_mode), true);
+ if (rc == 0)
+ return 0;
+bad:
+ if (pce_dev->phy_ota_dst != 0)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
+ req->data_len, DMA_FROM_DEVICE);
+ if (pce_dev->phy_ota_src != 0)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+ req->data_len,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
+
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+ void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ uint16_t num_pkt = mreq->num_pkt;
+ uint16_t cipher_start = mreq->cipher_start;
+ uint16_t cipher_size = mreq->cipher_size;
+ struct qce_f8_req *req = &mreq->qce_f8_req;
+ uint32_t total;
+ uint32_t pad_len;
+ dma_addr_t dst = 0;
+ int rc = 0;
+
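+	/* the num_pkt packets are laid out back to back in data_in/data_out */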
+ total = num_pkt * req->data_len;
+ pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total;
+
+ _chain_buffer_in_init(pce_dev);
+ _chain_buffer_out_init(pce_dev);
+
+ /* F8 cipher input */
+ pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
+ req->data_in, total,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
+ total) < 0) {
+ pce_dev->phy_ota_dst = 0;
+ rc = -ENOMEM;
+ goto bad;
+ }
+ /* F8 cipher output */
+ if (req->data_in != req->data_out) {
+ dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+ DMA_FROM_DEVICE);
+ pce_dev->phy_ota_dst = dst;
+ } else {
+ dst = pce_dev->phy_ota_src;
+ pce_dev->phy_ota_dst = 0;
+ }
+ if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ pce_dev->ota_size = total;
+
+ /* pad data */
+ if (pad_len) {
+ if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+ pad_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+ pad_len) < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ }
+
+ /* finalize the ce_in and ce_out channels command lists */
+ _ce_in_final(pce_dev, 1, total + pad_len);
+ _ce_out_final(pce_dev, 1, total + pad_len);
+
+
+ /* set up crypto device */
+ rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+ cipher_size);
+ if (rc)
+ goto bad;
+
+ /* setup for callback, and issue command to adm */
+ pce_dev->areq = cookie;
+ pce_dev->qce_cb = qce_cb;
+
+ pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
+ pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;
+
+ rc = _qce_start_dma(pce_dev, true, true);
+ if (rc == 0)
+ return 0;
+bad:
+ if (pce_dev->phy_ota_dst)
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
+
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+ qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ int rc;
+ uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize;
+
+ pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+ req->msize, DMA_TO_DEVICE);
+
+ _chain_buffer_in_init(pce_dev);
+ rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize);
+ if (rc < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+
+ pce_dev->ota_size = req->msize;
+ if (pad_len) {
+ rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+ pad_len);
+ if (rc < 0) {
+ rc = -ENOMEM;
+ goto bad;
+ }
+ }
+ _ce_in_final(pce_dev, 2, req->msize + pad_len);
+ rc = _ce_f9_setup(pce_dev, req);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to adm */
+ pce_dev->areq = cookie;
+ pce_dev->qce_cb = qce_cb;
+
+ pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back;
+
+ rc = _qce_start_dma(pce_dev, true, false);
+ if (rc == 0)
+ return 0;
+bad:
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+ req->msize, DMA_TO_DEVICE);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");
+
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
new file mode 100644
index 000000000000..7455a122922c
--- /dev/null
+++ b/drivers/crypto/msm/qce.h
@@ -0,0 +1,190 @@
+/* Qualcomm Crypto Engine driver API
+ *
+ * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __CRYPTO_MSM_QCE_H
+#define __CRYPTO_MSM_QCE_H
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+/* SHA digest size in bytes */
+#define SHA256_DIGESTSIZE 32
+#define SHA1_DIGESTSIZE 20
+
+#define AES_CE_BLOCK_SIZE 16
+
+/* key size in bytes */
+#define HMAC_KEY_SIZE (SHA1_DIGESTSIZE) /* hmac-sha1 */
+#define SHA_HMAC_KEY_SIZE 64
+#define DES_KEY_SIZE 8
+#define TRIPLE_DES_KEY_SIZE 24
+#define AES128_KEY_SIZE 16
+#define AES192_KEY_SIZE 24
+#define AES256_KEY_SIZE 32
+#define MAX_CIPHER_KEY_SIZE AES256_KEY_SIZE
+
+/* iv length in bytes */
+#define AES_IV_LENGTH 16
+#define DES_IV_LENGTH 8
+#define MAX_IV_LENGTH AES_IV_LENGTH
+
+/* Maximum number of bytes per transfer */
+#define QCE_MAX_OPER_DATA 0xFF00
+
+/* Maximum Nonce bytes */
+#define MAX_NONCE 16
+
+typedef void (*qce_comp_func_ptr_t)(void *areq,
+ unsigned char *icv, unsigned char *iv, int ret);
+
+/* Cipher algorithms supported */
+enum qce_cipher_alg_enum {
+ CIPHER_ALG_DES = 0,
+ CIPHER_ALG_3DES = 1,
+ CIPHER_ALG_AES = 2,
+ CIPHER_ALG_LAST
+};
+
+/* Hash and hmac algorithms supported */
+enum qce_hash_alg_enum {
+ QCE_HASH_SHA1 = 0,
+ QCE_HASH_SHA256 = 1,
+ QCE_HASH_SHA1_HMAC = 2,
+ QCE_HASH_SHA256_HMAC = 3,
+ QCE_HASH_AES_CMAC = 4,
+ QCE_HASH_LAST
+};
+
+/* Cipher encryption/decryption operations */
+enum qce_cipher_dir_enum {
+ QCE_ENCRYPT = 0,
+ QCE_DECRYPT = 1,
+ QCE_CIPHER_DIR_LAST
+};
+
+/* Cipher algorithms modes */
+enum qce_cipher_mode_enum {
+ QCE_MODE_CBC = 0,
+ QCE_MODE_ECB = 1,
+ QCE_MODE_CTR = 2,
+ QCE_MODE_XTS = 3,
+ QCE_MODE_CCM = 4,
+ QCE_CIPHER_MODE_LAST
+};
+
+/* Cipher operation type */
+enum qce_req_op_enum {
+ QCE_REQ_ABLK_CIPHER = 0,
+ QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
+ QCE_REQ_AEAD = 2,
+ QCE_REQ_LAST
+};
+
+/* Algorithms/features supported in CE HW engine */
+struct ce_hw_support {
+ bool sha1_hmac_20; /* Supports 20 bytes of HMAC key*/
+ bool sha1_hmac; /* supports max HMAC key of 64 bytes*/
+ bool sha256_hmac; /* supports max HMAC key of 64 bytes*/
+ bool sha_hmac; /* supports SHA1 and SHA256 MAX HMAC key of 64 bytes*/
+ bool cmac;
+ bool aes_key_192;
+ bool aes_xts;
+ bool aes_ccm;
+ bool ota;
+ bool aligned_only;
+ bool bam;
+ bool is_shared;
+ bool hw_key;
+ bool use_sw_aes_cbc_ecb_ctr_algo;
+ bool use_sw_aead_algo;
+ bool use_sw_aes_xts_algo;
+ bool use_sw_ahash_algo;
+ bool use_sw_hmac_algo;
+ bool use_sw_aes_ccm_algo;
+ bool clk_mgmt_sus_res;
+ unsigned int ce_device;
+ unsigned int ce_hw_instance;
+ unsigned int max_request;
+};
+
+/* Sha operation parameters */
+struct qce_sha_req {
+ qce_comp_func_ptr_t qce_cb; /* call back */
+ enum qce_hash_alg_enum alg; /* sha algorithm */
+ unsigned char *digest; /* sha digest */
+ struct scatterlist *src; /* pointer to scatter list entry */
+ uint32_t auth_data[4]; /* byte count */
+ unsigned char *authkey; /* auth key */
+ unsigned int authklen; /* auth key length */
+ bool first_blk; /* first block indicator */
+ bool last_blk; /* last block indicator */
+ unsigned int size; /* data length in bytes */
+ void *areq;
+ unsigned int flags;
+};
+
+struct qce_req {
+ enum qce_req_op_enum op; /* operation type */
+ qce_comp_func_ptr_t qce_cb; /* call back */
+ void *areq;
+ enum qce_cipher_alg_enum alg; /* cipher algorithms*/
+ enum qce_cipher_dir_enum dir; /* encryption? decryption? */
+ enum qce_cipher_mode_enum mode; /* algorithm mode */
+ enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */
+ unsigned char *authkey; /* authentication key */
+	unsigned int authklen; /* authentication key length */
+	unsigned int authsize; /* authentication (tag) size */
+ unsigned char nonce[MAX_NONCE];/* nonce for ccm mode */
+ unsigned char *assoc; /* Ptr to formatted associated data */
+ unsigned int assoclen; /* Formatted associated data length */
+ struct scatterlist *asg; /* Formatted associated data sg */
+ unsigned char *enckey; /* cipher key */
+ unsigned int encklen; /* cipher key length */
+ unsigned char *iv; /* initialization vector */
+ unsigned int ivsize; /* initialization vector size*/
+ unsigned int cryptlen; /* data length */
+ unsigned int use_pmem; /* is source of data PMEM allocated? */
+ struct qcedev_pmem_info *pmem; /* pointer to pmem_info structure*/
+ unsigned int flags;
+};
+
+struct qce_pm_table {
+ int (*suspend)(void *handle);
+ int (*resume)(void *handle);
+};
+
+extern struct qce_pm_table qce_pm_table;
+
+void *qce_open(struct platform_device *pdev, int *rc);
+int qce_close(void *handle);
+int qce_aead_req(void *handle, struct qce_req *req);
+int qce_ablk_cipher_req(void *handle, struct qce_req *req);
+int qce_hw_support(void *handle, struct ce_hw_support *support);
+int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+int qce_enable_clk(void *handle);
+int qce_disable_clk(void *handle);
+void qce_get_driver_stats(void *handle);
+void qce_clear_driver_stats(void *handle);
+
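+/*
+ * Illustrative call flow for a kernel client (a minimal sketch only;
+ * "pdev", "my_req" and their setup are hypothetical client code):
+ *
+ *	int rc;
+ *	struct ce_hw_support support;
+ *	void *handle = qce_open(pdev, &rc);
+ *
+ *	if (handle == NULL)
+ *		return rc;
+ *	qce_hw_support(handle, &support);
+ *	...
+ *	rc = qce_ablk_cipher_req(handle, &my_req);
+ *	... on completion, my_req.qce_cb(my_req.areq, icv, iv, ret) is called
+ *	qce_close(handle);
+ */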
+#endif /* __CRYPTO_MSM_QCE_H */
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
new file mode 100644
index 000000000000..36949f4b3f14
--- /dev/null
+++ b/drivers/crypto/msm/qce50.c
@@ -0,0 +1,6066 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/bitops.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/qcrypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <soc/qcom/socinfo.h>
+
+#include "qce.h"
+#include "qce50.h"
+#include "qcryptohw_50.h"
+#include "qce_ota.h"
+
+#define CRYPTO_CONFIG_RESET 0xE01EF
+#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
+#define QCE_MAX_NUM_DSCR 0x200
+#define QCE_SECTOR_SIZE 0x200
+#define CE_CLK_100MHZ 100000000
+#define CE_CLK_DIV 1000000
+
+#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
+#define CRYPTO_CORE_MINOR_VER_NUM 0x03
+#define CRYPTO_CORE_STEP_VER_NUM 0x1
+
+#define CRYPTO_REQ_USER_PAT 0xdead0000
+
+static DEFINE_MUTEX(bam_register_lock);
+static DEFINE_MUTEX(qce_iomap_mutex);
+
+struct bam_registration_info {
+ struct list_head qlist;
+ unsigned long handle;
+ uint32_t cnt;
+ uint32_t bam_mem;
+ void __iomem *bam_iobase;
+ bool support_cmd_dscr;
+};
+static LIST_HEAD(qce50_bam_list);
+
+/* Used to determine the mode */
+#define MAX_BUNCH_MODE_REQ 2
+/* Max number of request supported */
+#define MAX_QCE_BAM_REQ 8
+/* Interrupt flag will be set for every SET_INTR_AT_REQ request */
+#define SET_INTR_AT_REQ (MAX_QCE_BAM_REQ - 2)
+/* To create extra request space to hold dummy request */
+#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ (MAX_QCE_BAM_REQ + 1)
+/* Allocate the memory for MAX_QCE_BAM_REQ + 1 (for dummy request) */
+#define MAX_QCE_ALLOC_BAM_REQ MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
+/* QCE driver modes */
+#define IN_INTERRUPT_MODE 0
+#define IN_BUNCH_MODE 1
+/* Dummy request data length */
+#define DUMMY_REQ_DATA_LEN 64
+/* Delay timer to expire when in bunch mode */
+#define DELAY_IN_JIFFIES 5
+/* Index to point the dummy request */
+#define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ
+
+struct dummy_request {
+ struct qce_sha_req sreq;
+ uint8_t *in_buf;
+ struct scatterlist sg;
+ struct ahash_request areq;
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at one time. It is up to
+ * the sw above to ensure single threading of operation on an engine.
+ */
+struct qce_device {
+ struct device *pdev; /* Handle to platform_device structure */
+ struct bam_registration_info *pbam;
+
+ unsigned char *coh_vmem; /* Allocated coherent virtual memory */
+ dma_addr_t coh_pmem; /* Allocated coherent physical memory */
+ int memsize; /* Memory allocated */
+ uint32_t bam_mem; /* bam physical address, from DT */
+ uint32_t bam_mem_size; /* bam io size, from DT */
+ int is_shared; /* CE HW is shared */
+ bool support_cmd_dscr;
+ bool support_hw_key;
+ bool support_clk_mgmt_sus_res;
+ bool support_only_core_src_clk;
+
+ void __iomem *iobase; /* Virtual io base of CE HW */
+ unsigned int phy_iobase; /* Physical io base of CE HW */
+
+ struct clk *ce_core_src_clk; /* Handle to CE src clk*/
+ struct clk *ce_core_clk; /* Handle to CE clk */
+ struct clk *ce_clk; /* Handle to CE clk */
+ struct clk *ce_bus_clk; /* Handle to CE AXI clk*/
+ bool no_get_around;
+ bool no_ccm_mac_status_get_around;
+ unsigned int ce_opp_freq_hz;
+ bool use_sw_aes_cbc_ecb_ctr_algo;
+ bool use_sw_aead_algo;
+ bool use_sw_aes_xts_algo;
+ bool use_sw_ahash_algo;
+ bool use_sw_hmac_algo;
+ bool use_sw_aes_ccm_algo;
+ uint32_t engines_avail;
+ struct qce_ce_cfg_reg_setting reg;
+ struct ce_bam_info ce_bam_info;
+ struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
+ unsigned int ce_request_index;
+ spinlock_t lock;
+ spinlock_t sps_lock;
+ unsigned int no_of_queued_req;
+ struct timer_list timer;
+ struct dummy_request dummyreq;
+ unsigned int mode;
+ unsigned int intr_cadence;
+ unsigned int dev_no;
+ struct qce_driver_stats qce_stats;
+ atomic_t bunch_cmd_seq;
+ atomic_t last_intr_seq;
+};
+
+static void print_notify_debug(struct sps_event_notify *notify);
+static void _sps_producer_callback(struct sps_event_notify *notify);
+static int qce_dummy_req(struct qce_device *pce_dev);
+
+static int _qce50_disp_stats;
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha1[] = {
+ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+ 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
+};
+
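+/*
+ * Pack a byte stream into 32-bit words in network (big endian) byte
+ * order, as expected by the crypto engine key/IV registers.
+ */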
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned n;
+
+ n = len / sizeof(uint32_t);
+ for (; n > 0; n--) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00) |
+ (*(b+3) & 0xff);
+ b += sizeof(uint32_t);
+ iv++;
+ }
+
+ n = len % sizeof(uint32_t);
+ if (n == 3) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00);
+ } else if (n == 2) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000);
+ } else if (n == 1) {
+ *iv = ((*b << 24) & 0xff000000);
+ }
+}
+
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned i, j;
+ unsigned char swap_iv[AES_IV_LENGTH];
+
+ memset(swap_iv, 0, AES_IV_LENGTH);
+ for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
+ swap_iv[i] = b[j];
+ _byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
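+/* Count the scatterlist entries needed to cover nbytes of data. */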
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+ int i;
+
+ for (i = 0; nbytes > 0; i++, sg = scatterwalk_sg_next(sg))
+ nbytes -= sg->length;
+ return i;
+}
+
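+/*
+ * Map/unmap one scatterlist entry at a time so that chained lists
+ * walked with scatterwalk_sg_next() are handled correctly.
+ */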
+static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; ++i) {
+ dma_map_sg(dev, sg, 1, direction);
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return nents;
+}
+
+static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; ++i) {
+ dma_unmap_sg(dev, sg, 1, direction);
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return nents;
+}
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+ unsigned int rev;
+ unsigned int maj_rev, min_rev, step_rev;
+
+ rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+ mb();
+ maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+ min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+ step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
+
+ if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
+ pr_err("Unsupported Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
+ pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+ return -EIO;
+ } else {
+ /*
+ * The majority of crypto HW bugs have been fixed in 5.3.0 and
+		 * above. That allows a single sps transfer on the consumer
+		 * pipe and a single sps transfer on the producer pipe per
+		 * crypto request. The no_get_around flag indicates this.
+ *
+ * In 5.3.1, the CCM MAC_FAILED in result dump issue is
+ * fixed. no_ccm_mac_status_get_around flag indicates this.
+ */
+ pce_dev->no_get_around = (min_rev >=
+ CRYPTO_CORE_MINOR_VER_NUM) ? true : false;
+ if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
+ pce_dev->no_ccm_mac_status_get_around = true;
+ else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
+ (step_rev >= CRYPTO_CORE_STEP_VER_NUM))
+ pce_dev->no_ccm_mac_status_get_around = true;
+ else
+ pce_dev->no_ccm_mac_status_get_around = false;
+ }
+
+ pce_dev->ce_bam_info.minor_version = min_rev;
+
+ pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
+ CRYPTO_ENGINES_AVAIL);
+ dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n",
+ maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
+
+ pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
+
+ dev_info(pce_dev->pdev,
+		"CE device = 0x%x\n"
+		"IO base, CE = 0x%p\n"
+ "Consumer (IN) PIPE %d, "
+ "Producer (OUT) PIPE %d\n"
+ "IO base BAM = 0x%p\n"
+ "BAM IRQ %d\n"
+ "Engines Availability = 0x%x\n",
+ pce_dev->ce_bam_info.ce_device,
+ pce_dev->iobase,
+ pce_dev->ce_bam_info.dest_pipe_index,
+ pce_dev->ce_bam_info.src_pipe_index,
+ pce_dev->ce_bam_info.bam_iobase,
+ pce_dev->ce_bam_info.bam_irq,
+ pce_dev->engines_avail);
+ return 0;
+};
+
+static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
+ struct qce_device *pce_dev,
+ int req_info, struct qce_sha_req *sreq)
+{
+ struct ce_sps_data *pce_sps_data;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+
+ pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+ cmdlistptr = &pce_sps_data->cmdlistptr;
+ switch (sreq->alg) {
+ case QCE_HASH_SHA1:
+ return &cmdlistptr->auth_sha1;
+ case QCE_HASH_SHA256:
+ return &cmdlistptr->auth_sha256;
+ case QCE_HASH_SHA1_HMAC:
+ return &cmdlistptr->auth_sha1_hmac;
+ case QCE_HASH_SHA256_HMAC:
+ return &cmdlistptr->auth_sha256_hmac;
+ case QCE_HASH_AES_CMAC:
+ if (sreq->authklen == AES128_KEY_SIZE)
+ return &cmdlistptr->auth_aes_128_cmac;
+ return &cmdlistptr->auth_aes_256_cmac;
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static int _ce_setup_hash(struct qce_device *pce_dev,
+ struct qce_sha_req *sreq,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+ uint32_t diglen;
+ int i;
+ uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ bool sha1 = false;
+ struct sps_command_element *pce = NULL;
+ bool use_hw_key = false;
+ bool use_pipe_key = false;
+ uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+ uint32_t auth_cfg;
+
+ if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC) ||
+ (sreq->alg == QCE_HASH_AES_CMAC)) {
+
+
+ /* no more check for null key. use flag */
+ if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
+ == QCRYPTO_CTX_USE_HW_KEY)
+ use_hw_key = true;
+ else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY)
+ use_pipe_key = true;
+ pce = cmdlistinfo->go_proc;
+ if (use_hw_key == true) {
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+ pce_dev->phy_iobase);
+ } else {
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+ pce_dev->phy_iobase);
+ pce = cmdlistinfo->auth_key;
+ if (use_pipe_key == false) {
+ _byte_stream_to_net_words(mackey32,
+ sreq->authkey,
+ sreq->authklen);
+ for (i = 0; i < authk_size_in_word; i++, pce++)
+ pce->data = mackey32[i];
+ }
+ }
+ }
+
+ if (sreq->alg == QCE_HASH_AES_CMAC)
+ goto go_proc;
+
+ /* if not the last, the size has to be on the block boundary */
+ if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+ return -EIO;
+
+ switch (sreq->alg) {
+ case QCE_HASH_SHA1:
+ case QCE_HASH_SHA1_HMAC:
+ diglen = SHA1_DIGEST_SIZE;
+ sha1 = true;
+ break;
+ case QCE_HASH_SHA256:
+ case QCE_HASH_SHA256_HMAC:
+ diglen = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+ if (sreq->first_blk) {
+ if (sha1) {
+ for (i = 0; i < 5; i++)
+ auth32[i] = _std_init_vector_sha1[i];
+ } else {
+ for (i = 0; i < 8; i++)
+ auth32[i] = _std_init_vector_sha256[i];
+ }
+ } else {
+ _byte_stream_to_net_words(auth32, sreq->digest, diglen);
+ }
+
+ pce = cmdlistinfo->auth_iv;
+ for (i = 0; i < 5; i++, pce++)
+ pce->data = auth32[i];
+
+ if ((sreq->alg == QCE_HASH_SHA256) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC)) {
+ for (i = 5; i < 8; i++, pce++)
+ pce->data = auth32[i];
+ }
+
+ /* write auth_bytecnt 0/1, start with 0 */
+ pce = cmdlistinfo->auth_bytecount;
+ for (i = 0; i < 2; i++, pce++)
+ pce->data = sreq->auth_data[i];
+
+ /* Set/reset last bit in CFG register */
+ pce = cmdlistinfo->auth_seg_cfg;
+ auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
+ 1 << CRYPTO_FIRST |
+ 1 << CRYPTO_USE_PIPE_KEY_AUTH |
+ 1 << CRYPTO_USE_HW_KEY_AUTH);
+ if (sreq->last_blk)
+ auth_cfg |= 1 << CRYPTO_LAST;
+ if (sreq->first_blk)
+ auth_cfg |= 1 << CRYPTO_FIRST;
+ if (use_hw_key)
+ auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+ if (use_pipe_key)
+ auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+ pce->data = auth_cfg;
+go_proc:
+ /* write auth seg size */
+ pce = cmdlistinfo->auth_seg_size;
+ pce->data = sreq->size;
+
+ pce = cmdlistinfo->encr_seg_cfg;
+ pce->data = 0;
+
+ /* write auth seg size start*/
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+
+ /* always ensure there is input data. ZLT does not work for bam-ndp */
+ if (sreq->size)
+ pce->data = sreq->size;
+ else
+ pce->data = pce_dev->ce_bam_info.ce_burst_size;
+
+ return 0;
+}
+
+static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
+ struct qce_device *pce_dev,
+ int req_info, struct qce_req *creq)
+{
+ struct ce_sps_data *pce_sps_data;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+
+ pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+ cmdlistptr = &pce_sps_data->cmdlistptr;
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ switch (creq->mode) {
+ case QCE_MODE_CBC:
+ if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+ return &cmdlistptr->aead_hmac_sha1_cbc_des;
+ else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+ return &cmdlistptr->aead_hmac_sha256_cbc_des;
+ else
+ return NULL;
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ switch (creq->mode) {
+ case QCE_MODE_CBC:
+ if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+ return &cmdlistptr->aead_hmac_sha1_cbc_3des;
+ else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+ return &cmdlistptr->aead_hmac_sha256_cbc_3des;
+ else
+ return NULL;
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ case CIPHER_ALG_AES:
+ switch (creq->mode) {
+ case QCE_MODE_CBC:
+ if (creq->encklen == AES128_KEY_SIZE) {
+ if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+ return &cmdlistptr->
+ aead_hmac_sha1_cbc_aes_128;
+ else if (creq->auth_alg ==
+ QCE_HASH_SHA256_HMAC)
+ return &cmdlistptr->
+ aead_hmac_sha256_cbc_aes_128;
+ else
+ return NULL;
+ } else if (creq->encklen == AES256_KEY_SIZE) {
+ if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+ return &cmdlistptr->
+ aead_hmac_sha1_cbc_aes_256;
+ else if (creq->auth_alg ==
+ QCE_HASH_SHA256_HMAC)
+ return &cmdlistptr->
+ aead_hmac_sha256_cbc_aes_256;
+ else
+ return NULL;
+ } else
+ return NULL;
+ break;
+ default:
+ return NULL;
+ }
+ break;
+
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
+ uint32_t totallen_in, uint32_t coffset,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+ int i;
+ uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+ struct sps_command_element *pce;
+ uint32_t a_cfg;
+ uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+ uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+ uint32_t enck_size_in_word = 0;
+ uint32_t enciv_in_word;
+ uint32_t key_size;
+ uint32_t encr_cfg = 0;
+ uint32_t ivsize = q_req->ivsize;
+
+ key_size = q_req->encklen;
+ enck_size_in_word = key_size/sizeof(uint32_t);
+
+ switch (q_req->alg) {
+ case CIPHER_ALG_DES:
+ enciv_in_word = 2;
+ break;
+ case CIPHER_ALG_3DES:
+ enciv_in_word = 2;
+ break;
+ case CIPHER_ALG_AES:
+ if ((key_size != AES128_KEY_SIZE) &&
+ (key_size != AES256_KEY_SIZE))
+ return -EINVAL;
+ enciv_in_word = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* only support cbc mode */
+ if (q_req->mode != QCE_MODE_CBC)
+ return -EINVAL;
+
+ _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+ pce = cmdlistinfo->encr_cntr_iv;
+ for (i = 0; i < enciv_in_word; i++, pce++)
+ pce->data = enciv32[i];
+
+ /*
+ * write encr key
+ * do not use hw key or pipe key
+ */
+ _byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < enck_size_in_word; i++, pce++)
+ pce->data = enckey32[i];
+
+ /* write encr seg cfg */
+ pce = cmdlistinfo->encr_seg_cfg;
+ encr_cfg = pce->data;
+ if (q_req->dir == QCE_ENCRYPT)
+ encr_cfg |= (1 << CRYPTO_ENCODE);
+ else
+ encr_cfg &= ~(1 << CRYPTO_ENCODE);
+ pce->data = encr_cfg;
+
+ /* we only support sha1-hmac and sha256-hmac at this point */
+ _byte_stream_to_net_words(mackey32, q_req->authkey,
+ q_req->authklen);
+ pce = cmdlistinfo->auth_key;
+ for (i = 0; i < authk_size_in_word; i++, pce++)
+ pce->data = mackey32[i];
+ pce = cmdlistinfo->auth_iv;
+
+ if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+ for (i = 0; i < 5; i++, pce++)
+ pce->data = _std_init_vector_sha1[i];
+ else
+ for (i = 0; i < 8; i++, pce++)
+ pce->data = _std_init_vector_sha256[i];
+
+ /* write auth_bytecnt 0/1, start with 0 */
+ pce = cmdlistinfo->auth_bytecount;
+ for (i = 0; i < 2; i++, pce++)
+ pce->data = 0;
+
+ pce = cmdlistinfo->auth_seg_cfg;
+ a_cfg = pce->data;
+ a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
+ if (q_req->dir == QCE_ENCRYPT)
+ a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+ else
+ a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ pce->data = a_cfg;
+
+ /* write auth seg size */
+ pce = cmdlistinfo->auth_seg_size;
+ pce->data = totallen_in;
+
+	/* write auth seg start */
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = totallen_in;
+
+ /* write encr seg size */
+ pce = cmdlistinfo->encr_seg_size;
+ pce->data = q_req->cryptlen;
+
+ /* write encr seg start */
+ pce = cmdlistinfo->encr_seg_start;
+ pce->data = (coffset & 0xffff);
+
+ return 0;
+}
+
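+/*
+ * Pick the prebuilt cipher command list for this request based on
+ * algorithm, mode and AES key size; CCM requests map to the AEAD
+ * AES-CCM command lists.
+ */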
+static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
+ struct qce_device *pce_dev,
+ int req_info, struct qce_req *creq)
+{
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ cmdlistptr = &pce_sps_data->cmdlistptr;
+ if (creq->alg != CIPHER_ALG_AES) {
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ if (creq->mode == QCE_MODE_ECB)
+ return &cmdlistptr->cipher_des_ecb;
+ return &cmdlistptr->cipher_des_cbc;
+ case CIPHER_ALG_3DES:
+ if (creq->mode == QCE_MODE_ECB)
+ return &cmdlistptr->cipher_3des_ecb;
+ return &cmdlistptr->cipher_3des_cbc;
+ default:
+ return NULL;
+ }
+ } else {
+ switch (creq->mode) {
+ case QCE_MODE_ECB:
+ if (creq->encklen == AES128_KEY_SIZE)
+ return &cmdlistptr->cipher_aes_128_ecb;
+ return &cmdlistptr->cipher_aes_256_ecb;
+ case QCE_MODE_CBC:
+ case QCE_MODE_CTR:
+ if (creq->encklen == AES128_KEY_SIZE)
+ return &cmdlistptr->cipher_aes_128_cbc_ctr;
+ return &cmdlistptr->cipher_aes_256_cbc_ctr;
+ case QCE_MODE_XTS:
+ if (creq->encklen/2 == AES128_KEY_SIZE)
+ return &cmdlistptr->cipher_aes_128_xts;
+ return &cmdlistptr->cipher_aes_256_xts;
+ case QCE_MODE_CCM:
+ if (creq->encklen == AES128_KEY_SIZE)
+ return &cmdlistptr->aead_aes_128_ccm;
+ return &cmdlistptr->aead_aes_256_ccm;
+ default:
+ return NULL;
+ }
+ }
+ return NULL;
+}
+
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+ uint32_t totallen_in, uint32_t coffset,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+ 0, 0, 0, 0};
+ uint32_t enck_size_in_word = 0;
+ uint32_t key_size;
+ bool use_hw_key = false;
+ bool use_pipe_key = false;
+ uint32_t encr_cfg = 0;
+ uint32_t ivsize = creq->ivsize;
+ int i;
+ struct sps_command_element *pce = NULL;
+
+ if (creq->mode == QCE_MODE_XTS)
+ key_size = creq->encklen/2;
+ else
+ key_size = creq->encklen;
+
+ pce = cmdlistinfo->go_proc;
+ if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+ use_hw_key = true;
+ } else {
+ if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY)
+ use_pipe_key = true;
+ }
+ pce = cmdlistinfo->go_proc;
+ if (use_hw_key == true)
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+ pce_dev->phy_iobase);
+ else
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+ pce_dev->phy_iobase);
+ if ((use_pipe_key == false) && (use_hw_key == false)) {
+ _byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+ enck_size_in_word = key_size/sizeof(uint32_t);
+ }
+
+ if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+ uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+ uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+ uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+ uint32_t auth_cfg = 0;
+
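+		/*
+		 * For CCM the cipher key doubles as the CBC-MAC key:
+		 * authklen32 is derived from encklen and enckey32 is
+		 * written into the auth key registers below.
+		 */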
+ /* write nonce */
+ _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+ pce = cmdlistinfo->auth_nonce_info;
+ for (i = 0; i < noncelen32; i++, pce++)
+ pce->data = nonce32[i];
+
+ if (creq->authklen == AES128_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+ else {
+ if (creq->authklen == AES256_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+ }
+ if (creq->dir == QCE_ENCRYPT)
+ auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ else
+ auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+ auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+ if (use_hw_key == true) {
+ auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+ } else {
+ auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+ /* write auth key */
+ pce = cmdlistinfo->auth_key;
+ for (i = 0; i < authklen32; i++, pce++)
+ pce->data = enckey32[i];
+ }
+
+ pce = cmdlistinfo->auth_seg_cfg;
+ pce->data = auth_cfg;
+
+ pce = cmdlistinfo->auth_seg_size;
+ if (creq->dir == QCE_ENCRYPT)
+ pce->data = totallen_in;
+ else
+ pce->data = totallen_in - creq->authsize;
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+ } else {
+ if (creq->op != QCE_REQ_AEAD) {
+ pce = cmdlistinfo->auth_seg_cfg;
+ pce->data = 0;
+ }
+ }
+ switch (creq->mode) {
+ case QCE_MODE_ECB:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+ break;
+ case QCE_MODE_CBC:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+ break;
+ case QCE_MODE_XTS:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+ break;
+ case QCE_MODE_CCM:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+ encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+ (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+ break;
+ case QCE_MODE_CTR:
+ default:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+ break;
+ }
+
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce->data = enciv32[0];
+ pce++;
+ pce->data = enciv32[1];
+ }
+ if (use_hw_key == false) {
+ pce = cmdlistinfo->encr_key;
+ pce->data = enckey32[0];
+ pce++;
+ pce->data = enckey32[1];
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce->data = enciv32[0];
+ pce++;
+ pce->data = enciv32[1];
+ }
+ if (use_hw_key == false) {
+ /* write encr key */
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < 6; i++, pce++)
+ pce->data = enckey32[i];
+ }
+ break;
+ case CIPHER_ALG_AES:
+ default:
+ if (creq->mode == QCE_MODE_XTS) {
+ uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+ = {0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t xtsklen =
+ creq->encklen/(2 * sizeof(uint32_t));
+
+ if ((use_hw_key == false) && (use_pipe_key == false)) {
+ _byte_stream_to_net_words(xtskey32,
+ (creq->enckey + creq->encklen/2),
+ creq->encklen/2);
+ /* write xts encr key */
+ pce = cmdlistinfo->encr_xts_key;
+ for (i = 0; i < xtsklen; i++, pce++)
+ pce->data = xtskey32[i];
+ }
+ /* write xts du size */
+ pce = cmdlistinfo->encr_xts_du_size;
+ switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+ case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+ pce->data = min((unsigned int)QCE_SECTOR_SIZE,
+ creq->cryptlen);
+ break;
+ case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+ pce->data =
+ min((unsigned int)QCE_SECTOR_SIZE * 2,
+ creq->cryptlen);
+ break;
+ default:
+ pce->data = creq->cryptlen;
+ break;
+ }
+ }
+ if (creq->mode != QCE_MODE_ECB) {
+ if (creq->mode == QCE_MODE_XTS)
+ _byte_stream_swap_to_net_words(enciv32,
+ creq->iv, ivsize);
+ else
+ _byte_stream_to_net_words(enciv32, creq->iv,
+ ivsize);
+ /* write encr cntr iv */
+ pce = cmdlistinfo->encr_cntr_iv;
+ for (i = 0; i < 4; i++, pce++)
+ pce->data = enciv32[i];
+
+ if (creq->mode == QCE_MODE_CCM) {
+ /* write cntr iv for ccm */
+ pce = cmdlistinfo->encr_ccm_cntr_iv;
+ for (i = 0; i < 4; i++, pce++)
+ pce->data = enciv32[i];
+ /* update cntr_iv[3] by one */
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce += 3;
+ pce->data += 1;
+ }
+ }
+
+ if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+ encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ);
+ } else {
+ if (use_hw_key == false) {
+ /* write encr key */
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < enck_size_in_word; i++, pce++)
+ pce->data = enckey32[i];
+ }
+ } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+ break;
+	} /* end of switch (creq->alg) */
+
+ if (use_pipe_key)
+ encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+ << CRYPTO_USE_PIPE_KEY_ENCR);
+
+ /* write encr seg cfg */
+ pce = cmdlistinfo->encr_seg_cfg;
+ if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
+ if (creq->dir == QCE_ENCRYPT)
+ pce->data |= (1 << CRYPTO_ENCODE);
+ else
+ pce->data &= ~(1 << CRYPTO_ENCODE);
+ encr_cfg = pce->data;
+ } else {
+ encr_cfg |=
+ ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+ }
+ if (use_hw_key == true)
+ encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ else
+ encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ pce->data = encr_cfg;
+
+ /* write encr seg size */
+ pce = cmdlistinfo->encr_seg_size;
+ if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+ pce->data = (creq->cryptlen + creq->authsize);
+ else
+ pce->data = creq->cryptlen;
+
+ /* write encr seg start */
+ pce = cmdlistinfo->encr_seg_start;
+ pce->data = (coffset & 0xffff);
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = totallen_in;
+
+ return 0;
+}
+
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+ uint32_t cfg;
+ struct sps_command_element *pce;
+ int i;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cfg = pce_dev->reg.auth_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cfg = pce_dev->reg.auth_cfg_snow3g;
+ break;
+	}
+
+ /* write key in CRYPTO_AUTH_IV0-3_REG */
+ _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+ pce = cmdlistinfo->auth_iv;
+ for (i = 0; i < key_size_in_word; i++, pce++)
+ pce->data = ikey32[i];
+
+ /* write last bits in CRYPTO_AUTH_IV4_REG */
+ pce->data = req->last_bits;
+
+ /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+ pce = cmdlistinfo->auth_bytecount;
+ pce->data = req->fresh;
+
+ /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
+ pce++;
+ pce->data = req->count_i;
+
+ /* write auth seg cfg */
+ pce = cmdlistinfo->auth_seg_cfg;
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ cfg |= BIT(CRYPTO_F9_DIRECTION);
+ pce->data = cfg;
+
+ /* write auth seg size */
+ pce = cmdlistinfo->auth_seg_size;
+ pce->data = req->msize;
+
+	/* write auth seg start */
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = req->msize;
+
+ /* write go */
+ pce = cmdlistinfo->go_proc;
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+ return 0;
+}
+
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+ bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+ uint16_t cipher_size,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+ uint32_t cfg;
+ struct sps_command_element *pce;
+ int i;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cfg = pce_dev->reg.encr_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cfg = pce_dev->reg.encr_cfg_snow3g;
+ break;
+	}
+ /* write key */
+ _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < key_size_in_word; i++, pce++)
+ pce->data = ckey32[i];
+
+ /* write encr seg cfg */
+ pce = cmdlistinfo->encr_seg_cfg;
+ if (key_stream_mode)
+ cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ cfg |= BIT(CRYPTO_F8_DIRECTION);
+ pce->data = cfg;
+
+ /* write encr seg start */
+ pce = cmdlistinfo->encr_seg_start;
+ pce->data = (cipher_offset & 0xffff);
+
+ /* write encr seg size */
+ pce = cmdlistinfo->encr_seg_size;
+ pce->data = cipher_size;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = req->data_len;
+
+ /* write cntr0_iv0 for countC */
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce->data = req->count_c;
+ /* write cntr1_iv1 for nPkts, and bearer */
+ pce++;
+ if (npkts == 1)
+ npkts = 0;
+ pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+ npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
+
+ /* write go */
+ pce = cmdlistinfo->go_proc;
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+
+ return 0;
+}
+
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
+{
+ int i, j, ents;
+ struct ce_sps_data *pce_sps_data;
+ struct sps_iovec *iovec;
+ uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
+
+ pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+ iovec = pce_sps_data->in_transfer.iovec;
+ pr_info("==============================================\n");
+ pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
+ pr_info("==============================================\n");
+ for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) {
+ pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
+ iovec->addr, iovec->size, iovec->flags);
+ if (iovec->flags & cmd_flags) {
+ struct sps_command_element *pced;
+
+ pced = (struct sps_command_element *)
+ (GET_VIRT_ADDR(iovec->addr));
+ ents = iovec->size/(sizeof(struct sps_command_element));
+ for (j = 0; j < ents; j++) {
+ pr_info(" [%d] [0x%x] 0x%x\n", j,
+ pced->addr, pced->data);
+ pced++;
+ }
+ }
+ iovec++;
+ }
+
+ pr_info("==============================================\n");
+ pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
+ pr_info("==============================================\n");
+ iovec = pce_sps_data->out_transfer.iovec;
+ for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) {
+ pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
+ iovec->addr, iovec->size, iovec->flags);
+ iovec++;
+ }
+}
+
+#ifdef QCE_DEBUG
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+ _qce_dump_descr_fifos(pce_dev, req_info);
+}
+
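+/*
+ * In debug builds, log the address and value of every register
+ * write before performing it.
+ */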
+#define QCE_WRITE_REG(val, addr) \
+do { \
+	pr_info(" [0x%p] 0x%x\n", addr, (uint32_t)val); \
+	writel_relaxed(val, addr); \
+} while (0)
+
+#else
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+}
+
+#define QCE_WRITE_REG(val, addr) \
+ writel_relaxed(val, addr)
+
+#endif
+
+static int _ce_setup_hash_direct(struct qce_device *pce_dev,
+ struct qce_sha_req *sreq)
+{
+ uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+ uint32_t diglen;
+ bool use_hw_key = false;
+ bool use_pipe_key = false;
+ int i;
+ uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+ bool sha1 = false;
+ uint32_t auth_cfg = 0;
+
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /*
+	 * Ensure the previous instruction (setting the CONFIG register)
+	 * has completed before starting to program the other registers.
+	 * This guarantees the configuration is applied with the
+	 * endianness selected in the CONFIG register.
+ */
+ mb();
+
+ if (sreq->alg == QCE_HASH_AES_CMAC) {
+		/* clear auth seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+		/* clear encr seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+		/* clear encr seg_size */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* Clear auth_ivn, auth_keyn registers */
+ for (i = 0; i < 16; i++) {
+ QCE_WRITE_REG(0, (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+ QCE_WRITE_REG(0, (pce_dev->iobase +
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+ }
+ /* write auth_bytecnt 0/1/2/3, start with 0 */
+ for (i = 0; i < 4; i++)
+ QCE_WRITE_REG(0, pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG +
+ i * sizeof(uint32_t));
+
+ if (sreq->authklen == AES128_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
+ else
+ auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
+ }
+
+ if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC) ||
+ (sreq->alg == QCE_HASH_AES_CMAC)) {
+
+ _byte_stream_to_net_words(mackey32, sreq->authkey,
+ sreq->authklen);
+
+		/* no more check for null key. use flag to check */
+
+ if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
+ QCRYPTO_CTX_USE_HW_KEY) {
+ use_hw_key = true;
+ } else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY) {
+ use_pipe_key = true;
+ } else {
+ /* setup key */
+ for (i = 0; i < authk_size_in_word; i++)
+ QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
+ (CRYPTO_AUTH_KEY0_REG +
+ i*sizeof(uint32_t))));
+ }
+ }
+
+ if (sreq->alg == QCE_HASH_AES_CMAC)
+ goto go_proc;
+
+ /* if not the last, the size has to be on the block boundary */
+ if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+ return -EIO;
+
+ switch (sreq->alg) {
+ case QCE_HASH_SHA1:
+ auth_cfg = pce_dev->reg.auth_cfg_sha1;
+ diglen = SHA1_DIGEST_SIZE;
+ sha1 = true;
+ break;
+ case QCE_HASH_SHA1_HMAC:
+ auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
+ diglen = SHA1_DIGEST_SIZE;
+ sha1 = true;
+ break;
+ case QCE_HASH_SHA256:
+ auth_cfg = pce_dev->reg.auth_cfg_sha256;
+ diglen = SHA256_DIGEST_SIZE;
+ break;
+ case QCE_HASH_SHA256_HMAC:
+ auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
+ diglen = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+ if (sreq->first_blk) {
+ if (sha1) {
+ for (i = 0; i < 5; i++)
+ auth32[i] = _std_init_vector_sha1[i];
+ } else {
+ for (i = 0; i < 8; i++)
+ auth32[i] = _std_init_vector_sha256[i];
+ }
+ } else {
+ _byte_stream_to_net_words(auth32, sreq->digest, diglen);
+ }
+
+ /* Set auth_ivn, auth_keyn registers */
+ for (i = 0; i < 5; i++)
+ QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+ if ((sreq->alg == QCE_HASH_SHA256) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC)) {
+ for (i = 5; i < 8; i++)
+ QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+ }
+
+	/* write auth_bytecnt 0/1, start with 0 */
+ for (i = 0; i < 2; i++)
+ QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG +
+ i * sizeof(uint32_t));
+
+ /* Set/reset last bit in CFG register */
+ if (sreq->last_blk)
+ auth_cfg |= 1 << CRYPTO_LAST;
+ else
+ auth_cfg &= ~(1 << CRYPTO_LAST);
+ if (sreq->first_blk)
+ auth_cfg |= 1 << CRYPTO_FIRST;
+ else
+ auth_cfg &= ~(1 << CRYPTO_FIRST);
+ if (use_hw_key)
+ auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+ if (use_pipe_key)
+ auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+go_proc:
+ /* write seg_cfg */
+ QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+ /* write auth seg_size */
+ QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+ /* write auth_seg_start */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+ /* reset encr seg_cfg */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write seg_size */
+ QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* issue go to crypto */
+ if (use_hw_key == false) {
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ } else {
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+ }
+ /*
+	 * Ensure the previous instruction (setting the GO register)
+	 * has completed before issuing a DMA transfer request.
+ */
+ mb();
+ return 0;
+}
+
+static int _ce_setup_aead_direct(struct qce_device *pce_dev,
+ struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
+{
+ int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+ int i;
+ uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+ uint32_t a_cfg;
+ uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+ uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+ uint32_t enck_size_in_word = 0;
+ uint32_t enciv_in_word;
+ uint32_t key_size;
+ uint32_t ivsize = q_req->ivsize;
+ uint32_t encr_cfg;
+
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /*
+	 * Ensure the previous instruction (setting the CONFIG register)
+	 * has completed before starting to program the other registers.
+	 * This guarantees the configuration is applied with the
+	 * endianness selected in the CONFIG register.
+ */
+ mb();
+
+ key_size = q_req->encklen;
+ enck_size_in_word = key_size/sizeof(uint32_t);
+
+ switch (q_req->alg) {
+
+ case CIPHER_ALG_DES:
+
+ switch (q_req->mode) {
+ case QCE_MODE_CBC:
+ encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ enciv_in_word = 2;
+ break;
+
+ case CIPHER_ALG_3DES:
+
+ switch (q_req->mode) {
+ case QCE_MODE_CBC:
+ encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ enciv_in_word = 2;
+
+ break;
+
+ case CIPHER_ALG_AES:
+
+ switch (q_req->mode) {
+ case QCE_MODE_CBC:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+ else if (key_size == AES256_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ enciv_in_word = 4;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* write CNTR0_IV0_REG */
+ if (q_req->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+ for (i = 0; i < enciv_in_word; i++)
+ QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+ }
+
+ /*
+ * write encr key
+ * do not use hw key or pipe key
+ */
+ _byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+ for (i = 0; i < enck_size_in_word; i++)
+ QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));
+
+ /* write encr seg cfg */
+ if (q_req->dir == QCE_ENCRYPT)
+ encr_cfg |= (1 << CRYPTO_ENCODE);
+ QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* we only support sha1-hmac and sha256-hmac at this point */
+ _byte_stream_to_net_words(mackey32, q_req->authkey,
+ q_req->authklen);
+ for (i = 0; i < authk_size_in_word; i++)
+ QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
+ (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));
+
+ if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
+ for (i = 0; i < 5; i++)
+ QCE_WRITE_REG(_std_init_vector_sha1[i],
+ pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+ } else {
+ for (i = 0; i < 8; i++)
+ QCE_WRITE_REG(_std_init_vector_sha256[i],
+ pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+ }
+
+ /* write auth_bytecnt 0/1, start with 0 */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
+
+ /* write encr seg size */
+ QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* write encr start */
+ QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_START_REG);
+
+ if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+ a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
+ else
+ a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;
+
+ if (q_req->dir == QCE_ENCRYPT)
+ a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+ else
+ a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ /* write auth seg_cfg */
+ QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+ /* write auth seg_size */
+ QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+ /* write auth_seg_start */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+ /* write seg_size */
+ QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+				CRYPTO_CONFIG_REG));
+ /* issue go to crypto */
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ /*
+	 * Ensure the previous instruction (setting the GO register)
+	 * has completed before issuing a DMA transfer request.
+ */
+ mb();
+ return 0;
+}
+
+static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
+ struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
+{
+ uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+ 0, 0, 0, 0};
+ uint32_t enck_size_in_word = 0;
+ uint32_t key_size;
+ bool use_hw_key = false;
+ bool use_pipe_key = false;
+ uint32_t encr_cfg = 0;
+ uint32_t ivsize = creq->ivsize;
+ int i;
+
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /*
+	 * Ensure the previous instruction (setting the CONFIG register)
+	 * has completed before starting to program the other registers.
+	 * This guarantees the configuration is applied with the
+	 * endianness selected in the CONFIG register.
+ */
+ mb();
+
+ if (creq->mode == QCE_MODE_XTS)
+ key_size = creq->encklen/2;
+ else
+ key_size = creq->encklen;
+
+ if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+ use_hw_key = true;
+ } else {
+ if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY)
+ use_pipe_key = true;
+ }
+ if ((use_pipe_key == false) && (use_hw_key == false)) {
+ _byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+ enck_size_in_word = key_size/sizeof(uint32_t);
+ }
+ if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+ uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+ uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+ uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+ uint32_t auth_cfg = 0;
+
+ /* Clear auth_ivn, auth_keyn registers */
+ for (i = 0; i < 16; i++) {
+ QCE_WRITE_REG(0, (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+ QCE_WRITE_REG(0, (pce_dev->iobase +
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+ }
+ /* write auth_bytecnt 0/1/2/3, start with 0 */
+ for (i = 0; i < 4; i++)
+ QCE_WRITE_REG(0, pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG +
+ i * sizeof(uint32_t));
+ /* write nonce */
+ _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+ for (i = 0; i < noncelen32; i++)
+ QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
+ CRYPTO_AUTH_INFO_NONCE0_REG +
+ (i*sizeof(uint32_t)));
+
+ if (creq->authklen == AES128_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+ else {
+ if (creq->authklen == AES256_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+ }
+ if (creq->dir == QCE_ENCRYPT)
+ auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ else
+ auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+ auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+ if (use_hw_key == true) {
+ auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+ } else {
+ auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+ /* write auth key */
+ for (i = 0; i < authklen32; i++)
+ QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+ CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
+ }
+ QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
+ CRYPTO_AUTH_SEG_CFG_REG);
+ if (creq->dir == QCE_ENCRYPT) {
+ QCE_WRITE_REG(totallen_in, pce_dev->iobase +
+ CRYPTO_AUTH_SEG_SIZE_REG);
+ } else {
+ QCE_WRITE_REG((totallen_in - creq->authsize),
+ pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+ }
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+ } else {
+ if (creq->op != QCE_REQ_AEAD)
+ QCE_WRITE_REG(0, pce_dev->iobase +
+ CRYPTO_AUTH_SEG_CFG_REG);
+ }
+ /*
+	 * Ensure the previous writes to all AUTH registers have
+	 * completed before accessing a register that is not in
+	 * the same 1K range.
+ */
+ mb();
+ switch (creq->mode) {
+ case QCE_MODE_ECB:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+ break;
+ case QCE_MODE_CBC:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+ break;
+ case QCE_MODE_XTS:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+ break;
+ case QCE_MODE_CCM:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+ break;
+ case QCE_MODE_CTR:
+ default:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+ break;
+ }
+
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ } else {
+ encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
+ }
+ if (use_hw_key == false) {
+ QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
+ CRYPTO_ENCR_KEY0_REG);
+ QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
+ CRYPTO_ENCR_KEY1_REG);
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+ } else {
+ encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
+ }
+ if (use_hw_key == false) {
+ /* write encr key */
+ for (i = 0; i < 6; i++)
+				QCE_WRITE_REG(enckey32[i], (pce_dev->iobase +
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
+ }
+ break;
+ case CIPHER_ALG_AES:
+ default:
+ if (creq->mode == QCE_MODE_XTS) {
+ uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+ = {0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t xtsklen =
+ creq->encklen/(2 * sizeof(uint32_t));
+
+ if ((use_hw_key == false) && (use_pipe_key == false)) {
+ _byte_stream_to_net_words(xtskey32,
+ (creq->enckey + creq->encklen/2),
+ creq->encklen/2);
+ /* write xts encr key */
+ for (i = 0; i < xtsklen; i++)
+ QCE_WRITE_REG(xtskey32[i],
+ pce_dev->iobase +
+ CRYPTO_ENCR_XTS_KEY0_REG +
+ (i * sizeof(uint32_t)));
+ }
+ /* write xts du size */
+ switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+ case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+ QCE_WRITE_REG(
+ min((uint32_t)QCE_SECTOR_SIZE,
+ creq->cryptlen), pce_dev->iobase +
+ CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ break;
+ case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+ QCE_WRITE_REG(
+ min((uint32_t)(QCE_SECTOR_SIZE * 2),
+ creq->cryptlen), pce_dev->iobase +
+ CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ break;
+ default:
+ QCE_WRITE_REG(creq->cryptlen,
+ pce_dev->iobase +
+ CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ break;
+ }
+ }
+ if (creq->mode != QCE_MODE_ECB) {
+ if (creq->mode == QCE_MODE_XTS)
+ _byte_stream_swap_to_net_words(enciv32,
+ creq->iv, ivsize);
+ else
+ _byte_stream_to_net_words(enciv32, creq->iv,
+ ivsize);
+
+ /* write encr cntr iv */
+ for (i = 0; i <= 3; i++)
+ QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG +
+ (i * sizeof(uint32_t)));
+
+ if (creq->mode == QCE_MODE_CCM) {
+ /* write cntr iv for ccm */
+ for (i = 0; i <= 3; i++)
+ QCE_WRITE_REG(enciv32[i],
+ pce_dev->iobase +
+ CRYPTO_ENCR_CCM_INT_CNTR0_REG +
+ (i * sizeof(uint32_t)));
+ /* update cntr_iv[3] by one */
+ QCE_WRITE_REG((enciv32[3] + 1),
+ pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG +
+ (3 * sizeof(uint32_t)));
+ }
+ }
+
+ if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+ encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ);
+ } else {
+ if ((use_hw_key == false) && (use_pipe_key == false)) {
+ for (i = 0; i < enck_size_in_word; i++)
+ QCE_WRITE_REG(enckey32[i],
+ pce_dev->iobase +
+ CRYPTO_ENCR_KEY0_REG +
+ (i * sizeof(uint32_t)));
+ }
+ } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+ break;
+	} /* end of switch (creq->alg) */
+
+ if (use_pipe_key)
+ encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+ << CRYPTO_USE_PIPE_KEY_ENCR);
+
+ /* write encr seg cfg */
+ encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+ if (use_hw_key == true)
+ encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ else
+ encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ /* write encr seg cfg */
+ QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write encr seg size */
+ if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
+ QCE_WRITE_REG((creq->cryptlen + creq->authsize),
+ pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+ } else {
+ QCE_WRITE_REG(creq->cryptlen,
+ pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+ }
+
+ /* write encr seg start */
+ QCE_WRITE_REG((coffset & 0xffff),
+ pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
+
+ /* write encr counter mask */
+ QCE_WRITE_REG(0xffffffff,
+ pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
+ QCE_WRITE_REG(0xffffffff,
+ pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
+ QCE_WRITE_REG(0xffffffff,
+ pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
+ QCE_WRITE_REG(0xffffffff,
+ pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
+
+ /* write seg size */
+ QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* issue go to crypto */
+ if (use_hw_key == false) {
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ } else {
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+ }
+ /*
+	 * Ensure the previous instruction (setting the GO register)
+	 * has completed before issuing a DMA transfer request.
+ */
+ mb();
+ return 0;
+}
+
+static int _ce_f9_setup_direct(struct qce_device *pce_dev,
+ struct qce_f9_req *req)
+{
+ uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+ uint32_t auth_cfg;
+ int i;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ auth_cfg = pce_dev->reg.auth_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ auth_cfg = pce_dev->reg.auth_cfg_snow3g;
+ break;
+	}
+
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+ /* set big endian configuration */
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /*
+	 * Ensure the previous instruction (setting the CONFIG register)
+	 * has completed before starting to program the other registers.
+	 * This guarantees the configuration is applied with the
+	 * endianness selected in the CONFIG register.
+ */
+ mb();
+
+ /* write enc_seg_cfg */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr_seg_size */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* write key in CRYPTO_AUTH_IV0-3_REG */
+ _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+ for (i = 0; i < key_size_in_word; i++)
+ QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+ /* write last bits in CRYPTO_AUTH_IV4_REG */
+ QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
+ CRYPTO_AUTH_IV4_REG));
+
+ /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+ QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG));
+
+ /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
+ QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT1_REG));
+
+ /* write auth seg cfg */
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
+ QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+ /* write auth seg size */
+ QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth seg start */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+ /* write seg size */
+ QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* set little endian configuration before go */
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* write go */
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ /*
+	 * Ensure the previous instruction (setting the GO register)
+	 * has completed before issuing a DMA transfer request.
+ */
+ mb();
+ return 0;
+}
+
+static int _ce_f8_setup_direct(struct qce_device *pce_dev,
+ struct qce_f8_req *req, bool key_stream_mode,
+ uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
+{
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ encr_cfg = pce_dev->reg.encr_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ encr_cfg = pce_dev->reg.encr_cfg_snow3g;
+ break;
+	}
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+ /* set big endian configuration */
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* write auth seg configuration */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+ /* write auth seg size */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+ /* write key */
+ _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+
+ for (i = 0; i < key_size_in_word; i++)
+ QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
+ (CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
+ /* write encr seg cfg */
+ if (key_stream_mode)
+ encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
+ QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write encr seg start */
+ QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
+ CRYPTO_ENCR_SEG_START_REG);
+ /* write encr seg size */
+ QCE_WRITE_REG(cipher_size, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* write seg size */
+ QCE_WRITE_REG(req->data_len, pce_dev->iobase +
+ CRYPTO_SEG_SIZE_REG);
+
+ /* write cntr0_iv0 for countC */
+ QCE_WRITE_REG(req->count_c, pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ /* write cntr1_iv1 for nPkts, and bearer */
+ if (npkts == 1)
+ npkts = 0;
+ QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+ npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+ pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
+
+	/* set little endian configuration before go */
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* write go */
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ /*
+	 * Ensure the previous instruction (setting the GO register)
+	 * has completed before issuing a DMA transfer request.
+ */
+ mb();
+ return 0;
+}
+
+static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
+{
+ int rc = 0;
+ struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
+ [req_info].ce_sps;
+
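+	/*
+	 * Nothing to unlock if no_get_around is set or command
+	 * descriptors are not supported; otherwise queue the
+	 * unlock_all_pipes command list (SPS_IOVEC_FLAG_UNLOCK)
+	 * on the consumer pipe.
+	 */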
+ if (pce_dev->no_get_around || pce_dev->support_cmd_dscr == false)
+ return rc;
+
+ rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe,
+ GET_PHYS_ADDR(pce_sps_data->
+ cmdlistptr.unlock_all_pipes.cmdlist),
+ 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
+ if (rc) {
+ pr_err("sps_xfr_one() fail rc=%d", rc);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+ bool is_complete);
+static int _aead_complete(struct qce_device *pce_dev, int req_info)
+{
+ struct aead_request *areq;
+ unsigned char mac[SHA256_DIGEST_SIZE];
+ uint32_t ccm_fail_status = 0;
+ uint32_t result_dump_status;
+ int32_t result_status = 0;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = (struct aead_request *) preq_info->areq;
+ if (areq->src != areq->dst) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ }
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, preq_info->assoc_nents,
+ DMA_TO_DEVICE);
+ /* check MAC */
+ memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
+ SHA256_DIGEST_SIZE);
+
+ /* read status before unlock */
+ if (preq_info->dir == QCE_DECRYPT) {
+		if (pce_dev->no_get_around) {
+			if (pce_dev->no_ccm_mac_status_get_around)
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+						result->status);
+			else
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+						result_null->status);
+		} else {
+			ccm_fail_status = readl_relaxed(pce_dev->iobase +
+					CRYPTO_STATUS_REG);
+		}
+ }
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, mac, NULL, -ENXIO);
+ return -ENXIO;
+ }
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ pce_sps_data->result->status = 0;
+
+ if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+ pr_err("aead operation error. Status %x\n", result_dump_status);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status |
+ pce_sps_data->producer_status) {
+ pr_err("aead sps operation error. sps status %x %x\n",
+ pce_sps_data->consumer_status,
+ pce_sps_data->producer_status);
+ result_status = -ENXIO;
+ }
+
+ if (preq_info->mode == QCE_MODE_CCM) {
+ /*
+		 * Not from the result dump; instead, use the status we just
+		 * read from the device to check for MAC_FAILED.
+ */
+ if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
+ (ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
+ result_status = -EBADMSG;
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, mac, NULL, result_status);
+
+ } else {
+ uint32_t ivsize = 0;
+ struct crypto_aead *aead;
+ unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+ aead = crypto_aead_reqtfm(areq);
+ ivsize = crypto_aead_ivsize(aead);
+ if (pce_dev->ce_bam_info.minor_version != 0)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_iv_in,
+ ivsize, DMA_TO_DEVICE);
+ memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
+ sizeof(iv));
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, mac, iv, result_status);
+
+ }
+ return 0;
+}
+
+static int _sha_complete(struct qce_device *pce_dev, int req_info)
+{
+ struct ahash_request *areq;
+ unsigned char digest[SHA256_DIGEST_SIZE];
+ uint32_t bytecount32[2];
+ int32_t result_status = 0;
+ uint32_t result_dump_status;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = (struct ahash_request *) preq_info->areq;
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ DMA_TO_DEVICE);
+ memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
+ SHA256_DIGEST_SIZE);
+ _byte_stream_to_net_words(bytecount32,
+ (unsigned char *)pce_sps_data->result->auth_byte_count,
+ 2 * CRYPTO_REG_SIZE);
+
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, digest, (char *)bytecount32,
+ -ENXIO);
+ return -ENXIO;
+ }
+
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ pce_sps_data->result->status = 0;
+ if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+
+ pr_err("sha operation error. Status %x\n", result_dump_status);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status) {
+ pr_err("sha sps operation error. sps status %x\n",
+ pce_sps_data->consumer_status);
+ result_status = -ENXIO;
+ }
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, digest, (char *)bytecount32, result_status);
+ return 0;
+}
+
+static int _f9_complete(struct qce_device *pce_dev, int req_info)
+{
+ uint32_t mac_i;
+ int32_t result_status = 0;
+ uint32_t result_dump_status;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+ void *areq;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = preq_info->areq;
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+ preq_info->ota_size, DMA_TO_DEVICE);
+ _byte_stream_to_net_words(&mac_i,
+ (char *)(&pce_sps_data->result->auth_iv[0]),
+ CRYPTO_REG_SIZE);
+
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, -ENXIO);
+ return -ENXIO;
+ }
+
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ pce_sps_data->result->status = 0;
+ if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+ pr_err("f9 operation error. Status %x\n", result_dump_status);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status |
+ pce_sps_data->producer_status) {
+ pr_err("f9 sps operation error. sps status %x %x\n",
+ pce_sps_data->consumer_status,
+ pce_sps_data->producer_status);
+ result_status = -ENXIO;
+ }
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, (char *)&mac_i, NULL, result_status);
+
+ return 0;
+}
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
+{
+ struct ablkcipher_request *areq;
+ unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+ int32_t result_status = 0;
+ uint32_t result_dump_status;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = (struct ablkcipher_request *) preq_info->areq;
+ if (areq->src != areq->dst) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+ preq_info->dst_nents, DMA_FROM_DEVICE);
+ }
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, -ENXIO);
+ return -ENXIO;
+ }
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ pce_sps_data->result->status = 0;
+
+ if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+ pr_err("ablk_cipher operation error. Status %x\n",
+ result_dump_status);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status |
+ pce_sps_data->producer_status) {
+ pr_err("ablk_cipher sps operation error. sps status %x %x\n",
+ pce_sps_data->consumer_status,
+ pce_sps_data->producer_status);
+ result_status = -ENXIO;
+ }
+
+ if (preq_info->mode == QCE_MODE_ECB) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
+ result_status);
+ } else {
+ if (pce_dev->ce_bam_info.minor_version == 0) {
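+			/*
+			 * For CE BAM minor version 0 the updated IV is
+			 * reconstructed in software from the request and
+			 * source data, presumably because the result dump
+			 * does not provide it on this hardware revision.
+			 */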
+ if (preq_info->mode == QCE_MODE_CBC) {
+ if (preq_info->dir == QCE_DECRYPT)
+ memcpy(iv, (char *)preq_info->dec_iv,
+ sizeof(iv));
+ else
+ memcpy(iv, (unsigned char *)
+ (sg_virt(areq->src) +
+ areq->src->length - 16),
+ sizeof(iv));
+ }
+ if ((preq_info->mode == QCE_MODE_CTR) ||
+ (preq_info->mode == QCE_MODE_XTS)) {
+ uint32_t num_blk = 0;
+ uint32_t cntr_iv3 = 0;
+ unsigned long long cntr_iv64 = 0;
+ unsigned char *b = (unsigned char *)(&cntr_iv3);
+
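+				/*
+				 * Recompute the final 32-bit counter: take
+				 * the last word of the original IV (big
+				 * endian), add the number of blocks
+				 * processed modulo 2^32, and store it back
+				 * into iv[12..15].
+				 */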
+ memcpy(iv, areq->info, sizeof(iv));
+ if (preq_info->mode != QCE_MODE_XTS)
+ num_blk = areq->nbytes/16;
+ else
+ num_blk = 1;
+ cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) |
+ (((*(iv + 13)) << 16) & 0xff0000) |
+ (((*(iv + 14)) << 8) & 0xff00) |
+ (*(iv + 15) & 0xff);
+ cntr_iv64 =
+ (((unsigned long long)cntr_iv3 &
+ (unsigned long long)0xFFFFFFFFULL) +
+ (unsigned long long)num_blk) %
+ (unsigned long long)(0x100000000ULL);
+
+ cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
+ *(iv + 15) = (char)(*b);
+ *(iv + 14) = (char)(*(b + 1));
+ *(iv + 13) = (char)(*(b + 2));
+ *(iv + 12) = (char)(*(b + 3));
+ }
+ } else {
+ memcpy(iv,
+ (char *)(pce_sps_data->result->encr_cntr_iv),
+ sizeof(iv));
+ }
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, iv, result_status);
+ }
+ return 0;
+}
+
+static int _f8_complete(struct qce_device *pce_dev, int req_info)
+{
+ int32_t result_status = 0;
+ uint32_t result_dump_status;
+ uint32_t result_dump_status2;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+ void *areq;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = preq_info->areq;
+ if (preq_info->phy_ota_dst)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+ preq_info->ota_size, DMA_FROM_DEVICE);
+ if (preq_info->phy_ota_src)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+ preq_info->ota_size, (preq_info->phy_ota_dst) ?
+ DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, -ENXIO);
+ return -ENXIO;
+ }
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
+
+ if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR)))) {
+ pr_err(
+ "f8 oper error. Dump Sta %x Sta2 %x req %d\n",
+ result_dump_status, result_dump_status2, req_info);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status |
+ pce_sps_data->producer_status) {
+ pr_err("f8 sps operation error. sps status %x %x\n",
+ pce_sps_data->consumer_status,
+ pce_sps_data->producer_status);
+ result_status = -ENXIO;
+ }
+ pce_sps_data->result->status = 0;
+ pce_sps_data->result->status2 = 0;
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, result_status);
+ return 0;
+}
+
+static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
+{
+ struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
+ .ce_sps;
+ pce_sps_data->in_transfer.iovec_count = 0;
+ pce_sps_data->out_transfer.iovec_count = 0;
+}
+
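+/* OR the given SPS flag into the most recently queued iovec, if any. */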
+static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
+{
+ struct sps_iovec *iovec;
+
+ if (sps_bam_pipe->iovec_count == 0)
+ return;
+ iovec = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
+ iovec->flags |= flag;
+}
+
+static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
+ struct sps_transfer *sps_bam_pipe)
+{
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+ uint32_t data_cnt;
+
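+	/*
+	 * Split the buffer into iovec entries of at most
+	 * SPS_MAX_PKT_SIZE bytes, failing if the descriptor FIFO
+	 * (QCE_MAX_NUM_DSCR entries) would overflow.
+	 */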
+ while (len > 0) {
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+			pr_err("Num of descriptors %d exceeds max (%d)",
+ sps_bam_pipe->iovec_count,
+ (uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
+ if (len > SPS_MAX_PKT_SIZE)
+ data_cnt = SPS_MAX_PKT_SIZE;
+ else
+ data_cnt = len;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+ iovec->flags = SPS_GET_UPPER_ADDR(paddr);
+ sps_bam_pipe->iovec_count++;
+ iovec++;
+ paddr += data_cnt;
+ len -= data_cnt;
+ }
+ return 0;
+}
+
+static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
+ struct scatterlist *sg_src, uint32_t nbytes,
+ struct sps_transfer *sps_bam_pipe)
+{
+ uint32_t data_cnt, len;
+ dma_addr_t addr;
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+
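+	/*
+	 * Walk the scatterlist, adding one or more iovec entries of at
+	 * most SPS_MAX_PKT_SIZE bytes per segment; for CE BAM minor
+	 * version 0 each segment length is rounded up to the CE burst
+	 * size.
+	 */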
+ while (nbytes > 0) {
+ len = min(nbytes, sg_dma_len(sg_src));
+ nbytes -= len;
+ addr = sg_dma_address(sg_src);
+ if (pce_dev->ce_bam_info.minor_version == 0)
+ len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+ while (len > 0) {
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descriptors %d exceeds max (%d)",
+ sps_bam_pipe->iovec_count,
+ (uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
+ if (len > SPS_MAX_PKT_SIZE) {
+ data_cnt = SPS_MAX_PKT_SIZE;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(addr);
+ iovec->flags = SPS_GET_UPPER_ADDR(addr);
+ } else {
+ data_cnt = len;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(addr);
+ iovec->flags = SPS_GET_UPPER_ADDR(addr);
+ }
+ iovec++;
+ sps_bam_pipe->iovec_count++;
+ addr += data_cnt;
+ len -= data_cnt;
+ }
+ sg_src = scatterwalk_sg_next(sg_src);
+ }
+ return 0;
+}
+
+static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
+ struct qce_cmdlist_info *cmdptr,
+ struct sps_transfer *sps_bam_pipe)
+{
+ dma_addr_t paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+ iovec->size = cmdptr->size;
+ iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+ iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
+ sps_bam_pipe->iovec_count++;
+ if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
+		pr_err("Num of descriptors %d exceeds max (%d)",
+			sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct ce_sps_data *pce_sps_data;
+
+ pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+ pce_sps_data->out_transfer.user =
+ (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+ (unsigned int) req_info));
+ pce_sps_data->in_transfer.user =
+ (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+ (unsigned int) req_info));
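+	/*
+	 * Both transfers carry CRYPTO_REQ_USER_PAT | req_info in the user
+	 * field so that the SPS producer callback can recover the request
+	 * slot from notify->data.transfer.user on completion.
+	 */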
+ _qce_dump_descr_fifos_dbg(pce_dev, req_info);
+
+ spin_lock_irqsave(&pce_dev->sps_lock, flags);
+ if (pce_sps_data->in_transfer.iovec_count) {
+ rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe,
+ &pce_sps_data->in_transfer);
+ if (rc) {
+ pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n",
+ (uintptr_t)pce_dev->ce_bam_info.consumer.pipe,
+ rc);
+ goto ret;
+ }
+ }
+ rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+ &pce_sps_data->out_transfer);
+ if (rc)
+ pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+ (uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc);
+ret:
+ if (rc)
+ _qce_dump_descr_fifos(pce_dev, req_info);
+ spin_unlock_irqrestore(&pce_dev->sps_lock, flags);
+ return rc;
+}
+
+/**
+ * Allocate and connect a CE peripheral's SPS endpoint
+ *
+ * This function allocates an endpoint context and
+ * connects it with a memory endpoint by calling the
+ * appropriate SPS driver APIs.
+ *
+ * It also registers an SPS callback function with
+ * the SPS driver.
+ *
+ * This function should only be called once, typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep - Pointer to sps endpoint data structure
+ * @is_producer - true means producer endpoint
+ *                false means consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
+ struct qce_sps_ep_conn_data *ep,
+ bool is_producer)
+{
+ int rc = 0;
+ struct sps_pipe *sps_pipe_info;
+ struct sps_connect *sps_connect_info = &ep->connect;
+ struct sps_register_event *sps_event = &ep->event;
+
+ /* Allocate endpoint context */
+ sps_pipe_info = sps_alloc_endpoint();
+ if (!sps_pipe_info) {
+ pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
+ is_producer);
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* Now save the sps pipe handle */
+ ep->pipe = sps_pipe_info;
+
+ /* Get default connection configuration for an endpoint */
+ rc = sps_get_config(sps_pipe_info, sps_connect_info);
+ if (rc) {
+ pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
+ (uintptr_t)sps_pipe_info, rc);
+ goto get_config_err;
+ }
+
+ /* Modify the default connection configuration */
+ if (is_producer) {
+ /*
+		 * For a CE producer transfer, the source should be the
+		 * CE peripheral whereas the destination should be
+		 * system memory.
+ */
+ sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
+ sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
+ /* Producer pipe will handle this connection */
+ sps_connect_info->mode = SPS_MODE_SRC;
+ sps_connect_info->options =
+ SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
+ } else {
+		/* For a CE consumer transfer, the source should be
+		 * system memory whereas the destination should be
+		 * the CE peripheral.
+		 */
+ sps_connect_info->source = SPS_DEV_HANDLE_MEM;
+ sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
+ sps_connect_info->mode = SPS_MODE_DEST;
+ sps_connect_info->options =
+ SPS_O_AUTO_ENABLE;
+ }
+
+ /* Producer pipe index */
+ sps_connect_info->src_pipe_index =
+ pce_dev->ce_bam_info.src_pipe_index;
+ /* Consumer pipe index */
+ sps_connect_info->dest_pipe_index =
+ pce_dev->ce_bam_info.dest_pipe_index;
+ /* Set pipe group */
+ sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index;
+ sps_connect_info->event_thresh = 0x10;
+	/*
+	 * Size the descriptor FIFO for the worst case: each BAM descriptor
+	 * (struct sps_iovec) is 8 bytes, each request may queue up to
+	 * QCE_MAX_NUM_DSCR descriptors, and up to MAX_QCE_ALLOC_BAM_REQ
+	 * requests can be outstanding, so allocate space for all of them,
+	 * capped at MAX_SPS_DESC_FIFO_SIZE.
+	 */
+ sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
+ sizeof(struct sps_iovec);
+ if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
+ sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
+ sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
+ sps_connect_info->desc.size,
+ &sps_connect_info->desc.phys_base,
+ GFP_KERNEL);
+ if (sps_connect_info->desc.base == NULL) {
+ rc = -ENOMEM;
+ pr_err("Can not allocate coherent memory for sps data\n");
+ goto get_config_err;
+ }
+
+ memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+
+ /* Establish connection between peripheral and memory endpoint */
+ rc = sps_connect(sps_pipe_info, sps_connect_info);
+ if (rc) {
+ pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+ (uintptr_t)sps_pipe_info, rc);
+ goto sps_connect_err;
+ }
+
+ sps_event->mode = SPS_TRIGGER_CALLBACK;
+ sps_event->xfer_done = NULL;
+ sps_event->user = (void *)pce_dev;
+ if (is_producer) {
+ sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
+ sps_event->callback = _sps_producer_callback;
+ rc = sps_register_event(ep->pipe, sps_event);
+ if (rc) {
+ pr_err("Producer callback registration failed rc=%d\n",
+ rc);
+ goto sps_connect_err;
+ }
+ } else {
+ sps_event->options = SPS_O_EOT;
+ sps_event->callback = NULL;
+ }
+
+	pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = %pa\n",
+		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
+		(uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
+ goto out;
+
+sps_connect_err:
+ dma_free_coherent(pce_dev->pdev,
+ sps_connect_info->desc.size,
+ sps_connect_info->desc.base,
+ sps_connect_info->desc.phys_base);
+get_config_err:
+ sps_free_endpoint(sps_pipe_info);
+out:
+ return rc;
+}
+
+/**
+ * Disconnect and deallocate a CE peripheral's SPS endpoint
+ *
+ * This function disconnects the endpoint and deallocates
+ * the endpoint context.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep - Pointer to sps endpoint data structure
+ *
+ */
+static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
+ struct qce_sps_ep_conn_data *ep)
+{
+ struct sps_pipe *sps_pipe_info = ep->pipe;
+ struct sps_connect *sps_connect_info = &ep->connect;
+
+ sps_disconnect(sps_pipe_info);
+ dma_free_coherent(pce_dev->pdev,
+ sps_connect_info->desc.size,
+ sps_connect_info->desc.base,
+ sps_connect_info->desc.phys_base);
+ sps_free_endpoint(sps_pipe_info);
+}
+
+static void qce_sps_release_bam(struct qce_device *pce_dev)
+{
+ struct bam_registration_info *pbam;
+
+ mutex_lock(&bam_register_lock);
+ pbam = pce_dev->pbam;
+ if (pbam == NULL)
+ goto ret;
+
+ pbam->cnt--;
+ if (pbam->cnt > 0)
+ goto ret;
+
+ if (pce_dev->ce_bam_info.bam_handle) {
+ sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
+
+ pr_debug("deregister bam handle 0x%lx\n",
+ pce_dev->ce_bam_info.bam_handle);
+ pce_dev->ce_bam_info.bam_handle = 0;
+ }
+ iounmap(pbam->bam_iobase);
+ pr_debug("delete bam 0x%x\n", pbam->bam_mem);
+ list_del(&pbam->qlist);
+ kfree(pbam);
+
+ret:
+ pce_dev->pbam = NULL;
+ mutex_unlock(&bam_register_lock);
+}
+
+static int qce_sps_get_bam(struct qce_device *pce_dev)
+{
+ int rc = 0;
+ struct sps_bam_props bam = {0};
+ struct bam_registration_info *pbam = NULL;
+ struct bam_registration_info *p;
+ uint32_t bam_cfg = 0;
+
+
+ mutex_lock(&bam_register_lock);
+
+ list_for_each_entry(p, &qce50_bam_list, qlist) {
+ if (p->bam_mem == pce_dev->bam_mem) {
+ pbam = p; /* found */
+ break;
+ }
+ }
+
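+	/*
+	 * BAM registration is shared: if another qce device already
+	 * registered this physical BAM, take a reference and reuse its
+	 * SPS handle instead of registering it again.
+	 */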
+ if (pbam) {
+ pr_debug("found bam 0x%x\n", pbam->bam_mem);
+ pbam->cnt++;
+ pce_dev->ce_bam_info.bam_handle = pbam->handle;
+ pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+ pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+ pce_dev->pbam = pbam;
+ pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+ goto ret;
+ }
+
+ pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL);
+ if (!pbam) {
+		pr_err("qce50: failed to allocate bam registration info\n");
+
+ rc = -ENOMEM;
+ goto ret;
+ }
+ pbam->cnt = 1;
+ pbam->bam_mem = pce_dev->bam_mem;
+ pbam->bam_iobase = ioremap_nocache(pce_dev->bam_mem,
+ pce_dev->bam_mem_size);
+ if (!pbam->bam_iobase) {
+ kfree(pbam);
+ rc = -ENOMEM;
+ pr_err("Can not map BAM io memory\n");
+ goto ret;
+ }
+ pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+ pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+ pbam->handle = 0;
+ pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
+ bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
+ CRYPTO_BAM_CNFG_BITS_REG);
+ pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
+ true : false;
+ if (pbam->support_cmd_dscr == false) {
+		pr_info("qce50: BAM does not support command descriptors, bam_cfg=0x%x\n",
+				bam_cfg);
+ pce_dev->no_get_around = false;
+ }
+ pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+
+ bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
+ bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
+
+ /*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfers. It is ignored for BAM-to-system mode transfers.
+ */
+ bam.event_threshold = 0x10; /* Pipe event threshold */
+ /*
+	 * This threshold controls when the BAM publishes
+	 * the descriptor size on the sideband interface.
+	 * The SPS HW will only be used when the
+	 * data transfer size is greater than 64 bytes.
+ */
+ bam.summing_threshold = 64;
+	/* The SPS driver will handle the crypto BAM IRQ */
+ bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
+ /*
+ * Set flag to indicate BAM global device control is managed
+ * remotely.
+ */
+ if ((pce_dev->support_cmd_dscr == false) || (pce_dev->is_shared))
+ bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+ else
+ bam.manage = SPS_BAM_MGR_LOCAL;
+
+ bam.ee = pce_dev->ce_bam_info.bam_ee;
+
+ pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
+ pr_debug("bam virtual base=0x%p\n", bam.virt_addr);
+
+ /* Register CE Peripheral BAM device to SPS driver */
+ rc = sps_register_bam_device(&bam, &pbam->handle);
+ if (rc) {
+ pr_err("sps_register_bam_device() failed! err=%d", rc);
+ rc = -EIO;
+ iounmap(pbam->bam_iobase);
+ kfree(pbam);
+ goto ret;
+ }
+
+ pce_dev->pbam = pbam;
+ list_add_tail(&pbam->qlist, &qce50_bam_list);
+ pce_dev->ce_bam_info.bam_handle = pbam->handle;
+
+ret:
+ mutex_unlock(&bam_register_lock);
+
+ return rc;
+}
+/**
+ * Initialize the SPS HW connected with the CE core
+ *
+ * This function registers the BAM HW resources with the
+ * SPS driver and then initializes the two SPS endpoints.
+ *
+ * This function should only be called once, typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+ int rc = 0;
+
+ rc = qce_sps_get_bam(pce_dev);
+ if (rc)
+ return rc;
+ pr_debug("BAM device registered. bam_handle=0x%lx\n",
+ pce_dev->ce_bam_info.bam_handle);
+
+ rc = qce_sps_init_ep_conn(pce_dev,
+ &pce_dev->ce_bam_info.producer, true);
+ if (rc)
+ goto sps_connect_producer_err;
+ rc = qce_sps_init_ep_conn(pce_dev,
+ &pce_dev->ce_bam_info.consumer, false);
+ if (rc)
+ goto sps_connect_consumer_err;
+
+ pr_info(" Qualcomm MSM CE-BAM at 0x%016llx irq %d\n",
+ (unsigned long long)pce_dev->ce_bam_info.bam_mem,
+ (unsigned int)pce_dev->ce_bam_info.bam_irq);
+ return rc;
+
+sps_connect_consumer_err:
+ qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+sps_connect_producer_err:
+ qce_sps_release_bam(pce_dev);
+ return rc;
+}
+
+static inline int qce_alloc_req_info(struct qce_device *pce_dev)
+{
+ int i;
+ int request_index = pce_dev->ce_request_index;
+
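+	/*
+	 * Scan the request slots round-robin, starting just after the last
+	 * slot handed out; xchg() claims a free slot atomically so two
+	 * concurrent allocations cannot pick the same index.
+	 */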
+ for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+ request_index++;
+ if (request_index >= MAX_QCE_BAM_REQ)
+ request_index = 0;
+ if (xchg(&pce_dev->ce_request_info[request_index].
+ in_use, true) == false) {
+ pce_dev->ce_request_index = request_index;
+ return request_index;
+ }
+ }
+ pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
+ pce_dev->dev_no, pce_dev->no_of_queued_req);
+ return -EBUSY;
+}
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+ bool is_complete)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pce_dev->lock, flags);
+ pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
+ if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) {
+ if (req_info < MAX_QCE_BAM_REQ && is_complete)
+ pce_dev->no_of_queued_req--;
+ } else
+		pr_warn("request info %d already free\n", req_info);
+ spin_unlock_irqrestore(&pce_dev->lock, flags);
+}
+
+static void print_notify_debug(struct sps_event_notify *notify)
+{
+ phys_addr_t addr =
+ DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
+ notify->data.transfer.iovec.addr);
+ pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%p\n",
+ notify->event_id, &addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags,
+ notify->data.transfer.user);
+}
+
+static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
+{
+ struct ce_request_info *preq_info;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+
+ switch (preq_info->xfer_type) {
+ case QCE_XFER_CIPHERING:
+ _ablk_cipher_complete(pce_dev, req_info);
+ break;
+ case QCE_XFER_HASHING:
+ _sha_complete(pce_dev, req_info);
+ break;
+ case QCE_XFER_AEAD:
+ _aead_complete(pce_dev, req_info);
+ break;
+ case QCE_XFER_F8:
+ _f8_complete(pce_dev, req_info);
+ break;
+ case QCE_XFER_F9:
+ _f9_complete(pce_dev, req_info);
+ break;
+ default:
+ qce_free_req_info(pce_dev, req_info, true);
+ break;
+ }
+}
+
+static void qce_multireq_timeout(unsigned long data)
+{
+ struct qce_device *pce_dev = (struct qce_device *)data;
+ int ret = 0;
+ unsigned long flags;
+ int last_seq;
+
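+	/*
+	 * Re-arm the timer while either no bunch command has been issued yet
+	 * or the sequence number has advanced since the last timer run.
+	 */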
+ last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
+ if (last_seq == 0 ||
+ last_seq != atomic_read(&pce_dev->last_intr_seq)) {
+ atomic_set(&pce_dev->last_intr_seq, last_seq);
+ mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+ return;
+ }
+	/*
+	 * The last bunch-mode command has timed out: fall back to interrupt
+	 * mode and insert a dummy request so pending completions are
+	 * processed.
+	 */
+ spin_lock_irqsave(&pce_dev->lock, flags);
+ del_timer(&(pce_dev->timer));
+ pce_dev->mode = IN_INTERRUPT_MODE;
+ pce_dev->qce_stats.no_of_timeouts++;
+ pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
+ spin_unlock_irqrestore(&pce_dev->lock, flags);
+
+ ret = qce_dummy_req(pce_dev);
+ if (ret)
+ pr_warn("pcedev %d: Failed to insert dummy req\n",
+ pce_dev->dev_no);
+}
+
+void qce_get_driver_stats(void *handle)
+{
+ unsigned long flags;
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+
+ if (!_qce50_disp_stats)
+ return;
+ spin_lock_irqsave(&pce_dev->lock, flags);
+	pr_info("Engine %d timeouts occurred %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_timeouts);
+	pr_info("Engine %d dummy requests inserted %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_dummy_reqs);
+ if (pce_dev->mode)
+ pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
+ else
+ pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
+	pr_info("Engine %d outstanding requests %d\n", pce_dev->dev_no,
+			pce_dev->no_of_queued_req);
+ spin_unlock_irqrestore(&pce_dev->lock, flags);
+}
+EXPORT_SYMBOL(qce_get_driver_stats);
+
+void qce_clear_driver_stats(void *handle)
+{
+ unsigned long flags;
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+
+ spin_lock_irqsave(&pce_dev->lock, flags);
+ pce_dev->qce_stats.no_of_timeouts = 0;
+ pce_dev->qce_stats.no_of_dummy_reqs = 0;
+ spin_unlock_irqrestore(&pce_dev->lock, flags);
+}
+EXPORT_SYMBOL(qce_clear_driver_stats);
+
+static void _sps_producer_callback(struct sps_event_notify *notify)
+{
+ struct qce_device *pce_dev = (struct qce_device *)
+ ((struct sps_event_notify *)notify)->user;
+ int rc = 0;
+ unsigned int req_info;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+ unsigned long flags;
+
+ print_notify_debug(notify);
+
+ req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
+ if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) {
+ pr_warn("request information %d out of range\n", req_info);
+ return;
+ }
+
+ req_info = req_info & 0x00ff;
+	if (req_info >= MAX_QCE_ALLOC_BAM_REQ) {
+ pr_warn("request information %d out of range\n", req_info);
+ return;
+ }
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+
+ pce_sps_data = &preq_info->ce_sps;
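+	/*
+	 * For ciphering/AEAD the producer pipe completes in two phases: the
+	 * first notification only covers the payload, so queue one more out
+	 * transfer for the result dump (flagged for interrupt) and finish the
+	 * request when that second notification arrives.
+	 */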
+ if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
+ preq_info->xfer_type == QCE_XFER_AEAD) &&
+ pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ pce_sps_data->out_transfer.iovec_count = 0;
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer);
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ spin_lock_irqsave(&pce_dev->sps_lock, flags);
+ rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+ &pce_sps_data->out_transfer);
+ spin_unlock_irqrestore(&pce_dev->sps_lock, flags);
+ if (rc) {
+ pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+ (uintptr_t)pce_dev->ce_bam_info.producer.pipe,
+ rc);
+ }
+ return;
+ }
+
+ _qce_req_complete(pce_dev, req_info);
+}
+
+/**
+ * De-initialize the SPS HW connected with the CE core
+ *
+ * This function deinitializes both SPS endpoints and then
+ * deregisters the BAM resources from the SPS driver.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ */
+static void qce_sps_exit(struct qce_device *pce_dev)
+{
+ qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer);
+ qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+ qce_sps_release_bam(pce_dev);
+}
+
+static void qce_add_cmd_element(struct qce_device *pdev,
+ struct sps_command_element **cmd_ptr, u32 addr,
+ u32 data, struct sps_command_element **populate)
+{
+ (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
+ (*cmd_ptr)->command = 0;
+ (*cmd_ptr)->data = data;
+ (*cmd_ptr)->mask = 0xFFFFFFFF;
+ (*cmd_ptr)->reserved = 0;
+ if (populate != NULL)
+ *populate = *cmd_ptr;
+ (*cmd_ptr)++;
+}
+
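+/*
+ * The _setup_*_cmdlistptrs() helpers below pre-build BAM command lists in
+ * coherent memory: each qce_add_cmd_element() emits one register write
+ * (physical register address plus value), and pcl_info records pointers to
+ * the elements (key, IV, segment sizes, etc.) so per-request setup code can
+ * patch in actual values before the list is queued as a command descriptor.
+ */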
+static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
+ bool key_128)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t key_reg = 0;
+ uint32_t xts_key_reg = 0;
+ uint32_t iv_reg = 0;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to AES cipher operations defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+ switch (mode) {
+ case QCE_MODE_CBC:
+ case QCE_MODE_CTR:
+ if (key_128 == true) {
+ cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
+ if (mode == QCE_MODE_CBC)
+ encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+ else
+ encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
+ iv_reg = 4;
+ key_reg = 4;
+ xts_key_reg = 0;
+ } else {
+ cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
+
+ if (mode == QCE_MODE_CBC)
+ encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+ else
+ encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
+ iv_reg = 4;
+ key_reg = 8;
+ xts_key_reg = 0;
+ }
+ break;
+ case QCE_MODE_ECB:
+ if (key_128 == true) {
+ cmdlistptr->cipher_aes_128_ecb.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
+
+ encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
+ iv_reg = 0;
+ key_reg = 4;
+ xts_key_reg = 0;
+ } else {
+ cmdlistptr->cipher_aes_256_ecb.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
+
+ encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
+ iv_reg = 0;
+ key_reg = 8;
+ xts_key_reg = 0;
+ }
+ break;
+ case QCE_MODE_XTS:
+ if (key_128 == true) {
+ cmdlistptr->cipher_aes_128_xts.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_128_xts);
+
+ encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
+ iv_reg = 4;
+ key_reg = 4;
+ xts_key_reg = 4;
+ } else {
+ cmdlistptr->cipher_aes_256_xts.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_256_xts);
+
+ encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
+ iv_reg = 4;
+ key_reg = 8;
+ xts_key_reg = 8;
+ }
+ break;
+ default:
+ pr_err("Unknown mode of operation %d received, exiting now\n",
+ mode);
+ return -EINVAL;
+ break;
+ }
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+ (uint32_t)0xffffffff, &pcl_info->encr_mask);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+ &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ if (xts_key_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
+ 0, &pcl_info->encr_xts_key);
+ for (i = 1; i < xts_key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_XTS_KEY0_REG +
+ i * sizeof(uint32_t)), 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
+ &pcl_info->encr_xts_du_size);
+ }
+ if (iv_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ for (i = 1; i < iv_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ }
+	/* Add dummy writes to align the size to a burst-size multiple */
+ if (mode == QCE_MODE_XTS) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, &pcl_info->auth_seg_size);
+ } else {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+ 0, &pcl_info->auth_seg_size);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
+ bool mode_cbc)
+{
+
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t key_reg = 0;
+ uint32_t iv_reg = 0;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to cipher operations defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+ switch (alg) {
+ case CIPHER_ALG_DES:
+ if (mode_cbc) {
+ cmdlistptr->cipher_des_cbc.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_des_cbc);
+
+
+ encr_cfg = pdev->reg.encr_cfg_des_cbc;
+ iv_reg = 2;
+ key_reg = 2;
+ } else {
+ cmdlistptr->cipher_des_ecb.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_des_ecb);
+
+ encr_cfg = pdev->reg.encr_cfg_des_ecb;
+ iv_reg = 0;
+ key_reg = 2;
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ if (mode_cbc) {
+ cmdlistptr->cipher_3des_cbc.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_3des_cbc);
+
+ encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+ iv_reg = 2;
+ key_reg = 6;
+ } else {
+ cmdlistptr->cipher_3des_ecb.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_3des_ecb);
+
+ encr_cfg = pdev->reg.encr_cfg_3des_ecb;
+ iv_reg = 0;
+ key_reg = 6;
+ }
+ break;
+ default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+ return -EINVAL;
+ break;
+ }
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+ &pcl_info->auth_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ if (iv_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+ NULL);
+ }
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
+ int cri_index, unsigned char **pvaddr)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+ [cri_index].ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_null);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
+ pdev->ce_bam_info.ce_burst_size, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+ pdev->reg.encr_cfg_aes_ecb_128, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+ return 0;
+}
+
+static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_hash_alg_enum alg,
+ bool key_128)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t key_reg = 0;
+ uint32_t auth_cfg = 0;
+ uint32_t iv_reg = 0;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to authentication operations
+ * defined in ce_cmdlistptrs_ops structure.
+ */
+ switch (alg) {
+ case QCE_HASH_SHA1:
+ cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha1);
+
+ auth_cfg = pdev->reg.auth_cfg_sha1;
+ iv_reg = 5;
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ break;
+ case QCE_HASH_SHA256:
+ cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha256);
+
+ auth_cfg = pdev->reg.auth_cfg_sha256;
+ iv_reg = 8;
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+ /* 1 dummy write */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ break;
+ case QCE_HASH_SHA1_HMAC:
+ cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha1_hmac);
+
+ auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
+ key_reg = 16;
+ iv_reg = 5;
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+ break;
+ case QCE_HASH_SHA256_HMAC:
+ cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha256_hmac);
+
+ auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
+ key_reg = 16;
+ iv_reg = 8;
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+ NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+ /* 1 dummy write */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ break;
+ case QCE_HASH_AES_CMAC:
+ if (key_128 == true) {
+ cmdlistptr->auth_aes_128_cmac.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_aes_128_cmac);
+
+ auth_cfg = pdev->reg.auth_cfg_cmac_128;
+ key_reg = 4;
+ } else {
+ cmdlistptr->auth_aes_256_cmac.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_aes_256_cmac);
+
+ auth_cfg = pdev->reg.auth_cfg_cmac_256;
+ key_reg = 8;
+ }
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+ NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+ /* 1 dummy write */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ break;
+ default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+ return -EINVAL;
+ break;
+ }
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ auth_cfg, &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+
+ if (alg == QCE_HASH_AES_CMAC) {
+ /* reset auth iv, bytecount and key registers */
+ for (i = 0; i < 16; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ for (i = 0; i < 16; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, NULL);
+ } else {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+ &pcl_info->auth_iv);
+ for (i = 1; i < iv_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, &pcl_info->auth_bytecount);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+ if (key_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
+ int cri_index,
+ unsigned char **pvaddr,
+ uint32_t alg,
+ uint32_t mode,
+ uint32_t key_size,
+ bool sha1)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ uint32_t key_reg;
+ uint32_t iv_reg;
+ uint32_t i;
+ uint32_t enciv_in_word;
+ uint32_t encr_cfg;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ switch (alg) {
+
+ case CIPHER_ALG_DES:
+
+ switch (mode) {
+
+ case QCE_MODE_CBC:
+ if (sha1) {
+ cmdlistptr->aead_hmac_sha1_cbc_des.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha1_cbc_des);
+ } else {
+ cmdlistptr->aead_hmac_sha256_cbc_des.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha256_cbc_des);
+ }
+ encr_cfg = pdev->reg.encr_cfg_des_cbc;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ enciv_in_word = 2;
+
+ break;
+
+ case CIPHER_ALG_3DES:
+ switch (mode) {
+
+ case QCE_MODE_CBC:
+ if (sha1) {
+ cmdlistptr->aead_hmac_sha1_cbc_3des.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha1_cbc_3des);
+ } else {
+ cmdlistptr->aead_hmac_sha256_cbc_3des.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha256_cbc_3des);
+ }
+ encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ enciv_in_word = 2;
+
+ break;
+
+ case CIPHER_ALG_AES:
+ switch (mode) {
+
+ case QCE_MODE_CBC:
+ if (key_size == AES128_KEY_SIZE) {
+ if (sha1) {
+ cmdlistptr->
+ aead_hmac_sha1_cbc_aes_128.
+ cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha1_cbc_aes_128);
+ } else {
+ cmdlistptr->
+ aead_hmac_sha256_cbc_aes_128.
+ cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha256_cbc_aes_128);
+ }
+ encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+ } else if (key_size == AES256_KEY_SIZE) {
+ if (sha1) {
+ cmdlistptr->
+ aead_hmac_sha1_cbc_aes_256.
+ cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha1_cbc_aes_256);
+ } else {
+ cmdlistptr->
+ aead_hmac_sha256_cbc_aes_256.
+ cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha256_cbc_aes_256);
+ }
+ encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ enciv_in_word = 4;
+
+ break;
+
+ default:
+ return -EINVAL;
+ };
+
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+
+ key_reg = key_size/sizeof(uint32_t);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+
+ if (mode != QCE_MODE_ECB) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ for (i = 1; i < enciv_in_word; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ };
+
+ if (sha1)
+ iv_reg = 5;
+ else
+ iv_reg = 8;
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+ &pcl_info->auth_iv);
+ for (i = 1; i < iv_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, &pcl_info->auth_bytecount);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+ key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+ &pcl_info->auth_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+
+ if (sha1)
+ qce_add_cmd_element(
+ pdev,
+ &ce_vaddr,
+ CRYPTO_AUTH_SEG_CFG_REG,
+ pdev->reg.auth_cfg_aead_sha1_hmac,
+ &pcl_info->auth_seg_cfg);
+ else
+ qce_add_cmd_element(
+ pdev,
+ &ce_vaddr,
+ CRYPTO_AUTH_SEG_CFG_REG,
+ pdev->reg.auth_cfg_aead_sha256_hmac,
+ &pcl_info->auth_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+ return 0;
+}
+
+static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, bool key_128)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+ [cri_index].ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t auth_cfg = 0;
+ uint32_t key_reg = 0;
+
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to aead operations
+ * defined in ce_cmdlistptrs_ops structure.
+ */
+ if (key_128 == true) {
+ cmdlistptr->aead_aes_128_ccm.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->aead_aes_128_ccm);
+
+ auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
+ encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
+ key_reg = 4;
+ } else {
+
+ cmdlistptr->aead_aes_256_ccm.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->aead_aes_256_ccm);
+
+ auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
+ encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
+
+ key_reg = 8;
+ }
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+ encr_cfg, &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+ (uint32_t)0xffffffff, &pcl_info->encr_mask);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ auth_cfg, &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+ /* reset auth iv, bytecount and key registers */
+ for (i = 0; i < 8; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
+ 0, NULL);
+ for (i = 0; i < 16; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ /* set auth key */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+ &pcl_info->auth_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ /* set NONCE info */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
+ &pcl_info->auth_nonce_info);
+ for (i = 1; i < 4; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_INFO_NONCE0_REG +
+ i * sizeof(uint32_t)), 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ for (i = 1; i < 4; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
+ &pcl_info->encr_ccm_cntr_iv);
+ for (i = 1; i < 4; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t key_reg = 4;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to f8 cipher algorithm defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+
+ switch (alg) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f8_kasumi);
+ encr_cfg = pdev->reg.encr_cfg_kasumi;
+ break;
+
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f8_snow3g);
+ encr_cfg = pdev->reg.encr_cfg_snow3g;
+ break;
+ }
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+ /* set config to big endian */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+ &pcl_info->auth_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+ 0, &pcl_info->auth_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+ NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t auth_cfg = 0;
+ uint32_t iv_reg = 0;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to authentication operations
+ * defined in ce_cmdlistptrs_ops structure.
+ */
+ switch (alg) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f9_kasumi);
+ auth_cfg = pdev->reg.auth_cfg_kasumi;
+ break;
+
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f9_snow3g);
+ auth_cfg = pdev->reg.auth_cfg_snow3g;
+ };
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+ /* set config to big endian */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ iv_reg = 5;
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+ &pcl_info->encr_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ auth_cfg, &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+ &pcl_info->auth_iv);
+ for (i = 1; i < iv_reg; i++) {
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, &pcl_info->auth_bytecount);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
+ int cri_index, unsigned char **pvaddr)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+ cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->unlock_all_pipes);
+
+ /*
+ * Designate chunks of the allocated memory to command list
+ * to unlock pipes.
+ */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ CRYPTO_CONFIG_RESET, NULL);
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr)
+{
+ struct sps_command_element *ce_vaddr =
+ (struct sps_command_element *)(*pvaddr);
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to operations defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+ ce_vaddr =
+ (struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
+ pdev->ce_bam_info.ce_burst_size);
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+ true);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+ true);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+ true);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+ true);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+ false);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+ false);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+ false);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+ false);
+
+ _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+ true);
+ _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+ false);
+ _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+ true);
+ _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+ false);
+
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
+ false);
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
+ false);
+
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
+ false);
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
+ false);
+
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+ true);
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+ false);
+
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+ QCE_MODE_CBC, DES_KEY_SIZE, true);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+ QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+ QCE_MODE_CBC, AES128_KEY_SIZE, true);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+ QCE_MODE_CBC, AES256_KEY_SIZE, true);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+ QCE_MODE_CBC, DES_KEY_SIZE, false);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+ QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+ QCE_MODE_CBC, AES128_KEY_SIZE, false);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+ QCE_MODE_CBC, AES256_KEY_SIZE, false);
+
+ _setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
+
+ _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
+ _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
+ _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+ _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+ _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+ _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+ _setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
+
+ return 0;
+}
+
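+/*
+ * Coherent-memory layout carved out below, per request slot: the input and
+ * output iovec arrays (QCE_MAX_NUM_DSCR entries each), the pre-built command
+ * lists (when command descriptors are supported), a result dump, a null
+ * result dump, and a small ignore buffer; a shared dummy-request buffer
+ * follows the last slot.
+ */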
+static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
+{
+ unsigned char *vaddr;
+ int i;
+
+ vaddr = pce_dev->coh_vmem;
+ vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+ pce_dev->ce_bam_info.ce_burst_size);
+ for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
+ /* Allow for 256 descriptor (cmd and data) entries per pipe */
+ pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
+ (struct sps_iovec *)vaddr;
+ pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
+ (uintptr_t)GET_PHYS_ADDR(vaddr);
+ vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec);
+
+ pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
+ (struct sps_iovec *)vaddr;
+ pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
+ (uintptr_t)GET_PHYS_ADDR(vaddr);
+ vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec);
+
+ if (pce_dev->support_cmd_dscr)
+ qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
+ vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+ pce_dev->ce_bam_info.ce_burst_size);
+ pce_dev->ce_request_info[i].ce_sps.result_dump =
+ (uintptr_t)vaddr;
+ pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
+ GET_PHYS_ADDR((uintptr_t)vaddr);
+ pce_dev->ce_request_info[i].ce_sps.result =
+ (struct ce_result_dump_format *)vaddr;
+ vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+ pce_dev->ce_request_info[i].ce_sps.result_dump_null =
+ (uintptr_t)vaddr;
+ pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
+ GET_PHYS_ADDR((uintptr_t)vaddr);
+ pce_dev->ce_request_info[i].ce_sps.result_null =
+ (struct ce_result_dump_format *)vaddr;
+ vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+ pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
+ (uintptr_t)vaddr;
+ vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
+ }
+ pce_dev->dummyreq.in_buf = (uint8_t *)vaddr;
+ vaddr += DUMMY_REQ_DATA_LEN;
+ if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize)
+		panic("qce50: Not enough coherent memory. Allocated %x, need %lx\n",
+ pce_dev->memsize, (uintptr_t)vaddr -
+ (uintptr_t)pce_dev->coh_vmem);
+ return 0;
+}
+
+static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
+{
+ uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
+ uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index;
+
+ pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
+ BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
+ BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
+ (pipe_pair << CRYPTO_PIPE_SET_SELECT);
+
+ pce_dev->reg.crypto_cfg_le =
+ (pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
+
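+	/*
+	 * Worked example: with a 64-byte CE burst size, beats = (64 >> 3) - 1
+	 * = 7; crypto_cfg_be packs that into CRYPTO_REQ_SIZE, sets the
+	 * DIN/DOUT/OP_DONE interrupt mask bits and the pipe-pair select, and
+	 * crypto_cfg_le is the same word with the little-endian bit OR'ed in.
+	 */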
+ /* Initialize encr_cfg register for AES alg */
+ pce_dev->reg.encr_cfg_aes_cbc_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_cbc_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ctr_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ctr_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_xts_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_xts_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ecb_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ecb_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ccm_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
+ (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+ pce_dev->reg.encr_cfg_aes_ccm_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+ (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+ /* Initialize encr_cfg register for DES alg */
+ pce_dev->reg.encr_cfg_des_ecb =
+ (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_des_cbc =
+ (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_3des_ecb =
+ (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_3des_cbc =
+ (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+ /* Initialize encr_cfg register for kasumi/snow3g alg */
+ pce_dev->reg.encr_cfg_kasumi =
+ (CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
+
+ pce_dev->reg.encr_cfg_snow3g =
+ (CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
+
+ /* Initialize auth_cfg register for CMAC alg */
+ pce_dev->reg.auth_cfg_cmac_128 =
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
+
+ pce_dev->reg.auth_cfg_cmac_256 =
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
+
+ /* Initialize auth_cfg register for HMAC alg */
+ pce_dev->reg.auth_cfg_hmac_sha1 =
+ (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ pce_dev->reg.auth_cfg_hmac_sha256 =
+ (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ /* Initialize auth_cfg register for SHA1/256 alg */
+ pce_dev->reg.auth_cfg_sha1 =
+ (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ pce_dev->reg.auth_cfg_sha256 =
+ (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ /* Initialize auth_cfg register for AEAD alg */
+ pce_dev->reg.auth_cfg_aead_sha1_hmac =
+ (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+ pce_dev->reg.auth_cfg_aead_sha256_hmac =
+ (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+ pce_dev->reg.auth_cfg_aes_ccm_128 =
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
+ ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+ pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+ pce_dev->reg.auth_cfg_aes_ccm_256 =
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
+ ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+ pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+ /* Initialize auth_cfg register for kasumi/snow3g */
+ pce_dev->reg.auth_cfg_kasumi =
+ (CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
+ BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+ pce_dev->reg.auth_cfg_snow3g =
+ (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
+ BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+ return 0;
+}
+
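+/*
+ * Work-around related to CCM MAC status reporting: on engines that take the
+ * work-around (no_get_around set, no_ccm_mac_status_get_around clear), CCM
+ * decryption appends a cipher-null command and one burst of ignore-buffer
+ * data to the input transfer.  _qce_ccm_get_around_output() below adds the
+ * matching ignore-buffer entry and a second (null) result dump on the
+ * output transfer.
+ */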
+static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
+ struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+ struct qce_cmdlist_info *cmdlistinfo;
+ struct ce_sps_data *pce_sps_data;
+
+ pce_sps_data = &preq_info->ce_sps;
+ if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+ !(pce_dev->no_ccm_mac_status_get_around)) {
+ cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
+ _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ pce_dev->ce_bam_info.ce_burst_size,
+ &pce_sps_data->in_transfer);
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+ }
+}
+
+static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
+ struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+ struct ce_sps_data *pce_sps_data;
+
+ pce_sps_data = &preq_info->ce_sps;
+
+ if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+ !(pce_dev->no_ccm_mac_status_get_around)) {
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ pce_dev->ce_bam_info.ce_burst_size,
+ &pce_sps_data->out_transfer);
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
+ CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
+ }
+}
+
+/* QCE_DUMMY_REQ */
+static void qce_dummy_complete(void *cookie, unsigned char *digest,
+ unsigned char *authdata, int ret)
+{
+ if (!cookie)
+ pr_err("invalid cookie\n");
+}
+
+static int qce_dummy_req(struct qce_device *pce_dev)
+{
+ int ret = 0;
+
+	if (xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use, true))
+		return -EBUSY;
+ ret = qce_process_sha_req(pce_dev, NULL);
+ pce_dev->qce_stats.no_of_dummy_reqs++;
+ return ret;
+}
+
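+/*
+ * Decide whether this request should raise a completion interrupt.  When the
+ * engine handles only one outstanding request (!no_get_around), every request
+ * sets SPS_IOVEC_FLAG_INT.  Otherwise interrupts are coalesced: in interrupt
+ * mode every request still interrupts, but once MAX_BUNCH_MODE_REQ requests
+ * are queued the driver switches to bunch mode, where only every
+ * SET_INTR_AT_REQ-th request sets the flag and a timer covers the rest.
+ */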
+static int select_mode(struct qce_device *pce_dev,
+ struct ce_request_info *preq_info)
+{
+ unsigned long flags;
+ struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
+
+ if (!pce_dev->no_get_around) {
+ _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+ return 0;
+ }
+
+ spin_lock_irqsave(&pce_dev->lock, flags);
+ pce_dev->no_of_queued_req++;
+ if (pce_dev->mode == IN_INTERRUPT_MODE) {
+ if (pce_dev->no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
+ pce_dev->mode = IN_BUNCH_MODE;
+ pr_debug("pcedev %d mode switch to BUNCH\n",
+ pce_dev->dev_no);
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_dev->intr_cadence = 0;
+ atomic_set(&pce_dev->bunch_cmd_seq, 1);
+ atomic_set(&pce_dev->last_intr_seq, 1);
+ mod_timer(&(pce_dev->timer),
+ (jiffies + DELAY_IN_JIFFIES));
+ } else {
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ }
+ } else {
+ pce_dev->intr_cadence++;
+ if (pce_dev->intr_cadence >= SET_INTR_AT_REQ) {
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_dev->intr_cadence = 0;
+ atomic_set(&pce_dev->bunch_cmd_seq, 0);
+ atomic_set(&pce_dev->last_intr_seq, 0);
+ } else {
+ atomic_inc(&pce_dev->bunch_cmd_seq);
+ }
+ }
+ spin_unlock_irqrestore(&pce_dev->lock, flags);
+
+ return 0;
+}
+
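+/*
+ * AES-CCM AEAD request.  Allocates a request slot, DMA-maps the associated
+ * data and the source/destination scatterlists, programs the engine via a
+ * command list (or direct register writes), and queues the BAM descriptor
+ * chain.  On newer hardware (minor_version != 0) the pass-through associated
+ * data and the MAC padding are steered into the ignore buffer.
+ */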
+static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ struct aead_request *areq = (struct aead_request *) q_req->areq;
+ uint32_t authsize = q_req->authsize;
+ uint32_t totallen_in, out_len;
+ uint32_t hw_pad_out = 0;
+ int rc = 0;
+ int ce_burst_size;
+ struct qce_cmdlist_info *cmdlistinfo = NULL;
+ int req_info = -1;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
+ totallen_in = areq->cryptlen + areq->assoclen;
+ if (q_req->dir == QCE_ENCRYPT) {
+ q_req->cryptlen = areq->cryptlen;
+ out_len = areq->cryptlen + authsize;
+ hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
+ } else {
+ q_req->cryptlen = areq->cryptlen - authsize;
+ out_len = q_req->cryptlen;
+ hw_pad_out = authsize;
+ }
+
+	/*
+	 * Crypto 5.0 has a burst-size alignment requirement for data
+	 * descriptors, so the agent above (qcrypto) prepares the source
+	 * scatterlist with memory starting with the associated data,
+	 * followed by the data stream to be ciphered.
+	 * The destination scatterlist points to the same data area as
+	 * the source.
+	 */
+ if (pce_dev->ce_bam_info.minor_version == 0)
+ preq_info->src_nents = count_sg(areq->src, totallen_in);
+ else
+ preq_info->src_nents = count_sg(areq->src, areq->cryptlen);
+
+ preq_info->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+
+ /* associated data input */
+ qce_dma_map_sg(pce_dev->pdev, areq->assoc, preq_info->assoc_nents,
+ DMA_TO_DEVICE);
+ /* cipher input */
+ qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ /* cipher + mac output for encryption */
+ if (areq->src != areq->dst) {
+ if (pce_dev->ce_bam_info.minor_version == 0)
+			/*
+			 * The destination scatter list points to the same
+			 * data area as the source.
+			 * Note: the associated data is passed through at the
+			 * beginning of the destination area.
+			 */
+ preq_info->dst_nents = count_sg(areq->dst,
+ out_len + areq->assoclen);
+ else
+ preq_info->dst_nents = count_sg(areq->dst, out_len);
+
+ qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ } else {
+ preq_info->dst_nents = preq_info->src_nents;
+ }
+
+ if (pce_dev->support_cmd_dscr) {
+ cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
+ q_req);
+ if (cmdlistinfo == NULL) {
+ pr_err("Unsupported cipher algorithm %d, mode %d\n",
+ q_req->alg, q_req->mode);
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ /* set up crypto device */
+ rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
+ areq->assoclen, cmdlistinfo);
+ } else {
+ /* set up crypto device */
+ rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
+ areq->assoclen);
+ }
+
+ if (rc < 0)
+ goto bad;
+
+ preq_info->mode = q_req->mode;
+
+ /* setup for callback, and issue command to bam */
+ preq_info->areq = q_req->areq;
+ preq_info->qce_cb = q_req->qce_cb;
+ preq_info->dir = q_req->dir;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_AEAD;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+
+ if (pce_dev->ce_bam_info.minor_version == 0) {
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
+ &pce_sps_data->in_transfer))
+ goto bad;
+
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ /*
+ * The destination data should be big enough to
+ * include CCM padding.
+ */
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
+ areq->assoclen + hw_pad_out,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ if (totallen_in > SPS_MAX_PKT_SIZE) {
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+ } else {
+ if (_qce_sps_add_data(GET_PHYS_ADDR(
+ pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ }
+ } else {
+ if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
+ &pce_sps_data->in_transfer))
+ goto bad;
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
+ &pce_sps_data->in_transfer))
+ goto bad;
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
+
+ if (pce_dev->no_get_around)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+		/* Pass through to ignore associated data */
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ areq->assoclen,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ /* Pass through to ignore hw_pad (padding of the MAC data) */
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ hw_pad_out, &pce_sps_data->out_transfer))
+ goto bad;
+ if (pce_dev->no_get_around ||
+ totallen_in <= SPS_MAX_PKT_SIZE) {
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ } else {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+ }
+
+ _qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
+
+ select_mode(pce_dev, preq_info);
+ }
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ if (rc)
+ goto bad;
+ return 0;
+
+bad:
+ if (preq_info->assoc_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->assoc,
+ preq_info->assoc_nents, DMA_TO_DEVICE);
+ }
+ if (preq_info->src_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ }
+ if (areq->src != areq->dst) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ }
+ qce_free_req_info(pce_dev, req_info, false);
+
+ return rc;
+}
+
+static int _qce_suspend(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+ struct sps_pipe *sps_pipe_info;
+
+ if (handle == NULL)
+ return -ENODEV;
+
+ qce_enable_clk(pce_dev);
+
+ sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+ sps_disconnect(sps_pipe_info);
+
+ sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+ sps_disconnect(sps_pipe_info);
+
+ qce_disable_clk(pce_dev);
+ return 0;
+}
+
+static int _qce_resume(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+ struct sps_pipe *sps_pipe_info;
+ struct sps_connect *sps_connect_info;
+ int rc;
+
+ if (handle == NULL)
+ return -ENODEV;
+
+ qce_enable_clk(pce_dev);
+
+ sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+ sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
+ memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+ rc = sps_connect(sps_pipe_info, sps_connect_info);
+ if (rc) {
+ pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+ (uintptr_t)sps_pipe_info, rc);
+ return rc;
+ }
+ sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+ sps_connect_info = &pce_dev->ce_bam_info.producer.connect;
+ memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+ rc = sps_connect(sps_pipe_info, sps_connect_info);
+ if (rc)
+ pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+ (uintptr_t)sps_pipe_info, rc);
+
+ rc = sps_register_event(sps_pipe_info,
+ &pce_dev->ce_bam_info.producer.event);
+ if (rc)
+ pr_err("Producer callback registration failed rc = %d\n", rc);
+
+ qce_disable_clk(pce_dev);
+ return rc;
+}
+
+struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
+EXPORT_SYMBOL(qce_pm_table);
+
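+/*
+ * Generic AEAD entry point.  CCM is handed off to _qce_aead_ccm_req(); the
+ * remaining cipher + HMAC modes are handled here, with the IV DMA-mapped
+ * separately (on minor_version != 0 hardware) and the associated data + IV
+ * passed through to the ignore buffer on the output pipe.
+ */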
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+ struct aead_request *areq;
+ uint32_t authsize;
+ struct crypto_aead *aead;
+ uint32_t ivsize;
+ uint32_t totallen;
+ int rc = 0;
+ struct qce_cmdlist_info *cmdlistinfo = NULL;
+ int req_info = -1;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+
+ if (q_req->mode == QCE_MODE_CCM)
+ return _qce_aead_ccm_req(handle, q_req);
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ areq = (struct aead_request *) q_req->areq;
+ aead = crypto_aead_reqtfm(areq);
+ ivsize = crypto_aead_ivsize(aead);
+ q_req->ivsize = ivsize;
+ authsize = q_req->authsize;
+ if (q_req->dir == QCE_ENCRYPT)
+ q_req->cryptlen = areq->cryptlen;
+ else
+ q_req->cryptlen = areq->cryptlen - authsize;
+
+ totallen = q_req->cryptlen + areq->assoclen + ivsize;
+
+ if (pce_dev->support_cmd_dscr) {
+ cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
+ req_info, q_req);
+ if (cmdlistinfo == NULL) {
+ pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
+ q_req->alg, q_req->mode, q_req->encklen,
+ q_req->authsize);
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ /* set up crypto device */
+ rc = _ce_setup_aead(pce_dev, q_req, totallen,
+ areq->assoclen + ivsize, cmdlistinfo);
+ if (rc < 0) {
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ }
+
+ preq_info->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+
+	/*
+	 * Crypto 5.0 has a burst-size alignment requirement for data
+	 * descriptors, so the agent above (qcrypto) prepares the source
+	 * scatterlist with memory starting with the associated data,
+	 * followed by the IV and the data stream to be ciphered.
+	 */
+ if (pce_dev->ce_bam_info.minor_version == 0)
+ preq_info->src_nents = count_sg(areq->src, totallen);
+ else
+ preq_info->src_nents = count_sg(areq->src, q_req->cryptlen);
+
+ preq_info->phy_iv_in = 0;
+
+ /* associated data input */
+ qce_dma_map_sg(pce_dev->pdev, areq->assoc, preq_info->assoc_nents,
+ DMA_TO_DEVICE);
+ /* cipher input */
+ qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ /* cipher output for encryption */
+ if (areq->src != areq->dst) {
+ if (pce_dev->ce_bam_info.minor_version == 0)
+			/*
+			 * The destination scatter list points to the same
+			 * data area as the source.
+			 */
+ preq_info->dst_nents = count_sg(areq->dst, totallen);
+ else
+ preq_info->dst_nents = count_sg(areq->dst,
+ q_req->cryptlen);
+
+ qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ }
+
+
+ /* cipher iv for input */
+ if (pce_dev->ce_bam_info.minor_version != 0)
+ preq_info->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
+ ivsize, DMA_TO_DEVICE);
+
+ /* setup for callback, and issue command to bam */
+ preq_info->areq = q_req->areq;
+ preq_info->qce_cb = q_req->qce_cb;
+ preq_info->dir = q_req->dir;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_AEAD;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr) {
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ } else {
+ rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
+ areq->assoclen + ivsize);
+ if (rc)
+ goto bad;
+ }
+
+ preq_info->mode = q_req->mode;
+
+ if (pce_dev->ce_bam_info.minor_version == 0) {
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+ &pce_sps_data->in_transfer))
+ goto bad;
+
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ if (totallen > SPS_MAX_PKT_SIZE) {
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+ } else {
+ if (_qce_sps_add_data(GET_PHYS_ADDR(
+ pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ }
+ } else {
+ if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
+ &pce_sps_data->in_transfer))
+ goto bad;
+ if (_qce_sps_add_data((uint32_t)preq_info->phy_iv_in, ivsize,
+ &pce_sps_data->in_transfer))
+ goto bad;
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, q_req->cryptlen,
+ &pce_sps_data->in_transfer))
+ goto bad;
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ if (pce_dev->no_get_around)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+		/* Pass through to ignore the associated data + IV */
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ (ivsize + areq->assoclen),
+ &pce_sps_data->out_transfer))
+ goto bad;
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, q_req->cryptlen,
+ &pce_sps_data->out_transfer))
+ goto bad;
+
+ if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ } else {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+ }
+ select_mode(pce_dev, preq_info);
+ }
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ if (rc)
+ goto bad;
+ return 0;
+
+bad:
+ if (preq_info->assoc_nents)
+ qce_dma_unmap_sg(pce_dev->pdev, areq->assoc,
+ preq_info->assoc_nents, DMA_TO_DEVICE);
+ if (preq_info->src_nents)
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ if (areq->src != areq->dst)
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ if (preq_info->phy_iv_in)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_iv_in,
+ ivsize, DMA_TO_DEVICE);
+ qce_free_req_info(pce_dev, req_info, false);
+
+ return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
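+/*
+ * Symmetric (ablkcipher) request: map the source/destination scatterlists,
+ * keep a copy of the trailing cipher block for CBC decryption on
+ * minor-version-0 hardware, program the engine and queue the BAM transfer.
+ */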
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+ int rc = 0;
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ struct ablkcipher_request *areq = (struct ablkcipher_request *)
+ c_req->areq;
+ struct qce_cmdlist_info *cmdlistinfo = NULL;
+ int req_info = -1;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ preq_info->src_nents = 0;
+ preq_info->dst_nents = 0;
+
+ /* cipher input */
+ preq_info->src_nents = count_sg(areq->src, areq->nbytes);
+
+ qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ /* cipher output */
+ if (areq->src != areq->dst) {
+ preq_info->dst_nents = count_sg(areq->dst, areq->nbytes);
+ qce_dma_map_sg(pce_dev->pdev, areq->dst,
+ preq_info->dst_nents, DMA_FROM_DEVICE);
+ } else {
+ preq_info->dst_nents = preq_info->src_nents;
+ }
+ preq_info->dir = c_req->dir;
+ if ((pce_dev->ce_bam_info.minor_version == 0) &&
+ (preq_info->dir == QCE_DECRYPT) &&
+ (c_req->mode == QCE_MODE_CBC)) {
+ memcpy(preq_info->dec_iv, (unsigned char *)
+ sg_virt(areq->src) + areq->src->length - 16,
+ NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
+ }
+
+ /* set up crypto device */
+ if (pce_dev->support_cmd_dscr) {
+ cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
+ req_info, c_req);
+ if (cmdlistinfo == NULL) {
+ pr_err("Unsupported cipher algorithm %d, mode %d\n",
+ c_req->alg, c_req->mode);
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0,
+ cmdlistinfo);
+ } else {
+ rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->nbytes, 0);
+ }
+ if (rc < 0)
+ goto bad;
+
+ preq_info->mode = c_req->mode;
+
+ /* setup for client callback, and issue command to BAM */
+ preq_info->areq = areq;
+ preq_info->qce_cb = c_req->qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_CIPHERING;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+ &pce_sps_data->in_transfer))
+ goto bad;
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ if (pce_dev->no_get_around)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ if (pce_dev->no_get_around || areq->nbytes <= SPS_MAX_PKT_SIZE) {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ } else {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+ }
+
+ select_mode(pce_dev, preq_info);
+
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ if (rc)
+ goto bad;
+
+ return 0;
+bad:
+ if (areq->src != areq->dst) {
+ if (preq_info->dst_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+ preq_info->dst_nents, DMA_FROM_DEVICE);
+ }
+ }
+ if (preq_info->src_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src,
+ preq_info->src_nents,
+ (areq->src == areq->dst) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ }
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
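+/*
+ * Hash/HMAC request.  A NULL sreq selects the internal dummy request slot
+ * (see qce_dummy_req()).  Zero-length requests get one burst from the ignore
+ * buffer as input, since zero-length transfers do not work with BAM-NDP.
+ */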
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ int rc;
+
+ struct ahash_request *areq;
+ struct qce_cmdlist_info *cmdlistinfo = NULL;
+ int req_info = -1;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+ bool is_dummy = false;
+
+ if (!sreq) {
+ sreq = &(pce_dev->dummyreq.sreq);
+ req_info = DUMMY_REQ_INDEX;
+ is_dummy = true;
+ } else {
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ }
+
+ areq = (struct ahash_request *)sreq->areq;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ preq_info->src_nents = count_sg(sreq->src, sreq->size);
+ qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
+ DMA_TO_DEVICE);
+
+ if (pce_dev->support_cmd_dscr) {
+ cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
+ if (cmdlistinfo == NULL) {
+ pr_err("Unsupported hash algorithm %d\n", sreq->alg);
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
+ } else {
+ rc = _ce_setup_hash_direct(pce_dev, sreq);
+ }
+ if (rc < 0)
+ goto bad;
+
+ preq_info->areq = areq;
+ preq_info->qce_cb = sreq->qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_HASHING;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+ &pce_sps_data->in_transfer))
+ goto bad;
+
+	/* Always ensure there is input data; zero-length transfers (ZLT) do not work with BAM-NDP */
+ if (!areq->nbytes)
+ _qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ pce_dev->ce_bam_info.ce_burst_size,
+ &pce_sps_data->in_transfer);
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+ if (pce_dev->no_get_around)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+
+ if (is_dummy)
+ _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+ else
+ select_mode(pce_dev, preq_info);
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ if (preq_info->src_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
+ preq_info->src_nents, DMA_TO_DEVICE);
+ }
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
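+/*
+ * OTA F8 (ciphering) request for a single packet using Kasumi or SNOW 3G.
+ * Key stream mode (NULL data_in) is rejected, the buffers are mapped with
+ * dma_map_single() rather than scatterlists, and completion is reported
+ * through the producer callback as a QCE_XFER_F8 transfer.
+ */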
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ bool key_stream_mode;
+ dma_addr_t dst;
+ int rc;
+ struct qce_cmdlist_info *cmdlistinfo;
+ int req_info = -1;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+ break;
+ default:
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+	}
+
+	key_stream_mode = (req->data_in == NULL);
+
+	/* key stream mode (NULL data_in) is not supported */
+	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+
+ /* F8 cipher input */
+ preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+ req->data_in, req->data_len,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+ /* F8 cipher output */
+ if (req->data_in != req->data_out) {
+ dst = dma_map_single(pce_dev->pdev, req->data_out,
+ req->data_len, DMA_FROM_DEVICE);
+ preq_info->phy_ota_dst = dst;
+ } else {
+ /* in place ciphering */
+ dst = preq_info->phy_ota_src;
+ preq_info->phy_ota_dst = 0;
+ }
+ preq_info->ota_size = req->data_len;
+
+
+ /* set up crypto device */
+ if (pce_dev->support_cmd_dscr)
+ rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
+ req->data_len, cmdlistinfo);
+ else
+ rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
+ req->data_len);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to sps */
+ preq_info->areq = cookie;
+ preq_info->qce_cb = qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_F8;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
+ &pce_sps_data->in_transfer);
+
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data((uint32_t)dst, req->data_len,
+ &pce_sps_data->out_transfer);
+
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer);
+
+ select_mode(pce_dev, preq_info);
+
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ if (preq_info->phy_ota_dst != 0)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+ req->data_len, DMA_FROM_DEVICE);
+ if (preq_info->phy_ota_src != 0)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+ req->data_len,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
+
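+/*
+ * OTA F8 multi-packet request: ciphers num_pkt back-to-back packets of
+ * data_len bytes each in a single BAM transfer, with the per-packet cipher
+ * window given by cipher_start/cipher_size.
+ */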
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+ void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ uint16_t num_pkt = mreq->num_pkt;
+ uint16_t cipher_start = mreq->cipher_start;
+ uint16_t cipher_size = mreq->cipher_size;
+ struct qce_f8_req *req = &mreq->qce_f8_req;
+ uint32_t total;
+ dma_addr_t dst = 0;
+ int rc = 0;
+ struct qce_cmdlist_info *cmdlistinfo;
+ int req_info = -1;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+ break;
+ default:
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+	}
+
+ total = num_pkt * req->data_len;
+
+ /* F8 cipher input */
+ preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+ req->data_in, total,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+ /* F8 cipher output */
+ if (req->data_in != req->data_out) {
+ dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+ DMA_FROM_DEVICE);
+ preq_info->phy_ota_dst = dst;
+ } else {
+ /* in place ciphering */
+ dst = preq_info->phy_ota_src;
+ preq_info->phy_ota_dst = 0;
+ }
+
+ preq_info->ota_size = total;
+
+ /* set up crypto device */
+ if (pce_dev->support_cmd_dscr)
+ rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+ cipher_size, cmdlistinfo);
+ else
+ rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
+ cipher_start, cipher_size);
+ if (rc)
+ goto bad;
+
+ /* setup for callback, and issue command to sps */
+ preq_info->areq = cookie;
+ preq_info->qce_cb = qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_F8;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
+ &pce_sps_data->in_transfer);
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data((uint32_t)dst, total,
+ &pce_sps_data->out_transfer);
+
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer);
+
+ select_mode(pce_dev, preq_info);
+
+ rc = _qce_sps_transfer(pce_dev, req_info);
+
+ if (rc == 0)
+ return 0;
+bad:
+ if (preq_info->phy_ota_dst)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
+
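+/*
+ * OTA F9 (integrity) request: runs Kasumi or SNOW 3G authentication over
+ * the message.  Only the input message is DMA-mapped; the computed
+ * authentication data is collected from the result dump by the completion
+ * path.
+ */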
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+ qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ int rc;
+ struct qce_cmdlist_info *cmdlistinfo;
+ int req_info = -1;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
+ break;
+ default:
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+	}
+
+ preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+ req->msize, DMA_TO_DEVICE);
+
+ preq_info->ota_size = req->msize;
+
+ if (pce_dev->support_cmd_dscr)
+ rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
+ else
+ rc = _ce_f9_setup_direct(pce_dev, req);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to sps */
+ preq_info->areq = cookie;
+ preq_info->qce_cb = qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_F9;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
+ &pce_sps_data->in_transfer);
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer);
+
+ select_mode(pce_dev, preq_info);
+
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+ req->msize, DMA_TO_DEVICE);
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
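+/*
+ * Parse the device tree node: optional feature flags (shared CE, HW key,
+ * software-fallback hints, clock management), the required BAM pipe pair,
+ * CE device and HW instance properties, the optional BAM EE and operating
+ * frequency, and the crypto/crypto-BAM register regions plus the BAM IRQ.
+ */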
+static int __qce_get_device_tree_data(struct platform_device *pdev,
+ struct qce_device *pce_dev)
+{
+ struct resource *resource;
+ int rc = 0;
+
+ pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,ce-hw-shared");
+ pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,ce-hw-key");
+
+ pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aes-cbc-ecb-ctr-algo");
+ pce_dev->use_sw_aead_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aead-algo");
+ pce_dev->use_sw_aes_xts_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aes-xts-algo");
+ pce_dev->use_sw_ahash_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-ahash-algo");
+ pce_dev->use_sw_hmac_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-hmac-algo");
+ pce_dev->use_sw_aes_ccm_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aes-ccm-algo");
+ pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
+ (&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
+ pce_dev->support_only_core_src_clk = of_property_read_bool(
+ (&pdev->dev)->of_node, "qcom,support-core-clk-only");
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,bam-pipe-pair",
+ &pce_dev->ce_bam_info.pipe_pair_index)) {
+		pr_err("Failed to get BAM pipe pair information.\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,ce-device",
+ &pce_dev->ce_bam_info.ce_device)) {
+		pr_err("Failed to get CE device information.\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,ce-hw-instance",
+ &pce_dev->ce_bam_info.ce_hw_instance)) {
+		pr_err("Failed to get CE HW instance information.\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,bam-ee",
+ &pce_dev->ce_bam_info.bam_ee)) {
+ pr_info("BAM Apps EE is not defined, setting to default 1\n");
+ pce_dev->ce_bam_info.bam_ee = 1;
+ }
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,ce-opp-freq",
+ &pce_dev->ce_opp_freq_hz)) {
+		pr_info("CE operating frequency is not defined, setting to default 100 MHz\n");
+ pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
+ }
+ pce_dev->ce_bam_info.dest_pipe_index =
+ 2 * pce_dev->ce_bam_info.pipe_pair_index;
+ pce_dev->ce_bam_info.src_pipe_index =
+ pce_dev->ce_bam_info.dest_pipe_index + 1;
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "crypto-base");
+ if (resource) {
+ pce_dev->phy_iobase = resource->start;
+ pce_dev->iobase = ioremap_nocache(resource->start,
+ resource_size(resource));
+ if (!pce_dev->iobase) {
+			pr_err("Cannot map CRYPTO I/O memory\n");
+ return -ENOMEM;
+ }
+ } else {
+ pr_err("CRYPTO HW mem unavailable.\n");
+ return -ENODEV;
+ }
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "crypto-bam-base");
+ if (resource) {
+ pce_dev->bam_mem = resource->start;
+ pce_dev->bam_mem_size = resource_size(resource);
+ } else {
+ pr_err("CRYPTO BAM mem unavailable.\n");
+ rc = -ENODEV;
+ goto err_getting_bam_info;
+ }
+
+ resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (resource) {
+ pce_dev->ce_bam_info.bam_irq = resource->start;
+ } else {
+		pr_err("CRYPTO BAM IRQ unavailable.\n");
+		rc = -ENODEV;
+		goto err_dev;
+ }
+ return rc;
+err_dev:
+ if (pce_dev->ce_bam_info.bam_iobase)
+ iounmap(pce_dev->ce_bam_info.bam_iobase);
+
+err_getting_bam_info:
+ if (pce_dev->iobase)
+ iounmap(pce_dev->iobase);
+
+ return rc;
+}
+
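+/*
+ * Acquire the CE clocks.  The core source clock is optional unless
+ * qcom,support-core-clk-only is set; in that case it is the only clock used,
+ * otherwise the core, interface and bus clocks are also required.
+ * __qce_deinit_clk() releases whatever was obtained here.
+ */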
+static int __qce_init_clk(struct qce_device *pce_dev)
+{
+ int rc = 0;
+
+ pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
+ if (!IS_ERR(pce_dev->ce_core_src_clk)) {
+ rc = clk_set_rate(pce_dev->ce_core_src_clk,
+ pce_dev->ce_opp_freq_hz);
+ if (rc) {
+			pr_err("Unable to set the core src clk @%u MHz.\n",
+ pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
+ goto exit_put_core_src_clk;
+ }
+ } else {
+ if (pce_dev->support_only_core_src_clk) {
+ rc = PTR_ERR(pce_dev->ce_core_src_clk);
+ pce_dev->ce_core_src_clk = NULL;
+ pr_err("Unable to get CE core src clk\n");
+ return rc;
+ } else {
+ pr_warn("Unable to get CE core src clk, set to NULL\n");
+ pce_dev->ce_core_src_clk = NULL;
+ }
+ }
+
+ if (pce_dev->support_only_core_src_clk) {
+ pce_dev->ce_core_clk = NULL;
+ pce_dev->ce_clk = NULL;
+ pce_dev->ce_bus_clk = NULL;
+ } else {
+ pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
+ if (IS_ERR(pce_dev->ce_core_clk)) {
+ rc = PTR_ERR(pce_dev->ce_core_clk);
+ pr_err("Unable to get CE core clk\n");
+ goto exit_put_core_src_clk;
+ }
+ pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
+ if (IS_ERR(pce_dev->ce_clk)) {
+ rc = PTR_ERR(pce_dev->ce_clk);
+ pr_err("Unable to get CE interface clk\n");
+ goto exit_put_core_clk;
+ }
+
+ pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
+ if (IS_ERR(pce_dev->ce_bus_clk)) {
+ rc = PTR_ERR(pce_dev->ce_bus_clk);
+ pr_err("Unable to get CE BUS interface clk\n");
+ goto exit_put_iface_clk;
+ }
+ }
+ return rc;
+
+exit_put_iface_clk:
+ if (pce_dev->ce_clk)
+ clk_put(pce_dev->ce_clk);
+exit_put_core_clk:
+ if (pce_dev->ce_core_clk)
+ clk_put(pce_dev->ce_core_clk);
+exit_put_core_src_clk:
+ if (pce_dev->ce_core_src_clk)
+ clk_put(pce_dev->ce_core_src_clk);
+ pr_err("Unable to init CE clks, rc = %d\n", rc);
+ return rc;
+}
+
+static void __qce_deinit_clk(struct qce_device *pce_dev)
+{
+ if (pce_dev->ce_bus_clk)
+ clk_put(pce_dev->ce_bus_clk);
+ if (pce_dev->ce_clk)
+ clk_put(pce_dev->ce_clk);
+ if (pce_dev->ce_core_clk)
+ clk_put(pce_dev->ce_core_clk);
+ if (pce_dev->ce_core_src_clk)
+ clk_put(pce_dev->ce_core_src_clk);
+}
+
+int qce_enable_clk(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+ int rc = 0;
+
+ if (pce_dev->ce_core_src_clk) {
+ rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE core src clk\n");
+ return rc;
+ }
+ }
+
+ if (pce_dev->support_only_core_src_clk)
+ return rc;
+
+ if (pce_dev->ce_core_clk) {
+ rc = clk_prepare_enable(pce_dev->ce_core_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE core clk\n");
+ goto exit_disable_core_src_clk;
+ }
+ }
+
+ if (pce_dev->ce_clk) {
+ rc = clk_prepare_enable(pce_dev->ce_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE iface clk\n");
+ goto exit_disable_core_clk;
+ }
+ }
+
+ if (pce_dev->ce_bus_clk) {
+ rc = clk_prepare_enable(pce_dev->ce_bus_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE BUS clk\n");
+ goto exit_disable_ce_clk;
+ }
+ }
+ return rc;
+
+exit_disable_ce_clk:
+ if (pce_dev->ce_clk)
+ clk_disable_unprepare(pce_dev->ce_clk);
+exit_disable_core_clk:
+ if (pce_dev->ce_core_clk)
+ clk_disable_unprepare(pce_dev->ce_core_clk);
+exit_disable_core_src_clk:
+ if (pce_dev->ce_core_src_clk)
+ clk_disable_unprepare(pce_dev->ce_core_src_clk);
+ return rc;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ int rc = 0;
+
+ if (pce_dev->ce_bus_clk)
+ clk_disable_unprepare(pce_dev->ce_bus_clk);
+ if (pce_dev->ce_clk)
+ clk_disable_unprepare(pce_dev->ce_clk);
+ if (pce_dev->ce_core_clk)
+ clk_disable_unprepare(pce_dev->ce_core_clk);
+ if (pce_dev->ce_core_src_clk)
+ clk_disable_unprepare(pce_dev->ce_core_src_clk);
+
+ return rc;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
+/* dummy req setup */
+static int setup_dummy_req(struct qce_device *pce_dev)
+{
+ char *input =
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
+ int len = DUMMY_REQ_DATA_LEN;
+
+ memcpy(pce_dev->dummyreq.in_buf, input, len);
+ sg_set_buf(&pce_dev->dummyreq.sg, pce_dev->dummyreq.in_buf, len);
+ sg_mark_end(&pce_dev->dummyreq.sg);
+
+ pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
+ pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
+ pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
+ pce_dev->dummyreq.sreq.auth_data[0] = 0;
+ pce_dev->dummyreq.sreq.auth_data[1] = 0;
+ pce_dev->dummyreq.sreq.auth_data[2] = 0;
+ pce_dev->dummyreq.sreq.auth_data[3] = 0;
+ pce_dev->dummyreq.sreq.first_blk = 1;
+ pce_dev->dummyreq.sreq.last_blk = 1;
+ pce_dev->dummyreq.sreq.size = len;
+ pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
+ pce_dev->dummyreq.sreq.flags = 0;
+ pce_dev->dummyreq.sreq.authkey = NULL;
+
+ pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
+ pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;
+
+ return 0;
+}
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+ struct qce_device *pce_dev;
+ int i;
+ static int pcedev_no = 1;
+
+ pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+ if (!pce_dev) {
+ *rc = -ENOMEM;
+		pr_err("Cannot allocate memory: %d\n", *rc);
+ return NULL;
+ }
+ pce_dev->pdev = &pdev->dev;
+
+ mutex_lock(&qce_iomap_mutex);
+ if (pdev->dev.of_node) {
+ *rc = __qce_get_device_tree_data(pdev, pce_dev);
+ if (*rc)
+ goto err_pce_dev;
+ } else {
+ *rc = -EINVAL;
+ pr_err("Device Node not found.\n");
+ goto err_pce_dev;
+ }
+
+ for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
+ pce_dev->ce_request_info[i].in_use = false;
+ pce_dev->ce_request_index = 0;
+
+ pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
+ pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+ pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+ if (pce_dev->coh_vmem == NULL) {
+ *rc = -ENOMEM;
+		pr_err("Cannot allocate coherent memory for SPS data\n");
+ goto err_iobase;
+ }
+
+ *rc = __qce_init_clk(pce_dev);
+ if (*rc)
+ goto err_mem;
+ *rc = qce_enable_clk(pce_dev);
+ if (*rc)
+ goto err_enable_clk;
+
+ if (_probe_ce_engine(pce_dev)) {
+ *rc = -ENXIO;
+ goto err;
+ }
+ *rc = 0;
+
+ qce_init_ce_cfg_val(pce_dev);
+ *rc = qce_sps_init(pce_dev);
+ if (*rc)
+ goto err;
+ qce_setup_ce_sps_data(pce_dev);
+ qce_disable_clk(pce_dev);
+ setup_dummy_req(pce_dev);
+ spin_lock_init(&pce_dev->lock);
+ spin_lock_init(&pce_dev->sps_lock);
+ pce_dev->no_of_queued_req = 0;
+ pce_dev->mode = IN_INTERRUPT_MODE;
+ init_timer(&(pce_dev->timer));
+ pce_dev->timer.function = qce_multireq_timeout;
+ pce_dev->timer.data = (unsigned long)pce_dev;
+ pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
+ pce_dev->intr_cadence = 0;
+ pce_dev->dev_no = pcedev_no;
+ pcedev_no++;
+ mutex_unlock(&qce_iomap_mutex);
+ return pce_dev;
+err:
+ qce_disable_clk(pce_dev);
+
+err_enable_clk:
+ __qce_deinit_clk(pce_dev);
+
+err_mem:
+ if (pce_dev->coh_vmem)
+ dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+ pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_iobase:
+ if (pce_dev->iobase)
+ iounmap(pce_dev->iobase);
+err_pce_dev:
+ mutex_unlock(&qce_iomap_mutex);
+ kfree(pce_dev);
+ return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* crypto engine close function. */
+int qce_close(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+
+ if (handle == NULL)
+ return -ENODEV;
+
+ mutex_lock(&qce_iomap_mutex);
+ qce_enable_clk(pce_dev);
+ qce_sps_exit(pce_dev);
+
+ if (pce_dev->iobase)
+ iounmap(pce_dev->iobase);
+ if (pce_dev->coh_vmem)
+ dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+ pce_dev->coh_vmem, pce_dev->coh_pmem);
+
+ qce_disable_clk(pce_dev);
+ __qce_deinit_clk(pce_dev);
+ mutex_unlock(&qce_iomap_mutex);
+ kfree(handle);
+
+ return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
+ 1 << CRYPTO_ENCR_KASUMI_SEL |\
+ 1 << CRYPTO_AUTH_SNOW3G_SEL |\
+ 1 << CRYPTO_AUTH_KASUMI_SEL)
+
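+/*
+ * Report the capabilities of this CE instance: supported algorithms, OTA
+ * (F8/F9) support derived from the engines_avail mask, alignment
+ * requirements, software-fallback hints from device tree, and the maximum
+ * number of outstanding requests.
+ */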
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+
+ if (ce_support == NULL)
+ return -EINVAL;
+
+ ce_support->sha1_hmac_20 = false;
+ ce_support->sha1_hmac = false;
+ ce_support->sha256_hmac = false;
+ ce_support->sha_hmac = true;
+ ce_support->cmac = true;
+ ce_support->aes_key_192 = false;
+ ce_support->aes_xts = true;
+ if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
+ ce_support->ota = true;
+ else
+ ce_support->ota = false;
+ ce_support->bam = true;
+ ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
+ ce_support->hw_key = pce_dev->support_hw_key;
+ ce_support->aes_ccm = true;
+ ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
+ if (pce_dev->ce_bam_info.minor_version)
+ ce_support->aligned_only = false;
+ else
+ ce_support->aligned_only = true;
+
+ ce_support->use_sw_aes_cbc_ecb_ctr_algo =
+ pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
+ ce_support->use_sw_aead_algo =
+ pce_dev->use_sw_aead_algo;
+ ce_support->use_sw_aes_xts_algo =
+ pce_dev->use_sw_aes_xts_algo;
+ ce_support->use_sw_ahash_algo =
+ pce_dev->use_sw_ahash_algo;
+ ce_support->use_sw_hmac_algo =
+ pce_dev->use_sw_hmac_algo;
+ ce_support->use_sw_aes_ccm_algo =
+ pce_dev->use_sw_aes_ccm_algo;
+ ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
+ ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
+ if (pce_dev->no_get_around)
+ ce_support->max_request = MAX_QCE_BAM_REQ;
+ else
+ ce_support->max_request = 1;
+ return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+void qce_dump_req(void *handle)
+{
+ int i;
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+
+ for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+ pr_info("qce_dump_req %d %d\n", i,
+ pce_dev->ce_request_info[i].in_use);
+ if (pce_dev->ce_request_info[i].in_use == true)
+ _qce_dump_descr_fifos(pce_dev, i);
+ }
+}
+EXPORT_SYMBOL(qce_dump_req);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
new file mode 100644
index 000000000000..19f6edf21878
--- /dev/null
+++ b/drivers/crypto/msm/qce50.h
@@ -0,0 +1,240 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
+#define _DRIVERS_CRYPTO_MSM_QCE50_H_
+
+#include <linux/msm-sps.h>
+
+/* MAX Data xfer block size between BAM and CE */
+#define MAX_CE_BAM_BURST_SIZE 0x40
+#define QCEBAM_BURST_SIZE MAX_CE_BAM_BURST_SIZE
+
+#define GET_VIRT_ADDR(x) \
+ ((uintptr_t)pce_dev->coh_vmem + \
+ ((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x) \
+ (phys_addr_t)(((uintptr_t)pce_dev->coh_pmem + \
+ ((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem)))
+
+#define CRYPTO_REG_SIZE 4
+#define NUM_OF_CRYPTO_AUTH_IV_REG 16
+#define NUM_OF_CRYPTO_CNTR_IV_REG 4
+#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
+#define CRYPTO_TOTAL_REGISTERS_DUMPED 26
+#define CRYPTO_RESULT_DUMP_SIZE \
+ ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
+ QCEBAM_BURST_SIZE)
+
+/* QCE max number of descriptors in a descriptor list */
+#define QCE_MAX_NUM_DESC 128
+#define SPS_MAX_PKT_SIZE (32 * 1024 - 64)
+
+/* State of consumer/producer Pipe */
+enum qce_pipe_st_enum {
+ QCE_PIPE_STATE_IDLE = 0,
+ QCE_PIPE_STATE_IN_PROG = 1,
+ QCE_PIPE_STATE_COMP = 2,
+ QCE_PIPE_STATE_LAST
+};
+
+enum qce_xfer_type_enum {
+ QCE_XFER_HASHING,
+ QCE_XFER_CIPHERING,
+ QCE_XFER_AEAD,
+ QCE_XFER_F8,
+ QCE_XFER_F9,
+ QCE_XFER_TYPE_LAST
+};
+
+struct qce_sps_ep_conn_data {
+ struct sps_pipe *pipe;
+ struct sps_connect connect;
+ struct sps_register_event event;
+};
+
+/* CE result dump format */
+struct ce_result_dump_format {
+ uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
+ uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
+ uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
+ uint32_t status;
+ uint32_t status2;
+};
+
+struct qce_cmdlist_info {
+
+ unsigned long cmdlist;
+ struct sps_command_element *crypto_cfg;
+ struct sps_command_element *encr_seg_cfg;
+ struct sps_command_element *encr_seg_size;
+ struct sps_command_element *encr_seg_start;
+ struct sps_command_element *encr_key;
+ struct sps_command_element *encr_xts_key;
+ struct sps_command_element *encr_cntr_iv;
+ struct sps_command_element *encr_ccm_cntr_iv;
+ struct sps_command_element *encr_mask;
+ struct sps_command_element *encr_xts_du_size;
+
+ struct sps_command_element *auth_seg_cfg;
+ struct sps_command_element *auth_seg_size;
+ struct sps_command_element *auth_seg_start;
+ struct sps_command_element *auth_key;
+ struct sps_command_element *auth_iv;
+ struct sps_command_element *auth_nonce_info;
+ struct sps_command_element *auth_bytecount;
+ struct sps_command_element *seg_size;
+ struct sps_command_element *go_proc;
+ ptrdiff_t size;
+};
+
+struct qce_cmdlistptr_ops {
+ struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
+ struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
+ struct qce_cmdlist_info cipher_aes_128_ecb;
+ struct qce_cmdlist_info cipher_aes_256_ecb;
+ struct qce_cmdlist_info cipher_aes_128_xts;
+ struct qce_cmdlist_info cipher_aes_256_xts;
+ struct qce_cmdlist_info cipher_des_cbc;
+ struct qce_cmdlist_info cipher_des_ecb;
+ struct qce_cmdlist_info cipher_3des_cbc;
+ struct qce_cmdlist_info cipher_3des_ecb;
+ struct qce_cmdlist_info auth_sha1;
+ struct qce_cmdlist_info auth_sha256;
+ struct qce_cmdlist_info auth_sha1_hmac;
+ struct qce_cmdlist_info auth_sha256_hmac;
+ struct qce_cmdlist_info auth_aes_128_cmac;
+ struct qce_cmdlist_info auth_aes_256_cmac;
+ struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128;
+ struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256;
+ struct qce_cmdlist_info aead_hmac_sha1_cbc_des;
+ struct qce_cmdlist_info aead_hmac_sha1_cbc_3des;
+ struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128;
+ struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256;
+ struct qce_cmdlist_info aead_hmac_sha256_cbc_des;
+ struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
+ struct qce_cmdlist_info aead_aes_128_ccm;
+ struct qce_cmdlist_info aead_aes_256_ccm;
+ struct qce_cmdlist_info cipher_null;
+ struct qce_cmdlist_info f8_kasumi;
+ struct qce_cmdlist_info f8_snow3g;
+ struct qce_cmdlist_info f9_kasumi;
+ struct qce_cmdlist_info f9_snow3g;
+ struct qce_cmdlist_info unlock_all_pipes;
+};
+
+struct qce_ce_cfg_reg_setting {
+ uint32_t crypto_cfg_be;
+ uint32_t crypto_cfg_le;
+
+ uint32_t encr_cfg_aes_cbc_128;
+ uint32_t encr_cfg_aes_cbc_256;
+
+ uint32_t encr_cfg_aes_ecb_128;
+ uint32_t encr_cfg_aes_ecb_256;
+
+ uint32_t encr_cfg_aes_xts_128;
+ uint32_t encr_cfg_aes_xts_256;
+
+ uint32_t encr_cfg_aes_ctr_128;
+ uint32_t encr_cfg_aes_ctr_256;
+
+ uint32_t encr_cfg_aes_ccm_128;
+ uint32_t encr_cfg_aes_ccm_256;
+
+ uint32_t encr_cfg_des_cbc;
+ uint32_t encr_cfg_des_ecb;
+
+ uint32_t encr_cfg_3des_cbc;
+ uint32_t encr_cfg_3des_ecb;
+ uint32_t encr_cfg_kasumi;
+ uint32_t encr_cfg_snow3g;
+
+ uint32_t auth_cfg_cmac_128;
+ uint32_t auth_cfg_cmac_256;
+
+ uint32_t auth_cfg_sha1;
+ uint32_t auth_cfg_sha256;
+
+ uint32_t auth_cfg_hmac_sha1;
+ uint32_t auth_cfg_hmac_sha256;
+
+ uint32_t auth_cfg_aes_ccm_128;
+ uint32_t auth_cfg_aes_ccm_256;
+ uint32_t auth_cfg_aead_sha1_hmac;
+ uint32_t auth_cfg_aead_sha256_hmac;
+ uint32_t auth_cfg_kasumi;
+ uint32_t auth_cfg_snow3g;
+};
+
+struct ce_bam_info {
+ uint32_t bam_irq;
+ uint32_t bam_mem;
+ void __iomem *bam_iobase;
+ uint32_t ce_device;
+ uint32_t ce_hw_instance;
+ uint32_t bam_ee;
+ unsigned int pipe_pair_index;
+ unsigned int src_pipe_index;
+ unsigned int dest_pipe_index;
+ unsigned long bam_handle;
+ int ce_burst_size;
+ uint32_t minor_version;
+ struct qce_sps_ep_conn_data producer;
+ struct qce_sps_ep_conn_data consumer;
+};
+
+/* SPS data structure with buffers, commandlists & command pointer lists */
+struct ce_sps_data {
+ enum qce_pipe_st_enum producer_state; /* Producer pipe state */
+ int consumer_status; /* consumer pipe status */
+ int producer_status; /* producer pipe status */
+ struct sps_transfer in_transfer;
+ struct sps_transfer out_transfer;
+ struct qce_cmdlistptr_ops cmdlistptr;
+ uint32_t result_dump; /* result dump virtual address */
+ uint32_t result_dump_null;
+ uint32_t result_dump_phy; /* result dump physical address (32 bits) */
+ uint32_t result_dump_null_phy;
+
+ uint32_t ignore_buffer; /* ignore buffer virtual address */
+ struct ce_result_dump_format *result; /* pointer to result dump */
+ struct ce_result_dump_format *result_null;
+};
+
+struct ce_request_info {
+ bool in_use;
+ bool in_prog;
+ enum qce_xfer_type_enum xfer_type;
+ struct ce_sps_data ce_sps;
+ qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
+ void *user;
+ void *areq;
+ int assoc_nents;
+ int src_nents;
+ int dst_nents;
+ dma_addr_t phy_iv_in;
+ unsigned char dec_iv[16];
+ int dir;
+ enum qce_cipher_mode_enum mode;
+ dma_addr_t phy_ota_src;
+ dma_addr_t phy_ota_dst;
+ unsigned int ota_size;
+};
+
+struct qce_driver_stats {
+ int no_of_timeouts;
+ int no_of_dummy_reqs;
+ int current_mode;
+ int outstanding_reqs;
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H */
diff --git a/drivers/crypto/msm/qce_ota.h b/drivers/crypto/msm/qce_ota.h
new file mode 100644
index 000000000000..8778ca5ca200
--- /dev/null
+++ b/drivers/crypto/msm/qce_ota.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Qualcomm Crypto Engine driver OTA API */
+
+#ifndef __CRYPTO_MSM_QCE_OTA_H
+#define __CRYPTO_MSM_QCE_OTA_H
+
+#include <linux/platform_device.h>
+#include <linux/qcota.h>
+
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f9_req(void *handle, struct qce_f9_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb);
+
+#endif /* __CRYPTO_MSM_QCE_OTA_H */
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
new file mode 100644
index 000000000000..615539834948
--- /dev/null
+++ b/drivers/crypto/msm/qcedev.c
@@ -0,0 +1,2125 @@
+/* Qualcomm CE device driver.
+ *
+ * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mman.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/qcedev.h>
+
+#include <crypto/hash.h>
+#include "qcedevi.h"
+#include "qce.h"
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#include <linux/compat_qcedev.h>
+#endif
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
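+/* standard initialization vector for SHA-1, source: FIPS 180-2 */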
+static uint8_t _std_init_vector_sha1_uint8[] = {
+ 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+ 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+ 0xC3, 0xD2, 0xE1, 0xF0
+};
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+ 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+ 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+ 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+ 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+static DEFINE_MUTEX(send_cmd_lock);
+static DEFINE_MUTEX(qcedev_sent_bw_req);
+
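+/*
+ * Vote for high or low bus bandwidth. A reference count keeps the CE
+ * clocks and the high-bandwidth bus vote enabled while at least one
+ * open handle still needs them.
+ */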
+static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
+ bool high_bw_req)
+{
+ int ret = 0;
+
+ mutex_lock(&qcedev_sent_bw_req);
+ if (high_bw_req) {
+ if (podev->high_bw_req_count == 0) {
+ ret = qce_enable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable enable clk\n", __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ ret = qce_disable_clk(podev->qce);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ }
+ podev->high_bw_req_count++;
+ } else {
+ if (podev->high_bw_req_count == 1) {
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
+ __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ ret = qce_disable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ }
+ podev->high_bw_req_count--;
+ }
+ mutex_unlock(&qcedev_sent_bw_req);
+}
+
+#define QCEDEV_MAGIC 0x56434544 /* "qced" */
+
+static int qcedev_open(struct inode *inode, struct file *file);
+static int qcedev_release(struct inode *inode, struct file *file);
+static int start_cipher_req(struct qcedev_control *podev);
+static int start_sha_req(struct qcedev_control *podev);
+
+static const struct file_operations qcedev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = qcedev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_qcedev_ioctl,
+#endif
+ .open = qcedev_open,
+ .release = qcedev_release,
+};
+
+static struct qcedev_control qce_dev[] = {
+ {
+ .miscdevice = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "qce",
+ .fops = &qcedev_fops,
+ },
+ .magic = QCEDEV_MAGIC,
+ },
+};
+
+#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
+#define DEBUG_MAX_FNAME 16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcedev_stat {
+ u32 qcedev_dec_success;
+ u32 qcedev_dec_fail;
+ u32 qcedev_enc_success;
+ u32 qcedev_enc_fail;
+ u32 qcedev_sha_success;
+ u32 qcedev_sha_fail;
+};
+
+static struct qcedev_stat _qcedev_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcedev;
+
+static struct qcedev_control *qcedev_minor_to_control(unsigned n)
+{
+ int i;
+
+ for (i = 0; i < MAX_QCE_DEVICE; i++) {
+ if (qce_dev[i].miscdevice.minor == n)
+ return &qce_dev[i];
+ }
+ return NULL;
+}
+
+static int qcedev_open(struct inode *inode, struct file *file)
+{
+ struct qcedev_handle *handle;
+ struct qcedev_control *podev;
+
+ podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
+ if (podev == NULL) {
+ pr_err("%s: no such device %d\n", __func__,
+ MINOR(inode->i_rdev));
+ return -ENOENT;
+ }
+
+ handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
+ if (handle == NULL) {
+ pr_err("Failed to allocate memory %ld\n",
+ PTR_ERR(handle));
+ return -ENOMEM;
+ }
+
+ handle->cntl = podev;
+ file->private_data = handle;
+ if (podev->platform_support.bus_scale_table != NULL)
+ qcedev_ce_high_bw_req(podev, true);
+ return 0;
+}
+
+static int qcedev_release(struct inode *inode, struct file *file)
+{
+ struct qcedev_control *podev;
+ struct qcedev_handle *handle;
+
+ handle = file->private_data;
+ podev = handle->cntl;
+ if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
+ pr_err("%s: invalid handle %p\n",
+ __func__, podev);
+ }
+ kzfree(handle);
+ file->private_data = NULL;
+ if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
+ qcedev_ce_high_bw_req(podev, false);
+ return 0;
+}
+
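+/*
+ * Done tasklet: complete the request that just finished and, if more
+ * requests are queued on ready_commands, start the next one. If starting
+ * a queued request fails, complete it immediately and try the next.
+ */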
+static void req_done(unsigned long data)
+{
+ struct qcedev_control *podev = (struct qcedev_control *)data;
+ struct qcedev_async_req *areq;
+ unsigned long flags = 0;
+ struct qcedev_async_req *new_req = NULL;
+ int ret = 0;
+
+ spin_lock_irqsave(&podev->lock, flags);
+ areq = podev->active_command;
+ podev->active_command = NULL;
+
+again:
+ if (!list_empty(&podev->ready_commands)) {
+ new_req = container_of(podev->ready_commands.next,
+ struct qcedev_async_req, list);
+ list_del(&new_req->list);
+ podev->active_command = new_req;
+ new_req->err = 0;
+ if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+ ret = start_cipher_req(podev);
+ else
+ ret = start_sha_req(podev);
+ }
+
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ if (areq)
+ complete(&areq->complete);
+
+ if (new_req && ret) {
+ complete(&new_req->complete);
+ spin_lock_irqsave(&podev->lock, flags);
+ podev->active_command = NULL;
+ areq = NULL;
+ ret = 0;
+ new_req = NULL;
+ goto again;
+ }
+
+ return;
+}
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+ unsigned char *authdata, int ret)
+{
+ struct qcedev_sha_req *areq;
+ struct qcedev_control *pdev;
+ struct qcedev_handle *handle;
+
+ uint32_t *auth32 = (uint32_t *)authdata;
+
+ areq = (struct qcedev_sha_req *) cookie;
+ handle = (struct qcedev_handle *) areq->cookie;
+ pdev = handle->cntl;
+
+ if (digest)
+ memcpy(&handle->sha_ctxt.digest[0], digest, 32);
+
+ if (authdata) {
+ handle->sha_ctxt.auth_data[0] = auth32[0];
+ handle->sha_ctxt.auth_data[1] = auth32[1];
+ handle->sha_ctxt.auth_data[2] = auth32[2];
+ handle->sha_ctxt.auth_data[3] = auth32[3];
+ }
+
+ tasklet_schedule(&pdev->done_tasklet);
+};
+
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+ unsigned char *iv, int ret)
+{
+ struct qcedev_cipher_req *areq;
+ struct qcedev_handle *handle;
+ struct qcedev_control *podev;
+ struct qcedev_async_req *qcedev_areq;
+
+ areq = (struct qcedev_cipher_req *) cookie;
+ handle = (struct qcedev_handle *) areq->cookie;
+ podev = handle->cntl;
+ qcedev_areq = podev->active_command;
+
+ if (iv)
+ memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
+ qcedev_areq->cipher_op_req.ivlen);
+ tasklet_schedule(&podev->done_tasklet);
+};
+
+static int start_cipher_req(struct qcedev_control *podev)
+{
+ struct qcedev_async_req *qcedev_areq;
+ struct qce_req creq;
+ int ret = 0;
+
+ /* start the command on the podev->active_command */
+ qcedev_areq = podev->active_command;
+ qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
+ if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
+ pr_err("%s: Use of PMEM is not supported\n", __func__);
+ goto unsupported;
+ }
+ creq.pmem = NULL;
+ switch (qcedev_areq->cipher_op_req.alg) {
+ case QCEDEV_ALG_DES:
+ creq.alg = CIPHER_ALG_DES;
+ break;
+ case QCEDEV_ALG_3DES:
+ creq.alg = CIPHER_ALG_3DES;
+ break;
+ case QCEDEV_ALG_AES:
+ creq.alg = CIPHER_ALG_AES;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ switch (qcedev_areq->cipher_op_req.mode) {
+ case QCEDEV_AES_MODE_CBC:
+ case QCEDEV_DES_MODE_CBC:
+ creq.mode = QCE_MODE_CBC;
+ break;
+ case QCEDEV_AES_MODE_ECB:
+ case QCEDEV_DES_MODE_ECB:
+ creq.mode = QCE_MODE_ECB;
+ break;
+ case QCEDEV_AES_MODE_CTR:
+ creq.mode = QCE_MODE_CTR;
+ break;
+ case QCEDEV_AES_MODE_XTS:
+ creq.mode = QCE_MODE_XTS;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if ((creq.alg == CIPHER_ALG_AES) &&
+ (creq.mode == QCE_MODE_CTR)) {
+ creq.dir = QCE_ENCRYPT;
+ } else {
+ if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op)
+ creq.dir = QCE_ENCRYPT;
+ else
+ creq.dir = QCE_DECRYPT;
+ }
+
+ creq.iv = &qcedev_areq->cipher_op_req.iv[0];
+ creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
+
+ creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
+ creq.encklen = qcedev_areq->cipher_op_req.encklen;
+
+ creq.cryptlen = qcedev_areq->cipher_op_req.data_len;
+
+ if (qcedev_areq->cipher_op_req.encklen == 0) {
+ if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
+ || (qcedev_areq->cipher_op_req.op ==
+ QCEDEV_OPER_DEC_NO_KEY))
+ creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+ else {
+ int i;
+
+ for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+ if (qcedev_areq->cipher_op_req.enckey[i] != 0)
+ break;
+ }
+
+ if ((podev->platform_support.hw_key_support == 1) &&
+ (i == QCEDEV_MAX_KEY_SIZE))
+ creq.op = QCE_REQ_ABLK_CIPHER;
+ else {
+ ret = -EINVAL;
+ goto unsupported;
+ }
+ }
+ } else {
+ creq.op = QCE_REQ_ABLK_CIPHER;
+ }
+
+ creq.qce_cb = qcedev_cipher_req_cb;
+ creq.areq = (void *)&qcedev_areq->cipher_req;
+ creq.flags = 0;
+ ret = qce_ablk_cipher_req(podev->qce, &creq);
+unsupported:
+ if (ret)
+ qcedev_areq->err = -ENXIO;
+ else
+ qcedev_areq->err = 0;
+ return ret;
+};
+
+static int start_sha_req(struct qcedev_control *podev)
+{
+ struct qcedev_async_req *qcedev_areq;
+ struct qce_sha_req sreq;
+ int ret = 0;
+ struct qcedev_handle *handle;
+
+ /* start the command on the podev->active_command */
+ qcedev_areq = podev->active_command;
+ handle = qcedev_areq->handle;
+
+ switch (qcedev_areq->sha_op_req.alg) {
+ case QCEDEV_ALG_SHA1:
+ sreq.alg = QCE_HASH_SHA1;
+ break;
+ case QCEDEV_ALG_SHA256:
+ sreq.alg = QCE_HASH_SHA256;
+ break;
+ case QCEDEV_ALG_SHA1_HMAC:
+ if (podev->ce_support.sha_hmac) {
+ sreq.alg = QCE_HASH_SHA1_HMAC;
+ sreq.authkey = &handle->sha_ctxt.authkey[0];
+ sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+
+ } else {
+ sreq.alg = QCE_HASH_SHA1;
+ sreq.authkey = NULL;
+ }
+ break;
+ case QCEDEV_ALG_SHA256_HMAC:
+ if (podev->ce_support.sha_hmac) {
+ sreq.alg = QCE_HASH_SHA256_HMAC;
+ sreq.authkey = &handle->sha_ctxt.authkey[0];
+ sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+ } else {
+ sreq.alg = QCE_HASH_SHA256;
+ sreq.authkey = NULL;
+ }
+ break;
+ case QCEDEV_ALG_AES_CMAC:
+ sreq.alg = QCE_HASH_AES_CMAC;
+ sreq.authkey = &handle->sha_ctxt.authkey[0];
+ sreq.authklen = qcedev_areq->sha_op_req.authklen;
+ break;
+ default:
+ pr_err("Algorithm %d not supported, exiting\n",
+ qcedev_areq->sha_op_req.alg);
+ return -EINVAL;
+ break;
+ };
+
+ qcedev_areq->sha_req.cookie = handle;
+
+ sreq.qce_cb = qcedev_sha_req_cb;
+ if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
+ sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
+ sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
+ sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
+ sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
+ sreq.digest = &handle->sha_ctxt.digest[0];
+ sreq.first_blk = handle->sha_ctxt.first_blk;
+ sreq.last_blk = handle->sha_ctxt.last_blk;
+ }
+ sreq.size = qcedev_areq->sha_req.sreq.nbytes;
+ sreq.src = qcedev_areq->sha_req.sreq.src;
+ sreq.areq = (void *)&qcedev_areq->sha_req;
+ sreq.flags = 0;
+
+ ret = qce_process_sha_req(podev->qce, &sreq);
+
+ if (ret)
+ qcedev_areq->err = -ENXIO;
+ else
+ qcedev_areq->err = 0;
+ return ret;
+};
+
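+/*
+ * Start the request immediately if the engine is idle, otherwise queue
+ * it on ready_commands; then block until the done tasklet completes it
+ * and update the driver statistics.
+ */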
+static int submit_req(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle)
+{
+ struct qcedev_control *podev;
+ unsigned long flags = 0;
+ int ret = 0;
+ struct qcedev_stat *pstat;
+
+ qcedev_areq->err = 0;
+ podev = handle->cntl;
+
+ spin_lock_irqsave(&podev->lock, flags);
+
+ if (podev->active_command == NULL) {
+ podev->active_command = qcedev_areq;
+ if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+ ret = start_cipher_req(podev);
+ else
+ ret = start_sha_req(podev);
+ } else {
+ list_add_tail(&qcedev_areq->list, &podev->ready_commands);
+ }
+
+ if (ret != 0)
+ podev->active_command = NULL;
+
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ if (ret == 0)
+ wait_for_completion(&qcedev_areq->complete);
+
+ if (ret)
+ qcedev_areq->err = -EIO;
+
+ pstat = &_qcedev_stat;
+ if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
+ switch (qcedev_areq->cipher_op_req.op) {
+ case QCEDEV_OPER_DEC:
+ if (qcedev_areq->err)
+ pstat->qcedev_dec_fail++;
+ else
+ pstat->qcedev_dec_success++;
+ break;
+ case QCEDEV_OPER_ENC:
+ if (qcedev_areq->err)
+ pstat->qcedev_enc_fail++;
+ else
+ pstat->qcedev_enc_success++;
+ break;
+ default:
+ break;
+ };
+ } else {
+ if (qcedev_areq->err)
+ pstat->qcedev_sha_fail++;
+ else
+ pstat->qcedev_sha_success++;
+ }
+
+ return qcedev_areq->err;
+}
+
+static int qcedev_sha_init(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle)
+{
+ struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;
+
+ memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
+ sha_ctxt->first_blk = 1;
+
+ if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+ (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
+ memcpy(&sha_ctxt->digest[0],
+ &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+ sha_ctxt->diglen = SHA1_DIGEST_SIZE;
+ } else {
+ if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
+ (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
+ memcpy(&sha_ctxt->digest[0],
+ &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctxt->diglen = SHA256_DIGEST_SIZE;
+ }
+ }
+ sha_ctxt->init_done = true;
+ return 0;
+}
+
+
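+/*
+ * Hash one chunk of user data (the caller limits it to QCE_MAX_OPER_DATA
+ * bytes). Bytes that do not fill a whole SHA block are kept in the
+ * trailing buffer and prepended to the next update.
+ */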
+static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err = 0;
+ int i = 0;
+ uint32_t total;
+
+ uint8_t *user_src = NULL;
+ uint8_t *k_src = NULL;
+ uint8_t *k_buf_src = NULL;
+ uint8_t *k_align_src = NULL;
+
+ uint32_t sha_pad_len = 0;
+ uint32_t trailing_buf_len = 0;
+ uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
+ uint32_t sha_block_size;
+
+ total = qcedev_areq->sha_op_req.data_len + t_buf;
+
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
+ sha_block_size = SHA1_BLOCK_SIZE;
+ else
+ sha_block_size = SHA256_BLOCK_SIZE;
+
+ if (total <= sha_block_size) {
+ uint32_t len = qcedev_areq->sha_op_req.data_len;
+
+ i = 0;
+
+ k_src = &handle->sha_ctxt.trailing_buf[t_buf];
+
+ /* Copy data from user src(s) */
+ while (len > 0) {
+ user_src =
+ (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+ if (user_src && __copy_from_user(k_src,
+ (void __user *)user_src,
+ qcedev_areq->sha_op_req.data[i].len))
+ return -EFAULT;
+
+ len -= qcedev_areq->sha_op_req.data[i].len;
+ k_src += qcedev_areq->sha_op_req.data[i].len;
+ i++;
+ }
+ handle->sha_ctxt.trailing_buf_len = total;
+
+ return 0;
+ }
+
+
+ k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+ GFP_KERNEL);
+ if (k_buf_src == NULL) {
+ pr_err("%s: Can't Allocate memory: k_buf_src 0x%lx\n",
+ __func__, (uintptr_t)k_buf_src);
+ return -ENOMEM;
+ }
+
+ k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+ CACHE_LINE_SIZE);
+ k_src = k_align_src;
+
+ /* check for trailing buffer from previous updates and append it */
+ if (t_buf > 0) {
+ memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+ t_buf);
+ k_src += t_buf;
+ }
+
+ /* Copy data from user src(s) */
+ user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+ if (user_src && __copy_from_user(k_src,
+ (void __user *)user_src,
+ qcedev_areq->sha_op_req.data[0].len)) {
+ kzfree(k_buf_src);
+ return -EFAULT;
+ }
+ k_src += qcedev_areq->sha_op_req.data[0].len;
+ for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
+ user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+ if (user_src && __copy_from_user(k_src,
+ (void __user *)user_src,
+ qcedev_areq->sha_op_req.data[i].len)) {
+ kzfree(k_buf_src);
+ return -EFAULT;
+ }
+ k_src += qcedev_areq->sha_op_req.data[i].len;
+ }
+
+ /* get new trailing buffer */
+ sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
+ trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;
+
+ qcedev_areq->sha_req.sreq.src = sg_src;
+ sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
+ total-trailing_buf_len);
+ sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+ qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;
+
+ /* update sha_ctxt trailing buf content to new trailing buf */
+ if (trailing_buf_len > 0) {
+ memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+ memcpy(&handle->sha_ctxt.trailing_buf[0],
+ (k_src - trailing_buf_len),
+ trailing_buf_len);
+ }
+ handle->sha_ctxt.trailing_buf_len = trailing_buf_len;
+
+ err = submit_req(qcedev_areq, handle);
+
+ handle->sha_ctxt.last_blk = 0;
+ handle->sha_ctxt.first_blk = 0;
+
+ kzfree(k_buf_src);
+ return err;
+}
+
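+/*
+ * Split an arbitrarily large update into chunks of at most
+ * QCE_MAX_OPER_DATA bytes, feed each chunk to qcedev_sha_update_max_xfer(),
+ * and restore the caller's request structure afterwards.
+ */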
+static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err = 0;
+ int i = 0;
+ int j = 0;
+ int k = 0;
+ int num_entries = 0;
+ uint32_t total = 0;
+
+ if (handle->sha_ctxt.init_done == false) {
+ pr_err("%s Init was not called\n", __func__);
+ return -EINVAL;
+ }
+
+ /* verify address src(s) */
+ for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
+ if (!access_ok(VERIFY_READ,
+ (void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
+ qcedev_areq->sha_op_req.data[i].len))
+ return -EFAULT;
+
+ if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+ struct qcedev_sha_op_req *saved_req;
+ struct qcedev_sha_op_req req;
+ struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
+
+ /* save the original req structure */
+ saved_req =
+ kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
+ if (saved_req == NULL) {
+ pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
+ __func__, (uintptr_t)saved_req);
+ return -ENOMEM;
+ }
+ memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
+ memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));
+
+ i = 0;
+ /* Address 32 KB at a time */
+ while ((i < req.entries) && (err == 0)) {
+ if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
+ sreq->data[0].len = QCE_MAX_OPER_DATA;
+ if (i > 0) {
+ sreq->data[0].vaddr =
+ sreq->data[i].vaddr;
+ }
+
+ sreq->data_len = QCE_MAX_OPER_DATA;
+ sreq->entries = 1;
+
+ err = qcedev_sha_update_max_xfer(qcedev_areq,
+ handle, sg_src);
+
+ sreq->data[i].len = req.data[i].len -
+ QCE_MAX_OPER_DATA;
+ sreq->data[i].vaddr = req.data[i].vaddr +
+ QCE_MAX_OPER_DATA;
+ req.data[i].vaddr = sreq->data[i].vaddr;
+ req.data[i].len = sreq->data[i].len;
+ } else {
+ total = 0;
+ for (j = i; j < req.entries; j++) {
+ num_entries++;
+ if ((total + sreq->data[j].len) >=
+ QCE_MAX_OPER_DATA) {
+ sreq->data[j].len =
+ (QCE_MAX_OPER_DATA - total);
+ total = QCE_MAX_OPER_DATA;
+ break;
+ }
+ total += sreq->data[j].len;
+ }
+
+ sreq->data_len = total;
+ if (i > 0)
+ for (k = 0; k < num_entries; k++) {
+ sreq->data[k].len =
+ sreq->data[i+k].len;
+ sreq->data[k].vaddr =
+ sreq->data[i+k].vaddr;
+ }
+ sreq->entries = num_entries;
+
+ i = j;
+ err = qcedev_sha_update_max_xfer(qcedev_areq,
+ handle, sg_src);
+ num_entries = 0;
+
+ sreq->data[i].vaddr = req.data[i].vaddr +
+ sreq->data[i].len;
+ sreq->data[i].len = req.data[i].len -
+ sreq->data[i].len;
+ req.data[i].vaddr = sreq->data[i].vaddr;
+ req.data[i].len = sreq->data[i].len;
+
+ if (sreq->data[i].len == 0)
+ i++;
+ }
+ } /* end of while ((i < req.entries) && (err == 0)) */
+
+ /* Restore the original req structure */
+ for (i = 0; i < saved_req->entries; i++) {
+ sreq->data[i].len = saved_req->data[i].len;
+ sreq->data[i].vaddr = saved_req->data[i].vaddr;
+ }
+ sreq->entries = saved_req->entries;
+ sreq->data_len = saved_req->data_len;
+ kzfree(saved_req);
+ } else
+ err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);
+
+ return err;
+}
+
+static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle)
+{
+ int err = 0;
+ struct scatterlist sg_src;
+ uint32_t total;
+ uint8_t *k_buf_src = NULL;
+ uint8_t *k_align_src = NULL;
+
+ if (handle->sha_ctxt.init_done == false) {
+ pr_err("%s Init was not called\n", __func__);
+ return -EINVAL;
+ }
+
+ handle->sha_ctxt.last_blk = 1;
+
+ total = handle->sha_ctxt.trailing_buf_len;
+
+ if (total) {
+ k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+ GFP_KERNEL);
+ if (k_buf_src == NULL) {
+ pr_err("%s: Can't Allocate memory: k_buf_src 0x%lx\n",
+ __func__, (uintptr_t)k_buf_src);
+ return -ENOMEM;
+ }
+
+ k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+ CACHE_LINE_SIZE);
+ memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
+ }
+ qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+ sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
+ sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+ qcedev_areq->sha_req.sreq.nbytes = total;
+
+ err = submit_req(qcedev_areq, handle);
+
+ handle->sha_ctxt.first_blk = 0;
+ handle->sha_ctxt.last_blk = 0;
+ handle->sha_ctxt.auth_data[0] = 0;
+ handle->sha_ctxt.auth_data[1] = 0;
+ handle->sha_ctxt.trailing_buf_len = 0;
+ handle->sha_ctxt.init_done = false;
+ memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+
+ kzfree(k_buf_src);
+ return err;
+}
+
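+/*
+ * AES-CMAC: copy the user-supplied key into the context and gather all
+ * user data into one kernel buffer, then submit it as a single request.
+ */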
+static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err = 0;
+ int i = 0;
+ uint32_t total;
+
+ uint8_t *user_src = NULL;
+ uint8_t *k_src = NULL;
+ uint8_t *k_buf_src = NULL;
+
+ total = qcedev_areq->sha_op_req.data_len;
+
+ /* verify address src(s) */
+ for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
+ if (!access_ok(VERIFY_READ,
+ (void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
+ qcedev_areq->sha_op_req.data[i].len))
+ return -EFAULT;
+
+ /* Verify the authkey address */
+ if (!access_ok(VERIFY_READ,
+ (void __user *)qcedev_areq->sha_op_req.authkey,
+ qcedev_areq->sha_op_req.authklen))
+ return -EFAULT;
+ if (__copy_from_user(&handle->sha_ctxt.authkey[0],
+ (void __user *)qcedev_areq->sha_op_req.authkey,
+ qcedev_areq->sha_op_req.authklen))
+ return -EFAULT;
+
+
+ k_buf_src = kmalloc(total, GFP_KERNEL);
+ if (k_buf_src == NULL) {
+ pr_err("%s: Can't Allocate memory: k_buf_src 0x%lx\n",
+ __func__, (uintptr_t)k_buf_src);
+ return -ENOMEM;
+ }
+
+ k_src = k_buf_src;
+
+ /* Copy data from user src(s) */
+ user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+ for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
+ user_src =
+ (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+ if (user_src && __copy_from_user(k_src, (void __user *)user_src,
+ qcedev_areq->sha_op_req.data[i].len)) {
+ kzfree(k_buf_src);
+ return -EFAULT;
+ }
+ k_src += qcedev_areq->sha_op_req.data[i].len;
+ }
+
+ qcedev_areq->sha_req.sreq.src = sg_src;
+ sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
+ sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+ qcedev_areq->sha_req.sreq.nbytes = total;
+ handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
+ err = submit_req(qcedev_areq, handle);
+
+ kzfree(k_buf_src);
+ return err;
+}
+
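+/*
+ * Set up the HMAC authentication key. Keys that fit in QCEDEV_MAX_KEY_SIZE
+ * are copied directly; longer keys are first hashed and the digest is used
+ * as the key, per the HMAC definition (RFC 2104).
+ */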
+static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err = 0;
+
+ if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+ qcedev_sha_init(areq, handle);
+ /* Verify the authkey address */
+ if (!access_ok(VERIFY_READ,
+ (void __user *)areq->sha_op_req.authkey,
+ areq->sha_op_req.authklen))
+ return -EFAULT;
+ if (__copy_from_user(&handle->sha_ctxt.authkey[0],
+ (void __user *)areq->sha_op_req.authkey,
+ areq->sha_op_req.authklen))
+ return -EFAULT;
+ } else {
+ struct qcedev_async_req authkey_areq;
+ uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+
+ init_completion(&authkey_areq.complete);
+
+ authkey_areq.sha_op_req.entries = 1;
+ authkey_areq.sha_op_req.data[0].vaddr =
+ areq->sha_op_req.authkey;
+ authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
+ authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
+ authkey_areq.sha_op_req.diglen = 0;
+ authkey_areq.handle = handle;
+
+ memset(&authkey_areq.sha_op_req.digest[0], 0,
+ QCEDEV_MAX_SHA_DIGEST);
+ if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+ authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
+ if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
+ authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;
+
+ authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+ qcedev_sha_init(&authkey_areq, handle);
+ err = qcedev_sha_update(&authkey_areq, handle, sg_src);
+ if (!err)
+ err = qcedev_sha_final(&authkey_areq, handle);
+ else
+ return err;
+ memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ qcedev_sha_init(areq, handle);
+
+ memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
+ handle->sha_ctxt.diglen);
+ }
+ return err;
+}
+
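+/*
+ * Software HMAC outer hash: hash the opad-xored key block (held in the
+ * trailing buffer) as the first block, then place the inner digest in the
+ * trailing buffer so the following final step hashes it as the last block.
+ */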
+static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle)
+{
+ int err = 0;
+ struct scatterlist sg_src;
+ uint8_t *k_src = NULL;
+ uint32_t sha_block_size = 0;
+ uint32_t sha_digest_size = 0;
+
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+ sha_digest_size = SHA1_DIGEST_SIZE;
+ sha_block_size = SHA1_BLOCK_SIZE;
+ } else {
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+ sha_digest_size = SHA256_DIGEST_SIZE;
+ sha_block_size = SHA256_BLOCK_SIZE;
+ }
+ }
+ k_src = kmalloc(sha_block_size, GFP_KERNEL);
+ if (k_src == NULL) {
+ pr_err("%s: Can't Allocate memory: k_src 0x%lx\n",
+ __func__, (uintptr_t)k_src);
+ return -ENOMEM;
+ }
+
+ /* check for trailing buffer from previous updates and append it */
+ memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+ handle->sha_ctxt.trailing_buf_len);
+
+ qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+ sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
+ sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+ qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
+ memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+ memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
+ sha_digest_size);
+ handle->sha_ctxt.trailing_buf_len = sha_digest_size;
+
+ handle->sha_ctxt.first_blk = 1;
+ handle->sha_ctxt.last_blk = 0;
+ handle->sha_ctxt.auth_data[0] = 0;
+ handle->sha_ctxt.auth_data[1] = 0;
+
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+ memcpy(&handle->sha_ctxt.digest[0],
+ &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+ handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
+ }
+
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+ memcpy(&handle->sha_ctxt.digest[0],
+ &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
+ handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
+ }
+ err = submit_req(qcedev_areq, handle);
+
+ handle->sha_ctxt.last_blk = 0;
+ handle->sha_ctxt.first_blk = 0;
+
+ kzfree(k_src);
+ return err;
+}
+
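+/* XOR the auth key with the HMAC ipad (0x36) or opad (0x5c) constant. */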
+static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle, bool ikey)
+{
+ int i;
+ uint32_t constant;
+ uint32_t sha_block_size;
+
+ if (ikey)
+ constant = 0x36;
+ else
+ constant = 0x5c;
+
+ if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+ sha_block_size = SHA1_BLOCK_SIZE;
+ else
+ sha_block_size = SHA256_BLOCK_SIZE;
+
+ memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+ for (i = 0; i < sha_block_size; i++)
+ handle->sha_ctxt.trailing_buf[i] =
+ (handle->sha_ctxt.authkey[i] ^ constant);
+
+ handle->sha_ctxt.trailing_buf_len = sha_block_size;
+ return 0;
+}
+
+static int qcedev_hmac_init(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err;
+ struct qcedev_control *podev = handle->cntl;
+
+ err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
+ if (err)
+ return err;
+ if (!podev->ce_support.sha_hmac)
+ qcedev_hmac_update_iokey(areq, handle, true);
+ return 0;
+}
+
+static int qcedev_hmac_final(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle)
+{
+ int err;
+ struct qcedev_control *podev = handle->cntl;
+
+ err = qcedev_sha_final(areq, handle);
+ if (podev->ce_support.sha_hmac)
+ return err;
+
+ qcedev_hmac_update_iokey(areq, handle, false);
+ err = qcedev_hmac_get_ohash(areq, handle);
+ if (err)
+ return err;
+ err = qcedev_sha_final(areq, handle);
+
+ return err;
+}
+
+static int qcedev_hash_init(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+ (areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+ return qcedev_sha_init(areq, handle);
+ else
+ return qcedev_hmac_init(areq, handle, sg_src);
+}
+
+static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ return qcedev_sha_update(qcedev_areq, handle, sg_src);
+}
+
+static int qcedev_hash_final(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle)
+{
+ if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+ (areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+ return qcedev_sha_final(areq, handle);
+ else
+ return qcedev_hmac_final(areq, handle);
+}
+
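+/*
+ * Cipher one chunk: gather the user source buffers into the aligned kernel
+ * buffer, run the cipher in place, then scatter the result back to the
+ * user destination buffers starting at index *di.
+ */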
+static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+ int *di, struct qcedev_handle *handle,
+ uint8_t *k_align_src)
+{
+ int err = 0;
+ int i = 0;
+ int dst_i = *di;
+ struct scatterlist sg_src;
+ uint32_t byteoffset = 0;
+ uint8_t *user_src = NULL;
+ uint8_t *k_align_dst = k_align_src;
+ struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+
+ if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+ byteoffset = areq->cipher_op_req.byteoffset;
+
+ user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
+ if (user_src && __copy_from_user((k_align_src + byteoffset),
+ (void __user *)user_src,
+ areq->cipher_op_req.vbuf.src[0].len))
+ return -EFAULT;
+
+ k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;
+
+ for (i = 1; i < areq->cipher_op_req.entries; i++) {
+ user_src =
+ (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
+ if (user_src && __copy_from_user(k_align_src,
+ (void __user *)user_src,
+ areq->cipher_op_req.vbuf.src[i].len)) {
+ return -EFAULT;
+ }
+ k_align_src += areq->cipher_op_req.vbuf.src[i].len;
+ }
+
+ /* restore src beginning */
+ k_align_src = k_align_dst;
+ areq->cipher_op_req.data_len += byteoffset;
+
+ areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
+ areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
+
+ /* In place encryption/decryption */
+ sg_set_buf(areq->cipher_req.creq.src,
+ k_align_dst,
+ areq->cipher_op_req.data_len);
+ sg_mark_end(areq->cipher_req.creq.src);
+
+ areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+ areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+ areq->cipher_op_req.entries = 1;
+
+ err = submit_req(areq, handle);
+
+ /* copy data to destination buffer*/
+ creq->data_len -= byteoffset;
+
+ while (creq->data_len > 0) {
+ if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
+ if (err == 0 && __copy_to_user(
+ (void __user *)creq->vbuf.dst[dst_i].vaddr,
+ (k_align_dst + byteoffset),
+ creq->vbuf.dst[dst_i].len))
+ return -EFAULT;
+
+ k_align_dst += creq->vbuf.dst[dst_i].len +
+ byteoffset;
+ creq->data_len -= creq->vbuf.dst[dst_i].len;
+ dst_i++;
+ } else {
+ if (err == 0 && __copy_to_user(
+ (void __user *)creq->vbuf.dst[dst_i].vaddr,
+ (k_align_dst + byteoffset),
+ creq->data_len))
+ return -EFAULT;
+
+ k_align_dst += creq->data_len;
+ creq->vbuf.dst[dst_i].len -= creq->data_len;
+ creq->vbuf.dst[dst_i].vaddr += creq->data_len;
+ creq->data_len = 0;
+ }
+ }
+ *di = dst_i;
+
+ return err;
+};
+
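+/*
+ * Validate the user buffers, then cipher the request in chunks of at most
+ * QCE_MAX_OPER_DATA bytes through qcedev_vbuf_ablk_cipher_max_xfer(),
+ * restoring the caller's request structure when done.
+ */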
+static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle)
+{
+ int err = 0;
+ int di = 0;
+ int i = 0;
+ int j = 0;
+ int k = 0;
+ uint32_t byteoffset = 0;
+ int num_entries = 0;
+ uint32_t total = 0;
+ uint32_t len;
+ uint8_t *k_buf_src = NULL;
+ uint8_t *k_align_src = NULL;
+ uint32_t max_data_xfer;
+ struct qcedev_cipher_op_req *saved_req;
+ struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+ /* Verify source addresses */
+ for (i = 0; i < areq->cipher_op_req.entries; i++)
+ if (!access_ok(VERIFY_READ,
+ (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
+ areq->cipher_op_req.vbuf.src[i].len))
+ return -EFAULT;
+
+ /* Verify destination addresses */
+ if (creq->in_place_op != 1) {
+ for (i = 0, total = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ if ((areq->cipher_op_req.vbuf.dst[i].vaddr != 0) &&
+ (total < creq->data_len)) {
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *)creq->vbuf.dst[i].vaddr,
+ creq->vbuf.dst[i].len)) {
+ pr_err("%s:DST WR_VERIFY err %d=0x%lx\n",
+ __func__, i, (uintptr_t)
+ creq->vbuf.dst[i].vaddr);
+ return -EFAULT;
+ }
+ total += creq->vbuf.dst[i].len;
+ }
+ }
+ } else {
+ for (i = 0, total = 0; i < creq->entries; i++) {
+ if (total < creq->data_len) {
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *)creq->vbuf.src[i].vaddr,
+ creq->vbuf.src[i].len)) {
+ pr_err("%s:SRC WR_VERIFY err %d=0x%lx\n",
+ __func__, i, (uintptr_t)
+ creq->vbuf.src[i].vaddr);
+ return -EFAULT;
+ }
+ total += creq->vbuf.src[i].len;
+ }
+ }
+ }
+ total = 0;
+
+ if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+ byteoffset = areq->cipher_op_req.byteoffset;
+ k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
+ GFP_KERNEL);
+ if (k_buf_src == NULL) {
+ pr_err("%s: Can't Allocate memory: k_buf_src 0x%lx\n",
+ __func__, (uintptr_t)k_buf_src);
+ return -ENOMEM;
+ }
+ k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+ CACHE_LINE_SIZE);
+ max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
+
+ saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+ if (saved_req == NULL) {
+ pr_err("%s: Can't Allocate memory:saved_req 0x%lx\n",
+ __func__, (uintptr_t)saved_req);
+ kzfree(k_buf_src);
+ return -ENOMEM;
+
+ }
+ memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+ if (areq->cipher_op_req.data_len > max_data_xfer) {
+ struct qcedev_cipher_op_req req;
+
+ /* save the original req structure */
+ memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+ i = 0;
+ /* Address 32 KB at a time */
+ while ((i < req.entries) && (err == 0)) {
+ if (creq->vbuf.src[i].len > max_data_xfer) {
+ creq->vbuf.src[0].len = max_data_xfer;
+ if (i > 0) {
+ creq->vbuf.src[0].vaddr =
+ creq->vbuf.src[i].vaddr;
+ }
+
+ creq->data_len = max_data_xfer;
+ creq->entries = 1;
+
+ err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+ &di, handle, k_align_src);
+ if (err < 0) {
+ kzfree(k_buf_src);
+ kzfree(saved_req);
+ return err;
+ }
+
+ creq->vbuf.src[i].len = req.vbuf.src[i].len -
+ max_data_xfer;
+ creq->vbuf.src[i].vaddr =
+ req.vbuf.src[i].vaddr +
+ max_data_xfer;
+ req.vbuf.src[i].vaddr =
+ creq->vbuf.src[i].vaddr;
+ req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+ } else {
+ total = areq->cipher_op_req.byteoffset;
+ for (j = i; j < req.entries; j++) {
+ num_entries++;
+ if ((total + creq->vbuf.src[j].len)
+ >= max_data_xfer) {
+ creq->vbuf.src[j].len =
+ max_data_xfer - total;
+ total = max_data_xfer;
+ break;
+ }
+ total += creq->vbuf.src[j].len;
+ }
+
+ creq->data_len = total;
+ if (i > 0)
+ for (k = 0; k < num_entries; k++) {
+ creq->vbuf.src[k].len =
+ creq->vbuf.src[i+k].len;
+ creq->vbuf.src[k].vaddr =
+ creq->vbuf.src[i+k].vaddr;
+ }
+ creq->entries = num_entries;
+
+ i = j;
+ err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+ &di, handle, k_align_src);
+ if (err < 0) {
+ kzfree(k_buf_src);
+ kzfree(saved_req);
+ return err;
+ }
+
+ num_entries = 0;
+ areq->cipher_op_req.byteoffset = 0;
+
+ creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
+ + creq->vbuf.src[i].len;
+ creq->vbuf.src[i].len = req.vbuf.src[i].len -
+ creq->vbuf.src[i].len;
+
+ req.vbuf.src[i].vaddr =
+ creq->vbuf.src[i].vaddr;
+ req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+ if (creq->vbuf.src[i].len == 0)
+ i++;
+ }
+
+ areq->cipher_op_req.byteoffset = 0;
+ max_data_xfer = QCE_MAX_OPER_DATA;
+ byteoffset = 0;
+
+ } /* end of while ((i < req.entries) && (err == 0)) */
+ } else
+ err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
+ k_align_src);
+
+ /* Restore the original req structure */
+ for (i = 0; i < saved_req->entries; i++) {
+ creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
+ creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
+ }
+ for (len = 0, i = 0; len < saved_req->data_len; i++) {
+ creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
+ creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
+ len += saved_req->vbuf.dst[i].len;
+ }
+ creq->entries = saved_req->entries;
+ creq->data_len = saved_req->data_len;
+ creq->byteoffset = saved_req->byteoffset;
+
+ kzfree(saved_req);
+ kzfree(k_buf_src);
+ return err;
+
+}
+
+static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
+ struct qcedev_control *podev)
+{
+ /* if intending to use HW key make sure key fields are set
+ * correctly and HW key is indeed supported in target
+ */
+ if (req->encklen == 0) {
+ int i;
+ for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+ if (req->enckey[i]) {
+ pr_err("%s: Invalid key: non-zero key input\n",
+ __func__);
+ goto error;
+ }
+ }
+ if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+ (req->op != QCEDEV_OPER_DEC_NO_KEY))
+ if (!podev->platform_support.hw_key_support) {
+ pr_err("%s: Invalid op %d\n", __func__,
+ (uint32_t)req->op);
+ goto error;
+ }
+ } else {
+ if (req->encklen == QCEDEV_AES_KEY_192) {
+ if (!podev->ce_support.aes_key_192) {
+ pr_err("%s: AES-192 not supported\n", __func__);
+ goto error;
+ }
+ } else {
+ /* if not using HW key make sure key
+ * length is valid
+ */
+ if ((req->mode == QCEDEV_AES_MODE_XTS)) {
+ if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
+ (req->encklen != QCEDEV_AES_KEY_256*2)) {
+ pr_err("%s: unsupported key size: %d\n",
+ __func__, req->encklen);
+ goto error;
+ }
+ } else {
+ if ((req->encklen != QCEDEV_AES_KEY_128) &&
+ (req->encklen != QCEDEV_AES_KEY_256)) {
+ pr_err("%s: unsupported key size %d\n",
+ __func__, req->encklen);
+ goto error;
+ }
+ }
+ }
+ }
+ return 0;
+error:
+ return -EINVAL;
+}
+
+static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
+ struct qcedev_control *podev)
+{
+ uint32_t total = 0;
+ uint32_t i;
+
+ if (req->use_pmem) {
+ pr_err("%s: Use of PMEM is not supported\n", __func__);
+ goto error;
+ }
+ if ((req->entries == 0) || (req->data_len == 0) ||
+ (req->entries > QCEDEV_MAX_BUFFERS)) {
+ pr_err("%s: Invalid cipher length/entries\n", __func__);
+ goto error;
+ }
+ if ((req->alg >= QCEDEV_ALG_LAST) ||
+ (req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
+ pr_err("%s: Invalid algorithm %d\n", __func__,
+ (uint32_t)req->alg);
+ goto error;
+ }
+ if ((req->mode == QCEDEV_AES_MODE_XTS) &&
+ (!podev->ce_support.aes_xts)) {
+ pr_err("%s: XTS algorithm is not supported\n", __func__);
+ goto error;
+ }
+ if (req->alg == QCEDEV_ALG_AES) {
+ if (qcedev_check_cipher_key(req, podev))
+ goto error;
+
+ }
+ /* if using a byteoffset, make sure it is CTR mode using vbuf */
+ if (req->byteoffset) {
+ if (req->mode != QCEDEV_AES_MODE_CTR) {
+ pr_err("%s: Operation on byte offset not supported\n",
+ __func__);
+ goto error;
+ }
+ if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
+ pr_err("%s: Invalid byte offset\n", __func__);
+ goto error;
+ }
+ }
+
+ if (req->data_len < req->byteoffset) {
+ pr_err("%s: req data length %u is less than byteoffset %u\n",
+ __func__, req->data_len, req->byteoffset);
+ goto error;
+ }
+
+ /* Ensure IV size */
+ if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+ pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+ goto error;
+ }
+
+ /* Ensure Key size */
+ if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+ pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
+ goto error;
+ }
+
+ /* Ensure zero ivlen for ECB mode */
+ if (req->ivlen > 0) {
+ if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+ (req->mode == QCEDEV_DES_MODE_ECB)) {
+ pr_err("%s: Expecting a zero length IV\n", __func__);
+ goto error;
+ }
+ } else {
+ if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+ (req->mode != QCEDEV_DES_MODE_ECB)) {
+ pr_err("%s: Expecting a non-zero ength IV\n", __func__);
+ goto error;
+ }
+ }
+ /* Check that the sum of all dst buffer lengths equals data_len */
+ for (i = 0; i < req->entries; i++) {
+ if (req->vbuf.dst[i].len >= ULONG_MAX - total) {
+ pr_err("%s: Integer overflow on total req dst vbuf length\n",
+ __func__);
+ goto error;
+ }
+ total += req->vbuf.dst[i].len;
+ }
+ if (total != req->data_len) {
+ pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
+ __func__, i, total, req->data_len);
+ goto error;
+ }
+ /* Check that the sum of all src buffer lengths equals data_len */
+ for (i = 0, total = 0; i < req->entries; i++) {
+ if (req->vbuf.src[i].len > ULONG_MAX - total) {
+ pr_err("%s: Integer overflow on total req src vbuf length\n",
+ __func__);
+ goto error;
+ }
+ total += req->vbuf.src[i].len;
+ }
+ if (total != req->data_len) {
+ pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+ __func__, total, req->data_len);
+ goto error;
+ }
+ return 0;
+error:
+ return -EINVAL;
+
+}
+
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+ struct qcedev_control *podev)
+{
+ uint32_t total = 0;
+ uint32_t i;
+
+ if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+ (!podev->ce_support.cmac)) {
+ pr_err("%s: CMAC not supported\n", __func__);
+ goto sha_error;
+ }
+ if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
+ pr_err("%s: Invalid num entries (%d)\n",
+ __func__, req->entries);
+ goto sha_error;
+ }
+
+ if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
+ pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
+ goto sha_error;
+ }
+ if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
+ (req->alg == QCEDEV_ALG_SHA1_HMAC)) {
+ if (req->authkey == NULL) {
+ pr_err("%s: Invalid authkey pointer\n", __func__);
+ goto sha_error;
+ }
+ if (req->authklen <= 0) {
+ pr_err("%s: Invalid authkey length (%d)\n",
+ __func__, req->authklen);
+ goto sha_error;
+ }
+ }
+
+ if (req->alg == QCEDEV_ALG_AES_CMAC) {
+ if ((req->authklen != QCEDEV_AES_KEY_128) &&
+ (req->authklen != QCEDEV_AES_KEY_256)) {
+ pr_err("%s: unsupported key length\n", __func__);
+ goto sha_error;
+ }
+ }
+
+ /* Check that the sum of all src buffer lengths equals data_len */
+ for (i = 0, total = 0; i < req->entries; i++) {
+ if (req->data[i].len > ULONG_MAX - total) {
+ pr_err("%s: Integer overflow on total req buf length\n",
+ __func__);
+ goto sha_error;
+ }
+ total += req->data[i].len;
+ }
+
+ if (total != req->data_len) {
+ pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+ __func__, total, req->data_len);
+ goto sha_error;
+ }
+ return 0;
+sha_error:
+ return -EINVAL;
+}
+
+long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ int err = 0;
+ struct qcedev_handle *handle;
+ struct qcedev_control *podev;
+ struct qcedev_async_req qcedev_areq;
+ struct qcedev_stat *pstat;
+
+ handle = file->private_data;
+ podev = handle->cntl;
+ qcedev_areq.handle = handle;
+ if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
+ pr_err("%s: invalid handle %p\n",
+ __func__, podev);
+ return -ENOENT;
+ }
+
+ /* Verify user arguments. */
+ if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
+ return -ENOTTY;
+
+ init_completion(&qcedev_areq.complete);
+ pstat = &_qcedev_stat;
+
+ switch (cmd) {
+ case QCEDEV_IOCTL_ENC_REQ:
+ case QCEDEV_IOCTL_DEC_REQ:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qcedev_cipher_op_req)))
+ return -EFAULT;
+
+ if (__copy_from_user(&qcedev_areq.cipher_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_cipher_op_req)))
+ return -EFAULT;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+
+ if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
+ podev))
+ return -EINVAL;
+
+ err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
+ if (err)
+ return err;
+ if (__copy_to_user((void __user *)arg,
+ &qcedev_areq.cipher_op_req,
+ sizeof(struct qcedev_cipher_op_req)))
+ return -EFAULT;
+ break;
+
+ case QCEDEV_IOCTL_SHA_INIT_REQ:
+ {
+ struct scatterlist sg_src;
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+
+ if (__copy_from_user(&qcedev_areq.sha_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ return -EINVAL;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+ err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+ if (err)
+ return err;
+ if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ }
+ handle->sha_ctxt.init_done = true;
+ break;
+ case QCEDEV_IOCTL_GET_CMAC_REQ:
+ if (!podev->ce_support.cmac)
+ return -ENOTTY;
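+ /* fall through to the SHA/CMAC update handling */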
+ case QCEDEV_IOCTL_SHA_UPDATE_REQ:
+ {
+ struct scatterlist sg_src;
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+
+ if (__copy_from_user(&qcedev_areq.sha_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ return -EINVAL;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+ if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
+ err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src);
+ if (err)
+ return err;
+ } else {
+ if (handle->sha_ctxt.init_done == false) {
+ pr_err("%s Init was not called\n", __func__);
+ return -EINVAL;
+ }
+ err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+ if (err)
+ return err;
+ }
+
+ memcpy(&qcedev_areq.sha_op_req.digest[0],
+ &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ }
+ break;
+
+ case QCEDEV_IOCTL_SHA_FINAL_REQ:
+
+ if (handle->sha_ctxt.init_done == false) {
+ pr_err("%s Init was not called\n", __func__);
+ return -EINVAL;
+ }
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+
+ if (__copy_from_user(&qcedev_areq.sha_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ return -EINVAL;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+ err = qcedev_hash_final(&qcedev_areq, handle);
+ if (err)
+ return err;
+ qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
+ memcpy(&qcedev_areq.sha_op_req.digest[0],
+ &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ handle->sha_ctxt.init_done = false;
+ break;
+
+ case QCEDEV_IOCTL_GET_SHA_REQ:
+ {
+ struct scatterlist sg_src;
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+
+ if (__copy_from_user(&qcedev_areq.sha_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ return -EINVAL;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+ qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+ err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+ if (err)
+ return err;
+ err = qcedev_hash_final(&qcedev_areq, handle);
+ if (err)
+ return err;
+ qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
+ memcpy(&qcedev_areq.sha_op_req.digest[0],
+ &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ }
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(qcedev_ioctl);
+
+static int qcedev_probe(struct platform_device *pdev)
+{
+ void *handle = NULL;
+ int rc = 0;
+ struct qcedev_control *podev;
+ struct msm_ce_hw_support *platform_support;
+
+ podev = &qce_dev[0];
+
+ podev->high_bw_req_count = 0;
+ INIT_LIST_HEAD(&podev->ready_commands);
+ podev->active_command = NULL;
+
+ spin_lock_init(&podev->lock);
+
+ tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+ /* open qce */
+ handle = qce_open(pdev, &rc);
+ if (handle == NULL) {
+ platform_set_drvdata(pdev, NULL);
+ return rc;
+ }
+
+ podev->qce = handle;
+ podev->pdev = pdev;
+ platform_set_drvdata(pdev, podev);
+
+ rc = misc_register(&podev->miscdevice);
+ qce_hw_support(podev->qce, &podev->ce_support);
+ if (podev->ce_support.bam) {
+ podev->platform_support.ce_shared = 0;
+ podev->platform_support.shared_ce_resource = 0;
+ podev->platform_support.hw_key_support =
+ podev->ce_support.hw_key;
+ podev->platform_support.bus_scale_table = NULL;
+ podev->platform_support.sha_hmac = 1;
+
+ podev->platform_support.bus_scale_table =
+ (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (!podev->platform_support.bus_scale_table)
+ pr_err("bus_scale_table is NULL\n");
+ } else {
+ platform_support =
+ (struct msm_ce_hw_support *)pdev->dev.platform_data;
+ podev->platform_support.ce_shared = platform_support->ce_shared;
+ podev->platform_support.shared_ce_resource =
+ platform_support->shared_ce_resource;
+ podev->platform_support.hw_key_support =
+ platform_support->hw_key_support;
+ podev->platform_support.bus_scale_table =
+ platform_support->bus_scale_table;
+ podev->platform_support.sha_hmac = platform_support->sha_hmac;
+ }
+ if (podev->platform_support.bus_scale_table != NULL) {
+ podev->bus_scale_handle =
+ msm_bus_scale_register_client(
+ (struct msm_bus_scale_pdata *)
+ podev->platform_support.bus_scale_table);
+ if (!podev->bus_scale_handle) {
+ pr_err("%s not able to get bus scale\n",
+ __func__);
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+
+ if (rc >= 0)
+ return 0;
+ else
+ if (podev->platform_support.bus_scale_table != NULL)
+ msm_bus_scale_unregister_client(
+ podev->bus_scale_handle);
+err:
+
+ if (handle)
+ qce_close(handle);
+ platform_set_drvdata(pdev, NULL);
+ podev->qce = NULL;
+ podev->pdev = NULL;
+ return rc;
+};
+
+static int qcedev_remove(struct platform_device *pdev)
+{
+ struct qcedev_control *podev;
+
+ podev = platform_get_drvdata(pdev);
+ if (!podev)
+ return 0;
+ if (podev->qce)
+ qce_close(podev->qce);
+
+ if (podev->platform_support.bus_scale_table != NULL)
+ msm_bus_scale_unregister_client(podev->bus_scale_handle);
+
+ if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&podev->miscdevice);
+ tasklet_kill(&podev->done_tasklet);
+ return 0;
+};
+
+static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct qcedev_control *podev;
+ int ret;
+ podev = platform_get_drvdata(pdev);
+
+ if (!podev || !podev->platform_support.bus_scale_table)
+ return 0;
+
+ mutex_lock(&qcedev_sent_bw_req);
+ if (podev->high_bw_req_count) {
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
+ __func__);
+ goto suspend_exit;
+ }
+ ret = qce_disable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ goto suspend_exit;
+ }
+ }
+
+suspend_exit:
+ mutex_unlock(&qcedev_sent_bw_req);
+ return 0;
+}
+
+static int qcedev_resume(struct platform_device *pdev)
+{
+ struct qcedev_control *podev;
+ int ret;
+ podev = platform_get_drvdata(pdev);
+
+ if (!podev || !podev->platform_support.bus_scale_table)
+ return 0;
+
+ mutex_lock(&qcedev_sent_bw_req);
+ if (podev->high_bw_req_count) {
+ ret = qce_enable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable enable clk\n", __func__);
+ goto resume_exit;
+ }
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ ret = qce_disable_clk(podev->qce);
+ if (ret)
+ pr_err("%s Unable enable clk\n",
+ __func__);
+ goto resume_exit;
+ }
+ }
+
+resume_exit:
+ mutex_unlock(&qcedev_sent_bw_req);
+ return 0;
+}
+
+static struct of_device_id qcedev_match[] = {
+ { .compatible = "qcom,qcedev",
+ },
+ {}
+};
+
+static struct platform_driver qcedev_plat_driver = {
+ .probe = qcedev_probe,
+ .remove = qcedev_remove,
+ .suspend = qcedev_suspend,
+ .resume = qcedev_resume,
+ .driver = {
+ .name = "qce",
+ .owner = THIS_MODULE,
+ .of_match_table = qcedev_match,
+ },
+};
+
+static int _disp_stats(int id)
+{
+ struct qcedev_stat *pstat;
+ int len = 0;
+
+ pstat = &_qcedev_stat;
+ len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+ "\nQualcomm QCE dev driver %d Statistics:\n",
+ id + 1);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " Encryption operation success : %d\n",
+ pstat->qcedev_enc_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " Encryption operation fail : %d\n",
+ pstat->qcedev_enc_fail);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " Decryption operation success : %d\n",
+ pstat->qcedev_dec_success);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation fail             : %d\n",
+ pstat->qcedev_dec_fail);
+
+ return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t rc = -EINVAL;
+ int qcedev = *((int *) file->private_data);
+ int len;
+
+ len = _disp_stats(qcedev);
+
+ rc = simple_read_from_buffer((void __user *) buf, len,
+ ppos, (void *) _debug_read_buf, len);
+
+ return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
+ return count;
+};
+
+static const struct file_operations _debug_stats_ops = {
+ .open = _debug_stats_open,
+ .read = _debug_stats_read,
+ .write = _debug_stats_write,
+};
+
+static int _qcedev_debug_init(void)
+{
+ int rc;
+ char name[DEBUG_MAX_FNAME];
+ struct dentry *dent;
+
+ _debug_dent = debugfs_create_dir("qcedev", NULL);
+ if (IS_ERR(_debug_dent)) {
+ pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(_debug_dent));
+ return PTR_ERR(_debug_dent);
+ }
+
+ snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+ _debug_qcedev = 0;
+ dent = debugfs_create_file(name, 0644, _debug_dent,
+ &_debug_qcedev, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_err("qcedev debugfs_create_file fail\n");
+		rc = -ENOMEM;
+		goto err;
+	}
+ return 0;
+err:
+ debugfs_remove_recursive(_debug_dent);
+ return rc;
+}
+
+static int qcedev_init(void)
+{
+ int rc;
+
+ rc = _qcedev_debug_init();
+ if (rc)
+ return rc;
+ return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+ debugfs_remove_recursive(_debug_dent);
+ platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
+
+module_init(qcedev_init);
+module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcedevi.h b/drivers/crypto/msm/qcedevi.h
new file mode 100644
index 000000000000..ca358ac3d4c6
--- /dev/null
+++ b/drivers/crypto/msm/qcedevi.h
@@ -0,0 +1,124 @@
+/* QTI crypto Driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CRYPTO_MSM_QCEDEVI_H
+#define __CRYPTO_MSM_QCEDEVI_H
+
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <crypto/hash.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/fips_status.h>
+#include "qce.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+enum qcedev_crypto_oper_type {
+ QCEDEV_CRYPTO_OPER_CIPHER = 0,
+ QCEDEV_CRYPTO_OPER_SHA = 1,
+ QCEDEV_CRYPTO_OPER_LAST
+};
+
+struct qcedev_handle;
+
+struct qcedev_cipher_req {
+ struct ablkcipher_request creq;
+ void *cookie;
+};
+
+struct qcedev_sha_req {
+ struct ahash_request sreq;
+ void *cookie;
+};
+
+struct qcedev_sha_ctxt {
+ uint32_t auth_data[4];
+ uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
+ uint32_t diglen;
+ uint8_t trailing_buf[64];
+ uint32_t trailing_buf_len;
+ uint8_t first_blk;
+ uint8_t last_blk;
+ uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+ bool init_done;
+};
+
+struct qcedev_async_req {
+ struct list_head list;
+ struct completion complete;
+ enum qcedev_crypto_oper_type op_type;
+ union {
+ struct qcedev_cipher_op_req cipher_op_req;
+ struct qcedev_sha_op_req sha_op_req;
+ };
+
+ union {
+ struct qcedev_cipher_req cipher_req;
+ struct qcedev_sha_req sha_req;
+ };
+ struct qcedev_handle *handle;
+ int err;
+};
+
+/*
+ * Register ourselves as a misc device to be able to access the dev driver
+ * from userspace.
+ */
+
+#define QCEDEV_DEV "qcedev"
+
+struct qcedev_control {
+
+ /* CE features supported by platform */
+ struct msm_ce_hw_support platform_support;
+
+ uint32_t ce_lock_count;
+ uint32_t high_bw_req_count;
+
+ /* CE features/algorithms supported by HW engine*/
+ struct ce_hw_support ce_support;
+
+ uint32_t bus_scale_handle;
+
+ /* misc device */
+ struct miscdevice miscdevice;
+
+ /* qce handle */
+ void *qce;
+
+ /* platform device */
+ struct platform_device *pdev;
+
+ unsigned magic;
+
+ struct list_head ready_commands;
+ struct qcedev_async_req *active_command;
+ spinlock_t lock;
+ struct tasklet_struct done_tasklet;
+};
+
+struct qcedev_handle {
+ /* qcedev control handle */
+ struct qcedev_control *cntl;
+ /* qce internal sha context*/
+ struct qcedev_sha_ctxt sha_ctxt;
+};
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+ unsigned char *iv, int ret);
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+ unsigned char *authdata, int ret);
+
+#endif /* __CRYPTO_MSM_QCEDEVI_H */
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
new file mode 100644
index 000000000000..d799e9adf616
--- /dev/null
+++ b/drivers/crypto/msm/qcrypto.c
@@ -0,0 +1,5332 @@
+/* Qualcomm Crypto driver
+ *
+ * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/qcrypto.h>
+
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+
+#include <linux/fips_status.h>
+
+#include "qce.h"
+
+#define DEBUG_MAX_FNAME 16
+#define DEBUG_MAX_RW_BUF 2048
+#define QCRYPTO_BIG_NUMBER 9999999 /* a big number */
+
+/*
+ * For crypto 5.0 which has burst size alignment requirement.
+ */
+#define MAX_ALIGN_SIZE 0x40
+
+#define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
+
+
+
+/* Status of response workq */
+enum resp_workq_sts {
+ NOT_SCHEDULED = 0,
+ IS_SCHEDULED = 1,
+ SCHEDULE_AGAIN = 2
+};
+
+/* Status of req processing by CEs */
+enum req_processing_sts {
+ STOPPED = 0,
+ IN_PROGRESS = 1
+};
+
+enum qcrypto_bus_state {
+ BUS_NO_BANDWIDTH = 0,
+ BUS_HAS_BANDWIDTH,
+ BUS_BANDWIDTH_RELEASING,
+ BUS_BANDWIDTH_ALLOCATING,
+ BUS_SUSPENDED,
+ BUS_SUSPENDING,
+};
+
+struct crypto_stat {
+ u64 aead_sha1_aes_enc;
+ u64 aead_sha1_aes_dec;
+ u64 aead_sha1_des_enc;
+ u64 aead_sha1_des_dec;
+ u64 aead_sha1_3des_enc;
+ u64 aead_sha1_3des_dec;
+ u64 aead_sha256_aes_enc;
+ u64 aead_sha256_aes_dec;
+ u64 aead_sha256_des_enc;
+ u64 aead_sha256_des_dec;
+ u64 aead_sha256_3des_enc;
+ u64 aead_sha256_3des_dec;
+ u64 aead_ccm_aes_enc;
+ u64 aead_ccm_aes_dec;
+ u64 aead_rfc4309_ccm_aes_enc;
+ u64 aead_rfc4309_ccm_aes_dec;
+ u64 aead_op_success;
+ u64 aead_op_fail;
+ u64 aead_bad_msg;
+ u64 ablk_cipher_aes_enc;
+ u64 ablk_cipher_aes_dec;
+ u64 ablk_cipher_des_enc;
+ u64 ablk_cipher_des_dec;
+ u64 ablk_cipher_3des_enc;
+ u64 ablk_cipher_3des_dec;
+ u64 ablk_cipher_op_success;
+ u64 ablk_cipher_op_fail;
+ u64 sha1_digest;
+ u64 sha256_digest;
+ u64 sha1_hmac_digest;
+ u64 sha256_hmac_digest;
+ u64 ahash_op_success;
+ u64 ahash_op_fail;
+};
+static struct crypto_stat _qcrypto_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static bool _qcrypto_init_assign;
+struct crypto_priv;
+struct qcrypto_req_control {
+ unsigned int index;
+ bool in_use;
+ struct crypto_engine *pce;
+ struct crypto_async_request *req;
+ struct qcrypto_resp_ctx *arsp;
+};
+
+struct crypto_engine {
+ struct list_head elist;
+ void *qce; /* qce handle */
+ struct platform_device *pdev; /* platform device */
+ struct crypto_priv *pcp;
+ uint32_t bus_scale_handle;
+ struct crypto_queue req_queue; /*
+ * request queue for those requests
+ * that have this engine assigned
+ * waiting to be executed
+ */
+ u64 total_req;
+ u64 err_req;
+ u32 unit;
+ u32 ce_device;
+ u32 ce_hw_instance;
+ unsigned int signature;
+
+ enum qcrypto_bus_state bw_state;
+ bool high_bw_req;
+ struct timer_list bw_reaper_timer;
+ struct work_struct bw_reaper_ws;
+ struct work_struct bw_allocate_ws;
+
+ /* engine execution sequence number */
+ u32 active_seq;
+ /* last QCRYPTO_HIGH_BANDWIDTH_TIMEOUT active_seq */
+ u32 last_active_seq;
+
+ bool check_flag;
+ /*Added to support multi-requests*/
+ unsigned int max_req;
+ struct qcrypto_req_control *preq_pool;
+ atomic_t req_count;
+};
+
+struct crypto_priv {
+ /* CE features supported by target device*/
+ struct msm_ce_hw_support platform_support;
+
+ /* CE features/algorithms supported by HW engine*/
+ struct ce_hw_support ce_support;
+
+ /* the lock protects crypto queue and req */
+ spinlock_t lock;
+
+ /* list of registered algorithms */
+ struct list_head alg_list;
+
+ /* current active request */
+ struct crypto_async_request *req;
+
+ struct work_struct unlock_ce_ws;
+ struct list_head engine_list; /* list of qcrypto engines */
+ int32_t total_units; /* total units of engines */
+ struct mutex engine_lock;
+
+ struct crypto_engine *next_engine; /* next assign engine */
+ struct crypto_queue req_queue; /*
+ * request queue for those requests
+ * that waiting for an available
+ * engine.
+ */
+ struct llist_head ordered_resp_list; /* Queue to maintain
+ * responses in sequence.
+ */
+ atomic_t resp_cnt;
+ struct workqueue_struct *resp_wq;
+ struct work_struct resp_work; /*
+ * Workq to send responses
+ * in sequence.
+ */
+ enum resp_workq_sts sched_resp_workq_status;
+ enum req_processing_sts ce_req_proc_sts;
+ int cpu_getting_irqs_frm_first_ce;
+};
+static struct crypto_priv qcrypto_dev;
+static struct crypto_engine *_qcrypto_static_assign_engine(
+ struct crypto_priv *cp);
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp);
+
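+/*
+ * Per-engine request-control pool. A free slot is claimed lock-free with
+ * xchg() on the in_use flag; req_count tracks how many requests are
+ * currently outstanding on the engine.
+ */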
+static struct qcrypto_req_control *qcrypto_alloc_req_control(
+ struct crypto_engine *pce)
+{
+ int i;
+ struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+
+ for (i = 0; i < pce->max_req; i++) {
+ if (xchg(&pqcrypto_req_control->in_use, true) == false) {
+ atomic_inc(&pce->req_count);
+ return pqcrypto_req_control;
+ }
+ pqcrypto_req_control++;
+ }
+ return NULL;
+}
+
+static void qcrypto_free_req_control(struct crypto_engine *pce,
+ struct qcrypto_req_control *preq)
+{
+ if (xchg(&preq->in_use, false) == false) {
+ pr_warn("request info %p free already\n", preq);
+ } else {
+ preq->req = NULL;
+ preq->arsp = NULL;
+ atomic_dec(&pce->req_count);
+ }
+}
+
+static struct qcrypto_req_control *find_req_control_for_areq(
+ struct crypto_engine *pce,
+ struct crypto_async_request *areq)
+{
+ int i;
+ struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+
+ for (i = 0; i < pce->max_req; i++) {
+ if (pqcrypto_req_control->req == areq)
+ return pqcrypto_req_control;
+ pqcrypto_req_control++;
+ }
+ return NULL;
+}
+
+static void qcrypto_init_req_control(struct crypto_engine *pce,
+ struct qcrypto_req_control *pqcrypto_req_control)
+{
+ int i;
+
+ pce->preq_pool = pqcrypto_req_control;
+ atomic_set(&pce->req_count, 0);
+ for (i = 0; i < pce->max_req; i++) {
+ pqcrypto_req_control->index = i;
+ pqcrypto_req_control->in_use = false;
+ pqcrypto_req_control->pce = pce;
+ pqcrypto_req_control++;
+ }
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp,
+ unsigned int device)
+{
+ struct crypto_engine *entry = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(entry, &cp->engine_list, elist) {
+ if (entry->ce_device == device)
+ break;
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ if (((entry != NULL) && (entry->ce_device != device)) ||
+ (entry == NULL)) {
+ pr_err("Device node for CE device %d NOT FOUND!!\n",
+ device);
+ return NULL;
+ }
+
+ return entry;
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device_hw
+ (struct crypto_priv *cp,
+ u32 device,
+ u32 hw_instance)
+{
+ struct crypto_engine *entry = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(entry, &cp->engine_list, elist) {
+ if ((entry->ce_device == device) &&
+ (entry->ce_hw_instance == hw_instance))
+ break;
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ if (((entry != NULL) &&
+ ((entry->ce_device != device)
+ || (entry->ce_hw_instance != hw_instance)))
+ || (entry == NULL)) {
+ pr_err("Device node for CE device %d NOT FOUND!!\n",
+ device);
+ return NULL;
+ }
+ return entry;
+}
+
+int qcrypto_get_num_engines(void)
+{
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *entry = NULL;
+ int count = 0;
+
+ list_for_each_entry(entry, &cp->engine_list, elist) {
+ count++;
+ }
+ return count;
+}
+EXPORT_SYMBOL(qcrypto_get_num_engines);
+
+void qcrypto_get_engine_list(size_t num_engines,
+ struct crypto_engine_entry *arr)
+{
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *entry = NULL;
+ size_t arr_index = 0;
+
+ list_for_each_entry(entry, &cp->engine_list, elist) {
+ arr[arr_index].ce_device = entry->ce_device;
+ arr[arr_index].hw_instance = entry->ce_hw_instance;
+ arr_index++;
+ if (arr_index >= num_engines)
+ break;
+ }
+}
+EXPORT_SYMBOL(qcrypto_get_engine_list);
+
+enum qcrypto_alg_type {
+ QCRYPTO_ALG_CIPHER = 0,
+ QCRYPTO_ALG_SHA = 1,
+ QCRYPTO_ALG_LAST
+};
+
+struct qcrypto_alg {
+ struct list_head entry;
+ struct crypto_alg cipher_alg;
+ struct ahash_alg sha_alg;
+ enum qcrypto_alg_type alg_type;
+ struct crypto_priv *cp;
+};
+
+#define QCRYPTO_MAX_KEY_SIZE 64
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCRYPTO_MAX_IV_LENGTH 16
+
+#define QCRYPTO_CCM4309_NONCE_LEN 3
+
+struct qcrypto_cipher_ctx {
+ struct list_head rsp_queue; /* response queue */
+ struct crypto_engine *pengine; /* fixed engine assigned to this tfm */
+ struct crypto_priv *cp;
+ unsigned int flags;
+
+ enum qce_hash_alg_enum auth_alg; /* for aead */
+ u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
+ u8 iv[QCRYPTO_MAX_IV_LENGTH];
+
+ u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
+ unsigned int enc_key_len;
+
+ unsigned int authsize;
+ unsigned int auth_key_len;
+
+ u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
+
+ union {
+ struct crypto_ablkcipher *cipher_fb;
+ struct crypto_aead *aead_fb;
+ } fallback;
+};
+
+struct qcrypto_resp_ctx {
+ struct list_head list;
+ struct llist_node llist;
+ struct crypto_async_request *async_req; /* async req */
+ int res; /* execution result */
+};
+
+struct qcrypto_cipher_req_ctx {
+ struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+ struct crypto_engine *pengine; /* engine assigned to this request */
+ u8 *iv;
+ u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
+ unsigned int ivsize;
+ int aead;
+ struct scatterlist asg; /* Formatted associated data sg */
+ unsigned char *assoc; /* Pointer to formatted assoc data */
+ unsigned int assoclen; /* Save Unformatted assoc data length */
+ struct scatterlist *assoc_sg; /* Save Unformatted assoc data sg */
+ enum qce_cipher_alg_enum alg;
+ enum qce_cipher_dir_enum dir;
+ enum qce_cipher_mode_enum mode;
+
+ struct scatterlist *orig_src; /* Original src sg ptr */
+ struct scatterlist *orig_dst; /* Original dst sg ptr */
+ struct scatterlist dsg; /* Dest Data sg */
+ struct scatterlist ssg; /* Source Data sg */
+ unsigned char *data; /* Incoming data pointer*/
+
+};
+
+#define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE
+#define SHA_MAX_STATE_SIZE (SHA256_DIGEST_SIZE / sizeof(u32))
+#define SHA_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE
+
+#define MSM_QCRYPTO_REQ_QUEUE_LENGTH 768
+#define COMPLETION_CB_BACKLOG_LENGTH 768
+
+static uint8_t _std_init_vector_sha1_uint8[] = {
+ 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+ 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+ 0xC3, 0xD2, 0xE1, 0xF0
+};
+
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+ 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+ 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+ 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+ 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+struct qcrypto_sha_ctx {
+ struct list_head rsp_queue; /* response queue */
+ struct crypto_engine *pengine; /* fixed engine assigned to this tfm */
+ struct crypto_priv *cp;
+ unsigned int flags;
+ enum qce_hash_alg_enum alg;
+ uint32_t diglen;
+ uint32_t authkey_in_len;
+ uint8_t authkey[SHA_MAX_BLOCK_SIZE];
+ struct ahash_request *ahash_req;
+ struct completion ahash_req_complete;
+};
+
+struct qcrypto_sha_req_ctx {
+ struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+ struct crypto_engine *pengine; /* engine assigned to this request */
+
+ struct scatterlist *src;
+ uint32_t nbytes;
+
+ struct scatterlist *orig_src; /* Original src sg ptr */
+ struct scatterlist dsg; /* Data sg */
+ unsigned char *data; /* Incoming data pointer*/
+ unsigned char *data2; /* Updated data pointer*/
+
+ uint32_t byte_count[4];
+ u64 count;
+ uint8_t first_blk;
+ uint8_t last_blk;
+ uint8_t trailing_buf[SHA_MAX_BLOCK_SIZE];
+ uint32_t trailing_buf_len;
+
+ /* dma buffer, Internal use */
+ uint8_t staging_dmabuf
+ [SHA_MAX_BLOCK_SIZE+SHA_MAX_DIGEST_SIZE+MAX_ALIGN_SIZE];
+
+ uint8_t digest[SHA_MAX_DIGEST_SIZE];
+ struct scatterlist sg[2];
+};
+
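+/*
+ * Convert between a byte stream and an array of 32-bit words using
+ * big-endian byte order. A trailing remainder of 1-3 bytes is packed into
+ * (or unpacked from) the high-order bytes of the final word.
+ */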
+static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned n;
+
+ n = len / sizeof(uint32_t);
+ for (; n > 0; n--) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00) |
+ (*(b+3) & 0xff);
+ b += sizeof(uint32_t);
+ iv++;
+ }
+
+ n = len % sizeof(uint32_t);
+ if (n == 3) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00);
+ } else if (n == 2) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000);
+ } else if (n == 1) {
+ *iv = ((*b << 24) & 0xff000000);
+ }
+}
+
+static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned n = len / sizeof(uint32_t);
+
+ for (; n > 0; n--) {
+ *b++ = (unsigned char) ((*iv >> 24) & 0xff);
+ *b++ = (unsigned char) ((*iv >> 16) & 0xff);
+ *b++ = (unsigned char) ((*iv >> 8) & 0xff);
+ *b++ = (unsigned char) (*iv & 0xff);
+ iv++;
+ }
+ n = len % sizeof(uint32_t);
+ if (n == 3) {
+ *b++ = (unsigned char) ((*iv >> 24) & 0xff);
+ *b++ = (unsigned char) ((*iv >> 16) & 0xff);
+ *b = (unsigned char) ((*iv >> 8) & 0xff);
+ } else if (n == 2) {
+ *b++ = (unsigned char) ((*iv >> 24) & 0xff);
+ *b = (unsigned char) ((*iv >> 16) & 0xff);
+ } else if (n == 1) {
+ *b = (unsigned char) ((*iv >> 24) & 0xff);
+ }
+}
+
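+/*
+ * Switch the engine between high and low bandwidth: for high bandwidth the
+ * CE clocks are enabled before voting on the bus, for low bandwidth the bus
+ * vote is dropped before gating the clocks; on failure the step already
+ * taken is rolled back.
+ */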
+static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
+ bool high_bw_req)
+{
+ int ret = 0;
+
+ if (high_bw_req) {
+ ret = qce_enable_clk(pengine->qce);
+ if (ret) {
+			pr_err("%s Unable to enable clk\n", __func__);
+ goto clk_err;
+ }
+ ret = msm_bus_scale_client_update_request(
+ pengine->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ qce_disable_clk(pengine->qce);
+ goto clk_err;
+ }
+ } else {
+ ret = msm_bus_scale_client_update_request(
+ pengine->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
+ __func__);
+ goto clk_err;
+ }
+ ret = qce_disable_clk(pengine->qce);
+ if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ pengine->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ goto clk_err;
+ }
+ }
+clk_err:
+ return;
+
+}
+
+static void qcrypto_bw_reaper_timer_callback(unsigned long data)
+{
+ struct crypto_engine *pengine = (struct crypto_engine *)data;
+
+ schedule_work(&pengine->bw_reaper_ws);
+
+ return;
+}
+
+static void qcrypto_bw_set_timeout(struct crypto_engine *pengine)
+{
+ pengine->bw_reaper_timer.data =
+ (unsigned long)(pengine);
+ pengine->bw_reaper_timer.expires = jiffies +
+ msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
+ mod_timer(&(pengine->bw_reaper_timer),
+ pengine->bw_reaper_timer.expires);
+}
+
+static void qcrypto_ce_bw_allocate_req(struct crypto_engine *pengine)
+{
+ schedule_work(&pengine->bw_allocate_ws);
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+ struct crypto_engine *pengine);
+
+static void qcrypto_bw_allocate_work(struct work_struct *work)
+{
+ struct crypto_engine *pengine = container_of(work,
+ struct crypto_engine, bw_allocate_ws);
+ unsigned long flags;
+ struct crypto_priv *cp = pengine->pcp;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ qcrypto_ce_set_bus(pengine, true);
+ qcrypto_bw_set_timeout(pengine);
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_HAS_BANDWIDTH;
+ pengine->high_bw_req = false;
+ pengine->active_seq++;
+ pengine->check_flag = true;
+ spin_unlock_irqrestore(&cp->lock, flags);
+ _start_qcrypto_process(cp, pengine);
+};
+
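+/*
+ * Bandwidth reaper: runs QCRYPTO_HIGH_BANDWIDTH_TIMEOUT ms after bandwidth
+ * was granted. If active_seq has not advanced since the last run and no
+ * requests are outstanding, the bus vote and clocks are released; a request
+ * arriving while the vote is being dropped re-allocates bandwidth and
+ * restarts processing.
+ */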
+static void qcrypto_bw_reaper_work(struct work_struct *work)
+{
+ struct crypto_engine *pengine = container_of(work,
+ struct crypto_engine, bw_reaper_ws);
+ struct crypto_priv *cp = pengine->pcp;
+ unsigned long flags;
+ u32 active_seq;
+ bool restart = false;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ active_seq = pengine->active_seq;
+ if (pengine->bw_state == BUS_HAS_BANDWIDTH &&
+ (active_seq == pengine->last_active_seq)) {
+
+ /* check if engine is stuck */
+ if (atomic_read(&pengine->req_count) > 0) {
+ if (pengine->check_flag)
+ dev_warn(&pengine->pdev->dev,
+ "The engine appears to be stuck seq %d.\n",
+ active_seq);
+ pengine->check_flag = false;
+ goto ret;
+ }
+ if (cp->platform_support.bus_scale_table == NULL)
+ goto ret;
+ pengine->bw_state = BUS_BANDWIDTH_RELEASING;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ qcrypto_ce_set_bus(pengine, false);
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ if (pengine->high_bw_req == true) {
+ /* we got request while we are disabling clock */
+ pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ qcrypto_ce_set_bus(pengine, true);
+
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_HAS_BANDWIDTH;
+ pengine->high_bw_req = false;
+ restart = true;
+ } else
+ pengine->bw_state = BUS_NO_BANDWIDTH;
+ }
+ret:
+ pengine->last_active_seq = active_seq;
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (restart)
+ _start_qcrypto_process(cp, pengine);
+ if (pengine->bw_state != BUS_NO_BANDWIDTH)
+ qcrypto_bw_set_timeout(pengine);
+}
+
+static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
+{
+ int i;
+
+ for (i = 0; nbytes > 0 && sg != NULL; i++, sg = scatterwalk_sg_next(sg))
+ nbytes -= sg->length;
+
+ return i;
+}
+
+static size_t qcrypto_sg_copy_from_buffer(struct scatterlist *sgl,
+ unsigned int nents, void *buf, size_t buflen)
+{
+ int i;
+ size_t offset, len;
+
+ for (i = 0, offset = 0; i < nents; ++i) {
+ len = sg_copy_from_buffer(sgl, 1, buf, buflen);
+ buf += len;
+ buflen -= len;
+ offset += len;
+ sgl = scatterwalk_sg_next(sgl);
+ }
+
+ return offset;
+}
+
+static size_t qcrypto_sg_copy_to_buffer(struct scatterlist *sgl,
+ unsigned int nents, void *buf, size_t buflen)
+{
+ int i;
+ size_t offset, len;
+
+ for (i = 0, offset = 0; i < nents; ++i) {
+ len = sg_copy_to_buffer(sgl, 1, buf, buflen);
+ buf += len;
+ buflen -= len;
+ offset += len;
+ sgl = scatterwalk_sg_next(sgl);
+ }
+
+ return offset;
+}
+static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
+ struct ahash_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg) {
+		pr_err("qcrypto Memory allocation of q_alg FAIL\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ q_alg->alg_type = QCRYPTO_ALG_SHA;
+ q_alg->sha_alg = *template;
+ q_alg->cp = cp;
+
+ return q_alg;
+};
+
+static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
+ struct crypto_alg *template)
+{
+ struct qcrypto_alg *q_alg;
+
+ q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+ if (!q_alg) {
+	if (!q_alg) {
+		pr_err("qcrypto Memory allocation of q_alg FAIL\n");
+ }
+
+ q_alg->alg_type = QCRYPTO_ALG_CIPHER;
+ q_alg->cipher_alg = *template;
+ q_alg->cp = cp;
+
+ return q_alg;
+};
+
+static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct qcrypto_alg *q_alg;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+
+ q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
+ ctx->flags = 0;
+
+ /* update context with ptr to cp */
+ ctx->cp = q_alg->cp;
+
+ /* random first IV */
+ get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
+ if (_qcrypto_init_assign) {
+ ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
+ if (ctx->pengine == NULL)
+ return -ENODEV;
+ } else
+ ctx->pengine = NULL;
+ INIT_LIST_HEAD(&ctx->rsp_queue);
+ ctx->auth_alg = QCE_HASH_LAST;
+ return 0;
+};
+
+static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+ struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
+ struct ahash_alg, halg);
+ struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
+ sha_alg);
+
+ crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
+ /* update context with ptr to cp */
+ sha_ctx->cp = q_alg->cp;
+ sha_ctx->flags = 0;
+ sha_ctx->ahash_req = NULL;
+ if (_qcrypto_init_assign) {
+ sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
+ if (sha_ctx->pengine == NULL)
+ return -ENODEV;
+ } else
+ sha_ctx->pengine = NULL;
+ INIT_LIST_HEAD(&sha_ctx->rsp_queue);
+ return 0;
+};
+
+static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+
+ if (!list_empty(&sha_ctx->rsp_queue))
+		pr_err("_qcrypto_ahash_cra_exit: requests still outstanding\n");
+ if (sha_ctx->ahash_req != NULL) {
+ ahash_request_free(sha_ctx->ahash_req);
+ sha_ctx->ahash_req = NULL;
+ }
+};
+
+
+static void _crypto_sha_hmac_ahash_req_complete(
+ struct crypto_async_request *req, int err);
+
+static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+ int ret = 0;
+
+ ret = _qcrypto_ahash_cra_init(tfm);
+ if (ret)
+ return ret;
+ sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
+
+ if (sha_ctx->ahash_req == NULL) {
+ _qcrypto_ahash_cra_exit(tfm);
+ return -ENOMEM;
+ }
+
+ init_completion(&sha_ctx->ahash_req_complete);
+ ahash_request_set_callback(sha_ctx->ahash_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ _crypto_sha_hmac_ahash_req_complete,
+ &sha_ctx->ahash_req_complete);
+ crypto_ahash_clear_flags(ahash, ~0);
+
+ return 0;
+};
+
+static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+ return _qcrypto_cipher_cra_init(tfm);
+};
+
+static int _qcrypto_cra_aes_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+ struct crypto_priv *cp = &qcrypto_dev;
+
+ if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+ ctx->fallback.cipher_fb = NULL;
+ return _qcrypto_cra_ablkcipher_init(tfm);
+ }
+ ctx->fallback.cipher_fb = crypto_alloc_ablkcipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback.cipher_fb)) {
+ pr_err("Error allocating fallback algo %s\n", name);
+ ret = PTR_ERR(ctx->fallback.cipher_fb);
+ ctx->fallback.cipher_fb = NULL;
+ return ret;
+ }
+ return _qcrypto_cra_ablkcipher_init(tfm);
+};
+
+static int _qcrypto_cra_aead_sha1_init(struct crypto_tfm *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+ rc = _qcrypto_cipher_cra_init(tfm);
+ ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+ return rc;
+}
+
+static int _qcrypto_cra_aead_sha256_init(struct crypto_tfm *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+ rc = _qcrypto_cipher_cra_init(tfm);
+ ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+ return rc;
+}
+
+static int _qcrypto_cra_aead_ccm_init(struct crypto_tfm *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+ rc = _qcrypto_cipher_cra_init(tfm);
+ ctx->auth_alg = QCE_HASH_AES_CMAC;
+ return rc;
+}
+
+static int _qcrypto_cra_aead_rfc4309_ccm_init(struct crypto_tfm *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+ rc = _qcrypto_cipher_cra_init(tfm);
+ ctx->auth_alg = QCE_HASH_AES_CMAC;
+ return rc;
+}
+
+static void _qcrypto_cra_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto_cra_ablkcipher_exit: requests still outstanding\n");
+};
+
+static void _qcrypto_cra_aes_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ _qcrypto_cra_ablkcipher_exit(tfm);
+ if (ctx->fallback.cipher_fb)
+ crypto_free_ablkcipher(ctx->fallback.cipher_fb);
+ ctx->fallback.cipher_fb = NULL;
+}
+
+static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto_cra_aead_exit: requests still outstanding\n");
+};
+
+static int _disp_stats(int id)
+{
+ struct crypto_stat *pstat;
+ int len = 0;
+ unsigned long flags;
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *pe;
+
+ pstat = &_qcrypto_stat;
+ len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+ "\nQualcomm crypto accelerator %d Statistics\n",
+ id + 1);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER AES encryption : %llu\n",
+ pstat->ablk_cipher_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER AES decryption : %llu\n",
+ pstat->ablk_cipher_aes_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER DES encryption : %llu\n",
+ pstat->ablk_cipher_des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER DES decryption : %llu\n",
+ pstat->ablk_cipher_des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER 3DES encryption : %llu\n",
+ pstat->ablk_cipher_3des_enc);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER 3DES decryption : %llu\n",
+ pstat->ablk_cipher_3des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER operation success : %llu\n",
+ pstat->ablk_cipher_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER operation fail : %llu\n",
+ pstat->ablk_cipher_op_fail);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ "\n");
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-AES encryption : %llu\n",
+ pstat->aead_sha1_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-AES decryption : %llu\n",
+ pstat->aead_sha1_aes_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-DES encryption : %llu\n",
+ pstat->aead_sha1_des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-DES decryption : %llu\n",
+ pstat->aead_sha1_des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-3DES encryption : %llu\n",
+ pstat->aead_sha1_3des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-3DES decryption : %llu\n",
+ pstat->aead_sha1_3des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-AES encryption : %llu\n",
+ pstat->aead_sha256_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-AES decryption : %llu\n",
+ pstat->aead_sha256_aes_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-DES encryption : %llu\n",
+ pstat->aead_sha256_des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-DES decryption : %llu\n",
+ pstat->aead_sha256_des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-3DES encryption : %llu\n",
+ pstat->aead_sha256_3des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-3DES decryption : %llu\n",
+ pstat->aead_sha256_3des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD CCM-AES encryption : %llu\n",
+ pstat->aead_ccm_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD CCM-AES decryption : %llu\n",
+ pstat->aead_ccm_aes_dec);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD RFC4309-CCM-AES encryption : %llu\n",
+ pstat->aead_rfc4309_ccm_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD RFC4309-CCM-AES decryption : %llu\n",
+ pstat->aead_rfc4309_ccm_aes_dec);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD operation success : %llu\n",
+ pstat->aead_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD operation fail : %llu\n",
+ pstat->aead_op_fail);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD bad message : %llu\n",
+ pstat->aead_bad_msg);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ "\n");
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH SHA1 digest : %llu\n",
+ pstat->sha1_digest);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH SHA256 digest : %llu\n",
+ pstat->sha256_digest);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH SHA1 HMAC digest : %llu\n",
+ pstat->sha1_hmac_digest);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH SHA256 HMAC digest : %llu\n",
+ pstat->sha256_hmac_digest);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH operation success : %llu\n",
+ pstat->ahash_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH operation fail : %llu\n",
+ pstat->ahash_op_fail);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ "\n");
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(pe, &cp->engine_list, elist) {
+ len += scnprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %4d Req : %llu\n",
+ pe->unit,
+ pe->total_req
+ );
+ len += scnprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %4d Req Error : %llu\n",
+ pe->unit,
+ pe->err_req
+ );
+ qce_get_driver_stats(pe->qce);
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return len;
+}
+
+static void _qcrypto_remove_engine(struct crypto_engine *pengine)
+{
+ struct crypto_priv *cp;
+ struct qcrypto_alg *q_alg;
+ struct qcrypto_alg *n;
+ unsigned long flags;
+
+ cp = pengine->pcp;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ list_del(&pengine->elist);
+ if (cp->next_engine == pengine)
+ cp->next_engine = NULL;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ cp->total_units--;
+
+ cancel_work_sync(&pengine->bw_reaper_ws);
+ cancel_work_sync(&pengine->bw_allocate_ws);
+ del_timer_sync(&pengine->bw_reaper_timer);
+
+ if (pengine->bus_scale_handle != 0)
+ msm_bus_scale_unregister_client(pengine->bus_scale_handle);
+ pengine->bus_scale_handle = 0;
+
+ kzfree(pengine->preq_pool);
+
+ if (cp->total_units)
+ return;
+
+ list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
+ if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
+ crypto_unregister_alg(&q_alg->cipher_alg);
+ if (q_alg->alg_type == QCRYPTO_ALG_SHA)
+ crypto_unregister_ahash(&q_alg->sha_alg);
+ list_del(&q_alg->entry);
+ kzfree(q_alg);
+ }
+}
+
+static int _qcrypto_remove(struct platform_device *pdev)
+{
+ struct crypto_engine *pengine;
+ struct crypto_priv *cp;
+
+ pengine = platform_get_drvdata(pdev);
+
+ if (!pengine)
+ return 0;
+ cp = pengine->pcp;
+ mutex_lock(&cp->engine_lock);
+ _qcrypto_remove_engine(pengine);
+ mutex_unlock(&cp->engine_lock);
+ if (pengine->qce)
+ qce_close(pengine->qce);
+ kzfree(pengine);
+ return 0;
+}
+
+static int _qcrypto_check_aes_keylen(struct crypto_ablkcipher *cipher,
+ struct crypto_priv *cp, unsigned int len)
+{
+
+ switch (len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_256:
+ break;
+ case AES_KEYSIZE_192:
+ if (cp->ce_support.aes_key_192)
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ };
+
+ return 0;
+}
+
+static int _qcrypto_setkey_aes_192_fallback(struct crypto_ablkcipher *cipher,
+ const u8 *key)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ctx->enc_key_len = AES_KEYSIZE_192;
+ ctx->fallback.cipher_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->fallback.cipher_fb->base.crt_flags |=
+ (cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ ret = crypto_ablkcipher_setkey(ctx->fallback.cipher_fb, key,
+ AES_KEYSIZE_192);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |=
+ (cipher->base.crt_flags & CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+ return 0;
+
+ if ((len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192)
+ && ctx->fallback.cipher_fb)
+ return _qcrypto_setkey_aes_192_fallback(cipher, key);
+
+ if (_qcrypto_check_aes_keylen(cipher, cp, len)) {
+ return -EINVAL;
+ } else {
+ ctx->enc_key_len = len;
+ if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+ if (key != NULL) {
+ memcpy(ctx->enc_key, key, len);
+ } else {
+				pr_err("%s Invalid key pointer\n", __func__);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+};
+
+static int _qcrypto_setkey_aes_xts(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+ return 0;
+ if (_qcrypto_check_aes_keylen(cipher, cp, len/2)) {
+ return -EINVAL;
+ } else {
+ ctx->enc_key_len = len;
+ if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+ if (key != NULL) {
+ memcpy(ctx->enc_key, key, len);
+ } else {
+				pr_err("%s Invalid key pointer\n", __func__);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+};
+
+static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ if (!key) {
+		pr_err("%s Invalid key pointer\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+
+ if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+ pr_err("%s HW KEY usage not supported for DES algorithm\n",
+ __func__);
+ return 0;
+ };
+
+ if (len != DES_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ };
+
+ if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ ctx->enc_key_len = len;
+ if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))
+ memcpy(ctx->enc_key, key, len);
+
+ return 0;
+};
+
+static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+ pr_err("%s HW KEY usage not supported for 3DES algorithm\n",
+ __func__);
+ return 0;
+ };
+ if (len != DES3_EDE_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ };
+ ctx->enc_key_len = len;
+ if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+ if (key != NULL) {
+ memcpy(ctx->enc_key, key, len);
+ } else {
+			pr_err("%s Invalid key pointer\n", __func__);
+ return -EINVAL;
+ }
+ }
+ return 0;
+};
+
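+/*
+ * Pick the engine with the fewest outstanding requests, skipping the first
+ * engine in engine_list. Called from the response workqueue to restart
+ * request processing once the completion backlog has drained.
+ */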
+static struct crypto_engine *eng_sel_avoid_first(struct crypto_priv *cp)
+{
+ /*
+ * This function need not be spinlock protected when called from
+ * the seq_response workq as it will not have any contentions when all
+ * request processing is stopped.
+ */
+ struct crypto_engine *p;
+ struct crypto_engine *q = NULL;
+ int max_user = QCRYPTO_BIG_NUMBER;
+ int use_cnt;
+
+ if (unlikely(list_empty(&cp->engine_list))) {
+ pr_err("%s: no valid ce to schedule\n", __func__);
+ return NULL;
+ }
+
+ p = list_first_entry(&cp->engine_list, struct crypto_engine,
+ elist);
+ list_for_each_entry_continue(p, &cp->engine_list, elist) {
+ use_cnt = atomic_read(&p->req_count);
+ if ((use_cnt < p->max_req) && (use_cnt < max_user)) {
+ q = p;
+ max_user = use_cnt;
+ }
+ }
+ return q;
+}
+
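+/*
+ * Completion workqueue handler. llist_del_all() hands back the lock-free
+ * list in LIFO order, so it is first reversed to restore submission order
+ * before the completion callbacks are invoked. When the backlog falls to
+ * half of COMPLETION_CB_BACKLOG_LENGTH while processing is stopped, an
+ * engine is kicked to resume issuing requests.
+ */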
+static void seq_response(struct work_struct *work)
+{
+ struct crypto_priv *cp = container_of(work, struct crypto_priv,
+ resp_work);
+ struct llist_node *list;
+ struct llist_node *rev = NULL;
+
+again:
+ list = llist_del_all(&cp->ordered_resp_list);
+
+ if (!list)
+ goto end;
+
+ while (list) {
+ struct llist_node *t = list;
+ list = llist_next(list);
+
+ t->next = rev;
+ rev = t;
+ }
+
+ while (rev) {
+ struct qcrypto_resp_ctx *arsp;
+ struct crypto_async_request *areq;
+ struct crypto_engine *pengine;
+
+ arsp = container_of(rev, struct qcrypto_resp_ctx, llist);
+ rev = llist_next(rev);
+
+ areq = arsp->async_req;
+ local_bh_disable();
+ areq->complete(areq, arsp->res);
+ local_bh_enable();
+ atomic_dec(&cp->resp_cnt);
+ if (ACCESS_ONCE(cp->ce_req_proc_sts) == STOPPED &&
+ atomic_read(&cp->resp_cnt) <=
+ (COMPLETION_CB_BACKLOG_LENGTH / 2)) {
+ pengine = eng_sel_avoid_first(cp);
+ if (pengine)
+ _start_qcrypto_process(cp, pengine);
+ }
+ }
+end:
+ if (cmpxchg(&cp->sched_resp_workq_status, SCHEDULE_AGAIN,
+ IS_SCHEDULED) == SCHEDULE_AGAIN)
+ goto again;
+ else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+ NOT_SCHEDULED) == SCHEDULE_AGAIN)
+ goto end;
+}
+
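+/*
+ * Move completed responses from the per-tfm rsp_queue to ordered_resp_list,
+ * stopping at the first entry still marked -EINPROGRESS so completions are
+ * delivered in submission order, then schedule the response workqueue. The
+ * cmpxchg() on sched_resp_workq_status prevents the workqueue from being
+ * queued twice while still catching responses added concurrently.
+ */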
+static void _qcrypto_tfm_complete(struct crypto_priv *cp, u32 type,
+ void *tfm_ctx)
+{
+ unsigned long flags;
+ struct qcrypto_resp_ctx *arsp;
+ struct list_head *plist;
+
+ switch (type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ plist = &((struct qcrypto_sha_ctx *) tfm_ctx)->rsp_queue;
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_AEAD:
+ default:
+ plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue;
+ break;
+ }
+
+ spin_lock_irqsave(&cp->lock, flags);
+ while (!list_empty(plist)) {
+ arsp = list_first_entry(plist,
+ struct qcrypto_resp_ctx, list);
+ if (arsp->res == -EINPROGRESS)
+ break;
+ else {
+ list_del(&arsp->list);
+ llist_add(&arsp->llist, &cp->ordered_resp_list);
+ }
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+retry:
+ if (!llist_empty(&cp->ordered_resp_list)) {
+ if (cmpxchg(&cp->sched_resp_workq_status, NOT_SCHEDULED,
+ IS_SCHEDULED) == NOT_SCHEDULED)
+ queue_work_on(cp->cpu_getting_irqs_frm_first_ce,
+ cp->resp_wq, &cp->resp_work);
+ else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+ SCHEDULE_AGAIN) == NOT_SCHEDULED)
+ goto retry;
+ }
+}
+
+static void req_done(struct qcrypto_req_control *pqcrypto_req_control)
+{
+ struct crypto_engine *pengine;
+ struct crypto_async_request *areq;
+ struct crypto_engine *pe;
+ struct crypto_priv *cp;
+ unsigned long flags;
+ struct qcrypto_resp_ctx *arsp;
+ u32 type = 0;
+ void *tfm_ctx = NULL;
+
+ pengine = pqcrypto_req_control->pce;
+ cp = pengine->pcp;
+ spin_lock_irqsave(&cp->lock, flags);
+ areq = pqcrypto_req_control->req;
+ arsp = pqcrypto_req_control->arsp;
+ qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+ if (areq) {
+ type = crypto_tfm_alg_type(areq->tfm);
+ tfm_ctx = crypto_tfm_ctx(areq->tfm);
+ }
+ pe = list_first_entry(&cp->engine_list, struct crypto_engine, elist);
+ if (pe == pengine)
+ if (cp->cpu_getting_irqs_frm_first_ce != smp_processor_id())
+ cp->cpu_getting_irqs_frm_first_ce = smp_processor_id();
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (atomic_read(&cp->resp_cnt) <= COMPLETION_CB_BACKLOG_LENGTH) {
+ cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS);
+ _start_qcrypto_process(cp, pengine);
+ } else
+ cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS, STOPPED);
+ if (areq)
+ _qcrypto_tfm_complete(cp, type, tfm_ctx);
+}
+
+static void _qce_ahash_complete(void *cookie, unsigned char *digest,
+ unsigned char *authdata, int ret)
+{
+ struct ahash_request *areq = (struct ahash_request *) cookie;
+ struct crypto_async_request *async_req;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct crypto_stat *pstat;
+ uint32_t diglen = crypto_ahash_digestsize(ahash);
+ uint32_t *auth32 = (uint32_t *)authdata;
+ struct crypto_engine *pengine;
+ struct qcrypto_req_control *pqcrypto_req_control;
+
+ async_req = &areq->base;
+ pstat = &_qcrypto_stat;
+
+ pengine = rctx->pengine;
+ pqcrypto_req_control = find_req_control_for_areq(pengine,
+ async_req);
+ if (pqcrypto_req_control == NULL) {
+ pr_err("async request not found\n");
+ return;
+ }
+
+#ifdef QCRYPTO_DEBUG
+ dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
+ areq, ret);
+#endif
+ if (digest) {
+ memcpy(rctx->digest, digest, diglen);
+ memcpy(areq->result, digest, diglen);
+ }
+ if (authdata) {
+ rctx->byte_count[0] = auth32[0];
+ rctx->byte_count[1] = auth32[1];
+ rctx->byte_count[2] = auth32[2];
+ rctx->byte_count[3] = auth32[3];
+ }
+ areq->src = rctx->src;
+ areq->nbytes = rctx->nbytes;
+
+ rctx->last_blk = 0;
+ rctx->first_blk = 0;
+
+ if (ret) {
+ pqcrypto_req_control->arsp->res = -ENXIO;
+ pstat->ahash_op_fail++;
+ } else {
+ pqcrypto_req_control->arsp->res = 0;
+ pstat->ahash_op_success++;
+ }
+ if (cp->ce_support.aligned_only) {
+ areq->src = rctx->orig_src;
+ kfree(rctx->data);
+ }
+ req_done(pqcrypto_req_control);
+};
+
+static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
+ unsigned char *iv, int ret)
+{
+ struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
+ struct crypto_async_request *async_req;
+ struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct crypto_engine *pengine;
+ struct qcrypto_req_control *pqcrypto_req_control;
+
+ async_req = &areq->base;
+ pstat = &_qcrypto_stat;
+ rctx = ablkcipher_request_ctx(areq);
+ pengine = rctx->pengine;
+ pqcrypto_req_control = find_req_control_for_areq(pengine,
+ async_req);
+ if (pqcrypto_req_control == NULL) {
+ pr_err("async request not found\n");
+ return;
+ }
+
+#ifdef QCRYPTO_DEBUG
+ dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
+ areq, ret);
+#endif
+ if (iv)
+ memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));
+
+ if (ret) {
+ pqcrypto_req_control->arsp->res = -ENXIO;
+ pstat->ablk_cipher_op_fail++;
+ } else {
+ pqcrypto_req_control->arsp->res = 0;
+ pstat->ablk_cipher_op_success++;
+ }
+
+ if (cp->ce_support.aligned_only) {
+ struct qcrypto_cipher_req_ctx *rctx;
+ uint32_t num_sg = 0;
+ uint32_t bytes = 0;
+
+ rctx = ablkcipher_request_ctx(areq);
+ areq->src = rctx->orig_src;
+ areq->dst = rctx->orig_dst;
+
+ num_sg = qcrypto_count_sg(areq->dst, areq->nbytes);
+ bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg,
+ rctx->data, areq->nbytes);
+ if (bytes != areq->nbytes)
+			pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n", bytes,
+ areq->nbytes);
+ kzfree(rctx->data);
+ }
+ req_done(pqcrypto_req_control);
+};
+
+
+static void _qce_aead_complete(void *cookie, unsigned char *icv,
+ unsigned char *iv, int ret)
+{
+ struct aead_request *areq = (struct aead_request *) cookie;
+ struct crypto_async_request *async_req;
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct crypto_stat *pstat;
+ struct crypto_engine *pengine;
+ struct qcrypto_req_control *pqcrypto_req_control;
+
+ async_req = &areq->base;
+ pstat = &_qcrypto_stat;
+ rctx = aead_request_ctx(areq);
+ pengine = rctx->pengine;
+ pqcrypto_req_control = find_req_control_for_areq(pengine,
+ async_req);
+ if (pqcrypto_req_control == NULL) {
+ pr_err("async request not found\n");
+ return;
+ }
+
+ if (rctx->mode == QCE_MODE_CCM) {
+ if (cp->ce_support.aligned_only) {
+ struct qcrypto_cipher_req_ctx *rctx;
+ uint32_t bytes = 0;
+ uint32_t nbytes = 0;
+ uint32_t num_sg = 0;
+
+ rctx = aead_request_ctx(areq);
+ areq->src = rctx->orig_src;
+ areq->dst = rctx->orig_dst;
+ if (rctx->dir == QCE_ENCRYPT)
+ nbytes = areq->cryptlen +
+ crypto_aead_authsize(aead);
+ else
+ nbytes = areq->cryptlen -
+ crypto_aead_authsize(aead);
+ num_sg = qcrypto_count_sg(areq->dst, nbytes);
+ bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg,
+ ((char *)rctx->data + areq->assoclen),
+ nbytes);
+ if (bytes != nbytes)
+				pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n",
+ bytes, nbytes);
+ kzfree(rctx->data);
+ }
+ kzfree(rctx->assoc);
+ areq->assoc = rctx->assoc_sg;
+ areq->assoclen = rctx->assoclen;
+ } else {
+ uint32_t ivsize = crypto_aead_ivsize(aead);
+
+ /* for aead operations, other than aes(ccm) */
+ if (cp->ce_support.aligned_only) {
+ struct qcrypto_cipher_req_ctx *rctx;
+ uint32_t bytes = 0;
+ uint32_t nbytes = 0;
+ uint32_t num_sg = 0;
+ uint32_t offset = areq->assoclen + ivsize;
+
+ rctx = aead_request_ctx(areq);
+ areq->src = rctx->orig_src;
+ areq->dst = rctx->orig_dst;
+
+ if (rctx->dir == QCE_ENCRYPT)
+ nbytes = areq->cryptlen;
+ else
+ nbytes = areq->cryptlen -
+ crypto_aead_authsize(aead);
+ num_sg = qcrypto_count_sg(areq->dst, nbytes);
+ bytes = qcrypto_sg_copy_from_buffer(
+ areq->dst,
+ num_sg,
+ (char *)rctx->data + offset,
+ nbytes);
+ if (bytes != nbytes)
+				pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n",
+ bytes, nbytes);
+ kzfree(rctx->data);
+ }
+
+ if (ret == 0) {
+ if (rctx->dir == QCE_ENCRYPT) {
+ /* copy the icv to dst */
+ scatterwalk_map_and_copy(icv, areq->dst,
+ areq->cryptlen,
+ ctx->authsize, 1);
+
+ } else {
+ unsigned char tmp[SHA256_DIGESTSIZE] = {0};
+
+ /* compare icv from src */
+ scatterwalk_map_and_copy(tmp,
+ areq->src, areq->cryptlen -
+ ctx->authsize, ctx->authsize, 0);
+ ret = memcmp(icv, tmp, ctx->authsize);
+ if (ret != 0)
+ ret = -EBADMSG;
+
+ }
+ } else {
+ ret = -ENXIO;
+ }
+
+ if (iv)
+ memcpy(ctx->iv, iv, ivsize);
+ }
+
+ if (ret == (-EBADMSG))
+ pstat->aead_bad_msg++;
+ else if (ret)
+ pstat->aead_op_fail++;
+ else
+ pstat->aead_op_success++;
+
+ pqcrypto_req_control->arsp->res = ret;
+ req_done(pqcrypto_req_control);
+}
+
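+/*
+ * CCM B0 block construction per RFC 3610 / NIST SP 800-38C:
+ * aead_ccm_set_msg_len() writes the message length big-endian into the last
+ * csize bytes of the block, and qccrypto_set_aead_ccm_nonce() sets the flags
+ * byte - ((authsize - 2) / 2) in bits 3..5 and the Adata bit when associated
+ * data is present - with the length-field size L taken from iv[0] + 1.
+ */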
+static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
+static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
+{
+ struct aead_request *areq = (struct aead_request *) qreq->areq;
+ unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
+
+	memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
+ /*
+ * Format control info per RFC 3610 and
+ * NIST Special Publication 800-38C
+ */
+ qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
+ if (areq->assoclen)
+ qreq->nonce[0] |= 64;
+
+ if (i > MAX_NONCE)
+ return -EINVAL;
+
+ return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
+}
+
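+/*
+ * Format the CCM associated data: the AAD length is encoded per RFC 3610
+ * (two bytes when alen < 0xFF00, otherwise the 0xFFFE escape followed by a
+ * 32-bit length), the AAD itself is copied in after the encoding, and
+ * assoclen is rounded up to a multiple of 16 bytes.
+ */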
+static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
+ struct scatterlist *sg)
+{
+ unsigned char *adata;
+ uint32_t len;
+ uint32_t bytes = 0;
+ uint32_t num_sg = 0;
+
+ if (alen == 0) {
+ qreq->assoc = NULL;
+ qreq->assoclen = 0;
+ return 0;
+ }
+
+ qreq->assoc = kzalloc((alen + 0x64), GFP_ATOMIC);
+ if (!qreq->assoc) {
+		pr_err("qcrypto Memory allocation of adata FAIL\n");
+ return -ENOMEM;
+ }
+ adata = qreq->assoc;
+ /*
+ * Add control info for associated data
+ * RFC 3610 and NIST Special Publication 800-38C
+ */
+ if (alen < 65280) {
+ *(__be16 *)adata = cpu_to_be16(alen);
+ len = 2;
+ } else {
+ if ((alen >= 65280) && (alen <= 0xffffffff)) {
+ *(__be16 *)adata = cpu_to_be16(0xfffe);
+ *(__be32 *)&adata[2] = cpu_to_be32(alen);
+ len = 6;
+ } else {
+ *(__be16 *)adata = cpu_to_be16(0xffff);
+ *(__be32 *)&adata[6] = cpu_to_be32(alen);
+ len = 10;
+ }
+ }
+ adata += len;
+ qreq->assoclen = ALIGN((alen + len), 16);
+
+ num_sg = qcrypto_count_sg(sg, alen);
+ bytes = qcrypto_sg_copy_to_buffer(sg, num_sg, adata, alen);
+ if (bytes != alen)
+		pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n", bytes, alen);
+
+ return 0;
+}
+
+static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
+ struct qcrypto_req_control *pqcrypto_req_control)
+{
+ struct crypto_async_request *async_req;
+ struct qce_req qreq;
+ int ret;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *cipher_ctx;
+ struct ablkcipher_request *req;
+ struct crypto_ablkcipher *tfm;
+
+ async_req = pqcrypto_req_control->req;
+ req = container_of(async_req, struct ablkcipher_request, base);
+ cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->pengine = pengine;
+ tfm = crypto_ablkcipher_reqtfm(req);
+ if (pengine->pcp->ce_support.aligned_only) {
+ uint32_t bytes = 0;
+ uint32_t num_sg = 0;
+
+ rctx->orig_src = req->src;
+ rctx->orig_dst = req->dst;
+ rctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+
+ if (rctx->data == NULL) {
+			pr_err("Mem Alloc fail rctx->data for 0x%x\n",
+				(req->nbytes + 64));
+ return -ENOMEM;
+ }
+ num_sg = qcrypto_count_sg(req->src, req->nbytes);
+ bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, rctx->data,
+ req->nbytes);
+ if (bytes != req->nbytes)
+			pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n", bytes,
+ req->nbytes);
+ sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
+ sg_mark_end(&rctx->dsg);
+ rctx->iv = req->info;
+
+ req->src = &rctx->dsg;
+ req->dst = &rctx->dsg;
+ }
+ qreq.op = QCE_REQ_ABLK_CIPHER;
+ qreq.qce_cb = _qce_ablk_cipher_complete;
+ qreq.areq = req;
+ qreq.alg = rctx->alg;
+ qreq.dir = rctx->dir;
+ qreq.mode = rctx->mode;
+ qreq.enckey = cipher_ctx->enc_key;
+ qreq.encklen = cipher_ctx->enc_key_len;
+ qreq.iv = req->info;
+ qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+ qreq.cryptlen = req->nbytes;
+ qreq.use_pmem = 0;
+ qreq.flags = cipher_ctx->flags;
+
+ if ((cipher_ctx->enc_key_len == 0) &&
+ (pengine->pcp->platform_support.hw_key_support == 0))
+ ret = -EINVAL;
+ else
+ ret = qce_ablk_cipher_req(pengine->qce, &qreq);
+
+ return ret;
+}
+
+static int _qcrypto_process_ahash(struct crypto_engine *pengine,
+ struct qcrypto_req_control *pqcrypto_req_control)
+{
+ struct crypto_async_request *async_req;
+ struct ahash_request *req;
+ struct qce_sha_req sreq;
+ struct qcrypto_sha_req_ctx *rctx;
+ struct qcrypto_sha_ctx *sha_ctx;
+ int ret = 0;
+
+ async_req = pqcrypto_req_control->req;
+ req = container_of(async_req,
+ struct ahash_request, base);
+ rctx = ahash_request_ctx(req);
+ sha_ctx = crypto_tfm_ctx(async_req->tfm);
+ rctx->pengine = pengine;
+
+ sreq.qce_cb = _qce_ahash_complete;
+ sreq.digest = &rctx->digest[0];
+ sreq.src = req->src;
+ sreq.auth_data[0] = rctx->byte_count[0];
+ sreq.auth_data[1] = rctx->byte_count[1];
+ sreq.auth_data[2] = rctx->byte_count[2];
+ sreq.auth_data[3] = rctx->byte_count[3];
+ sreq.first_blk = rctx->first_blk;
+ sreq.last_blk = rctx->last_blk;
+ sreq.size = req->nbytes;
+ sreq.areq = req;
+ sreq.flags = sha_ctx->flags;
+
+ switch (sha_ctx->alg) {
+ case QCE_HASH_SHA1:
+ sreq.alg = QCE_HASH_SHA1;
+ sreq.authkey = NULL;
+ break;
+ case QCE_HASH_SHA256:
+ sreq.alg = QCE_HASH_SHA256;
+ sreq.authkey = NULL;
+ break;
+ case QCE_HASH_SHA1_HMAC:
+ sreq.alg = QCE_HASH_SHA1_HMAC;
+ sreq.authkey = &sha_ctx->authkey[0];
+ sreq.authklen = SHA_HMAC_KEY_SIZE;
+ break;
+ case QCE_HASH_SHA256_HMAC:
+ sreq.alg = QCE_HASH_SHA256_HMAC;
+ sreq.authkey = &sha_ctx->authkey[0];
+ sreq.authklen = SHA_HMAC_KEY_SIZE;
+ break;
+	default:
+		pr_err("Algorithm %d not supported, exiting\n", sha_ctx->alg);
+		return -EINVAL;
+	}
+	ret = qce_process_sha_req(pengine->qce, &sreq);
+
+ return ret;
+}
+
+static int _qcrypto_process_aead(struct crypto_engine *pengine,
+ struct qcrypto_req_control *pqcrypto_req_control)
+{
+ struct crypto_async_request *async_req;
+ struct qce_req qreq;
+ int ret = 0;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *cipher_ctx;
+ struct aead_request *req;
+ struct crypto_aead *aead;
+
+ async_req = pqcrypto_req_control->req;
+ req = container_of(async_req, struct aead_request, base);
+ aead = crypto_aead_reqtfm(req);
+ rctx = aead_request_ctx(req);
+ rctx->pengine = pengine;
+ cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+ qreq.op = QCE_REQ_AEAD;
+ qreq.qce_cb = _qce_aead_complete;
+
+ qreq.areq = req;
+ qreq.alg = rctx->alg;
+ qreq.dir = rctx->dir;
+ qreq.mode = rctx->mode;
+ qreq.iv = rctx->iv;
+
+ qreq.enckey = cipher_ctx->enc_key;
+ qreq.encklen = cipher_ctx->enc_key_len;
+ qreq.authkey = cipher_ctx->auth_key;
+ qreq.authklen = cipher_ctx->auth_key_len;
+ qreq.authsize = crypto_aead_authsize(aead);
+ qreq.auth_alg = cipher_ctx->auth_alg;
+ if (qreq.mode == QCE_MODE_CCM)
+ qreq.ivsize = AES_BLOCK_SIZE;
+ else
+ qreq.ivsize = crypto_aead_ivsize(aead);
+ qreq.flags = cipher_ctx->flags;
+
+ if (qreq.mode == QCE_MODE_CCM) {
+ if (qreq.dir == QCE_ENCRYPT)
+ qreq.cryptlen = req->cryptlen;
+ else
+ qreq.cryptlen = req->cryptlen -
+ qreq.authsize;
+ /* Get NONCE */
+ ret = qccrypto_set_aead_ccm_nonce(&qreq);
+ if (ret)
+ return ret;
+
+ /* Format Associated data */
+ ret = qcrypto_aead_ccm_format_adata(&qreq,
+ req->assoclen,
+ req->assoc);
+ if (ret)
+ return ret;
+
+ if (pengine->pcp->ce_support.aligned_only) {
+ uint32_t bytes = 0;
+ uint32_t num_sg = 0;
+
+ rctx->orig_src = req->src;
+ rctx->orig_dst = req->dst;
+
+ if ((MAX_ALIGN_SIZE*2 > UINT_MAX - qreq.assoclen) ||
+ ((MAX_ALIGN_SIZE*2 + qreq.assoclen) >
+ UINT_MAX - qreq.authsize) ||
+ ((MAX_ALIGN_SIZE*2 + qreq.assoclen +
+ qreq.authsize) >
+ UINT_MAX - req->cryptlen)) {
+ pr_err("Integer overflow on aead req length.\n");
+ return -EINVAL;
+ }
+
+ rctx->data = kzalloc((req->cryptlen + qreq.assoclen +
+ qreq.authsize + MAX_ALIGN_SIZE*2),
+ GFP_ATOMIC);
+ if (rctx->data == NULL) {
+				pr_err("Mem Alloc fail rctx->data\n");
+ kzfree(qreq.assoc);
+ return -ENOMEM;
+ }
+ if (qreq.assoclen)
+ memcpy((char *)rctx->data, qreq.assoc,
+ qreq.assoclen);
+
+ num_sg = qcrypto_count_sg(req->src, req->cryptlen);
+ bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg,
+					rctx->data + qreq.assoclen, req->cryptlen);
+ if (bytes != req->cryptlen)
+ pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
+ bytes, req->cryptlen);
+ sg_set_buf(&rctx->ssg, rctx->data, req->cryptlen +
+ qreq.assoclen);
+ sg_mark_end(&rctx->ssg);
+
+ if (qreq.dir == QCE_ENCRYPT)
+ sg_set_buf(&rctx->dsg, rctx->data,
+ qreq.assoclen + qreq.cryptlen +
+ ALIGN(qreq.authsize, 64));
+ else
+ sg_set_buf(&rctx->dsg, rctx->data,
+ qreq.assoclen + req->cryptlen +
+ qreq.authsize);
+ sg_mark_end(&rctx->dsg);
+
+ req->src = &rctx->ssg;
+ req->dst = &rctx->dsg;
+ }
+ /*
+ * Save the original associated data
+ * length and sg
+ */
+ rctx->assoc_sg = req->assoc;
+ rctx->assoclen = req->assoclen;
+ rctx->assoc = qreq.assoc;
+ /*
+ * update req with new formatted associated
+ * data info
+ */
+ req->assoc = &rctx->asg;
+ req->assoclen = qreq.assoclen;
+ sg_set_buf(req->assoc, qreq.assoc,
+ req->assoclen);
+ sg_mark_end(req->assoc);
+ } else {
+ /* for aead operations, other than aes(ccm) */
+ if (pengine->pcp->ce_support.aligned_only) {
+ uint32_t bytes = 0;
+ uint32_t num_sg = 0;
+
+ rctx->orig_src = req->src;
+ rctx->orig_dst = req->dst;
+			/*
+			 * The data area should be big enough to
+			 * include the associated data, the cipher data
+			 * stream, the generated MAC, and alignment padding.
+			 */
+ if ((MAX_ALIGN_SIZE * 2 > ULONG_MAX - req->assoclen) ||
+ ((MAX_ALIGN_SIZE * 2 + req->assoclen) >
+ ULONG_MAX - qreq.ivsize) ||
+ ((MAX_ALIGN_SIZE * 2 + req->assoclen
+ + qreq.ivsize)
+ > ULONG_MAX - req->cryptlen)) {
+ pr_err("Integer overflow on aead req length.\n");
+ return -EINVAL;
+ }
+
+ rctx->data = kzalloc(
+ (req->cryptlen +
+ req->assoclen +
+ qreq.ivsize +
+ MAX_ALIGN_SIZE * 2),
+ GFP_ATOMIC);
+ if (rctx->data == NULL) {
+				pr_err("Mem Alloc fail rctx->data\n");
+ return -ENOMEM;
+ }
+
+ /* copy associated data */
+ num_sg = qcrypto_count_sg(req->assoc, req->assoclen);
+ bytes = qcrypto_sg_copy_to_buffer(
+ req->assoc, num_sg,
+ rctx->data, req->assoclen);
+
+ if (bytes != req->assoclen)
+ pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
+ bytes, req->assoclen);
+
+ /* copy iv */
+ memcpy(rctx->data + req->assoclen, qreq.iv,
+ qreq.ivsize);
+
+ /* copy src */
+ num_sg = qcrypto_count_sg(req->src, req->cryptlen);
+ bytes = qcrypto_sg_copy_to_buffer(
+ req->src,
+ num_sg,
+ rctx->data + req->assoclen +
+ qreq.ivsize,
+ req->cryptlen);
+ if (bytes != req->cryptlen)
+ pr_warn("bytes copied=0x%x bytes to copy= 0x%x",
+ bytes, req->cryptlen);
+ sg_set_buf(&rctx->ssg, rctx->data,
+ req->cryptlen + req->assoclen
+ + qreq.ivsize);
+ sg_mark_end(&rctx->ssg);
+
+ sg_set_buf(&rctx->dsg, rctx->data,
+ req->cryptlen + req->assoclen
+ + qreq.ivsize);
+ sg_mark_end(&rctx->dsg);
+ req->src = &rctx->ssg;
+ req->dst = &rctx->dsg;
+ }
+ }
+ ret = qce_aead_req(pengine->qce, &qreq);
+
+ return ret;
+}
+
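+/* Assign engines to tfms in simple round-robin order over engine_list. */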
+static struct crypto_engine *_qcrypto_static_assign_engine(
+ struct crypto_priv *cp)
+{
+ struct crypto_engine *pengine;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (cp->next_engine)
+ pengine = cp->next_engine;
+ else
+ pengine = list_first_entry(&cp->engine_list,
+ struct crypto_engine, elist);
+
+ if (list_is_last(&pengine->elist, &cp->engine_list))
+ cp->next_engine = list_first_entry(
+ &cp->engine_list, struct crypto_engine, elist);
+ else
+ cp->next_engine = list_next_entry(pengine, elist);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return pengine;
+}
+
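+/*
+ * Dequeue the next request (engine queue first, then the shared driver
+ * queue), add a response entry to the tfm's response queue, and dispatch
+ * the request to the cipher, hash, or aead processing routine.
+ */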
+static int _start_qcrypto_process(struct crypto_priv *cp,
+ struct crypto_engine *pengine)
+{
+ struct crypto_async_request *async_req = NULL;
+ struct crypto_async_request *backlog_eng = NULL;
+ struct crypto_async_request *backlog_cp = NULL;
+ unsigned long flags;
+ u32 type;
+ int ret = 0;
+ struct crypto_stat *pstat;
+ void *tfm_ctx;
+ struct qcrypto_cipher_req_ctx *cipher_rctx;
+ struct qcrypto_sha_req_ctx *ahash_rctx;
+ struct ablkcipher_request *ablkcipher_req;
+ struct ahash_request *ahash_req;
+ struct aead_request *aead_req;
+ struct qcrypto_resp_ctx *arsp;
+ struct qcrypto_req_control *pqcrypto_req_control;
+
+ pstat = &_qcrypto_stat;
+
+again:
+ spin_lock_irqsave(&cp->lock, flags);
+ if (atomic_read(&pengine->req_count) >= (pengine->max_req)) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+ }
+
+ backlog_eng = crypto_get_backlog(&pengine->req_queue);
+
+ /* make sure it is in high bandwidth state */
+ if (pengine->bw_state != BUS_HAS_BANDWIDTH) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+ }
+
+	/* try to get a request from the engine's own queue first */
+ async_req = crypto_dequeue_request(&pengine->req_queue);
+ if (!async_req) {
+		/*
+		 * if the engine queue is empty, try the
+		 * driver's shared request queue instead
+		 */
+ backlog_cp = crypto_get_backlog(&cp->req_queue);
+ async_req = crypto_dequeue_request(&cp->req_queue);
+ if (!async_req) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+ }
+ }
+ pqcrypto_req_control = qcrypto_alloc_req_control(pengine);
+ if (pqcrypto_req_control == NULL) {
+ pr_err("Allocation of request failed\n");
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+ }
+
+ /* add associated rsp entry to tfm response queue */
+ type = crypto_tfm_alg_type(async_req->tfm);
+ tfm_ctx = crypto_tfm_ctx(async_req->tfm);
+ switch (type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ ahash_req = container_of(async_req,
+ struct ahash_request, base);
+ ahash_rctx = ahash_request_ctx(ahash_req);
+ arsp = &ahash_rctx->rsp_entry;
+ list_add_tail(
+ &arsp->list,
+ &((struct qcrypto_sha_ctx *)tfm_ctx)
+ ->rsp_queue);
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ ablkcipher_req = container_of(async_req,
+ struct ablkcipher_request, base);
+ cipher_rctx = ablkcipher_request_ctx(ablkcipher_req);
+ arsp = &cipher_rctx->rsp_entry;
+ list_add_tail(
+ &arsp->list,
+ &((struct qcrypto_cipher_ctx *)tfm_ctx)
+ ->rsp_queue);
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ default:
+ aead_req = container_of(async_req,
+ struct aead_request, base);
+ cipher_rctx = aead_request_ctx(aead_req);
+ arsp = &cipher_rctx->rsp_entry;
+ list_add_tail(
+ &arsp->list,
+ &((struct qcrypto_cipher_ctx *)tfm_ctx)
+ ->rsp_queue);
+ break;
+ }
+
+ atomic_inc(&cp->resp_cnt);
+ arsp->res = -EINPROGRESS;
+ arsp->async_req = async_req;
+ pqcrypto_req_control->pce = pengine;
+ pqcrypto_req_control->req = async_req;
+ pqcrypto_req_control->arsp = arsp;
+ pengine->active_seq++;
+ pengine->check_flag = true;
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (backlog_eng)
+ backlog_eng->complete(backlog_eng, -EINPROGRESS);
+ if (backlog_cp)
+ backlog_cp->complete(backlog_cp, -EINPROGRESS);
+ switch (type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ ret = _qcrypto_process_ablkcipher(pengine,
+ pqcrypto_req_control);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control);
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ ret = _qcrypto_process_aead(pengine, pqcrypto_req_control);
+ break;
+ default:
+ ret = -EINVAL;
+ };
+ pengine->total_req++;
+ if (ret) {
+ arsp->res = ret;
+ pengine->err_req++;
+ qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+ if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
+ pstat->ablk_cipher_op_fail++;
+ else
+ if (type == CRYPTO_ALG_TYPE_AHASH)
+ pstat->ahash_op_fail++;
+ else
+ pstat->aead_op_fail++;
+
+ _qcrypto_tfm_complete(cp, type, tfm_ctx);
+ goto again;
+ };
+ return ret;
+}
+
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp)
+{
+	/* must be called with cp->lock held; returns least-busy engine or NULL */
+ struct crypto_engine *p;
+ struct crypto_engine *q = NULL;
+ int max_user = QCRYPTO_BIG_NUMBER;
+ int use_cnt;
+
+ if (unlikely(list_empty(&cp->engine_list))) {
+ pr_err("%s: no valid ce to schedule\n", __func__);
+ return NULL;
+ }
+
+ list_for_each_entry(p, &cp->engine_list, elist) {
+ use_cnt = atomic_read(&p->req_count);
+ if ((use_cnt < p->max_req) && (use_cnt < max_user)) {
+ q = p;
+ max_user = use_cnt;
+ }
+ }
+ return q;
+}
+
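+/*
+ * Enqueue a request on the chosen engine (or on the shared driver queue)
+ * and start processing if that engine already has bus bandwidth;
+ * otherwise request bandwidth and leave the request queued.
+ */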
+static int _qcrypto_queue_req(struct crypto_priv *cp,
+ struct crypto_engine *pengine,
+ struct crypto_async_request *req)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ if (pengine) {
+ ret = crypto_enqueue_request(&pengine->req_queue, req);
+ } else {
+ ret = crypto_enqueue_request(&cp->req_queue, req);
+ pengine = _avail_eng(cp);
+ }
+ if (pengine) {
+ switch (pengine->bw_state) {
+ case BUS_NO_BANDWIDTH:
+ if (pengine->high_bw_req == false) {
+ qcrypto_ce_bw_allocate_req(pengine);
+ pengine->high_bw_req = true;
+ }
+ pengine = NULL;
+ break;
+ case BUS_HAS_BANDWIDTH:
+ break;
+ case BUS_BANDWIDTH_RELEASING:
+ pengine->high_bw_req = true;
+ pengine = NULL;
+ break;
+ case BUS_BANDWIDTH_ALLOCATING:
+ pengine = NULL;
+ break;
+ case BUS_SUSPENDED:
+ case BUS_SUSPENDING:
+ default:
+ pengine = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (pengine) {
+ if (atomic_read(&cp->resp_cnt) <=
+ COMPLETION_CB_BACKLOG_LENGTH) {
+ cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS);
+ _start_qcrypto_process(cp, pengine);
+ } else
+ cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS, STOPPED);
+ }
+ return ret;
+}
+
+static int _qcrypto_enc_aes_192_fallback(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int err;
+
+ ablkcipher_request_set_tfm(req, ctx->fallback.cipher_fb);
+ err = crypto_ablkcipher_encrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+}
+
+static int _qcrypto_dec_aes_192_fallback(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int err;
+
+ ablkcipher_request_set_tfm(req, ctx->fallback.cipher_fb);
+ err = crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+}
+
+
+static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->fallback.cipher_fb)
+ return _qcrypto_enc_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->fallback.cipher_fb)
+ return _qcrypto_enc_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->fallback.cipher_fb)
+ return _qcrypto_enc_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CTR;
+
+ pstat->ablk_cipher_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_XTS;
+
+ pstat->ablk_cipher_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+ return -EINVAL;
+ if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+ (ctx->auth_key_len != AES_KEYSIZE_256))
+ return -EINVAL;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CCM;
+ rctx->iv = req->iv;
+
+ pstat->aead_ccm_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
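+/*
+ * RFC 4309 CCM: the IV is built from a flags byte (L - 1 = 3), the
+ * 3-byte salt stored with the key, and the 8-byte explicit IV carried
+ * in the request.
+ */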
+static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CCM;
+ memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+ rctx->rfc4309_iv[0] = 3; /* L -1 */
+ memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+ memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+ rctx->iv = rctx->rfc4309_iv;
+ pstat->aead_rfc4309_ccm_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_3des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_3des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->fallback.cipher_fb)
+ return _qcrypto_dec_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->fallback.cipher_fb)
+ return _qcrypto_dec_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->fallback.cipher_fb)
+ return _qcrypto_dec_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->mode = QCE_MODE_CTR;
+
+	/* Note: AES counter mode has no distinct decrypt; it reuses the encrypt operation */
+ rctx->dir = QCE_ENCRYPT;
+
+ pstat->ablk_cipher_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_3des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_3des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->mode = QCE_MODE_XTS;
+ rctx->dir = QCE_DECRYPT;
+
+ pstat->ablk_cipher_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+};
+
+
+static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+ return -EINVAL;
+ if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+ (ctx->auth_key_len != AES_KEYSIZE_256))
+ return -EINVAL;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CCM;
+ rctx->iv = req->iv;
+
+ pstat->aead_ccm_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CCM;
+ memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+ rctx->rfc4309_iv[0] = 3; /* L -1 */
+ memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+ memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+ rctx->iv = rctx->rfc4309_iv;
+ pstat->aead_rfc4309_ccm_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ return 0;
+}
+
+static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctx->authsize = authsize;
+ return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctx->authsize = authsize;
+ return 0;
+}
+
+
+static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+ struct rtattr *rta = (struct rtattr *)key;
+ struct crypto_authenc_key_param *param;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ ctx->enc_key_len = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < ctx->enc_key_len)
+ goto badkey;
+
+ ctx->auth_key_len = keylen - ctx->enc_key_len;
+ if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
+ ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
+ goto badkey;
+ memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
+ memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
+ memcpy(ctx->auth_key, key, ctx->auth_key_len);
+
+ return 0;
+badkey:
+ ctx->enc_key_len = 0;
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_256:
+ break;
+ case AES_KEYSIZE_192:
+ if (cp->ce_support.aes_key_192)
+ break;
+ default:
+ ctx->enc_key_len = 0;
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ };
+ ctx->enc_key_len = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ ctx->auth_key_len = keylen;
+ memcpy(ctx->auth_key, key, keylen);
+
+ return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int key_len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ if (key_len < QCRYPTO_CCM4309_NONCE_LEN)
+ return -EINVAL;
+ key_len -= QCRYPTO_CCM4309_NONCE_LEN;
+ memcpy(ctx->ccm4309_nonce, key + key_len, QCRYPTO_CCM4309_NONCE_LEN);
+ ret = _qcrypto_aead_ccm_setkey(aead, key, key_len);
+ return ret;
+};
+
+static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev,
+ "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
+#endif
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_aes_enc++;
+ else
+ pstat->aead_sha256_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev,
+ "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
+#endif
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_aes_dec++;
+ else
+ pstat->aead_sha256_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
+{
+ struct aead_request *areq = &req->areq;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(areq);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->giv; /* generated iv */
+
+ memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	/* avoid consecutive packets going out with the same IV */
+ *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_aes_enc++;
+ else
+ pstat->aead_sha256_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
+}
+
+static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_des_enc++;
+ else
+ pstat->aead_sha256_des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_des_dec++;
+ else
+ pstat->aead_sha256_des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
+{
+ struct aead_request *areq = &req->areq;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(areq);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->giv; /* generated iv */
+
+ memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	/* avoid consecutive packets going out with the same IV */
+ *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_des_enc++;
+ else
+ pstat->aead_sha256_des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
+}
+
+static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_3des_enc++;
+ else
+ pstat->aead_sha256_3des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_3des_dec++;
+ else
+ pstat->aead_sha256_3des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
+{
+ struct aead_request *areq = &req->areq;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(areq);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->giv; /* generated iv */
+
+ memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	/* avoid consecutive packets going out with the same IV */
+ *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_3des_enc++;
+ else
+ pstat->aead_sha256_3des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
+}
+
+static int _sha_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ rctx->first_blk = 1;
+ rctx->last_blk = 0;
+ rctx->byte_count[0] = 0;
+ rctx->byte_count[1] = 0;
+ rctx->byte_count[2] = 0;
+ rctx->byte_count[3] = 0;
+ rctx->trailing_buf_len = 0;
+ rctx->count = 0;
+
+ return 0;
+};
+
+static int _sha1_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+
+ _sha_init(req);
+ sha_ctx->alg = QCE_HASH_SHA1;
+
+ memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ SHA1_DIGEST_SIZE);
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+ pstat->sha1_digest++;
+ return 0;
+};
+
+static int _sha256_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+
+ _sha_init(req);
+ sha_ctx->alg = QCE_HASH_SHA256;
+
+ memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+ pstat->sha256_digest++;
+ return 0;
+};
+
+
+static int _sha1_export(struct ahash_request *req, void *out)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha1_state *out_ctx = (struct sha1_state *)out;
+
+ out_ctx->count = rctx->count;
+ _byte_stream_to_words(out_ctx->state, rctx->digest, SHA1_DIGEST_SIZE);
+ memcpy(out_ctx->buffer, rctx->trailing_buf, SHA1_BLOCK_SIZE);
+
+ return 0;
+};
+
+static int _sha1_hmac_export(struct ahash_request *req, void *out)
+{
+ return _sha1_export(req, out);
+}
+
+/* crypto hw padding constant for hmac first operation */
+#define HMAC_PADDING 64
+
+static int __sha1_import_common(struct ahash_request *req, const void *in,
+ bool hmac)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha1_state *in_ctx = (struct sha1_state *)in;
+ u64 hw_count = in_ctx->count;
+
+ rctx->count = in_ctx->count;
+ memcpy(rctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
+ if (in_ctx->count <= SHA1_BLOCK_SIZE) {
+ rctx->first_blk = 1;
+ } else {
+ rctx->first_blk = 0;
+		/*
+		 * For HMAC, the hardware pads the data when the
+		 * first-block flag is set, so the byte count is
+		 * incremented by 64 after that first operation.
+		 */
+ if (hmac)
+ hw_count += HMAC_PADDING;
+ }
+ rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0);
+ rctx->byte_count[1] = (uint32_t)(hw_count >> 32);
+ _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+ rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+ (SHA1_BLOCK_SIZE-1));
+ return 0;
+}
+
+static int _sha1_import(struct ahash_request *req, const void *in)
+{
+ return __sha1_import_common(req, in, false);
+}
+
+static int _sha1_hmac_import(struct ahash_request *req, const void *in)
+{
+ return __sha1_import_common(req, in, true);
+}
+
+static int _sha256_export(struct ahash_request *req, void *out)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha256_state *out_ctx = (struct sha256_state *)out;
+
+ out_ctx->count = rctx->count;
+ _byte_stream_to_words(out_ctx->state, rctx->digest, SHA256_DIGEST_SIZE);
+ memcpy(out_ctx->buf, rctx->trailing_buf, SHA256_BLOCK_SIZE);
+
+ return 0;
+};
+
+static int _sha256_hmac_export(struct ahash_request *req, void *out)
+{
+ return _sha256_export(req, out);
+}
+
+static int __sha256_import_common(struct ahash_request *req, const void *in,
+ bool hmac)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha256_state *in_ctx = (struct sha256_state *)in;
+ u64 hw_count = in_ctx->count;
+
+ rctx->count = in_ctx->count;
+ memcpy(rctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+
+ if (in_ctx->count <= SHA256_BLOCK_SIZE) {
+ rctx->first_blk = 1;
+ } else {
+ rctx->first_blk = 0;
+		/*
+		 * For HMAC, the hardware pads the data when the
+		 * first-block flag is set, so the byte count is
+		 * incremented by 64 after that first operation.
+		 */
+ if (hmac)
+ hw_count += HMAC_PADDING;
+ }
+
+ rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0);
+ rctx->byte_count[1] = (uint32_t)(hw_count >> 32);
+ _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+ rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+ (SHA256_BLOCK_SIZE-1));
+
+
+ return 0;
+}
+
+static int _sha256_import(struct ahash_request *req, const void *in)
+{
+ return __sha256_import_common(req, in, false);
+}
+
+static int _sha256_hmac_import(struct ahash_request *req, const void *in)
+{
+ return __sha256_import_common(req, in, true);
+}
+
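+/*
+ * Copy the request source scatterlist into one contiguous buffer for
+ * crypto engines that can only operate on aligned data.
+ */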
+static int _copy_source(struct ahash_request *req)
+{
+ struct qcrypto_sha_req_ctx *srctx = NULL;
+ uint32_t bytes = 0;
+ uint32_t num_sg = 0;
+
+ srctx = ahash_request_ctx(req);
+ srctx->orig_src = req->src;
+ srctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+ if (srctx->data == NULL) {
+		pr_err("Mem Alloc fail srctx->data for 0x%x\n",
+			(req->nbytes + 64));
+ return -ENOMEM;
+ }
+
+ num_sg = qcrypto_count_sg(req->src, req->nbytes);
+ bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, srctx->data,
+ req->nbytes);
+ if (bytes != req->nbytes)
+ pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
+ req->nbytes);
+ sg_set_buf(&srctx->dsg, srctx->data,
+ req->nbytes);
+ sg_mark_end(&srctx->dsg);
+ req->src = &srctx->dsg;
+
+ return 0;
+}
+
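+/*
+ * Hash update: data smaller than one block is held back in trailing_buf,
+ * and only whole blocks (plus any previously buffered bytes) are
+ * submitted to the engine.
+ */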
+static int _sha_update(struct ahash_request *req, uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ uint32_t total, len, num_sg;
+ struct scatterlist *sg_last;
+ uint8_t *k_src = NULL;
+ uint32_t sha_pad_len = 0;
+ uint32_t trailing_buf_len = 0;
+ uint32_t nbytes;
+ uint32_t offset = 0;
+ uint32_t bytes = 0;
+ uint8_t *staging;
+ int ret = 0;
+
+ /* check for trailing buffer from previous updates and append it */
+ total = req->nbytes + rctx->trailing_buf_len;
+ len = req->nbytes;
+
+ if (total <= sha_block_size) {
+ k_src = &rctx->trailing_buf[rctx->trailing_buf_len];
+ num_sg = qcrypto_count_sg(req->src, len);
+ bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, k_src, len);
+
+ rctx->trailing_buf_len = total;
+ return 0;
+ }
+
+ /* save the original req structure fields*/
+ rctx->src = req->src;
+ rctx->nbytes = req->nbytes;
+
+ staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+ k_src = &rctx->trailing_buf[0];
+ /* get new trailing buffer */
+ sha_pad_len = ALIGN(total, sha_block_size) - total;
+ trailing_buf_len = sha_block_size - sha_pad_len;
+ offset = req->nbytes - trailing_buf_len;
+
+ if (offset != req->nbytes)
+ scatterwalk_map_and_copy(k_src, req->src, offset,
+ trailing_buf_len, 0);
+
+ nbytes = total - trailing_buf_len;
+ num_sg = qcrypto_count_sg(req->src, req->nbytes);
+
+ len = rctx->trailing_buf_len;
+ sg_last = req->src;
+
+ while (len < nbytes) {
+ if ((len + sg_last->length) > nbytes)
+ break;
+ len += sg_last->length;
+ sg_last = scatterwalk_sg_next(sg_last);
+ }
+ if (rctx->trailing_buf_len) {
+ if (cp->ce_support.aligned_only) {
+ rctx->data2 = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+ if (rctx->data2 == NULL) {
+				pr_err("Mem Alloc fail rctx->data2\n");
+ return -ENOMEM;
+ }
+ memcpy(rctx->data2, staging,
+ rctx->trailing_buf_len);
+ memcpy((rctx->data2 + rctx->trailing_buf_len),
+ rctx->data, req->src->length);
+ kzfree(rctx->data);
+ rctx->data = rctx->data2;
+ sg_set_buf(&rctx->sg[0], rctx->data,
+ (rctx->trailing_buf_len +
+ req->src->length));
+ req->src = rctx->sg;
+ sg_mark_end(&rctx->sg[0]);
+ } else {
+ sg_mark_end(sg_last);
+ memset(rctx->sg, 0, sizeof(rctx->sg));
+ sg_set_buf(&rctx->sg[0], staging,
+ rctx->trailing_buf_len);
+ sg_mark_end(&rctx->sg[1]);
+ sg_chain(rctx->sg, 2, req->src);
+ req->src = rctx->sg;
+ }
+ } else
+ sg_mark_end(sg_last);
+
+ req->nbytes = nbytes;
+ rctx->trailing_buf_len = trailing_buf_len;
+
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+ return ret;
+};
+
+static int _sha1_update(struct ahash_request *req)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+
+ if (cp->ce_support.aligned_only) {
+ if (_copy_source(req))
+ return -ENOMEM;
+ }
+ rctx->count += req->nbytes;
+ return _sha_update(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_update(struct ahash_request *req)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+
+ if (cp->ce_support.aligned_only) {
+ if (_copy_source(req))
+ return -ENOMEM;
+ }
+
+ rctx->count += req->nbytes;
+ return _sha_update(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ int ret = 0;
+ uint8_t *staging;
+
+ if (cp->ce_support.aligned_only) {
+ if (_copy_source(req))
+ return -ENOMEM;
+ }
+
+ rctx->last_blk = 1;
+
+ /* save the original req structure fields*/
+ rctx->src = req->src;
+ rctx->nbytes = req->nbytes;
+
+ staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+ sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+ sg_mark_end(&rctx->sg[0]);
+
+ req->src = &rctx->sg[0];
+ req->nbytes = rctx->trailing_buf_len;
+
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+ return ret;
+};
+
+static int _sha1_final(struct ahash_request *req)
+{
+ return _sha_final(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_final(struct ahash_request *req)
+{
+ return _sha_final(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_digest(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_priv *cp = sha_ctx->cp;
+ int ret = 0;
+
+ if (cp->ce_support.aligned_only) {
+ if (_copy_source(req))
+ return -ENOMEM;
+ }
+
+ /* save the original req structure fields*/
+ rctx->src = req->src;
+ rctx->nbytes = req->nbytes;
+ rctx->first_blk = 1;
+ rctx->last_blk = 1;
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+ return ret;
+}
+
+static int _sha1_digest(struct ahash_request *req)
+{
+ _sha1_init(req);
+ return _sha_digest(req);
+}
+
+static int _sha256_digest(struct ahash_request *req)
+{
+ _sha256_init(req);
+ return _sha_digest(req);
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+ struct crypto_async_request *req, int err)
+{
+ struct completion *ahash_req_complete = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+ complete(ahash_req_complete);
+}
+
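+/*
+ * HMAC keys longer than the block size must first be hashed down to the
+ * digest size (RFC 2104); this helper runs that digest synchronously.
+ */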
+static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ uint8_t *in_buf;
+ int ret = 0;
+ struct scatterlist sg;
+ struct ahash_request *ahash_req;
+ struct completion ahash_req_complete;
+
+ ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (ahash_req == NULL)
+ return -ENOMEM;
+ init_completion(&ahash_req_complete);
+ ahash_request_set_callback(ahash_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ _crypto_sha_hmac_ahash_req_complete,
+ &ahash_req_complete);
+ crypto_ahash_clear_flags(tfm, ~0);
+
+ in_buf = kzalloc(len + 64, GFP_KERNEL);
+ if (in_buf == NULL) {
+		pr_err("qcrypto Can't Allocate mem: in_buf\n");
+ ahash_request_free(ahash_req);
+ return -ENOMEM;
+ }
+ memcpy(in_buf, key, len);
+ sg_set_buf(&sg, in_buf, len);
+ sg_mark_end(&sg);
+
+ ahash_request_set_crypt(ahash_req, &sg,
+ &sha_ctx->authkey[0], len);
+
+ if (sha_ctx->alg == QCE_HASH_SHA1)
+ ret = _sha1_digest(ahash_req);
+ else
+ ret = _sha256_digest(ahash_req);
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ ret =
+ wait_for_completion_interruptible(
+ &ahash_req_complete);
+		reinit_completion(&ahash_req_complete);
+ }
+
+ kzfree(in_buf);
+ ahash_request_free(ahash_req);
+
+ return ret;
+}
+
+static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+	memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
+ if (len <= SHA1_BLOCK_SIZE) {
+ memcpy(&sha_ctx->authkey[0], key, len);
+ sha_ctx->authkey_in_len = len;
+ } else {
+ sha_ctx->alg = QCE_HASH_SHA1;
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+ _sha_hmac_setkey(tfm, key, len);
+ sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
+ }
+ return 0;
+}
+
+static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+ memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
+ if (len <= SHA256_BLOCK_SIZE) {
+ memcpy(&sha_ctx->authkey[0], key, len);
+ sha_ctx->authkey_in_len = len;
+ } else {
+ sha_ctx->alg = QCE_HASH_SHA256;
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+ _sha_hmac_setkey(tfm, key, len);
+ sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
+ }
+
+ return 0;
+}
+
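+/*
+ * Software HMAC fallback: seed the inner hash by XORing the key block
+ * with the ipad byte 0x36.
+ */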
+static int _sha_hmac_init_ihash(struct ahash_request *req,
+ uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ int i;
+
+ for (i = 0; i < sha_block_size; i++)
+ rctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
+ rctx->trailing_buf_len = sha_block_size;
+
+ return 0;
+}
+
+static int _sha1_hmac_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct crypto_stat *pstat;
+ int ret = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+ pstat->sha1_hmac_digest++;
+
+ _sha_init(req);
+ memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ SHA1_DIGEST_SIZE);
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+
+ if (cp->ce_support.sha_hmac)
+ sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+ else {
+ sha_ctx->alg = QCE_HASH_SHA1;
+ ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
+ }
+
+ return ret;
+}
+
+static int _sha256_hmac_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct crypto_stat *pstat;
+ int ret = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+ pstat->sha256_hmac_digest++;
+
+ _sha_init(req);
+
+ memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+
+ if (cp->ce_support.sha_hmac)
+ sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+ else {
+ sha_ctx->alg = QCE_HASH_SHA256;
+ ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
+ }
+
+ return ret;
+}
+
+static int _sha1_hmac_update(struct ahash_request *req)
+{
+ return _sha1_update(req);
+}
+
+static int _sha256_hmac_update(struct ahash_request *req)
+{
+ return _sha256_update(req);
+}
+
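+/*
+ * Software HMAC fallback: hash (key XOR opad 0x5c) followed by the inner
+ * digest to produce the final HMAC value.
+ */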
+static int _sha_hmac_outer_hash(struct ahash_request *req,
+ uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_priv *cp = sha_ctx->cp;
+ int i;
+ uint8_t *staging;
+ uint8_t *p;
+
+ staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ p = staging;
+ for (i = 0; i < sha_block_size; i++)
+ *p++ = sha_ctx->authkey[i] ^ 0x5c;
+ memcpy(p, &rctx->digest[0], sha_digest_size);
+ sg_set_buf(&rctx->sg[0], staging, sha_block_size +
+ sha_digest_size);
+ sg_mark_end(&rctx->sg[0]);
+
+ /* save the original req structure fields*/
+ rctx->src = req->src;
+ rctx->nbytes = req->nbytes;
+
+ req->src = &rctx->sg[0];
+ req->nbytes = sha_block_size + sha_digest_size;
+
+ _sha_init(req);
+ if (sha_ctx->alg == QCE_HASH_SHA1) {
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ SHA1_DIGEST_SIZE);
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+ } else {
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+ }
+
+ rctx->last_blk = 1;
+ return _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+}
+
+static int _sha_hmac_inner_hash(struct ahash_request *req,
+ uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ahash_request *areq = sha_ctx->ahash_req;
+ struct crypto_priv *cp = sha_ctx->cp;
+ int ret = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ uint8_t *staging;
+
+ staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+ sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+ sg_mark_end(&rctx->sg[0]);
+
+ ahash_request_set_crypt(areq, &rctx->sg[0], &rctx->digest[0],
+ rctx->trailing_buf_len);
+ rctx->last_blk = 1;
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &areq->base);
+
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ ret =
+ wait_for_completion_interruptible(&sha_ctx->ahash_req_complete);
+ reinit_completion(&sha_ctx->ahash_req_complete);
+ }
+
+ return ret;
+}
+
+static int _sha1_hmac_final(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ int ret = 0;
+
+ if (cp->ce_support.sha_hmac)
+ return _sha_final(req, SHA1_BLOCK_SIZE);
+ else {
+ ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE,
+ SHA1_BLOCK_SIZE);
+ if (ret)
+ return ret;
+ return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE,
+ SHA1_BLOCK_SIZE);
+ }
+}
+
+static int _sha256_hmac_final(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA256_BLOCK_SIZE);
+
+	ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE,
+				SHA256_BLOCK_SIZE);
+	if (ret)
+		return ret;
+	return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE,
+				SHA256_BLOCK_SIZE);
+}
+
+
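+/*
+ * One-shot HMAC digest: reset the request context to the standard initial
+ * vector for the algorithm, mark the transform as HMAC, and let the engine
+ * run the keyed hash over the whole request.
+ */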
+static int _sha1_hmac_digest(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+ pstat->sha1_hmac_digest++;
+
+ _sha_init(req);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ SHA1_DIGEST_SIZE);
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+ sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+
+ return _sha_digest(req);
+}
+
+static int _sha256_hmac_digest(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+ pstat->sha256_hmac_digest++;
+
+ _sha_init(req);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+ sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+
+ return _sha_digest(req);
+}
+
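+/*
+ * Prepend "qcom-" to an algorithm name, failing if the result would not
+ * fit in CRYPTO_MAX_ALG_NAME.  Used when the generic name should be left
+ * to the software implementation and the hardware variant registered
+ * under the prefixed name instead.
+ */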
+static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size)
+{
+	char new_cra_name[CRYPTO_MAX_ALG_NAME] = "qcom-";
+
+	if (size >= CRYPTO_MAX_ALG_NAME - strlen("qcom-"))
+ return -EINVAL;
+ strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
+ strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
+ return 0;
+}
+
+
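+/*
+ * Exported helpers for kernel clients: pin a request's transform to a
+ * specific crypto engine, selected by device unit (and optionally by
+ * hardware instance).
+ */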
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_engine *pengine = NULL;
+
+ pengine = _qrypto_find_pengine_device(cp, dev);
+ if (pengine == NULL)
+ return -ENODEV;
+ ctx->pengine = pengine;
+
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device);
+
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, u32 dev,
+ u32 hw_inst)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_engine *pengine = NULL;
+
+ pengine = _qrypto_find_pengine_device_hw(cp, dev, hw_inst);
+ if (pengine == NULL)
+ return -ENODEV;
+ ctx->pengine = pengine;
+
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device_hw);
+
+int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_engine *pengine = NULL;
+
+ pengine = _qrypto_find_pengine_device(cp, dev);
+ if (pengine == NULL)
+ return -ENODEV;
+ ctx->pengine = pengine;
+
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_set_device);
+
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev)
+{
+ struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_engine *pengine = NULL;
+
+ pengine = _qrypto_find_pengine_device(cp, dev);
+ if (pengine == NULL)
+ return -ENODEV;
+ ctx->pengine = pengine;
+
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_set_device);
+
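+/*
+ * Flag setters reject QCRYPTO_CTX_USE_HW_KEY on platforms without hardware
+ * key support, and refuse combinations that would set every key-source
+ * flag at once.
+ */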
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+ (cp->platform_support.hw_key_support == false)) {
+ pr_err("%s HW key usage not supported\n", __func__);
+ return -EINVAL;
+ }
+ if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+ QCRYPTO_CTX_KEY_MASK) {
+ pr_err("%s Cannot set all key flags\n", __func__);
+ return -EINVAL;
+ }
+
+ ctx->flags |= flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_flag);
+
+int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+ (cp->platform_support.hw_key_support == false)) {
+ pr_err("%s HW key usage not supported\n", __func__);
+ return -EINVAL;
+ }
+ if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+ QCRYPTO_CTX_KEY_MASK) {
+ pr_err("%s Cannot set all key flags\n", __func__);
+ return -EINVAL;
+ }
+
+ ctx->flags |= flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_set_flag);
+
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags)
+{
+ struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+ (cp->platform_support.hw_key_support == false)) {
+ pr_err("%s HW key usage not supported\n", __func__);
+ return -EINVAL;
+ }
+ if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+ QCRYPTO_CTX_KEY_MASK) {
+ pr_err("%s Cannot set all key flags\n", __func__);
+ return -EINVAL;
+ }
+
+ ctx->flags |= flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_set_flag);
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+ unsigned int flags)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->flags &= ~flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_clear_flag);
+
+int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->flags &= ~flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_clear_flag);
+
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags)
+{
+ struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->flags &= ~flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_clear_flag);
+
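+/* Plain (unkeyed) SHA-1 and SHA-256 ahash descriptors registered with the
+ * crypto API.
+ */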
+static struct ahash_alg _qcrypto_ahash_algos[] = {
+ {
+ .init = _sha1_init,
+ .update = _sha1_update,
+ .final = _sha1_final,
+ .export = _sha1_export,
+ .import = _sha1_import,
+ .digest = _sha1_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "qcrypto-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct qcrypto_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_ahash_cra_init,
+ .cra_exit = _qcrypto_ahash_cra_exit,
+ },
+ },
+ },
+ {
+ .init = _sha256_init,
+ .update = _sha256_update,
+ .final = _sha256_final,
+ .export = _sha256_export,
+ .import = _sha256_import,
+ .digest = _sha256_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "qcrypto-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct qcrypto_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_ahash_cra_init,
+ .cra_exit = _qcrypto_ahash_cra_exit,
+ },
+ },
+ },
+};
+
+static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
+ {
+ .init = _sha1_hmac_init,
+ .update = _sha1_hmac_update,
+ .final = _sha1_hmac_final,
+ .export = _sha1_hmac_export,
+ .import = _sha1_hmac_import,
+ .digest = _sha1_hmac_digest,
+ .setkey = _sha1_hmac_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "qcrypto-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct qcrypto_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_ahash_hmac_cra_init,
+ .cra_exit = _qcrypto_ahash_cra_exit,
+ },
+ },
+ },
+ {
+ .init = _sha256_hmac_init,
+ .update = _sha256_hmac_update,
+ .final = _sha256_hmac_final,
+ .export = _sha256_hmac_export,
+ .import = _sha256_hmac_import,
+ .digest = _sha256_hmac_digest,
+ .setkey = _sha256_hmac_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "qcrypto-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct qcrypto_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_ahash_hmac_cra_init,
+ .cra_exit = _qcrypto_ahash_cra_exit,
+ },
+ },
+ },
+};
+
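+/* AES ECB/CBC/CTR and DES/3DES ECB/CBC ablkcipher descriptors backed by
+ * the crypto engine.
+ */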
+static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
+ {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "qcrypto-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aes_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_aes_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = _qcrypto_setkey_aes,
+ .encrypt = _qcrypto_enc_aes_ecb,
+ .decrypt = _qcrypto_dec_aes_ecb,
+ },
+ },
+ },
+ {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "qcrypto-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aes_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_aes_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = _qcrypto_setkey_aes,
+ .encrypt = _qcrypto_enc_aes_cbc,
+ .decrypt = _qcrypto_dec_aes_cbc,
+ },
+ },
+ },
+ {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "qcrypto-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aes_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_aes_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = _qcrypto_setkey_aes,
+ .encrypt = _qcrypto_enc_aes_ctr,
+ .decrypt = _qcrypto_dec_aes_ctr,
+ },
+ },
+ },
+ {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "qcrypto-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = _qcrypto_setkey_des,
+ .encrypt = _qcrypto_enc_des_ecb,
+ .decrypt = _qcrypto_dec_des_ecb,
+ },
+ },
+ },
+ {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "qcrypto-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = _qcrypto_setkey_des,
+ .encrypt = _qcrypto_enc_des_cbc,
+ .decrypt = _qcrypto_dec_des_cbc,
+ },
+ },
+ },
+ {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "qcrypto-ecb-3des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = _qcrypto_setkey_3des,
+ .encrypt = _qcrypto_enc_3des_ecb,
+ .decrypt = _qcrypto_dec_3des_ecb,
+ },
+ },
+ },
+ {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "qcrypto-cbc-3des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = _qcrypto_setkey_3des,
+ .encrypt = _qcrypto_enc_3des_cbc,
+ .decrypt = _qcrypto_dec_3des_cbc,
+ },
+ },
+ },
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "qcrypto-xts-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = _qcrypto_setkey_aes_xts,
+ .encrypt = _qcrypto_enc_aes_xts,
+ .decrypt = _qcrypto_dec_aes_xts,
+ },
+ },
+};
+
+static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = {
+ {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aead_sha1_init,
+ .cra_exit = _qcrypto_cra_aead_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_aes_cbc,
+ .decrypt = _qcrypto_aead_decrypt_aes_cbc,
+ .givencrypt = _qcrypto_aead_givencrypt_aes_cbc,
+ .geniv = "<built-in>",
+ }
+ }
+ },
+ {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aead_sha1_init,
+ .cra_exit = _qcrypto_cra_aead_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_des_cbc,
+ .decrypt = _qcrypto_aead_decrypt_des_cbc,
+ .givencrypt = _qcrypto_aead_givencrypt_des_cbc,
+ .geniv = "<built-in>",
+ }
+ }
+ },
+ {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aead_sha1_init,
+ .cra_exit = _qcrypto_cra_aead_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_3des_cbc,
+ .decrypt = _qcrypto_aead_decrypt_3des_cbc,
+ .givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
+ .geniv = "<built-in>",
+ }
+ }
+ },
+};
+
+static struct crypto_alg _qcrypto_aead_sha256_hmac_algos[] = {
+ {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aead_sha256_init,
+ .cra_exit = _qcrypto_cra_aead_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_aes_cbc,
+ .decrypt = _qcrypto_aead_decrypt_aes_cbc,
+ .givencrypt = _qcrypto_aead_givencrypt_aes_cbc,
+ .geniv = "<built-in>",
+ }
+ }
+ },
+
+ {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aead_sha256_init,
+ .cra_exit = _qcrypto_cra_aead_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_des_cbc,
+ .decrypt = _qcrypto_aead_decrypt_des_cbc,
+ .givencrypt = _qcrypto_aead_givencrypt_des_cbc,
+ .geniv = "<built-in>",
+ }
+ }
+ },
+ {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-3des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aead_sha256_init,
+ .cra_exit = _qcrypto_cra_aead_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_3des_cbc,
+ .decrypt = _qcrypto_aead_decrypt_3des_cbc,
+ .givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
+ .geniv = "<built-in>",
+ }
+ }
+ },
+};
+
+static struct crypto_alg _qcrypto_aead_ccm_algo = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "qcrypto-aes-ccm",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aead_ccm_init,
+ .cra_exit = _qcrypto_cra_aead_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = _qcrypto_aead_ccm_setkey,
+ .setauthsize = _qcrypto_aead_ccm_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_aes_ccm,
+ .decrypt = _qcrypto_aead_decrypt_aes_ccm,
+ .geniv = "<built-in>",
+ }
+ }
+};
+
+static struct crypto_alg _qcrypto_aead_rfc4309_ccm_algo = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "qcrypto-rfc4309-aes-ccm",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_nivaead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aead_rfc4309_ccm_init,
+ .cra_exit = _qcrypto_cra_aead_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8,
+ .maxauthsize = 16,
+ .setkey = _qcrypto_aead_rfc4309_ccm_setkey,
+ .setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
+ .encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
+ .decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
+ .geniv = "seqiv",
+ }
+ }
+};
+
+
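+/*
+ * Probe: open a qce handle for this CE instance, add the engine to the
+ * global engine list, read the hardware capability table, set up bus
+ * bandwidth voting, and (for the first engine only) register every
+ * algorithm the hardware supports with the crypto API.
+ */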
+static int _qcrypto_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ void *handle;
+ struct crypto_priv *cp = &qcrypto_dev;
+ int i;
+ struct msm_ce_hw_support *platform_support;
+ struct crypto_engine *pengine;
+ unsigned long flags;
+ struct qcrypto_req_control *pqcrypto_req_control = NULL;
+
+ pengine = kzalloc(sizeof(*pengine), GFP_KERNEL);
+ if (!pengine) {
+		pr_err("qcrypto: failed to allocate crypto engine\n");
+ return -ENOMEM;
+ }
+
+ /* open qce */
+ handle = qce_open(pdev, &rc);
+ if (handle == NULL) {
+ kzfree(pengine);
+ platform_set_drvdata(pdev, NULL);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, pengine);
+ pengine->qce = handle;
+ pengine->pcp = cp;
+ pengine->pdev = pdev;
+ pengine->signature = 0xdeadbeef;
+
+ init_timer(&(pengine->bw_reaper_timer));
+ INIT_WORK(&pengine->bw_reaper_ws, qcrypto_bw_reaper_work);
+ pengine->bw_reaper_timer.function =
+ qcrypto_bw_reaper_timer_callback;
+ INIT_WORK(&pengine->bw_allocate_ws, qcrypto_bw_allocate_work);
+ pengine->high_bw_req = false;
+ pengine->active_seq = 0;
+ pengine->last_active_seq = 0;
+ pengine->check_flag = false;
+
+ crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+
+ mutex_lock(&cp->engine_lock);
+ cp->total_units++;
+ pengine->unit = cp->total_units;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ list_add_tail(&pengine->elist, &cp->engine_list);
+ cp->next_engine = pengine;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ qce_hw_support(pengine->qce, &cp->ce_support);
+ pengine->ce_hw_instance = cp->ce_support.ce_hw_instance;
+ pengine->max_req = cp->ce_support.max_request;
+ pqcrypto_req_control = kzalloc(sizeof(struct qcrypto_req_control) *
+ pengine->max_req, GFP_KERNEL);
+ if (pqcrypto_req_control == NULL) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ qcrypto_init_req_control(pengine, pqcrypto_req_control);
+ if (cp->ce_support.bam) {
+ cp->platform_support.ce_shared = cp->ce_support.is_shared;
+ cp->platform_support.shared_ce_resource = 0;
+ cp->platform_support.hw_key_support = cp->ce_support.hw_key;
+ cp->platform_support.sha_hmac = 1;
+
+ cp->platform_support.bus_scale_table =
+ (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (!cp->platform_support.bus_scale_table)
+ pr_warn("bus_scale_table is NULL\n");
+
+ pengine->ce_device = cp->ce_support.ce_device;
+
+ } else {
+ platform_support =
+ (struct msm_ce_hw_support *)pdev->dev.platform_data;
+ cp->platform_support.ce_shared = platform_support->ce_shared;
+ cp->platform_support.shared_ce_resource =
+ platform_support->shared_ce_resource;
+ cp->platform_support.hw_key_support =
+ platform_support->hw_key_support;
+ cp->platform_support.bus_scale_table =
+ platform_support->bus_scale_table;
+ cp->platform_support.sha_hmac = platform_support->sha_hmac;
+ }
+
+ pengine->bus_scale_handle = 0;
+
+ if (cp->platform_support.bus_scale_table != NULL) {
+ pengine->bus_scale_handle =
+ msm_bus_scale_register_client(
+ (struct msm_bus_scale_pdata *)
+ cp->platform_support.bus_scale_table);
+ if (!pengine->bus_scale_handle) {
+ pr_err("%s not able to get bus scale\n",
+ __func__);
+ rc = -ENOMEM;
+ goto err;
+ }
+ pengine->bw_state = BUS_NO_BANDWIDTH;
+ } else {
+ pengine->bw_state = BUS_HAS_BANDWIDTH;
+ }
+
+ if (cp->total_units != 1) {
+ mutex_unlock(&cp->engine_lock);
+ return 0;
+ }
+
+ /* register crypto cipher algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_cipher_alg_alloc(cp,
+ &_qcrypto_ablk_cipher_algos[i]);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_alg(&q_alg->cipher_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->cipher_alg.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->cipher_alg.cra_driver_name);
+ }
+ }
+
+ /* register crypto cipher algorithms the device supports */
+ if (cp->ce_support.aes_xts) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_cipher_alg_alloc(cp,
+ &_qcrypto_ablk_cipher_xts_algo);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aes_xts_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_alg(&q_alg->cipher_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->cipher_alg.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->cipher_alg.cra_driver_name);
+ }
+ }
+
+ /*
+ * Register crypto hash (sha1 and sha256) algorithms the
+ * device supports
+ */
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
+ struct qcrypto_alg *q_alg = NULL;
+
+ q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
+
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_ahash_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->sha_alg.halg.base.cra_name,
+ strlen(q_alg->sha_alg.halg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->sha_alg.halg.base.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_ahash(&q_alg->sha_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->sha_alg.halg.base.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->sha_alg.halg.base.cra_driver_name);
+ }
+ }
+
+ /* register crypto aead (hmac-sha1) algorithms the device supports */
+ if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac
+ || cp->ce_support.sha_hmac) {
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
+ i++) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_cipher_alg_alloc(cp,
+ &_qcrypto_aead_sha1_hmac_algos[i]);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aead_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_alg(&q_alg->cipher_alg);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s alg registration failed\n",
+ q_alg->cipher_alg.cra_driver_name);
+ kfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->cipher_alg.cra_driver_name);
+ }
+ }
+ }
+
+ /* register crypto aead (hmac-sha256) algorithms the device supports */
+ if (cp->ce_support.sha_hmac) {
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha256_hmac_algos);
+ i++) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_cipher_alg_alloc(cp,
+ &_qcrypto_aead_sha256_hmac_algos[i]);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aead_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_alg(&q_alg->cipher_alg);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s alg registration failed\n",
+ q_alg->cipher_alg.cra_driver_name);
+ kfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->cipher_alg.cra_driver_name);
+ }
+ }
+ }
+
+ if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
+ /* register crypto hmac algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
+ struct qcrypto_alg *q_alg = NULL;
+
+ q_alg = _qcrypto_sha_alg_alloc(cp,
+ &_qcrypto_sha_hmac_algos[i]);
+
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_hmac_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->sha_alg.halg.base.cra_name,
+ strlen(
+ q_alg->sha_alg.halg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->sha_alg.halg.base.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_ahash(&q_alg->sha_alg);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s alg registration failed\n",
+ q_alg->sha_alg.halg.base.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->sha_alg.halg.base.cra_driver_name);
+ }
+ }
+ }
+ /*
+ * Register crypto cipher (aes-ccm) algorithms the
+ * device supports
+ */
+ if (cp->ce_support.aes_ccm) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aes_ccm_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_alg(&q_alg->cipher_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->cipher_alg.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->cipher_alg.cra_driver_name);
+ }
+
+ q_alg = _qcrypto_cipher_alg_alloc(cp,
+ &_qcrypto_aead_rfc4309_ccm_algo);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+
+ if (cp->ce_support.use_sw_aes_ccm_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_alg(&q_alg->cipher_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->cipher_alg.cra_driver_name);
+ kfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->cipher_alg.cra_driver_name);
+ }
+ }
+
+ mutex_unlock(&cp->engine_lock);
+
+
+ return 0;
+err:
+ _qcrypto_remove_engine(pengine);
+ mutex_unlock(&cp->engine_lock);
+ if (pengine->qce)
+ qce_close(pengine->qce);
+ kzfree(pengine);
+ return rc;
+}
+
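+/* An engine is in use if it has active requests or queued work. */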
+static int _qcrypto_engine_in_use(struct crypto_engine *pengine)
+{
+ struct crypto_priv *cp = pengine->pcp;
+
+ if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen
+ || cp->req_queue.qlen)
+ return 1;
+ return 0;
+}
+
+static void _qcrypto_do_suspending(struct crypto_engine *pengine)
+{
+ struct crypto_priv *cp = pengine->pcp;
+
+ if (cp->platform_support.bus_scale_table == NULL)
+ return;
+ del_timer_sync(&pengine->bw_reaper_timer);
+ qcrypto_ce_set_bus(pengine, false);
+}
+
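+/*
+ * Suspend is refused with -EBUSY while the engine has outstanding work or
+ * a bandwidth transition is in flight; otherwise the bus vote is dropped
+ * and the qce layer is asked to save its clock state.
+ */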
+static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret = 0;
+ struct crypto_engine *pengine;
+ struct crypto_priv *cp;
+ unsigned long flags;
+
+ pengine = platform_get_drvdata(pdev);
+ if (!pengine)
+ return -EINVAL;
+
+ /*
+ * Check if this platform supports clock management in suspend/resume
+ * If not, just simply return 0.
+ */
+ cp = pengine->pcp;
+ if (!cp->ce_support.clk_mgmt_sus_res)
+ return 0;
+ spin_lock_irqsave(&cp->lock, flags);
+ switch (pengine->bw_state) {
+ case BUS_NO_BANDWIDTH:
+ if (pengine->high_bw_req == false)
+ pengine->bw_state = BUS_SUSPENDED;
+ else
+ ret = -EBUSY;
+ break;
+ case BUS_HAS_BANDWIDTH:
+ if (_qcrypto_engine_in_use(pengine)) {
+ ret = -EBUSY;
+ } else {
+ pengine->bw_state = BUS_SUSPENDING;
+ spin_unlock_irqrestore(&cp->lock, flags);
+ _qcrypto_do_suspending(pengine);
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_SUSPENDED;
+ }
+ break;
+ case BUS_BANDWIDTH_RELEASING:
+ case BUS_BANDWIDTH_ALLOCATING:
+ case BUS_SUSPENDED:
+ case BUS_SUSPENDING:
+ default:
+ ret = -EBUSY;
+ break;
+ }
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+	if (ret)
+		return ret;
+
+	if (qce_pm_table.suspend)
+		qce_pm_table.suspend(pengine->qce);
+	return 0;
+}
+
+static int _qcrypto_resume(struct platform_device *pdev)
+{
+ struct crypto_engine *pengine;
+ struct crypto_priv *cp;
+ unsigned long flags;
+ int ret = 0;
+
+ pengine = platform_get_drvdata(pdev);
+
+ if (!pengine)
+ return -EINVAL;
+ cp = pengine->pcp;
+ if (!cp->ce_support.clk_mgmt_sus_res)
+ return 0;
+ spin_lock_irqsave(&cp->lock, flags);
+ if (pengine->bw_state == BUS_SUSPENDED) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (qce_pm_table.resume)
+ qce_pm_table.resume(pengine->qce);
+
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_NO_BANDWIDTH;
+ pengine->active_seq++;
+ pengine->check_flag = false;
+ if (cp->req_queue.qlen || pengine->req_queue.qlen) {
+ if (pengine->high_bw_req == false) {
+ qcrypto_ce_bw_allocate_req(pengine);
+ pengine->high_bw_req = true;
+ }
+ }
+ } else
+ ret = -EBUSY;
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return ret;
+}
+
+static struct of_device_id qcrypto_match[] = {
+ { .compatible = "qcom,qcrypto",
+ },
+ {}
+};
+
+static struct platform_driver _qualcomm_crypto = {
+ .probe = _qcrypto_probe,
+ .remove = _qcrypto_remove,
+ .suspend = _qcrypto_suspend,
+ .resume = _qcrypto_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "qcrypto",
+ .of_match_table = qcrypto_match,
+ },
+};
+
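+/*
+ * debugfs interface ("qcrypto/stats-1"): reading dumps the accumulated
+ * driver statistics, writing any value clears them.
+ */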
+static int _debug_qcrypto;
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc = -EINVAL;
+ int qcrypto = *((int *) file->private_data);
+ int len;
+
+ len = _disp_stats(qcrypto);
+
+	rc = simple_read_from_buffer((void __user *) buf, count,
+			ppos, (void *) _debug_read_buf, len);
+
+ return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long flags;
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *pe;
+
+ memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat));
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(pe, &cp->engine_list, elist) {
+ pe->total_req = 0;
+ pe->err_req = 0;
+ qce_clear_driver_stats(pe->qce);
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+ .open = _debug_stats_open,
+ .read = _debug_stats_read,
+ .write = _debug_stats_write,
+};
+
+static int _qcrypto_debug_init(void)
+{
+ int rc;
+ char name[DEBUG_MAX_FNAME];
+ struct dentry *dent;
+
+ _debug_dent = debugfs_create_dir("qcrypto", NULL);
+ if (IS_ERR(_debug_dent)) {
+ pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(_debug_dent));
+ return PTR_ERR(_debug_dent);
+ }
+
+ snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+ _debug_qcrypto = 0;
+ dent = debugfs_create_file(name, 0644, _debug_dent,
+ &_debug_qcrypto, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_err("qcrypto debugfs_create_file fail\n");
+		rc = -ENOMEM;
+ goto err;
+ }
+ return 0;
+err:
+ debugfs_remove_recursive(_debug_dent);
+ return rc;
+}
+
+static int __init _qcrypto_init(void)
+{
+ int rc;
+ struct crypto_priv *pcp = &qcrypto_dev;
+
+ rc = _qcrypto_debug_init();
+ if (rc)
+ return rc;
+ INIT_LIST_HEAD(&pcp->alg_list);
+ INIT_LIST_HEAD(&pcp->engine_list);
+ init_llist_head(&pcp->ordered_resp_list);
+ spin_lock_init(&pcp->lock);
+ mutex_init(&pcp->engine_lock);
+ pcp->resp_wq = alloc_workqueue("qcrypto_seq_response_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+ if (!pcp->resp_wq) {
+ pr_err("Error allocating workqueue\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&pcp->resp_work, seq_response);
+ pcp->total_units = 0;
+ pcp->platform_support.bus_scale_table = NULL;
+ pcp->next_engine = NULL;
+ crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+ return platform_driver_register(&_qualcomm_crypto);
+}
+
+static void __exit _qcrypto_exit(void)
+{
+ pr_debug("%s Unregister QCRYPTO\n", __func__);
+ debugfs_remove_recursive(_debug_dent);
+ platform_driver_unregister(&_qualcomm_crypto);
+}
+
+module_init(_qcrypto_init);
+module_exit(_qcrypto_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Crypto driver");
diff --git a/drivers/crypto/msm/qcryptohw_30.h b/drivers/crypto/msm/qcryptohw_30.h
new file mode 100644
index 000000000000..3c18ed2a097a
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_30.h
@@ -0,0 +1,308 @@
+/* Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_
+
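+/*
+ * Register offsets and bit-field layout used by the qce driver for the
+ * crypto engine 3.x register interface.
+ */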
+#define QCE_AUTH_REG_BYTE_COUNT 2
+#define CRYPTO_DATA_IN_REG 0x0
+#define CRYPTO_DATA_OUT_REG 0x10
+#define CRYPTO_STATUS_REG 0x20
+#define CRYPTO_CONFIG_REG 0x24
+#define CRYPTO_DEBUG_REG 0x28
+#define CRYPTO_REGISTER_LOCK_REG 0x2C
+#define CRYPTO_SEG_CFG_REG 0x30
+#define CRYPTO_ENCR_SEG_CFG_REG 0x34
+#define CRYPTO_AUTH_SEG_CFG_REG 0x38
+#define CRYPTO_SEG_SIZE_REG 0x3C
+#define CRYPTO_GOPROC_REG 0x40
+#define CRYPTO_ENGINES_AVAIL 0x44
+
+#define CRYPTO_DES_KEY0_REG 0x50
+#define CRYPTO_DES_KEY1_REG 0x54
+#define CRYPTO_DES_KEY2_REG 0x58
+#define CRYPTO_DES_KEY3_REG 0x5C
+#define CRYPTO_DES_KEY4_REG 0x60
+#define CRYPTO_DES_KEY5_REG 0x64
+
+#define CRYPTO_CNTR0_IV0_REG 0x70
+#define CRYPTO_CNTR1_IV1_REG 0x74
+#define CRYPTO_CNTR2_IV2_REG 0x78
+#define CRYPTO_CNTR3_IV3_REG 0x7C
+#define CRYPTO_CNTR_MASK_REG 0x80
+
+#define CRYPTO_AUTH_BYTECNT0_REG 0x90
+#define CRYPTO_AUTH_BYTECNT1_REG 0x94
+#define CRYPTO_AUTH_BYTECNT2_REG 0x98
+#define CRYPTO_AUTH_BYTECNT3_REG 0x9C
+
+#define CRYPTO_AUTH_IV0_REG 0x100
+#define CRYPTO_AUTH_IV1_REG 0x104
+#define CRYPTO_AUTH_IV2_REG 0x108
+#define CRYPTO_AUTH_IV3_REG 0x10C
+#define CRYPTO_AUTH_IV4_REG 0x110
+#define CRYPTO_AUTH_IV5_REG 0x114
+#define CRYPTO_AUTH_IV6_REG 0x118
+#define CRYPTO_AUTH_IV7_REG 0x11C
+#define CRYPTO_AUTH_IV8_REG 0x120
+#define CRYPTO_AUTH_IV9_REG 0x124
+#define CRYPTO_AUTH_IV10_REG 0x128
+#define CRYPTO_AUTH_IV11_REG 0x12C
+#define CRYPTO_AUTH_IV12_REG 0x130
+#define CRYPTO_AUTH_IV13_REG 0x134
+#define CRYPTO_AUTH_IV14_REG 0x138
+#define CRYPTO_AUTH_IV15_REG 0x13C
+
+#define CRYPTO_AES_RNDKEY0 0x200
+#define CRYPTO_AES_RNDKEY1 0x204
+#define CRYPTO_AES_RNDKEY2 0x208
+#define CRYPTO_AES_RNDKEY3 0x20C
+#define CRYPTO_AES_RNDKEY4 0x210
+#define CRYPTO_AES_RNDKEY5 0x214
+#define CRYPTO_AES_RNDKEY6 0x218
+#define CRYPTO_AES_RNDKEY7 0x21C
+#define CRYPTO_AES_RNDKEY8 0x220
+#define CRYPTO_AES_RNDKEY9 0x224
+#define CRYPTO_AES_RNDKEY10 0x228
+#define CRYPTO_AES_RNDKEY11 0x22c
+#define CRYPTO_AES_RNDKEY12 0x230
+#define CRYPTO_AES_RNDKEY13 0x234
+#define CRYPTO_AES_RNDKEY14 0x238
+#define CRYPTO_AES_RNDKEY15 0x23C
+#define CRYPTO_AES_RNDKEY16 0x240
+#define CRYPTO_AES_RNDKEY17 0x244
+#define CRYPTO_AES_RNDKEY18 0x248
+#define CRYPTO_AES_RNDKEY19 0x24C
+#define CRYPTO_AES_RNDKEY20 0x250
+#define CRYPTO_AES_RNDKEY21 0x254
+#define CRYPTO_AES_RNDKEY22 0x258
+#define CRYPTO_AES_RNDKEY23 0x25C
+#define CRYPTO_AES_RNDKEY24 0x260
+#define CRYPTO_AES_RNDKEY25 0x264
+#define CRYPTO_AES_RNDKEY26 0x268
+#define CRYPTO_AES_RNDKEY27 0x26C
+#define CRYPTO_AES_RNDKEY28 0x270
+#define CRYPTO_AES_RNDKEY29 0x274
+#define CRYPTO_AES_RNDKEY30 0x278
+#define CRYPTO_AES_RNDKEY31 0x27C
+#define CRYPTO_AES_RNDKEY32 0x280
+#define CRYPTO_AES_RNDKEY33 0x284
+#define CRYPTO_AES_RNDKEY34 0x288
+#define CRYPTO_AES_RNDKEY35 0x28c
+#define CRYPTO_AES_RNDKEY36 0x290
+#define CRYPTO_AES_RNDKEY37 0x294
+#define CRYPTO_AES_RNDKEY38 0x298
+#define CRYPTO_AES_RNDKEY39 0x29C
+#define CRYPTO_AES_RNDKEY40 0x2A0
+#define CRYPTO_AES_RNDKEY41 0x2A4
+#define CRYPTO_AES_RNDKEY42 0x2A8
+#define CRYPTO_AES_RNDKEY43 0x2AC
+#define CRYPTO_AES_RNDKEY44 0x2B0
+#define CRYPTO_AES_RNDKEY45 0x2B4
+#define CRYPTO_AES_RNDKEY46 0x2B8
+#define CRYPTO_AES_RNDKEY47 0x2BC
+#define CRYPTO_AES_RNDKEY48 0x2C0
+#define CRYPTO_AES_RNDKEY49 0x2C4
+#define CRYPTO_AES_RNDKEY50 0x2C8
+#define CRYPTO_AES_RNDKEY51 0x2CC
+#define CRYPTO_AES_RNDKEY52 0x2D0
+#define CRYPTO_AES_RNDKEY53 0x2D4
+#define CRYPTO_AES_RNDKEY54 0x2D8
+#define CRYPTO_AES_RNDKEY55 0x2DC
+#define CRYPTO_AES_RNDKEY56 0x2E0
+#define CRYPTO_AES_RNDKEY57 0x2E4
+#define CRYPTO_AES_RNDKEY58 0x2E8
+#define CRYPTO_AES_RNDKEY59 0x2EC
+
+#define CRYPTO_DATA_SHADOW0 0x8000
+#define CRYPTO_DATA_SHADOW8191 0x8FFC
+
+/* status reg */
+#define CRYPTO_CORE_REV 28 /* bit 31-28 */
+#define CRYPTO_CORE_REV_MASK (0xf << CRYPTO_CORE_REV)
+#define CRYPTO_DOUT_SIZE_AVAIL 22 /* bit 24-22 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK (0x7 << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL 19 /* bit 21-19 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x7 << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL 18
+#define CRYPTO_SEG_CHNG_ERR 17
+#define CRYPTO_CFH_CHNG_ERR 16
+#define CRYPTO_DOUT_ERR 15
+#define CRYPTO_DIN_ERR 14
+#define CRYPTO_LOCKED 13
+#define CRYPTO_CRYPTO_STATE 10 /* bit 12-10 */
+#define CRYPTO_CRYPTO_STATE_MASK (0x7 << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY 9
+#define CRYPTO_AUTH_BUSY 8
+#define CRYPTO_DOUT_INTR 7
+#define CRYPTO_DIN_INTR 6
+#define CRYPTO_AUTH_DONE_INTR 5
+#define CRYPTO_ERR_INTR 4
+#define CRYPTO_DOUT_RDY 3
+#define CRYPTO_DIN_RDY 2
+#define CRYPTO_AUTH_DONE 1
+#define CRYPTO_SW_ERR 0
+
+#define CRYPTO_CRYPTO_STATE_IDLE 0
+#define CRYPTO_CRYPTO_STATE_LOCKED 1
+#define CRYPTO_CRYPTO_STATE_GO 3
+#define CRYPTO_CRYPTO_STATE_PROCESSING 4
+#define CRYPTO_CRYPTO_STATE_FINAL_READ 5
+#define CRYPTO_CRYPTO_STATE_CTXT_CLEARING 6
+#define CRYPTO_CRYPTO_STATE_UNLOCKING 7
+
+/* config reg */
+#define CRYPTO_HIGH_SPD_HASH_EN_N 15
+#define CRYPTO_HIGH_SPD_OUT_EN_N 14
+#define CRYPTO_HIGH_SPD_IN_EN_N 13
+#define CRYPTO_DBG_EN 12
+#define CRYPTO_DBG_SEL 7 /* bit 11:7 */
+#define CRYPTO_DBG_SEL_MASK (0x1F << CRYPTO_DBG_SEL)
+#define CRYPTO_MASK_DOUT_INTR 6
+#define CRYPTO_MASK_DIN_INTR 5
+#define CRYPTO_MASK_AUTH_DONE_INTR 4
+#define CRYPTO_MASK_ERR_INTR 3
+#define CRYPTO_AUTO_SHUTDOWN_EN 2
+#define CRYPTO_CLK_EN_N 1
+#define CRYPTO_SW_RST 0
+
+/* seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE 25
+#define CRYPTO_F9_DIRECTION 24
+#define CRYPTO_F8_DIRECTION 23
+#define CRYPTO_USE_HW_KEY 22
+
+#define CRYPTO_CNTR_ALG 20 /* bit 21-20 */
+#define CRYPTO_CNTR_ALG_MASK		(3 << CRYPTO_CNTR_ALG)
+
+#define CRYPTO_CLR_CNTXT 19
+#define CRYPTO_LAST 18
+#define CRYPTO_FIRST 17
+#define CRYPTO_ENCODE 16
+
+#define CRYPTO_AUTH_POS 14 /* bit 15-14 */
+#define CRYPTO_AUTH_POS_MASK (3 << CRYPTO_AUTH_POS)
+
+#define CRYPTO_AUTH_SIZE 11 /* bit 13-11 */
+#define CRYPTO_AUTH_SIZE_MASK (7 << CRYPTO_AUTH_SIZE)
+
+#define CRYPTO_AUTH_ALG 9 /* bit 10-9 */
+#define CRYPTO_AUTH_ALG_MASK (3 << CRYPTO_AUTH_ALG)
+
+#define CRYPTO_ENCR_MODE 6 /* bit 8-6 */
+#define CRYPTO_ENCR_MODE_MASK (7 << CRYPTO_ENCR_MODE)
+
+#define CRYPTO_ENCR_KEY_SZ 3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK (7 << CRYPTO_ENCR_KEY_SZ)
+
+#define CRYPTO_ENCR_ALG 0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK (7 << CRYPTO_ENCR_ALG)
+
+#define CRYPTO_CNTR_ALG_NIST 0
+#define CRYPTO_CNTR_ALG_UMB 1
+#define CRYPTO_CNTR_ALG_VAR2 2
+
+#define CRYPTO_AUTH_POS_BEFORE 0
+#define CRYPTO_AUTH_POS_AFTER 1
+
+#define CRYPTO_AUTH_SIZE_SHA1 0
+#define CRYPTO_AUTH_SIZE_SHA256 1
+#define CRYPTO_AUTH_SIZE_SHA384 2
+#define CRYPTO_AUTH_SIZE_SHA512 3
+#define CRYPTO_AUTH_SIZE_HMAC_SHA1 4
+
+#define CRYPTO_AUTH_SIZE_UIA1 0
+#define CRYPTO_AUTH_SIZE_UIA2 1
+
+#define CRYPTO_AUTH_ALG_NONE 0
+#define CRYPTO_AUTH_ALG_SHA 1
+#define CRYPTO_AUTH_ALG_F9 2
+#define CRYPTO_AUTH_ALG_RESERVED1 3
+
+#define CRYPTO_ENCR_MODE_ECB 0
+#define CRYPTO_ENCR_MODE_CBC 1
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_CTR 2
+
+
+#define CRYPTO_ENCR_KEY_SZ_DES 0
+#define CRYPTO_ENCR_KEY_SZ_3DES 1
+
+#define CRYPTO_ENCR_KEY_SZ_AES128 0
+#define CRYPTO_ENCR_KEY_SZ_AES192 1
+#define CRYPTO_ENCR_KEY_SZ_AES256 2
+
+#define CRYPTO_ENCR_KEY_SZ_UEA1 0
+#define CRYPTO_ENCR_KEY_SZ_UEA2 1
+
+#define CRYPTO_ENCR_ALG_NONE 0
+#define CRYPTO_ENCR_ALG_DES 1
+#define CRYPTO_ENCR_ALG_AES 2
+#define CRYPTO_ENCR_ALG_C2 3
+#define CRYPTO_ENCR_ALG_F8 4
+
+/* encr_seg_cfg reg */
+#define CRYPTO_ENCR_SEG_SIZE 16 /* bit 31-16 */
+#define CRYPTO_ENCR_SEG_SIZE_MASK (0xffff << CRYPTO_ENCR_SEG_SIZE)
+
+#define CRYPTO_ENCR_START 0
+#define CRYPTO_ENCR_START_MASK (0xffff << CRYPTO_ENCR_START)
+
+/* auth_seg_cfg reg */
+#define CRYPTO_AUTH_SEG_SIZE 16 /* bit 31-16 */
+#define CRYPTO_AUTH_SEG_SIZE_MASK (0xffff << CRYPTO_AUTH_SEG_SIZE)
+
+#define CRYPTO_AUTH_START 0
+#define CRYPTO_AUTH_START_MASK (0xffff << CRYPTO_AUTH_START)
+
+
+/* seg_size reg */
+#define CRYPTO_SEG_SIZE 0
+#define CRYPTO_SEG_SIZE_MASK (0xffff << CRYPTO_SEG_SIZE)
+
+/* goproc reg */
+#define CRYPTO_GO 0
+
+/* engines_avail */
+#define CRYPTO_F9_SEL 8
+#define CRYPTO_F8_SEL 7
+#define CRYPTO_HMAC_SEL 6
+#define CRYPTO_SHA512_SEL 5
+#define CRYPTO_SHA_SEL 4
+#define CRYPTO_DES_SEL 3
+#define CRYPTO_C2_SEL 2
+
+#define CRYPTO_AES_SEL 0 /* bit 1-0 */
+#define CRYPTO_AES_SEL_MASK (3 << CRYPTO_AES_SEL)
+#define CRYPTO_AES_SEL_NO 0
+#define CRYPTO_AES_SEL_SLOW 1
+#define CRYPTO_AES_SEL_FAST 2
+#define CRYPTO_AES_SEL_RESERVED 3
+
+/* F8 definition of CRYPTO_CNTR1_IV1_REG */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT 16 /* bit 31 - 16 */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
+ (0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
+
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER 0 /* bit 4 - 0 */
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
+ (0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
+
+/* F9 definition of CRYPTO_AUTH_IV4_REG */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS 0 /* bit 2 - 0 */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
+ (0x7 << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
+
+/* misc */
+#define CRYPTO_AES_RNDKEYS 60
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_ */
diff --git a/drivers/crypto/msm/qcryptohw_40.h b/drivers/crypto/msm/qcryptohw_40.h
new file mode 100644
index 000000000000..f2102c6e4cff
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_40.h
@@ -0,0 +1,316 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_
+
+
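+/*
+ * Register offsets and bit-field layout used by the qce driver for the
+ * crypto engine 4.x register interface.
+ */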
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG 0x0
+#define CRYPTO_DATA_IN_REG 0x008
+#define CRYPTO_DATA_OUT_REG 0x010
+#define CRYPTO_STATUS_REG 0x100
+#define CRYPTO_ENGINES_AVAIL 0x104
+#define CRYPTO3_VERSION_REG 0x108
+#define CRYPTO_SEG_SIZE_REG 0x200
+#define CRYPTO_GOPROC_REG 0x204
+#define CRYPTO_ENCR_SEG_CFG_REG 0x300
+
+#define CRYPTO_ENCR_SEG_SIZE_REG 0x304
+#define CRYPTO_ENCR_SEG_START_REG 0x308
+
+#define CRYPTO_ENCR_KEY0_REG 0x310
+#define CRYPTO_ENCR_KEY1_REG 0x314
+#define CRYPTO_ENCR_KEY2_REG 0x318
+#define CRYPTO_ENCR_KEY3_REG 0x31C
+#define CRYPTO_ENCR_KEY4_REG 0x320
+#define CRYPTO_ENCR_KEY5_REG 0x324
+#define CRYPTO_ENCR_KEY6_REG 0x328
+#define CRYPTO_ENCR_KEY7_REG 0x32C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG 0x330
+#define CRYPTO_ENCR_XTS_KEY1_REG 0x334
+#define CRYPTO_ENCR_XTS_KEY2_REG 0x338
+#define CRYPTO_ENCR_XTS_KEY3_REG 0x33C
+#define CRYPTO_ENCR_XTS_KEY4_REG 0x340
+#define CRYPTO_ENCR_XTS_KEY5_REG 0x344
+#define CRYPTO_ENCR_XTS_KEY6_REG 0x348
+#define CRYPTO_ENCR_XTS_KEY7_REG 0x34C
+
+#define CRYPTO_CNTR0_IV0_REG 0x350
+#define CRYPTO_CNTR1_IV1_REG 0x354
+#define CRYPTO_CNTR2_IV2_REG 0x358
+#define CRYPTO_CNTR3_IV3_REG 0x35C
+
+#define CRYPTO_CNTR_MASK_REG 0x360
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG 0x364
+
+#define CRYPTO_AUTH_SEG_CFG_REG 0x400
+#define CRYPTO_AUTH_SEG_SIZE_REG 0x404
+#define CRYPTO_AUTH_SEG_START_REG 0x408
+
+#define CRYPTO_AUTH_KEY0_REG 0x410
+#define CRYPTO_AUTH_KEY1_REG 0x414
+#define CRYPTO_AUTH_KEY2_REG 0x418
+#define CRYPTO_AUTH_KEY3_REG 0x41C
+#define CRYPTO_AUTH_KEY4_REG 0x420
+#define CRYPTO_AUTH_KEY5_REG 0x424
+#define CRYPTO_AUTH_KEY6_REG 0x428
+#define CRYPTO_AUTH_KEY7_REG 0x42C
+#define CRYPTO_AUTH_KEY8_REG 0x430
+#define CRYPTO_AUTH_KEY9_REG 0x434
+#define CRYPTO_AUTH_KEY10_REG 0x438
+#define CRYPTO_AUTH_KEY11_REG 0x43C
+#define CRYPTO_AUTH_KEY12_REG 0x440
+#define CRYPTO_AUTH_KEY13_REG 0x444
+#define CRYPTO_AUTH_KEY14_REG 0x448
+#define CRYPTO_AUTH_KEY15_REG 0x44C
+
+#define CRYPTO_AUTH_IV0_REG 0x450
+#define CRYPTO_AUTH_IV1_REG 0x454
+#define CRYPTO_AUTH_IV2_REG 0x458
+#define CRYPTO_AUTH_IV3_REG 0x45C
+#define CRYPTO_AUTH_IV4_REG 0x460
+#define CRYPTO_AUTH_IV5_REG 0x464
+#define CRYPTO_AUTH_IV6_REG 0x468
+#define CRYPTO_AUTH_IV7_REG 0x46C
+#define CRYPTO_AUTH_IV8_REG 0x470
+#define CRYPTO_AUTH_IV9_REG 0x474
+#define CRYPTO_AUTH_IV10_REG 0x478
+#define CRYPTO_AUTH_IV11_REG 0x47C
+#define CRYPTO_AUTH_IV12_REG 0x480
+#define CRYPTO_AUTH_IV13_REG 0x484
+#define CRYPTO_AUTH_IV14_REG 0x488
+#define CRYPTO_AUTH_IV15_REG 0x48C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG 0x490
+#define CRYPTO_AUTH_INFO_NONCE1_REG 0x494
+#define CRYPTO_AUTH_INFO_NONCE2_REG 0x498
+#define CRYPTO_AUTH_INFO_NONCE3_REG 0x49C
+
+#define CRYPTO_AUTH_BYTECNT0_REG 0x4A0
+#define CRYPTO_AUTH_BYTECNT1_REG 0x4A4
+#define CRYPTO_AUTH_BYTECNT2_REG 0x4A8
+#define CRYPTO_AUTH_BYTECNT3_REG 0x4AC
+
+#define CRYPTO_AUTH_EXP_MAC0_REG 0x4B0
+#define CRYPTO_AUTH_EXP_MAC1_REG 0x4B4
+#define CRYPTO_AUTH_EXP_MAC2_REG 0x4B8
+#define CRYPTO_AUTH_EXP_MAC3_REG 0x4BC
+#define CRYPTO_AUTH_EXP_MAC4_REG 0x4C0
+#define CRYPTO_AUTH_EXP_MAC5_REG 0x4C4
+#define CRYPTO_AUTH_EXP_MAC6_REG 0x4C8
+#define CRYPTO_AUTH_EXP_MAC7_REG 0x4CC
+
+#define CRYPTO_CONFIG_REG 0x500
+#define CRYPTO_SACR_REG 0x504
+#define CRYPTO_DEBUG_REG 0x508
+
+#define CRYPTO_DATA_SHADOW0 0x8000
+#define CRYPTO_DATA_SHADOW8191 0x8FFC
+
+
+/* Register bits */
+
+#define CRYPTO_CORE_MAJOR_REV 4 /* bit 7-4 */
+#define CRYPTO_CORE_MAJOR_REV_MASK (0xF << CRYPTO_CORE_MAJOR_REV)
+#define CRYPTO_CORE_MINOR_REV 0 /* bit 3-0 */
+#define CRYPTO_CORE_MINOR_REV_MASK (0xF << CRYPTO_CORE_MINOR_REV)
+#define CRYPTO_CORE_REV_MASK 0xFF
+
+/* status reg */
+#define CRYPTO_MAC_FAILED 25
+#define CRYPTO_DOUT_SIZE_AVAIL 22 /* bit 24-22 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK (0x7 << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL 19 /* bit 21-19 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x7 << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL 18
+#define CRYPTO_SEG_CHNG_ERR 17
+#define CRYPTO_CFH_CHNG_ERR 16
+#define CRYPTO_DOUT_ERR 15
+#define CRYPTO_DIN_ERR 14
+#define CRYPTO_LOCKED 13
+#define CRYPTO_CRYPTO_STATE 10 /* bit 12-10 */
+#define CRYPTO_CRYPTO_STATE_MASK (0x7 << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY 9
+#define CRYPTO_AUTH_BUSY 8
+#define CRYPTO_DOUT_INTR 7
+#define CRYPTO_DIN_INTR 6
+#define CRYPTO_OP_DONE_INTR 5
+#define CRYPTO_ERR_INTR 4
+#define CRYPTO_DOUT_RDY 3
+#define CRYPTO_DIN_RDY 2
+#define CRYPTO_OPERATION_DONE 1
+#define CRYPTO_SW_ERR 0
+
+/* config reg */
+#define CRYPTO_REQ_SIZE 30 /* bit 31-30 */
+#define CRYPTO_REQ_SIZE_MASK (0x3 << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_16_BYTES 0
+#define CRYPTO_REQ_SIZE_ENUM_32_BYTES 1
+#define CRYPTO_REQ_SIZE_ENUM_64_BYTES 2
+
+#define CRYPTO_MAX_QUEUED_REQ 27 /* bit 29-27 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK (0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM1_QUEUED_REQS 0
+#define CRYPTO_ENUM2_QUEUED_REQS 1
+#define CRYPTO_ENUM3_QUEUED_REQS 2
+#define CRYPTO_ENUM4_QUEUED_REQS 3
+
+#define CRYPTO_FIFO_THRESHOLD 24 /* bit 26-24 */
+#define CRYPTO_FIFO_THRESHOLD_MASK (0x7 << CRYPTO_FIFO_THRESHOLD)
+#define CRYPTO_FIFO_ENUM_16_BYTES 0
+#define CRYPTO_FIFO_ENUM_32_BYTES 1
+#define CRYPTO_FIFO_ENUM_48_BYTES 2
+#define CRYPTO_FIFO_ENUM_64_BYTES 3
+
+#define CRYPTO_IRQ_ENABLES 20 /* bit 23-20 */
+#define CRYPTO_IRQ_ENABLES_MASK (0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_ACR_EN 18
+#define CRYPTO_BAM_MODE 17
+#define CRYPTO_LITTLE_ENDIAN_MODE 16
+#define CRYPTO_HIGH_SPD_OUT_EN_N 14
+#define CRYPTO_HIGH_SPD_IN_EN_N 13
+#define CRYPTO_DBG_EN 12
+
+#define CRYPTO_DBG_SEL 7 /* bit 11:7 */
+#define CRYPTO_DBG_SEL_MASK (0x1F << CRYPTO_DBG_SEL)
+
+#define CRYPTO_MASK_DOUT_INTR 6
+#define CRYPTO_MASK_DIN_INTR 5
+#define CRYPTO_MASK_OP_DONE_INTR 4
+#define CRYPTO_MASK_ERR_INTR 3
+#define CRYPTO_AUTO_SHUTDOWN_EN 2
+#define CRYPTO_CLK_EN_N 1
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC 20
+#define CRYPTO_COMP_EXP_MAC_DISABLED 0
+#define CRYPTO_COMP_EXP_MAC_ENABLED 1
+
+#define CRYPTO_F9_DIRECTION 19
+#define CRYPTO_F9_DIRECTION_UPLINK 0
+#define CRYPTO_F9_DIRECTION_DOWNLINK 1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS 16
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+ (0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_HW_KEY_AUTH 15
+
+#define CRYPTO_LAST 14
+
+#define CRYPTO_AUTH_POS 12 /* bit 13 .. 12*/
+#define CRYPTO_AUTH_POS_MASK (0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE 0
+#define CRYPTO_AUTH_POS_AFTER 1
+
+#define CRYPTO_AUTH_SIZE 9 /* bits 11 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK (0x7 << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1 0
+#define CRYPTO_AUTH_SIZE_SHA256 1
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES 0
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES 1
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES 2
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES 3
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES 4
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES 5
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES 6
+
+#define CRYPTO_AUTH_MODE 6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK (0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH 0
+#define CRYPTO_AUTH_MODE_HMAC 1
+#define CRYPTO_AUTH_MODE_CCM 0
+#define CRYPTO_AUTH_MODE_CMAC 1
+
+#define CRYPTO_AUTH_KEY_SIZE 3
+#define CRYPTO_AUTH_KEY_SIZE_MASK (0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128 0
+#define CRYPTO_AUTH_KEY_SZ_AES256 2
+
+#define CRYPTO_AUTH_ALG 0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK 7
+#define CRYPTO_AUTH_ALG_NONE 0
+#define CRYPTO_AUTH_ALG_SHA 1
+#define CRYPTO_AUTH_ALG_AES 2
+#define CRYPTO_AUTH_ALG_KASUMI 3
+#define CRYPTO_AUTH_ALG_SNOW3G 4
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE 0 /* bit 19-0 */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK 0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE 15
+#define CRYPTO_F8_KEYSTREAM_DISABLED 0
+#define CRYPTO_F8_KEYSTREAM_ENABLED 1
+
+#define CRYPTO_F8_DIRECTION 14
+#define CRYPTO_F8_DIRECTION_UPLINK 0
+#define CRYPTO_F8_DIRECTION_DOWNLINK 1
+
+#define CRYPTO_USE_HW_KEY_ENCR 13
+#define CRYPTO_USE_HW_KEY_REG 0
+#define CRYPTO_USE_HW_KEY 1
+
+#define CRYPTO_CNTR_ALG 11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK (3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST 0
+
+#define CRYPTO_ENCODE 10
+
+#define CRYPTO_ENCR_MODE 6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK (0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB 0
+#define CRYPTO_ENCR_MODE_CBC 1
+#define CRYPTO_ENCR_MODE_CTR 2
+#define CRYPTO_ENCR_MODE_XTS 3
+#define CRYPTO_ENCR_MODE_CCM 4
+
+#define CRYPTO_ENCR_KEY_SZ 3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK (7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES 0
+#define CRYPTO_ENCR_KEY_SZ_3DES 1
+#define CRYPTO_ENCR_KEY_SZ_AES128 0
+#define CRYPTO_ENCR_KEY_SZ_AES256 2
+#define CRYPTO_ENCR_KEY_SZ_UEA1 0
+#define CRYPTO_ENCR_KEY_SZ_UEA2 1
+
+#define CRYPTO_ENCR_ALG 0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK (7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE 0
+#define CRYPTO_ENCR_ALG_DES 1
+#define CRYPTO_ENCR_ALG_AES 2
+#define CRYPTO_ENCR_ALG_KASUMI 3
+#define CRYPTO_ENCR_ALG_SNOW_3G 5
+
+/* goproc reg */
+#define CRYPTO_GO 0
+#define CRYPTO_CLR_CNTXT 1
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL 0
+#define CRYPTO_DES_SEL 3
+#define CRYPTO_ENCR_SNOW3G_SEL 4
+#define CRYPTO_ENCR_KASUMI_SEL 5
+#define CRYPTO_SHA_SEL 6
+#define CRYPTO_SHA512_SEL 7
+#define CRYPTO_AUTH_AES_SEL 8
+#define CRYPTO_AUTH_SNOW3G_SEL 9
+#define CRYPTO_AUTH_KASUMI_SEL 10
+#define CRYPTO_BAM_SEL 11
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_ */
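The auth_seg_cfg macros above follow a (enum_value << bit_position) pattern. A minimal sketch, assuming only the 4.0 definitions in this header, of how an HMAC-SHA256 authentication configuration word could be assembled; the helper name is illustrative and not part of the driver:

    #include <linux/types.h>
    #include "qcryptohw_40.h"

    /* Sketch only: build an auth_seg_cfg word for HMAC-SHA256. */
    static u32 example_auth_cfg_hmac_sha256(void)
    {
            u32 cfg = 0;

            cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;        /* SHA family */
            cfg |= CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE;     /* HMAC mode */
            cfg |= CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE;   /* 256-bit digest */
            cfg |= CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS;      /* hash after cipher */
            cfg |= 1 << CRYPTO_LAST;                              /* final segment */

            return cfg;
    }

The driver writes such a word to the auth segment configuration register before issuing the go command.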
diff --git a/drivers/crypto/msm/qcryptohw_50.h b/drivers/crypto/msm/qcryptohw_50.h
new file mode 100644
index 000000000000..da8d50e9d688
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_50.h
@@ -0,0 +1,528 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+
+
+#define CRYPTO_BAM_CNFG_BITS_REG 0x0007C
+#define CRYPTO_BAM_CD_ENABLE 27
+#define CRYPTO_BAM_CD_ENABLE_MASK (1 << CRYPTO_BAM_CD_ENABLE)
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG 0x1A000
+
+#define CRYPTO_DATA_IN0_REG 0x1A010
+#define CRYPTO_DATA_IN1_REG 0x1A014
+#define CRYPTO_DATA_IN2_REG 0x1A018
+#define CRYPTO_DATA_IN3_REG 0x1A01C
+
+#define CRYPTO_DATA_OUT0_REG 0x1A020
+#define CRYPTO_DATA_OUT1_REG 0x1A024
+#define CRYPTO_DATA_OUT2_REG 0x1A028
+#define CRYPTO_DATA_OUT3_REG 0x1A02C
+
+#define CRYPTO_STATUS_REG 0x1A100
+#define CRYPTO_STATUS2_REG 0x1A104
+#define CRYPTO_ENGINES_AVAIL 0x1A108
+#define CRYPTO_FIFO_SIZES_REG 0x1A10C
+
+#define CRYPTO_SEG_SIZE_REG 0x1A110
+#define CRYPTO_GOPROC_REG 0x1A120
+#define CRYPTO_GOPROC_QC_KEY_REG 0x1B000
+#define CRYPTO_GOPROC_OEM_KEY_REG 0x1C000
+
+#define CRYPTO_ENCR_SEG_CFG_REG 0x1A200
+#define CRYPTO_ENCR_SEG_SIZE_REG 0x1A204
+#define CRYPTO_ENCR_SEG_START_REG 0x1A208
+
+#define CRYPTO_ENCR_KEY0_REG 0x1D000
+#define CRYPTO_ENCR_KEY1_REG 0x1D004
+#define CRYPTO_ENCR_KEY2_REG 0x1D008
+#define CRYPTO_ENCR_KEY3_REG 0x1D00C
+#define CRYPTO_ENCR_KEY4_REG 0x1D010
+#define CRYPTO_ENCR_KEY5_REG 0x1D014
+#define CRYPTO_ENCR_KEY6_REG 0x1D018
+#define CRYPTO_ENCR_KEY7_REG 0x1D01C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG 0x1D020
+#define CRYPTO_ENCR_XTS_KEY1_REG 0x1D024
+#define CRYPTO_ENCR_XTS_KEY2_REG 0x1D028
+#define CRYPTO_ENCR_XTS_KEY3_REG 0x1D02C
+#define CRYPTO_ENCR_XTS_KEY4_REG 0x1D030
+#define CRYPTO_ENCR_XTS_KEY5_REG 0x1D034
+#define CRYPTO_ENCR_XTS_KEY6_REG 0x1D038
+#define CRYPTO_ENCR_XTS_KEY7_REG 0x1D03C
+
+#define CRYPTO_ENCR_PIPE0_KEY0_REG 0x1E000
+#define CRYPTO_ENCR_PIPE0_KEY1_REG 0x1E004
+#define CRYPTO_ENCR_PIPE0_KEY2_REG 0x1E008
+#define CRYPTO_ENCR_PIPE0_KEY3_REG 0x1E00C
+#define CRYPTO_ENCR_PIPE0_KEY4_REG 0x1E010
+#define CRYPTO_ENCR_PIPE0_KEY5_REG 0x1E014
+#define CRYPTO_ENCR_PIPE0_KEY6_REG 0x1E018
+#define CRYPTO_ENCR_PIPE0_KEY7_REG 0x1E01C
+
+#define CRYPTO_ENCR_PIPE1_KEY0_REG 0x1E020
+#define CRYPTO_ENCR_PIPE1_KEY1_REG 0x1E024
+#define CRYPTO_ENCR_PIPE1_KEY2_REG 0x1E028
+#define CRYPTO_ENCR_PIPE1_KEY3_REG 0x1E02C
+#define CRYPTO_ENCR_PIPE1_KEY4_REG 0x1E030
+#define CRYPTO_ENCR_PIPE1_KEY5_REG 0x1E034
+#define CRYPTO_ENCR_PIPE1_KEY6_REG 0x1E038
+#define CRYPTO_ENCR_PIPE1_KEY7_REG 0x1E03C
+
+#define CRYPTO_ENCR_PIPE2_KEY0_REG 0x1E040
+#define CRYPTO_ENCR_PIPE2_KEY1_REG 0x1E044
+#define CRYPTO_ENCR_PIPE2_KEY2_REG 0x1E048
+#define CRYPTO_ENCR_PIPE2_KEY3_REG 0x1E04C
+#define CRYPTO_ENCR_PIPE2_KEY4_REG 0x1E050
+#define CRYPTO_ENCR_PIPE2_KEY5_REG 0x1E054
+#define CRYPTO_ENCR_PIPE2_KEY6_REG 0x1E058
+#define CRYPTO_ENCR_PIPE2_KEY7_REG 0x1E05C
+
+#define CRYPTO_ENCR_PIPE3_KEY0_REG 0x1E060
+#define CRYPTO_ENCR_PIPE3_KEY1_REG 0x1E064
+#define CRYPTO_ENCR_PIPE3_KEY2_REG 0x1E068
+#define CRYPTO_ENCR_PIPE3_KEY3_REG 0x1E06C
+#define CRYPTO_ENCR_PIPE3_KEY4_REG 0x1E070
+#define CRYPTO_ENCR_PIPE3_KEY5_REG 0x1E074
+#define CRYPTO_ENCR_PIPE3_KEY6_REG 0x1E078
+#define CRYPTO_ENCR_PIPE3_KEY7_REG 0x1E07C
+
+
+#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG 0x1E200
+#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG 0x1E204
+#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG 0x1E208
+#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG 0x1E20C
+#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG 0x1E210
+#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG 0x1E214
+#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG 0x1E218
+#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG 0x1E21C
+
+#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG 0x1E220
+#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG 0x1E224
+#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG 0x1E228
+#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG 0x1E22C
+#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG 0x1E230
+#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG 0x1E234
+#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG 0x1E238
+#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG 0x1E23C
+
+#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG 0x1E240
+#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG 0x1E244
+#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG 0x1E248
+#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG 0x1E24C
+#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG 0x1E250
+#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG 0x1E254
+#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG 0x1E258
+#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG 0x1E25C
+
+#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG 0x1E260
+#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG 0x1E264
+#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG 0x1E268
+#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG 0x1E26C
+#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG 0x1E270
+#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG 0x1E274
+#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG 0x1E278
+#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG 0x1E27C
+
+
+#define CRYPTO_CNTR0_IV0_REG 0x1A20C
+#define CRYPTO_CNTR1_IV1_REG 0x1A210
+#define CRYPTO_CNTR2_IV2_REG 0x1A214
+#define CRYPTO_CNTR3_IV3_REG 0x1A218
+
+#define CRYPTO_CNTR_MASK_REG0 0x1A23C
+#define CRYPTO_CNTR_MASK_REG1 0x1A238
+#define CRYPTO_CNTR_MASK_REG2 0x1A234
+#define CRYPTO_CNTR_MASK_REG 0x1A21C
+
+#define CRYPTO_ENCR_CCM_INT_CNTR0_REG 0x1A220
+#define CRYPTO_ENCR_CCM_INT_CNTR1_REG 0x1A224
+#define CRYPTO_ENCR_CCM_INT_CNTR2_REG 0x1A228
+#define CRYPTO_ENCR_CCM_INT_CNTR3_REG 0x1A22C
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG 0x1A230
+
+#define CRYPTO_AUTH_SEG_CFG_REG 0x1A300
+#define CRYPTO_AUTH_SEG_SIZE_REG 0x1A304
+#define CRYPTO_AUTH_SEG_START_REG 0x1A308
+
+#define CRYPTO_AUTH_KEY0_REG 0x1D040
+#define CRYPTO_AUTH_KEY1_REG 0x1D044
+#define CRYPTO_AUTH_KEY2_REG 0x1D048
+#define CRYPTO_AUTH_KEY3_REG 0x1D04C
+#define CRYPTO_AUTH_KEY4_REG 0x1D050
+#define CRYPTO_AUTH_KEY5_REG 0x1D054
+#define CRYPTO_AUTH_KEY6_REG 0x1D058
+#define CRYPTO_AUTH_KEY7_REG 0x1D05C
+#define CRYPTO_AUTH_KEY8_REG 0x1D060
+#define CRYPTO_AUTH_KEY9_REG 0x1D064
+#define CRYPTO_AUTH_KEY10_REG 0x1D068
+#define CRYPTO_AUTH_KEY11_REG 0x1D06C
+#define CRYPTO_AUTH_KEY12_REG 0x1D070
+#define CRYPTO_AUTH_KEY13_REG 0x1D074
+#define CRYPTO_AUTH_KEY14_REG 0x1D078
+#define CRYPTO_AUTH_KEY15_REG 0x1D07C
+
+#define CRYPTO_AUTH_PIPE0_KEY0_REG 0x1E800
+#define CRYPTO_AUTH_PIPE0_KEY1_REG 0x1E804
+#define CRYPTO_AUTH_PIPE0_KEY2_REG 0x1E808
+#define CRYPTO_AUTH_PIPE0_KEY3_REG 0x1E80C
+#define CRYPTO_AUTH_PIPE0_KEY4_REG 0x1E810
+#define CRYPTO_AUTH_PIPE0_KEY5_REG 0x1E814
+#define CRYPTO_AUTH_PIPE0_KEY6_REG 0x1E818
+#define CRYPTO_AUTH_PIPE0_KEY7_REG 0x1E81C
+#define CRYPTO_AUTH_PIPE0_KEY8_REG 0x1E820
+#define CRYPTO_AUTH_PIPE0_KEY9_REG 0x1E824
+#define CRYPTO_AUTH_PIPE0_KEY10_REG 0x1E828
+#define CRYPTO_AUTH_PIPE0_KEY11_REG 0x1E82C
+#define CRYPTO_AUTH_PIPE0_KEY12_REG 0x1E830
+#define CRYPTO_AUTH_PIPE0_KEY13_REG 0x1E834
+#define CRYPTO_AUTH_PIPE0_KEY14_REG 0x1E838
+#define CRYPTO_AUTH_PIPE0_KEY15_REG 0x1E83C
+
+#define CRYPTO_AUTH_PIPE1_KEY0_REG 0x1E880
+#define CRYPTO_AUTH_PIPE1_KEY1_REG 0x1E884
+#define CRYPTO_AUTH_PIPE1_KEY2_REG 0x1E888
+#define CRYPTO_AUTH_PIPE1_KEY3_REG 0x1E88C
+#define CRYPTO_AUTH_PIPE1_KEY4_REG 0x1E890
+#define CRYPTO_AUTH_PIPE1_KEY5_REG 0x1E894
+#define CRYPTO_AUTH_PIPE1_KEY6_REG 0x1E898
+#define CRYPTO_AUTH_PIPE1_KEY7_REG 0x1E89C
+#define CRYPTO_AUTH_PIPE1_KEY8_REG 0x1E8A0
+#define CRYPTO_AUTH_PIPE1_KEY9_REG 0x1E8A4
+#define CRYPTO_AUTH_PIPE1_KEY10_REG 0x1E8A8
+#define CRYPTO_AUTH_PIPE1_KEY11_REG 0x1E8AC
+#define CRYPTO_AUTH_PIPE1_KEY12_REG 0x1E8B0
+#define CRYPTO_AUTH_PIPE1_KEY13_REG 0x1E8B4
+#define CRYPTO_AUTH_PIPE1_KEY14_REG 0x1E8B8
+#define CRYPTO_AUTH_PIPE1_KEY15_REG 0x1E8BC
+
+#define CRYPTO_AUTH_PIPE2_KEY0_REG 0x1E900
+#define CRYPTO_AUTH_PIPE2_KEY1_REG 0x1E904
+#define CRYPTO_AUTH_PIPE2_KEY2_REG 0x1E908
+#define CRYPTO_AUTH_PIPE2_KEY3_REG 0x1E90C
+#define CRYPTO_AUTH_PIPE2_KEY4_REG 0x1E910
+#define CRYPTO_AUTH_PIPE2_KEY5_REG 0x1E914
+#define CRYPTO_AUTH_PIPE2_KEY6_REG 0x1E918
+#define CRYPTO_AUTH_PIPE2_KEY7_REG 0x1E91C
+#define CRYPTO_AUTH_PIPE2_KEY8_REG 0x1E920
+#define CRYPTO_AUTH_PIPE2_KEY9_REG 0x1E924
+#define CRYPTO_AUTH_PIPE2_KEY10_REG 0x1E928
+#define CRYPTO_AUTH_PIPE2_KEY11_REG 0x1E92C
+#define CRYPTO_AUTH_PIPE2_KEY12_REG 0x1E930
+#define CRYPTO_AUTH_PIPE2_KEY13_REG 0x1E934
+#define CRYPTO_AUTH_PIPE2_KEY14_REG 0x1E938
+#define CRYPTO_AUTH_PIPE2_KEY15_REG 0x1E93C
+
+#define CRYPTO_AUTH_PIPE3_KEY0_REG 0x1E980
+#define CRYPTO_AUTH_PIPE3_KEY1_REG 0x1E984
+#define CRYPTO_AUTH_PIPE3_KEY2_REG 0x1E988
+#define CRYPTO_AUTH_PIPE3_KEY3_REG 0x1E98C
+#define CRYPTO_AUTH_PIPE3_KEY4_REG 0x1E990
+#define CRYPTO_AUTH_PIPE3_KEY5_REG 0x1E994
+#define CRYPTO_AUTH_PIPE3_KEY6_REG 0x1E998
+#define CRYPTO_AUTH_PIPE3_KEY7_REG 0x1E99C
+#define CRYPTO_AUTH_PIPE3_KEY8_REG 0x1E9A0
+#define CRYPTO_AUTH_PIPE3_KEY9_REG 0x1E9A4
+#define CRYPTO_AUTH_PIPE3_KEY10_REG 0x1E9A8
+#define CRYPTO_AUTH_PIPE3_KEY11_REG 0x1E9AC
+#define CRYPTO_AUTH_PIPE3_KEY12_REG 0x1E9B0
+#define CRYPTO_AUTH_PIPE3_KEY13_REG 0x1E9B4
+#define CRYPTO_AUTH_PIPE3_KEY14_REG 0x1E9B8
+#define CRYPTO_AUTH_PIPE3_KEY15_REG 0x1E9BC
+
+
+#define CRYPTO_AUTH_IV0_REG 0x1A310
+#define CRYPTO_AUTH_IV1_REG 0x1A314
+#define CRYPTO_AUTH_IV2_REG 0x1A318
+#define CRYPTO_AUTH_IV3_REG 0x1A31C
+#define CRYPTO_AUTH_IV4_REG 0x1A320
+#define CRYPTO_AUTH_IV5_REG 0x1A324
+#define CRYPTO_AUTH_IV6_REG 0x1A328
+#define CRYPTO_AUTH_IV7_REG 0x1A32C
+#define CRYPTO_AUTH_IV8_REG 0x1A330
+#define CRYPTO_AUTH_IV9_REG 0x1A334
+#define CRYPTO_AUTH_IV10_REG 0x1A338
+#define CRYPTO_AUTH_IV11_REG 0x1A33C
+#define CRYPTO_AUTH_IV12_REG 0x1A340
+#define CRYPTO_AUTH_IV13_REG 0x1A344
+#define CRYPTO_AUTH_IV14_REG 0x1A348
+#define CRYPTO_AUTH_IV15_REG 0x1A34C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG 0x1A350
+#define CRYPTO_AUTH_INFO_NONCE1_REG 0x1A354
+#define CRYPTO_AUTH_INFO_NONCE2_REG 0x1A358
+#define CRYPTO_AUTH_INFO_NONCE3_REG 0x1A35C
+
+#define CRYPTO_AUTH_BYTECNT0_REG 0x1A390
+#define CRYPTO_AUTH_BYTECNT1_REG 0x1A394
+#define CRYPTO_AUTH_BYTECNT2_REG 0x1A398
+#define CRYPTO_AUTH_BYTECNT3_REG 0x1A39C
+
+#define CRYPTO_AUTH_EXP_MAC0_REG 0x1A3A0
+#define CRYPTO_AUTH_EXP_MAC1_REG 0x1A3A4
+#define CRYPTO_AUTH_EXP_MAC2_REG 0x1A3A8
+#define CRYPTO_AUTH_EXP_MAC3_REG 0x1A3AC
+#define CRYPTO_AUTH_EXP_MAC4_REG 0x1A3B0
+#define CRYPTO_AUTH_EXP_MAC5_REG 0x1A3B4
+#define CRYPTO_AUTH_EXP_MAC6_REG 0x1A3B8
+#define CRYPTO_AUTH_EXP_MAC7_REG 0x1A3BC
+
+#define CRYPTO_CONFIG_REG 0x1A400
+#define CRYPTO_DEBUG_ENABLE_REG 0x1AF00
+#define CRYPTO_DEBUG_REG 0x1AF04
+
+
+
+/* Register bits */
+#define CRYPTO_CORE_STEP_REV_MASK 0xFFFF
+#define CRYPTO_CORE_STEP_REV 0 /* bit 15-0 */
+#define CRYPTO_CORE_MAJOR_REV_MASK 0xFF000000
+#define CRYPTO_CORE_MAJOR_REV 24 /* bit 31-24 */
+#define CRYPTO_CORE_MINOR_REV_MASK 0xFF0000
+#define CRYPTO_CORE_MINOR_REV 16 /* bit 23-16 */
+
+/* status reg */
+#define CRYPTO_MAC_FAILED 31
+#define CRYPTO_DOUT_SIZE_AVAIL 26 /* bit 30-26 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK (0x1F << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			21 /* bit 25-21 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x1F << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_HSD_ERR 20
+#define CRYPTO_ACCESS_VIOL 19
+#define CRYPTO_PIPE_ACTIVE_ERR 18
+#define CRYPTO_CFG_CHNG_ERR 17
+#define CRYPTO_DOUT_ERR 16
+#define CRYPTO_DIN_ERR 15
+#define CRYPTO_AXI_ERR 14
+#define CRYPTO_CRYPTO_STATE 10 /* bit 13-10 */
+#define CRYPTO_CRYPTO_STATE_MASK (0xF << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY 9
+#define CRYPTO_AUTH_BUSY 8
+#define CRYPTO_DOUT_INTR 7
+#define CRYPTO_DIN_INTR 6
+#define CRYPTO_OP_DONE_INTR 5
+#define CRYPTO_ERR_INTR 4
+#define CRYPTO_DOUT_RDY 3
+#define CRYPTO_DIN_RDY 2
+#define CRYPTO_OPERATION_DONE 1
+#define CRYPTO_SW_ERR 0
+
+/* status2 reg */
+#define CRYPTO_AXI_EXTRA 1
+#define CRYPTO_LOCKED 2
+
+/* config reg */
+#define CRYPTO_REQ_SIZE 17 /* bit 20-17 */
+#define CRYPTO_REQ_SIZE_MASK (0xF << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_1_BEAT 0
+#define CRYPTO_REQ_SIZE_ENUM_2_BEAT 1
+#define CRYPTO_REQ_SIZE_ENUM_3_BEAT 2
+#define CRYPTO_REQ_SIZE_ENUM_4_BEAT 3
+#define CRYPTO_REQ_SIZE_ENUM_5_BEAT 4
+#define CRYPTO_REQ_SIZE_ENUM_6_BEAT 5
+#define CRYPTO_REQ_SIZE_ENUM_7_BEAT 6
+#define CRYPTO_REQ_SIZE_ENUM_8_BEAT 7
+#define CRYPTO_REQ_SIZE_ENUM_9_BEAT 8
+#define CRYPTO_REQ_SIZE_ENUM_10_BEAT 9
+#define CRYPTO_REQ_SIZE_ENUM_11_BEAT 10
+#define CRYPTO_REQ_SIZE_ENUM_12_BEAT 11
+#define CRYPTO_REQ_SIZE_ENUM_13_BEAT 12
+#define CRYPTO_REQ_SIZE_ENUM_14_BEAT 13
+#define CRYPTO_REQ_SIZE_ENUM_15_BEAT 14
+#define CRYPTO_REQ_SIZE_ENUM_16_BEAT 15
+
+#define CRYPTO_MAX_QUEUED_REQ 14 /* bit 16-14 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK (0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM_1_QUEUED_REQS 0
+#define CRYPTO_ENUM_2_QUEUED_REQS 1
+#define CRYPTO_ENUM_3_QUEUED_REQS 2
+
+#define CRYPTO_IRQ_ENABLES 10 /* bit 13-10 */
+#define CRYPTO_IRQ_ENABLES_MASK (0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_LITTLE_ENDIAN_MODE 9
+#define CRYPTO_LITTLE_ENDIAN_MASK (1 << CRYPTO_LITTLE_ENDIAN_MODE)
+#define CRYPTO_PIPE_SET_SELECT 5 /* bit 8-5 */
+#define CRYPTO_PIPE_SET_SELECT_MASK (0xF << CRYPTO_PIPE_SET_SELECT)
+
+#define CRYPTO_HIGH_SPD_EN_N 4
+
+#define CRYPTO_MASK_DOUT_INTR 3
+#define CRYPTO_MASK_DIN_INTR 2
+#define CRYPTO_MASK_OP_DONE_INTR 1
+#define CRYPTO_MASK_ERR_INTR 0
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC 24
+#define CRYPTO_COMP_EXP_MAC_DISABLED 0
+#define CRYPTO_COMP_EXP_MAC_ENABLED 1
+
+#define CRYPTO_F9_DIRECTION 23
+#define CRYPTO_F9_DIRECTION_UPLINK 0
+#define CRYPTO_F9_DIRECTION_DOWNLINK 1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS 20 /* bit 22-20 */
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+ (0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_PIPE_KEY_AUTH 19
+#define CRYPTO_USE_HW_KEY_AUTH 18
+#define CRYPTO_FIRST 17
+#define CRYPTO_LAST 16
+
+#define CRYPTO_AUTH_POS 14 /* bit 15 .. 14*/
+#define CRYPTO_AUTH_POS_MASK (0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE 0
+#define CRYPTO_AUTH_POS_AFTER 1
+
+#define CRYPTO_AUTH_SIZE 9 /* bits 13 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK (0x1F << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1 0
+#define CRYPTO_AUTH_SIZE_SHA256 1
+#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES 0
+#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES 1
+#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES 2
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES 3
+#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES 4
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES 5
+#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES 6
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES 7
+#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES 8
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES 9
+#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES 10
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES 11
+#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES 12
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES 13
+#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES 14
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES 15
+
+
+#define CRYPTO_AUTH_MODE 6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK (0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH 0
+#define CRYPTO_AUTH_MODE_HMAC 1
+#define CRYPTO_AUTH_MODE_CCM 0
+#define CRYPTO_AUTH_MODE_CMAC 1
+
+#define CRYPTO_AUTH_KEY_SIZE 3 /* bit 5 .. 3*/
+#define CRYPTO_AUTH_KEY_SIZE_MASK (0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128 0
+#define CRYPTO_AUTH_KEY_SZ_AES256 2
+
+#define CRYPTO_AUTH_ALG 0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK 7
+#define CRYPTO_AUTH_ALG_NONE 0
+#define CRYPTO_AUTH_ALG_SHA 1
+#define CRYPTO_AUTH_ALG_AES 2
+#define CRYPTO_AUTH_ALG_KASUMI 3
+#define CRYPTO_AUTH_ALG_SNOW3G 4
+#define CRYPTO_AUTH_ALG_ZUC 5
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE 0 /* bit 19-0 */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK 0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		17 /* bit */
+#define CRYPTO_F8_KEYSTREAM_DISABLED 0
+#define CRYPTO_F8_KEYSTREAM_ENABLED 1
+
+#define CRYPTO_F8_DIRECTION 16 /* bit */
+#define CRYPTO_F8_DIRECTION_UPLINK 0
+#define CRYPTO_F8_DIRECTION_DOWNLINK 1
+
+
+#define CRYPTO_USE_PIPE_KEY_ENCR 15 /* bit */
+#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED 1
+#define CRYPTO_USE_KEY_REGISTERS 0
+
+
+#define CRYPTO_USE_HW_KEY_ENCR 14
+#define CRYPTO_USE_KEY_REG 0
+#define CRYPTO_USE_HW_KEY 1
+
+#define CRYPTO_LAST_CCM 13
+#define CRYPTO_LAST_CCM_XFR 1
+#define CRYPTO_INTERM_CCM_XFR 0
+
+
+#define CRYPTO_CNTR_ALG 11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK (3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST 0
+
+#define CRYPTO_ENCODE 10
+
+#define CRYPTO_ENCR_MODE 6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK (0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB 0
+#define CRYPTO_ENCR_MODE_CBC 1
+#define CRYPTO_ENCR_MODE_CTR 2
+#define CRYPTO_ENCR_MODE_XTS 3
+#define CRYPTO_ENCR_MODE_CCM 4
+
+#define CRYPTO_ENCR_KEY_SZ 3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK (7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES 0
+#define CRYPTO_ENCR_KEY_SZ_3DES 1
+#define CRYPTO_ENCR_KEY_SZ_AES128 0
+#define CRYPTO_ENCR_KEY_SZ_AES256 2
+
+#define CRYPTO_ENCR_ALG 0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK (7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE 0
+#define CRYPTO_ENCR_ALG_DES 1
+#define CRYPTO_ENCR_ALG_AES 2
+#define CRYPTO_ENCR_ALG_KASUMI 4
+#define CRYPTO_ENCR_ALG_SNOW_3G 5
+#define CRYPTO_ENCR_ALG_ZUC 6
+
+/* goproc reg */
+#define CRYPTO_GO 0
+#define CRYPTO_CLR_CNTXT 1
+#define CRYPTO_RESULTS_DUMP 2
+
+/* F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT 16 /* bit 31 - 16 */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
+ (0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
+
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER 0 /* bit 4 - 0 */
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
+ (0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
+
+/* F9 definition of CRYPTO_AUTH_IV4 REG */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS 0 /* bit 2 - 0 */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
+ (0x7 << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL 0
+#define CRYPTO_DES_SEL 1
+#define CRYPTO_ENCR_SNOW3G_SEL 2
+#define CRYPTO_ENCR_KASUMI_SEL 3
+#define CRYPTO_SHA_SEL 4
+#define CRYPTO_SHA512_SEL 5
+#define CRYPTO_AUTH_AES_SEL 6
+#define CRYPTO_AUTH_SNOW3G_SEL 7
+#define CRYPTO_AUTH_KASUMI_SEL 8
+#define CRYPTO_BAM_PIPE_SETS 9 /* bit 12 - 9 */
+#define CRYPTO_AXI_WR_BEATS 13 /* bit 18 - 13 */
+#define CRYPTO_AXI_RD_BEATS 19 /* bit 24 - 19 */
+#define CRYPTO_ENCR_ZUC_SEL 26
+#define CRYPTO_AUTH_ZUC_SEL 27
+#define CRYPTO_ZUC_ENABLE 28
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
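The 5.0 encr_seg_cfg bits compose the same way. A minimal sketch of an AES-256-CBC encrypt configuration, assuming that a set CRYPTO_ENCODE bit selects encryption; the helper name is illustrative, not the driver's own code:

    #include <linux/types.h>
    #include "qcryptohw_50.h"

    /* Sketch only: build an encr_seg_cfg word for AES-256-CBC encryption. */
    static u32 example_encr_cfg_aes256_cbc(void)
    {
            u32 cfg = 0;

            cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;            /* AES engine */
            cfg |= CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ;   /* 256-bit key */
            cfg |= CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE;          /* CBC mode */
            cfg |= 1 << CRYPTO_ENCODE;                /* assumed: set = encrypt */

            return cfg;
    }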
diff --git a/include/linux/platform_data/qcom_crypto_device.h b/include/linux/platform_data/qcom_crypto_device.h
new file mode 100644
index 000000000000..37cf3c8d7f93
--- /dev/null
+++ b/include/linux/platform_data/qcom_crypto_device.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CRYPTO_DEVICE__H
+#define __QCOM_CRYPTO_DEVICE__H
+
+struct msm_ce_hw_support {
+ uint32_t ce_shared;
+ uint32_t shared_ce_resource;
+ uint32_t hw_key_support;
+ uint32_t sha_hmac;
+ void *bus_scale_table;
+};
+
+#endif /* __QCOM_CRYPTO_DEVICE__H */
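A minimal sketch of board-level platform data built from this structure; every field value here is an illustrative assumption, not taken from any actual target:

    #include <linux/stddef.h>
    #include <linux/platform_data/qcom_crypto_device.h>

    /* Sketch only: a CE that is not shared, with HW key and SHA-HMAC support. */
    static struct msm_ce_hw_support example_ce_support = {
            .ce_shared          = 0,
            .shared_ce_resource = 0,
            .hw_key_support     = 1,
            .sha_hmac           = 1,
            .bus_scale_table    = NULL,
    };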
diff --git a/include/linux/qcrypto.h b/include/linux/qcrypto.h
new file mode 100644
index 000000000000..6e87e5475369
--- /dev/null
+++ b/include/linux/qcrypto.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+
+#define QCRYPTO_CTX_KEY_MASK 0x000000ff
+#define QCRYPTO_CTX_USE_HW_KEY 0x00000001
+#define QCRYPTO_CTX_USE_PIPE_KEY 0x00000002
+
+#define QCRYPTO_CTX_XTS_MASK 0x0000ff00
+#define QCRYPTO_CTX_XTS_DU_SIZE_512B 0x00000100
+#define QCRYPTO_CTX_XTS_DU_SIZE_1KB 0x00000200
+
+
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev);
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
+int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);
+
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags);
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
+int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+ unsigned int flags);
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
+int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);
+
+struct crypto_engine_entry {
+ u32 hw_instance;
+ u32 ce_device;
+ int shared;
+};
+
+int qcrypto_get_num_engines(void);
+void qcrypto_get_engine_list(size_t num_engines,
+ struct crypto_engine_entry *arr);
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req,
+ unsigned int fde_pfe,
+ unsigned int hw_inst);
+
+
+struct qcrypto_func_set {
+ int (*cipher_set)(struct ablkcipher_request *req,
+ unsigned int fde_pfe,
+ unsigned hw_inst);
+ int (*cipher_flag)(struct ablkcipher_request *req, unsigned int flags);
+ int (*get_num_engines)(void);
+ void (*get_engine_list)(size_t num_engines,
+ struct crypto_engine_entry *arr);
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ */
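A minimal sketch of a kernel client using the engine-enumeration and per-request helpers declared above; error handling is abbreviated and the function name is illustrative:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/qcrypto.h>

    /* Sketch only: route a cipher request to the first engine, pipe-key path. */
    static int example_pick_engine(struct ablkcipher_request *req)
    {
            struct crypto_engine_entry *list;
            int num, ret;

            num = qcrypto_get_num_engines();
            if (num <= 0)
                    return -ENODEV;

            list = kcalloc(num, sizeof(*list), GFP_KERNEL);
            if (!list)
                    return -ENOMEM;
            qcrypto_get_engine_list(num, list);

            ret = qcrypto_cipher_set_device_hw(req, 0, list[0].hw_instance);
            if (!ret)
                    ret = qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_PIPE_KEY);

            kfree(list);
            return ret;
    }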
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 8639f651bc9d..70585dfddafe 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -128,6 +128,7 @@ header-y += fdreg.h
header-y += fib_rules.h
header-y += fiemap.h
header-y += filter.h
+header-y += fips_status.h
header-y += firewire-cdev.h
header-y += firewire-constants.h
header-y += flat.h
@@ -349,6 +350,8 @@ header-y += prctl.h
header-y += psci.h
header-y += ptp_clock.h
header-y += ptrace.h
+header-y += qcedev.h
+header-y += qcota.h
header-y += qnx4_fs.h
header-y += qnxtypes.h
header-y += qseecom.h
diff --git a/include/uapi/linux/compat_qcedev.h b/include/uapi/linux/compat_qcedev.h
new file mode 100644
index 000000000000..ff38af2f74ed
--- /dev/null
+++ b/include/uapi/linux/compat_qcedev.h
@@ -0,0 +1,165 @@
+#ifndef _UAPI_COMPAT_QCEDEV__H
+#define _UAPI_COMPAT_QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/**
+* struct compat_buf_info - Buffer information
+* @offset: Offset from the base address of the buffer
+* (Used when buffer is allocated using PMEM)
+* @vaddr: Virtual buffer address pointer
+* @len: Size of the buffer
+*/
+struct compat_buf_info {
+ union {
+ compat_ulong_t offset;
+ compat_uptr_t vaddr;
+ };
+ compat_ulong_t len;
+};
+
+/**
+* struct compat_qcedev_vbuf_info - Source and destination Buffer information
+* @src: Array of buf_info for input/source
+* @dst: Array of buf_info for output/destination
+*/
+struct compat_qcedev_vbuf_info {
+ struct compat_buf_info src[QCEDEV_MAX_BUFFERS];
+ struct compat_buf_info dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct compat_qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src: Handle to /dev/adsp_pmem used to allocate
+* memory for input/src buffer
+* @src: Array of buf_info for input/source
+* @fd_dst: Handle to /dev/adsp_pmem used to allocate
+* memory for output/dst buffer
+* @dst: Array of buf_info for output/destination
+* @pmem_src_offset: The offset from input/src buffer
+* (allocated by PMEM)
+*/
+struct compat_qcedev_pmem_info {
+ compat_int_t fd_src;
+ struct compat_buf_info src[QCEDEV_MAX_BUFFERS];
+ compat_int_t fd_dst;
+ struct compat_buf_info dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct compat_qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN): Flag to indicate if buffer source is PMEM
+* QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN): Stores PMEM buffer information.
+* Refer struct qcedev_pmem_info
+* @vbuf (IN/OUT): Stores Source and destination Buffer information
+* Refer to struct qcedev_vbuf_info
+* @data_len (IN): Total Length of input/src and output/dst in bytes
+* @in_place_op (IN):	Indicates whether the operation is in place, i.e.
+*			source == destination.
+*			When using PMEM allocated memory, this must be set to 1.
+* @enckey (IN): 128 bits of confidentiality key
+* enckey[0] bit 127-120, enckey[1] bit 119-112,..
+* enckey[15] bit 7-0
+* @encklen (IN):	Length of the encryption key (set to 128 bits/16
+* bytes in the driver)
+* @iv (IN/OUT): Initialisation vector data
+* This is updated by the driver, incremented by
+* number of blocks encrypted/decrypted.
+* @ivlen (IN): Length of the IV
+* @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set
+* for AES-128 CTR mode only)
+* @alg (IN): Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN):		Mode to use with the AES algorithm: ECB/CBC/CTR
+*			Applicable when using the AES algorithm only
+* @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+* QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM; the kernel will allocate memory, copy the data from the user
+* space buffers (data_src/data_dst), process it, and copy the results back
+* to the user space buffer.
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use the fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointer will be determined
+* by adding the offsets to the kernel virtual addr.
+*
+* If use of a hardware key is supported on the target, the user can configure
+* the key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
+*/
+struct compat_qcedev_cipher_op_req {
+ uint8_t use_pmem;
+ union {
+ struct compat_qcedev_pmem_info pmem;
+ struct compat_qcedev_vbuf_info vbuf;
+ };
+ compat_ulong_t entries;
+ compat_ulong_t data_len;
+ uint8_t in_place_op;
+ uint8_t enckey[QCEDEV_MAX_KEY_SIZE];
+ compat_ulong_t encklen;
+ uint8_t iv[QCEDEV_MAX_IV_SIZE];
+ compat_ulong_t ivlen;
+ compat_ulong_t byteoffset;
+ enum qcedev_cipher_alg_enum alg;
+ enum qcedev_cipher_mode_enum mode;
+ enum qcedev_oper_enum op;
+};
+
+/**
+* struct compat_qcedev_sha_op_req - Holds the hashing request information
+* @data (IN): Array of pointers to the data to be hashed
+* @entries (IN): Number of buf_info entries in the data array
+* @data_len (IN): Length of data to be hashed
+* @digest (IN/OUT): Returns the hashed data information
+* @diglen (OUT): Size of the hashed/digest data
+* @authkey (IN): Pointer to authentication key for HMAC
+* @authklen (IN): Size of the authentication key
+* @alg (IN): Secure Hash algorithm
+*/
+struct compat_qcedev_sha_op_req {
+ struct compat_buf_info data[QCEDEV_MAX_BUFFERS];
+ compat_ulong_t entries;
+ compat_ulong_t data_len;
+ uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
+ compat_ulong_t diglen;
+ compat_uptr_t authkey;
+ compat_ulong_t authklen;
+ enum qcedev_sha_alg_enum alg;
+};
+
+struct file;
+extern long compat_qcedev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+#define COMPAT_QCEDEV_IOCTL_ENC_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 1, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_DEC_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 2, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 3, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 4, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 5, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_GET_SHA_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 6, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_LOCK_CE \
+ _IO(QCEDEV_IOC_MAGIC, 7)
+#define COMPAT_QCEDEV_IOCTL_UNLOCK_CE \
+ _IO(QCEDEV_IOC_MAGIC, 8)
+#define COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 9, struct compat_qcedev_sha_op_req)
+
+#endif /* CONFIG_COMPAT */
+#endif /* _UAPI_COMPAT_QCEDEV__H */
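A minimal sketch of the compat widening step these structures imply: a 32-bit compat_buf_info is converted to the native buf_info before the native ioctl path is reused. The helper is illustrative, not the driver's code, and sparse __user annotations are omitted:

    #include <linux/compat.h>
    #include <linux/qcedev.h>
    #include <linux/compat_qcedev.h>

    /* Sketch only: widen one 32-bit buffer descriptor to the native layout. */
    static void example_convert_buf(struct buf_info *dst,
                                    const struct compat_buf_info *src)
    {
            dst->vaddr = compat_ptr(src->vaddr);    /* widen the user pointer */
            dst->len   = src->len;                  /* plain integer copy */
    }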
diff --git a/include/uapi/linux/fips_status.h b/include/uapi/linux/fips_status.h
new file mode 100644
index 000000000000..7daf27ba3e93
--- /dev/null
+++ b/include/uapi/linux/fips_status.h
@@ -0,0 +1,33 @@
+#ifndef _UAPI_FIPS_STATUS__H
+#define _UAPI_FIPS_STATUS__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+* fips_status: global FIPS140-2 status
+* @FIPS140_STATUS_NA:
+*		Not a FIPS140-2 compliant build.
+*		The status will not
+*		change for the lifetime
+*		of the build.
+* @FIPS140_STATUS_PASS_CRYPTO:
+*		KAT self tests have passed.
+* @FIPS140_STATUS_QCRYPTO_ALLOWED:
+*		The integrity test has passed.
+* @FIPS140_STATUS_PASS:
+*		All tests have passed and the
+*		build is in FIPS140-2 mode.
+* @FIPS140_STATUS_FAIL:
+*		One of the tests has failed.
+*		This will block all requests
+*		to crypto modules.
+*/
+enum fips_status {
+ FIPS140_STATUS_NA = 0,
+ FIPS140_STATUS_PASS_CRYPTO = 1,
+ FIPS140_STATUS_QCRYPTO_ALLOWED = 2,
+ FIPS140_STATUS_PASS = 3,
+ FIPS140_STATUS_FAIL = 0xFF
+};
+#endif /* _UAPI_FIPS_STATUS__H */
diff --git a/include/uapi/linux/qcedev.h b/include/uapi/linux/qcedev.h
new file mode 100644
index 000000000000..655d8483b251
--- /dev/null
+++ b/include/uapi/linux/qcedev.h
@@ -0,0 +1,259 @@
+#ifndef _UAPI_QCEDEV__H
+#define _UAPI_QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include "fips_status.h"
+
+#define QCEDEV_MAX_SHA_BLOCK_SIZE 64
+#define QCEDEV_MAX_BEARER 31
+#define QCEDEV_MAX_KEY_SIZE 64
+#define QCEDEV_MAX_IV_SIZE 32
+
+#define QCEDEV_MAX_BUFFERS 16
+#define QCEDEV_MAX_SHA_DIGEST 32
+
+#define QCEDEV_USE_PMEM 1
+#define QCEDEV_NO_PMEM 0
+
+#define QCEDEV_AES_KEY_128 16
+#define QCEDEV_AES_KEY_192 24
+#define QCEDEV_AES_KEY_256 32
+/**
+*qcedev_oper_enum: Operation types
+* @QCEDEV_OPER_ENC: Encrypt
+* @QCEDEV_OPER_DEC: Decrypt
+* @QCEDEV_OPER_ENC_NO_KEY: Encrypt. No key needs to be specified by the
+*			user; the key was already set by an external processor.
+* @QCEDEV_OPER_DEC_NO_KEY: Decrypt. No key needs to be specified by the
+*			user; the key was already set by an external processor.
+*/
+enum qcedev_oper_enum {
+ QCEDEV_OPER_DEC = 0,
+ QCEDEV_OPER_ENC = 1,
+ QCEDEV_OPER_DEC_NO_KEY = 2,
+ QCEDEV_OPER_ENC_NO_KEY = 3,
+ QCEDEV_OPER_LAST
+};
+
+/**
+*qcedev_cipher_alg_enum: Cipher algorithm types
+* @QCEDEV_ALG_DES: DES
+* @QCEDEV_ALG_3DES: 3DES
+* @QCEDEV_ALG_AES: AES
+*/
+enum qcedev_cipher_alg_enum {
+ QCEDEV_ALG_DES = 0,
+ QCEDEV_ALG_3DES = 1,
+ QCEDEV_ALG_AES = 2,
+ QCEDEV_ALG_LAST
+};
+
+/**
+*qcedev_cipher_mode_enum: Cipher modes (AES and DES)
+* @QCEDEV_AES_MODE_CBC: CBC
+* @QCEDEV_AES_MODE_ECB: ECB
+* @QCEDEV_AES_MODE_CTR: CTR
+* @QCEDEV_AES_MODE_XTS: XTS
+* @QCEDEV_AES_MODE_CCM: CCM
+* @QCEDEV_DES_MODE_CBC: CBC
+* @QCEDEV_DES_MODE_ECB: ECB
+*/
+enum qcedev_cipher_mode_enum {
+ QCEDEV_AES_MODE_CBC = 0,
+ QCEDEV_AES_MODE_ECB = 1,
+ QCEDEV_AES_MODE_CTR = 2,
+ QCEDEV_AES_MODE_XTS = 3,
+ QCEDEV_AES_MODE_CCM = 4,
+ QCEDEV_DES_MODE_CBC = 5,
+ QCEDEV_DES_MODE_ECB = 6,
+ QCEDEV_AES_DES_MODE_LAST
+};
+
+/**
+*enum qcedev_sha_alg_enum : Secure Hashing Algorithm
+* @QCEDEV_ALG_SHA1: Digest returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256: Digest returned: 32 bytes (256 bit)
+* @QCEDEV_ALG_SHA1_HMAC: HMAC returned 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256_HMAC: HMAC returned 32 bytes (256 bit)
+* @QCEDEV_ALG_AES_CMAC: Configurable MAC size
+*/
+enum qcedev_sha_alg_enum {
+ QCEDEV_ALG_SHA1 = 0,
+ QCEDEV_ALG_SHA256 = 1,
+ QCEDEV_ALG_SHA1_HMAC = 2,
+ QCEDEV_ALG_SHA256_HMAC = 3,
+ QCEDEV_ALG_AES_CMAC = 4,
+ QCEDEV_ALG_SHA_ALG_LAST
+};
+
+/**
+* struct buf_info - Buffer information
+* @offset: Offset from the base address of the buffer
+* (Used when buffer is allocated using PMEM)
+* @vaddr: Virtual buffer address pointer
+* @len: Size of the buffer
+*/
+struct buf_info {
+ union {
+ uint32_t offset;
+ uint8_t *vaddr;
+ };
+ uint32_t len;
+};
+
+/**
+* struct qcedev_vbuf_info - Source and destination Buffer information
+* @src: Array of buf_info for input/source
+* @dst: Array of buf_info for output/destination
+*/
+struct qcedev_vbuf_info {
+ struct buf_info src[QCEDEV_MAX_BUFFERS];
+ struct buf_info dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src: Handle to /dev/adsp_pmem used to allocate
+* memory for input/src buffer
+* @src: Array of buf_info for input/source
+* @fd_dst: Handle to /dev/adsp_pmem used to allocate
+* memory for output/dst buffer
+* @dst: Array of buf_info for output/destination
+* @pmem_src_offset: The offset from input/src buffer
+* (allocated by PMEM)
+*/
+struct qcedev_pmem_info {
+ int fd_src;
+ struct buf_info src[QCEDEV_MAX_BUFFERS];
+ int fd_dst;
+ struct buf_info dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN): Flag to indicate if buffer source is PMEM
+* QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN): Stores PMEM buffer information.
+* Refer struct qcedev_pmem_info
+* @vbuf (IN/OUT): Stores Source and destination Buffer information
+* Refer to struct qcedev_vbuf_info
+* @data_len (IN): Total Length of input/src and output/dst in bytes
+* @in_place_op (IN):	Indicates whether the operation is in place, i.e.
+*			source == destination.
+*			When using PMEM allocated memory, this must be set to 1.
+* @enckey (IN): 128 bits of confidentiality key
+* enckey[0] bit 127-120, enckey[1] bit 119-112,..
+* enckey[15] bit 7-0
+* @encklen (IN):	Length of the encryption key (set to 128 bits/16
+* bytes in the driver)
+* @iv (IN/OUT): Initialisation vector data
+* This is updated by the driver, incremented by
+* number of blocks encrypted/decrypted.
+* @ivlen (IN): Length of the IV
+* @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set
+* for AES-128 CTR mode only)
+* @alg (IN): Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN):		Mode to use with the AES algorithm: ECB/CBC/CTR
+*			Applicable when using the AES algorithm only
+* @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+* QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM; the kernel will allocate memory, copy the data from the user
+* space buffers (data_src/data_dst), process it, and copy the results back
+* to the user space buffer.
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use the fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointer will be determined
+* by adding the offsets to the kernel virtual addr.
+*
+* If use of a hardware key is supported on the target, the user can configure
+* the key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
+*/
+struct qcedev_cipher_op_req {
+ uint8_t use_pmem;
+ union {
+ struct qcedev_pmem_info pmem;
+ struct qcedev_vbuf_info vbuf;
+ };
+ uint32_t entries;
+ uint32_t data_len;
+ uint8_t in_place_op;
+ uint8_t enckey[QCEDEV_MAX_KEY_SIZE];
+ uint32_t encklen;
+ uint8_t iv[QCEDEV_MAX_IV_SIZE];
+ uint32_t ivlen;
+ uint32_t byteoffset;
+ enum qcedev_cipher_alg_enum alg;
+ enum qcedev_cipher_mode_enum mode;
+ enum qcedev_oper_enum op;
+};
+
+/**
+* struct qcedev_sha_op_req - Holds the hashing request information
+* @data (IN): Array of pointers to the data to be hashed
+* @entries (IN): Number of buf_info entries in the data array
+* @data_len (IN): Length of data to be hashed
+* @digest (IN/OUT): Returns the hashed data information
+* @diglen (OUT): Size of the hashed/digest data
+* @authkey (IN): Pointer to authentication key for HMAC
+* @authklen (IN): Size of the authentication key
+* @alg (IN): Secure Hash algorithm
+*/
+struct qcedev_sha_op_req {
+ struct buf_info data[QCEDEV_MAX_BUFFERS];
+ uint32_t entries;
+ uint32_t data_len;
+ uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
+ uint32_t diglen;
+ uint8_t *authkey;
+ uint32_t authklen;
+ enum qcedev_sha_alg_enum alg;
+};
+
+/**
+* struct qfips_verify_t - Holds data for FIPS Integrity test
+* @kernel_size (IN): Size of kernel Image
+* @kernel (IN): pointer to buffer containing the kernel Image
+*/
+struct qfips_verify_t {
+ unsigned kernel_size;
+ void *kernel;
+};
+
+struct file;
+extern long qcedev_ioctl(struct file *file,
+ unsigned cmd, unsigned long arg);
+
+#define QCEDEV_IOC_MAGIC 0x87
+
+#define QCEDEV_IOCTL_ENC_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_DEC_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_SHA_INIT_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_UPDATE_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_FINAL_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_GET_SHA_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_LOCK_CE \
+ _IO(QCEDEV_IOC_MAGIC, 7)
+#define QCEDEV_IOCTL_UNLOCK_CE \
+ _IO(QCEDEV_IOC_MAGIC, 8)
+#define QCEDEV_IOCTL_GET_CMAC_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_UPDATE_FIPS_STATUS \
+ _IOWR(QCEDEV_IOC_MAGIC, 10, enum fips_status)
+#define QCEDEV_IOCTL_QUERY_FIPS_STATUS \
+ _IOR(QCEDEV_IOC_MAGIC, 11, enum fips_status)
+#endif /* _UAPI_QCEDEV__H */
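A minimal user space sketch of the cipher path defined above: one in-place AES-128-CBC encryption over virtual buffers. The device node name (/dev/qce) and the constant key/IV are illustrative assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/qcedev.h>

    int main(void)
    {
            struct qcedev_cipher_op_req req;
            unsigned char buf[64] = "sixty-four bytes of example plaintext";
            int fd = open("/dev/qce", O_RDWR);   /* assumed node name */

            if (fd < 0)
                    return 1;

            memset(&req, 0, sizeof(req));
            req.use_pmem = QCEDEV_NO_PMEM;       /* virtual buffers, no PMEM */
            req.entries = 1;
            req.vbuf.src[0].vaddr = buf;
            req.vbuf.src[0].len = sizeof(buf);
            req.vbuf.dst[0].vaddr = buf;         /* in-place operation */
            req.vbuf.dst[0].len = sizeof(buf);
            req.data_len = sizeof(buf);
            req.in_place_op = 1;
            req.encklen = QCEDEV_AES_KEY_128;    /* 16-byte software key */
            memset(req.enckey, 0x41, QCEDEV_AES_KEY_128);
            req.ivlen = 16;
            memset(req.iv, 0, 16);
            req.alg = QCEDEV_ALG_AES;
            req.mode = QCEDEV_AES_MODE_CBC;
            req.op = QCEDEV_OPER_ENC;

            if (ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req) < 0)
                    perror("QCEDEV_IOCTL_ENC_REQ");

            close(fd);
            return 0;
    }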
diff --git a/include/uapi/linux/qcota.h b/include/uapi/linux/qcota.h
new file mode 100644
index 000000000000..7c47935b9bcd
--- /dev/null
+++ b/include/uapi/linux/qcota.h
@@ -0,0 +1,210 @@
+#ifndef _UAPI_QCOTA_H
+#define _UAPI_QCOTA_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QCE_OTA_MAX_BEARER 31
+#define OTA_KEY_SIZE 16 /* 128 bits of keys. */
+
+enum qce_ota_dir_enum {
+ QCE_OTA_DIR_UPLINK = 0,
+ QCE_OTA_DIR_DOWNLINK = 1,
+ QCE_OTA_DIR_LAST
+};
+
+enum qce_ota_algo_enum {
+ QCE_OTA_ALGO_KASUMI = 0,
+ QCE_OTA_ALGO_SNOW3G = 1,
+ QCE_OTA_ALGO_LAST
+};
+
+/**
+ * struct qce_f8_req - qce f8 request
+ * @data_in: packets input data stream to be ciphered.
+ * If NULL, streaming mode operation.
+ * @data_out: ciphered packets output data.
+ * @data_len: length of data_in and data_out in bytes.
+ * @count_c: count-C, ciphering sequence number, 32 bit
+ * @bearer: 5 bit of radio bearer identifier.
+ * @ckey: 128 bits of confidentiality key,
+ * ckey[0] bit 127-120, ckey[1] bit 119-112,.., ckey[15] bit 7-0.
+ * @direction: uplink or downlink.
+ * @algorithm: Kasumi, or Snow3G.
+ *
+ * If data_in is NULL, the engine will run in a special mode called
+ * key stream mode. In this special mode, the engine will generate
+ * key stream output for the number of bytes specified in the
+ * data_len, based on the input parameters of direction, algorithm,
+ * ckey, bearer, and count_c. The data_len is restricted to
+ * a multiple of 16 bytes. The application can then XOR the
+ * output stream with the input data stream to generate the
+ * final cipher data stream.
+ */
+struct qce_f8_req {
+ uint8_t *data_in;
+ uint8_t *data_out;
+ uint16_t data_len;
+ uint32_t count_c;
+ uint8_t bearer;
+ uint8_t ckey[OTA_KEY_SIZE];
+ enum qce_ota_dir_enum direction;
+ enum qce_ota_algo_enum algorithm;
+};
+
+/**
+ * struct qce_f8_multi_pkt_req - qce f8 multiple packet request
+ *			Multiple packets of uniform size, sharing the
+ *			same F8 ciphering parameters, can be ciphered
+ *			in a single request.
+ *
+ * @num_pkt: number of packets.
+ *
+ * @cipher_start: ciphering starts offset within a packet.
+ *
+ * @cipher_size: number of bytes to be ciphered within a packet.
+ *
+ * @qce_f8_req: description of the packet and F8 parameters.
+ * The following fields have special meaning for
+ * multiple packet operation,
+ *
+ * @data_len: data_len indicates the length of a packet.
+ *
+ * @data_in: packets are concatenated together in a byte
+ * stream started at data_in.
+ *
+ * @data_out: The returned ciphered output for multiple
+ * packets.
+ * Each packet ciphered output are concatenated
+ * together into a byte stream started at data_out.
+ *			Note: in each ciphered packet's output area,
+ *			offsets 0 to cipher_start - 1 and offsets
+ *			cipher_start + cipher_size to data_len - 1 are
+ *			copied unaltered from the packet input area.
+ * @count_c: count-C of the first packet, 32 bit.
+ *
+ *
+ * In one request, multiple packets can be ciphered, and output to the
+ * data_out stream.
+ *
+ * Packet data are laid out contiguously in sequence in the data_in
+ * and data_out areas. Every packet is of identical size.
+ * If the PDU is not byte aligned, set the data_len value to the
+ * rounded-up packet size. E.g. for a PDU size of 253 bits, set the
+ * packet size to 32 bytes. The next packet starts on
+ * the next byte boundary.
+ *
+ * For each packet, data from offset 0 to cipher_start
+ * will be left unchanged and output to the data_out area.
+ * This area of the packet can be for the RLC header, which is not
+ * to be ciphered.
+ *
+ * The ciphering of a packet starts from offset cipher_start, for
+ * cipher_size bytes of data. Data starting from
+ * offset cipher_start + cipher_size to the end of packet will be left
+ * unchanged and output to the data_out area.
+ *
+ * For each packet the input arguments of bearer, direction,
+ * ckey, and algorithm have to be the same. count_c is the ciphering
+ * sequence number of the first packet. The 2nd packet's ciphering
+ * sequence number is assumed to be count_c + 1, the 3rd packet's is
+ * count_c + 2, and so on.
+ *
+ */
+struct qce_f8_multi_pkt_req {
+ uint16_t num_pkt;
+ uint16_t cipher_start;
+ uint16_t cipher_size;
+ struct qce_f8_req qce_f8_req;
+};
+
+/**
+ * struct qce_f8_variable_multi_pkt_req - qce f8 multiple packet request
+ *			Multiple packets of variable size, sharing the
+ *			same F8 ciphering parameters, can be ciphered
+ *			in a single request.
+ *
+ * @num_pkt: number of packets.
+ *
+ * @cipher_iov[]: array of iov of packets to be ciphered.
+ *
+ *
+ * @qce_f8_req: description of the packet and F8 parameters.
+ * The following fields have special meaning for
+ * multiple packet operation,
+ *
+ * @data_len: ignored.
+ *
+ * @data_in: ignored.
+ *
+ * @data_out: ignored.
+ *
+ * @count_c: count-C of the first packet, 32 bit.
+ *
+ *
+ * In one request, multiple packets can be ciphered.
+ *
+ * The i-th packet is defined in cipher_iov[i-1].
+ * The ciphering of i-th packet starts from offset 0 of the PDU specified
+ * by cipher_iov[i-1].addr, for cipher_iov[i-1].size bytes of data.
+ * If the PDU is not byte aligned, set the cipher_iov[i-1].size value
+ * to the rounded-up packet size. E.g. for a PDU size of
+ * 253 bits, set the packet size to 32 bytes.
+ *
+ * Ciphering is done in place. That is, the ciphering
+ * input and output data are both in cipher_iov[i-1].addr for the i-th
+ * packet.
+ *
+ * For each packet the input arguments of bearer, direction,
+ * ckey, and algorithm have to be the same. count_c is the ciphering
+ * sequence number of the first packet. The 2nd packet's ciphering
+ * sequence number is assumed to be count_c + 1, the 3rd packet's is
+ * count_c + 2, and so on.
+ */
+
+#define MAX_NUM_V_MULTI_PKT 20
+struct cipher_iov {
+ unsigned char *addr;
+ unsigned short size;
+};
+
+struct qce_f8_varible_multi_pkt_req {
+ unsigned short num_pkt;
+ struct cipher_iov cipher_iov[MAX_NUM_V_MULTI_PKT];
+ struct qce_f8_req qce_f8_req;
+};
+
+/**
+ * struct qce_f9_req - qce f9 request
+ * @message: message
+ * @msize: message size in bytes (include the last partial byte).
+ * @last_bits: valid bits in the last byte of message.
+ * @mac_i: 32 bit message authentication code, to be returned.
+ * @fresh: random 32 bit number, one per user.
+ * @count_i: 32 bit count-I integrity sequence number.
+ * @direction: uplink or downlink.
+ * @ikey: 128 bits of integrity key,
+ * ikey[0] bit 127-120, ikey[1] bit 119-112,.., ikey[15] bit 7-0.
+ * @algorithm: Kasumi, or Snow3G.
+ */
+struct qce_f9_req {
+ uint8_t *message;
+ uint16_t msize;
+ uint8_t last_bits;
+ uint32_t mac_i;
+ uint32_t fresh;
+ uint32_t count_i;
+ enum qce_ota_dir_enum direction;
+ uint8_t ikey[OTA_KEY_SIZE];
+ enum qce_ota_algo_enum algorithm;
+};
+
+#define QCOTA_IOC_MAGIC 0x85
+
+#define QCOTA_F8_REQ _IOWR(QCOTA_IOC_MAGIC, 1, struct qce_f8_req)
+#define QCOTA_F8_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 2, struct qce_f8_multi_pkt_req)
+#define QCOTA_F9_REQ _IOWR(QCOTA_IOC_MAGIC, 3, struct qce_f9_req)
+#define QCOTA_F8_V_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 4,\
+ struct qce_f8_varible_multi_pkt_req)
+
+#endif /* _UAPI_QCOTA_H */
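A minimal user space sketch of the uniform multi-packet F8 path described above: four 32-byte packets, each with a 4-byte header that is passed through unciphered. The device node name (/dev/qce_ota) and the key, bearer and count-C values are illustrative assumptions:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/qcota.h>

    int main(void)
    {
            unsigned char in[4 * 32], out[4 * 32];
            struct qce_f8_multi_pkt_req mreq;
            int fd = open("/dev/qce_ota", O_RDWR);   /* assumed node name */

            if (fd < 0)
                    return 1;

            memset(in, 0xAB, sizeof(in));            /* example payload */
            memset(&mreq, 0, sizeof(mreq));

            mreq.num_pkt = 4;
            mreq.cipher_start = 4;                   /* skip the 4-byte header */
            mreq.cipher_size = 28;                   /* cipher the rest of each packet */

            mreq.qce_f8_req.data_in = in;            /* packets laid out back to back */
            mreq.qce_f8_req.data_out = out;
            mreq.qce_f8_req.data_len = 32;           /* per-packet length */
            mreq.qce_f8_req.count_c = 0x100;         /* count-C of the first packet */
            mreq.qce_f8_req.bearer = 3;
            memset(mreq.qce_f8_req.ckey, 0x22, OTA_KEY_SIZE);
            mreq.qce_f8_req.direction = QCE_OTA_DIR_UPLINK;
            mreq.qce_f8_req.algorithm = QCE_OTA_ALGO_SNOW3G;

            if (ioctl(fd, QCOTA_F8_MPKT_REQ, &mreq) < 0)
                    return 1;

            close(fd);
            return 0;
    }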