Diffstat (limited to 'include')
111 files changed, 3408 insertions, 482 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 4814cf971048..53a47d75cc43 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -237,6 +237,21 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * This is an implementation of pmdp_establish() that is only suitable for an + * architecture that doesn't have hardware dirty/accessed bits. In this case we + * can't race with CPU which sets these bits and non-atomic aproach is fine. + */ +static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + return old_pmd; +} +#endif + #ifndef __HAVE_ARCH_PMDP_INVALIDATE extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); @@ -755,8 +770,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); -int pud_free_pmd_page(pud_t *pud); -int pmd_free_pte_page(pmd_t *pmd); +int pud_free_pmd_page(pud_t *pud, unsigned long addr); +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { @@ -774,16 +789,28 @@ static inline int pmd_clear_huge(pmd_t *pmd) { return 0; } -static inline int pud_free_pmd_page(pud_t *pud) +static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) { return 0; } -static inline int pmd_free_pte_page(pmd_t *pmd) +static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { return 0; } #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED +static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) +{ + return true; +} + +static inline bool arch_has_pfn_modify_check(void) +{ + return false; +} +#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */ + #endif /* !__ASSEMBLY__ */ #ifndef io_remap_pfn_range diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 2cf7a61ece59..ce6619c339fe 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -19,12 +19,46 @@ struct rtattr; +struct skcipher_instance { + void (*free)(struct skcipher_instance *inst); + union { + struct { + char head[offsetof(struct skcipher_alg, base)]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + struct crypto_skcipher_spawn { struct crypto_spawn base; }; extern const struct crypto_type crypto_givcipher_type; +static inline struct crypto_instance *skcipher_crypto_instance( + struct skcipher_instance *inst) +{ + return &inst->s.base; +} + +static inline struct skcipher_instance *skcipher_alg_instance( + struct crypto_skcipher *skcipher) +{ + return container_of(crypto_skcipher_alg(skcipher), + struct skcipher_instance, alg); +} + +static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) +{ + return crypto_instance_ctx(skcipher_crypto_instance(inst)); +} + +static inline void skcipher_request_complete(struct skcipher_request *req, int err) +{ + req->base.complete(&req->base, err); +} + static inline void crypto_set_skcipher_spawn( struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) { @@ -33,6 +67,8 @@ static inline void 
crypto_set_skcipher_spawn( int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask); +int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask); struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); @@ -47,6 +83,12 @@ static inline struct crypto_alg *crypto_skcipher_spawn_alg( return spawn->base.alg; } +static inline struct skcipher_alg *crypto_spawn_skcipher_alg( + struct crypto_skcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct skcipher_alg, base); +} + static inline struct crypto_ablkcipher *crypto_spawn_skcipher( struct crypto_skcipher_spawn *spawn) { @@ -55,6 +97,25 @@ static inline struct crypto_ablkcipher *crypto_spawn_skcipher( crypto_skcipher_mask(0))); } +static inline struct crypto_skcipher *crypto_spawn_skcipher2( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_skcipher_set_reqsize( + struct crypto_skcipher *skcipher, unsigned int reqsize) +{ + skcipher->reqsize = reqsize; +} + +int crypto_register_skcipher(struct skcipher_alg *alg); +void crypto_unregister_skcipher(struct skcipher_alg *alg); +int crypto_register_skciphers(struct skcipher_alg *algs, int count); +void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); +int skcipher_register_instance(struct crypto_template *tmpl, + struct skcipher_instance *inst); + int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req); int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req); const char *crypto_default_geniv(const struct crypto_alg *alg); @@ -122,5 +183,31 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req) return req->base.flags; } +static inline unsigned int crypto_skcipher_alg_min_keysize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.min_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.min_keysize; + + return alg->min_keysize; +} + +static inline unsigned int crypto_skcipher_alg_max_keysize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.max_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.max_keysize; + + return alg->max_keysize; +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index fd8742a40ff3..5c90d3edf975 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -60,12 +60,80 @@ struct crypto_skcipher { unsigned int ivsize; unsigned int reqsize; - - bool has_setkey; + unsigned int keysize; struct crypto_tfm base; }; +/** + * struct skcipher_alg - symmetric key cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. 
Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * + * All fields except @ivsize are mandatory and must be filled. + */ +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct skcipher_request *req); + int (*decrypt)(struct skcipher_request *req); + int (*init)(struct crypto_skcipher *tfm); + void (*exit)(struct crypto_skcipher *tfm); + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + + struct crypto_alg base; +}; + #define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ char __##name##_desc[sizeof(struct skcipher_request) + \ crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ @@ -233,6 +301,43 @@ static inline int crypto_has_skcipher(const char *alg_name, u32 type, } /** + * crypto_has_skcipher2() - Search for the availability of an skcipher. 
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * skcipher + * @type: specifies the type of the skcipher + * @mask: specifies the mask for the skcipher + * + * Return: true when the skcipher is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask); + +static inline const char *crypto_skcipher_driver_name( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)); +} + +static inline struct skcipher_alg *crypto_skcipher_alg( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg, base); +} + +static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.ivsize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.ivsize; + + return alg->ivsize; +} + +/** * crypto_skcipher_ivsize() - obtain IV size * @tfm: cipher handle * @@ -246,6 +351,36 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) return tfm->ivsize; } +static inline unsigned int crypto_skcipher_alg_chunksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->chunksize; +} + +/** + * crypto_skcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. + * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_skcipher_chunksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); +} + /** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle @@ -309,7 +444,13 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm) { - return tfm->has_setkey; + return tfm->keysize; +} + +static inline unsigned int crypto_skcipher_default_keysize( + struct crypto_skcipher *tfm) +{ + return tfm->keysize; } /** diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h deleted file mode 100644 index 6b700c7b2fe1..000000000000 --- a/include/crypto/vmac.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Modified to interface to the Linux kernel - * Copyright (c) 2009, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
- */ - -#ifndef __CRYPTO_VMAC_H -#define __CRYPTO_VMAC_H - -/* -------------------------------------------------------------------------- - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. - * This implementation is herby placed in the public domain. - * The authors offers no warranty. Use at your own risk. - * Please send bug reports to the authors. - * Last modified: 17 APR 08, 1700 PDT - * ----------------------------------------------------------------------- */ - -/* - * User definable settings. - */ -#define VMAC_TAG_LEN 64 -#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ -#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) -#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ - -/* - * This implementation uses u32 and u64 as names for unsigned 32- - * and 64-bit integer types. These are defined in C99 stdint.h. The - * following may need adaptation if you are not running a C99 or - * Microsoft C environment. - */ -struct vmac_ctx { - u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; - u64 polykey[2*VMAC_TAG_LEN/64]; - u64 l3key[2*VMAC_TAG_LEN/64]; - u64 polytmp[2*VMAC_TAG_LEN/64]; - u64 cached_nonce[2]; - u64 cached_aes[2]; - int first_block_processed; -}; - -typedef u64 vmac_t; - -struct vmac_ctx_t { - struct crypto_cipher *child; - struct vmac_ctx __vmac_ctx; - u8 partial[VMAC_NHBYTES]; /* partial block */ - int partial_size; /* size of the partial block */ -}; - -#endif /* __CRYPTO_VMAC_H */ diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index bb9d0deca07c..0fb4975fae91 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -342,6 +342,7 @@ # define DP_PSR_FRAME_CAPTURE (1 << 3) # define DP_PSR_SELECTIVE_UPDATE (1 << 4) # define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) +# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */ #define DP_ADAPTER_CTRL 0x1a0 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9374470d9356..719428f16425 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -888,8 +888,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, if (!q->limits.chunk_sectors) return q->limits.max_sectors; - return q->limits.chunk_sectors - - (offset & (q->limits.chunk_sectors - 1)); + return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors - + (offset & (q->limits.chunk_sectors - 1)))); } static inline unsigned int blk_rq_get_max_sectors(struct request *rq) diff --git a/include/linux/bug.h b/include/linux/bug.h index 91eedf5fae38..218ac5875124 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -115,4 +115,23 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, #else #define PANIC_CORRUPTION 0 #endif /* CONFIG_PANIC_ON_DATA_CORRUPTION */ + +/* + * Since detected data corruption should stop operation on the affected + * structures. Return value must be checked and sanely acted on by caller. + */ +static inline __must_check bool check_data_corruption(bool v) { return v; } +#define CHECK_DATA_CORRUPTION(condition, fmt, ...) 
\ + check_data_corruption(({ \ + bool corruption = unlikely(condition); \ + if (corruption) { \ + if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ + pr_err(fmt, ##__VA_ARGS__); \ + BUG(); \ + } else \ + WARN(1, fmt, ##__VA_ARGS__); \ + } \ + corruption; \ + })) + #endif /* _LINUX_BUG_H */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 557dae96ce74..143d40e8a1ea 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -65,25 +65,40 @@ #endif /* + * Feature detection for gnu_inline (gnu89 extern inline semantics). Either + * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics, + * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not + * defined so the gnu89 semantics are the default. + */ +#ifdef __GNUC_STDC_INLINE__ +# define __gnu_inline __attribute__((gnu_inline)) +#else +# define __gnu_inline +#endif + +/* * Force always-inline if the user requests it so via the .config, * or if gcc is too old. * GCC does not warn about unused static inline functions for * -Wunused-function. This turns out to avoid the need for complex #ifdef * directives. Suppress the warning in clang as well by using "unused" * function attribute, which is redundant but not harmful for gcc. + * Prefer gnu_inline, so that extern inline functions do not emit an + * externally visible function. This makes extern inline behave as per gnu89 + * semantics rather than c99. This prevents multiple symbol definition errors + * of extern inline functions at link time. + * A lot of inline functions can cause havoc with function tracing. */ #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) -#define inline inline __attribute__((always_inline,unused)) notrace -#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace -#define __inline __inline __attribute__((always_inline,unused)) notrace +#define inline \ + inline __attribute__((always_inline, unused)) notrace __gnu_inline #else -/* A lot of inline functions can cause havoc with function tracing */ -#define inline inline __attribute__((unused)) notrace -#define __inline__ __inline__ __attribute__((unused)) notrace -#define __inline __inline __attribute__((unused)) notrace +#define inline inline __attribute__((unused)) notrace __gnu_inline #endif +#define __inline__ inline +#define __inline inline #define __always_inline inline __attribute__((always_inline)) #define noinline __attribute__((noinline)) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 6fc9a6dd5ed2..0db1fa621d8a 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -111,7 +111,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #define unlikely_notrace(x) __builtin_expect(!!(x), 0) #define __branch_check__(x, expect) ({ \ - int ______r; \ + long ______r; \ static struct ftrace_branch_data \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_annotated_branch"))) \ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6d4c3d55d29c..ec088f5729fb 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -59,6 +59,10 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_l1tf(struct 
device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h index 3fb38750c853..356a3fad03c9 100644 --- a/include/linux/cpufreq_times.h +++ b/include/linux/cpufreq_times.h @@ -22,6 +22,7 @@ #ifdef CONFIG_CPU_FREQ_TIMES void cpufreq_task_times_init(struct task_struct *p); +void cpufreq_task_times_alloc(struct task_struct *p); void cpufreq_task_times_exit(struct task_struct *p); int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *p); @@ -31,6 +32,11 @@ void cpufreq_times_record_transition(struct cpufreq_freqs *freq); void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end); int single_uid_time_in_state_open(struct inode *inode, struct file *file); #else +static inline void cpufreq_task_times_init(struct task_struct *p) {} +static inline void cpufreq_task_times_alloc(struct task_struct *p) {} +static inline void cpufreq_task_times_exit(struct task_struct *p) {} +static inline void cpufreq_acct_update_power(struct task_struct *p, + u64 cputime) {} static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {} static inline void cpufreq_times_record_transition( struct cpufreq_freqs *freq) {} diff --git a/include/linux/crypto.h b/include/linux/crypto.h index b7c1e1a7ebac..d7c8b37b2e95 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -48,6 +48,7 @@ #define CRYPTO_ALG_TYPE_AEAD 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_DIGEST 0x00000008 #define CRYPTO_ALG_TYPE_HASH 0x00000008 diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h index 2cd41e0cd394..e724a4195170 100644 --- a/include/linux/diagchar.h +++ b/include/linux/diagchar.h @@ -145,10 +145,10 @@ the appropriate macros. */ /* This needs to be modified manually now, when we add a new RANGE of SSIDs to the msg_mask_tbl */ #define MSG_MASK_TBL_CNT 26 -#define APPS_EVENT_LAST_ID 0x0C5B +#define APPS_EVENT_LAST_ID 0xC85 #define MSG_SSID_0 0 -#define MSG_SSID_0_LAST 125 +#define MSG_SSID_0_LAST 129 #define MSG_SSID_1 500 #define MSG_SSID_1_LAST 506 #define MSG_SSID_2 1000 @@ -164,7 +164,7 @@ the appropriate macros. 
*/ #define MSG_SSID_7 4600 #define MSG_SSID_7_LAST 4616 #define MSG_SSID_8 5000 -#define MSG_SSID_8_LAST 5034 +#define MSG_SSID_8_LAST 5036 #define MSG_SSID_9 5500 #define MSG_SSID_9_LAST 5516 #define MSG_SSID_10 6000 @@ -218,7 +218,7 @@ static const uint32_t msg_bld_masks_0[] = { MSG_LVL_MED, MSG_LVL_HIGH, MSG_LVL_HIGH, - MSG_LVL_LOW|MSG_MASK_5|MSG_MASK_6|MSG_MASK_7|MSG_MASK_8, + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8, MSG_LVL_LOW, MSG_LVL_ERROR, MSG_LVL_LOW, @@ -342,6 +342,10 @@ static const uint32_t msg_bld_masks_0[] = { MSG_LVL_HIGH, MSG_LVL_LOW, MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL, + MSG_LVL_HIGH, + MSG_LVL_LOW, + MSG_LVL_MED, + MSG_LVL_MED, MSG_LVL_HIGH }; @@ -413,81 +417,91 @@ static const uint32_t msg_bld_masks_5[] = { }; static const uint32_t msg_bld_masks_6[] = { - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | 
MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, }; static const uint32_t msg_bld_masks_7[] = { @@ -545,7 +559,9 @@ static const uint32_t msg_bld_masks_8[] = { MSG_LVL_MED, MSG_LVL_MED, MSG_LVL_MED, - MSG_LVL_HIGH, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, MSG_LVL_HIGH }; @@ -771,18 +787,22 @@ static const uint32_t msg_bld_masks_17[] = { static const uint32_t msg_bld_masks_18[] = { MSG_LVL_LOW, - MSG_LVL_LOW | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | \ - MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14|MSG_MASK_15 | \ - MSG_MASK_16|MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20, - MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6, - MSG_LVL_LOW | MSG_MASK_5, - MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6, - MSG_LVL_LOW, - MSG_LVL_LOW | MSG_MASK_5 | \ - MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9, + MSG_LVL_MED | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | + MSG_MASK_12 | MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | + MSG_MASK_16 | MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | + MSG_MASK_20, + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6, + MSG_LVL_MED | MSG_MASK_5, + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6, MSG_LVL_LOW, + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | + MSG_MASK_9, MSG_LVL_LOW, MSG_LVL_LOW, + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | + MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 | + MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 | + MSG_MASK_20 | MSG_MASK_21 | MSG_MASK_22, MSG_LVL_LOW }; @@ -881,7 +901,7 @@ static const uint32_t msg_bld_masks_25[] = { /* LOG CODES */ static const uint32_t log_code_last_tbl[] = { 0x0, /* EQUIP ID 0 */ - 0x1C6A, /* EQUIP ID 1 */ + 0x1C7B, /* EQUIP ID 1 */ 0x0, /* EQUIP ID 2 */ 0x0, /* EQUIP ID 3 */ 0x4910, /* EQUIP ID 4 */ diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index f3422440c45f..eecc7240ddfc 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -17,6 +17,7 @@ #define __DMA_IOMMU_H #ifdef __KERNEL__ +#include <linux/types.h> #include <asm/errno.h> #ifdef CONFIG_IOMMU_DMA diff --git a/include/linux/extcon.h b/include/linux/extcon.h index e1360198955a..081afbe982c0 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -61,6 +61,9 @@ /* connector speed 0 - High Speed, 1 - super speed */ #define EXTCON_USB_SPEED 29 +/* connector type C current 0 - default current, 1 - medium or high current */ +#define EXTCON_USB_TYPEC_MED_HIGH_CURRENT 30 + /* Display external connector */ #define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */ #define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 2ebfa01b7091..8e6a18582566 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -304,11 +304,6 @@ struct f2fs_node { * For 
NAT entries */ #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) -#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8) -#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \ - ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \ - BITS_PER_LONG * BITS_PER_LONG) - struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 250f4d1ce9c5..f5f4e7871865 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -964,9 +964,9 @@ static inline struct file *get_file(struct file *f) /* Page cache limit. The filesystems should put that into their s_maxbytes limits, otherwise bad things can happen in VM. */ #if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) +#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT) #elif BITS_PER_LONG==64 -#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) +#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX) #endif #define FL_POSIX 1 diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h index 44bd4fbd3ec5..e2729c6d9829 100644 --- a/include/linux/fscrypt_notsupp.h +++ b/include/linux/fscrypt_notsupp.h @@ -67,16 +67,6 @@ static inline void fscrypt_restore_control_page(struct page *page) return; } -static inline void fscrypt_set_d_op(struct dentry *dentry) -{ - return; -} - -static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry) -{ - return; -} - /* policy.c */ static inline int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h index 9d1857302b73..46b62d82b6d6 100644 --- a/include/linux/fscrypt_supp.h +++ b/include/linux/fscrypt_supp.h @@ -28,7 +28,7 @@ struct fscrypt_operations { int (*set_context)(struct inode *, const void *, size_t, void *); bool (*dummy_context)(struct inode *); bool (*empty_dir)(struct inode *); - unsigned (*max_namelen)(struct inode *); + unsigned int max_namelen; }; struct fscrypt_ctx { @@ -74,20 +74,6 @@ static inline struct page *fscrypt_control_page(struct page *page) extern void fscrypt_restore_control_page(struct page *); -extern const struct dentry_operations fscrypt_d_ops; - -static inline void fscrypt_set_d_op(struct dentry *dentry) -{ - d_set_d_op(dentry, &fscrypt_d_ops); -} - -static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry) -{ - spin_lock(&dentry->d_lock); - dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY; - spin_unlock(&dentry->d_lock); -} - /* policy.c */ extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); extern int fscrypt_ioctl_get_policy(struct file *, void __user *); diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h new file mode 100644 index 000000000000..e6e53a36104b --- /dev/null +++ b/include/linux/fscrypto.h @@ -0,0 +1,411 @@ +/* + * General per-file encryption definition + * + * Copyright (C) 2015, Google, Inc. + * + * Written by Michael Halcrow, 2015. + * Modified by Jaegeuk Kim, 2015. 
+ */ + +#ifndef _LINUX_FSCRYPTO_H +#define _LINUX_FSCRYPTO_H + +#include <linux/key.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/bio.h> +#include <linux/dcache.h> +#include <crypto/skcipher.h> +#include <uapi/linux/fs.h> + +#define FS_KEY_DERIVATION_NONCE_SIZE 16 +#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1 + +#define FS_POLICY_FLAGS_PAD_4 0x00 +#define FS_POLICY_FLAGS_PAD_8 0x01 +#define FS_POLICY_FLAGS_PAD_16 0x02 +#define FS_POLICY_FLAGS_PAD_32 0x03 +#define FS_POLICY_FLAGS_PAD_MASK 0x03 +#define FS_POLICY_FLAGS_VALID 0x03 + +/* Encryption algorithms */ +#define FS_ENCRYPTION_MODE_INVALID 0 +#define FS_ENCRYPTION_MODE_AES_256_XTS 1 +#define FS_ENCRYPTION_MODE_AES_256_GCM 2 +#define FS_ENCRYPTION_MODE_AES_256_CBC 3 +#define FS_ENCRYPTION_MODE_AES_256_CTS 4 + +/** + * Encryption context for inode + * + * Protector format: + * 1 byte: Protector format (1 = this version) + * 1 byte: File contents encryption mode + * 1 byte: File names encryption mode + * 1 byte: Flags + * 8 bytes: Master Key descriptor + * 16 bytes: Encryption Key derivation nonce + */ +struct fscrypt_context { + u8 format; + u8 contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; + u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; +} __packed; + +/* Encryption parameters */ +#define FS_XTS_TWEAK_SIZE 16 +#define FS_AES_128_ECB_KEY_SIZE 16 +#define FS_AES_256_GCM_KEY_SIZE 32 +#define FS_AES_256_CBC_KEY_SIZE 32 +#define FS_AES_256_CTS_KEY_SIZE 32 +#define FS_AES_256_XTS_KEY_SIZE 64 +#define FS_MAX_KEY_SIZE 64 + +#define FS_KEY_DESC_PREFIX "fscrypt:" +#define FS_KEY_DESC_PREFIX_SIZE 8 + +/* This is passed in from userspace into the kernel keyring */ +struct fscrypt_key { + u32 mode; + u8 raw[FS_MAX_KEY_SIZE]; + u32 size; +} __packed; + +struct fscrypt_info { + u8 ci_data_mode; + u8 ci_filename_mode; + u8 ci_flags; + struct crypto_skcipher *ci_ctfm; + struct key *ci_keyring_key; + u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; +}; + +#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 +#define FS_WRITE_PATH_FL 0x00000002 + +struct fscrypt_ctx { + union { + struct { + struct page *bounce_page; /* Ciphertext page */ + struct page *control_page; /* Original page */ + } w; + struct { + struct bio *bio; + struct work_struct work; + } r; + struct list_head free_list; /* Free list */ + }; + u8 flags; /* Flags */ + u8 mode; /* Encryption mode for tfm */ +}; + +struct fscrypt_completion_result { + struct completion completion; + int res; +}; + +#define DECLARE_FS_COMPLETION_RESULT(ecr) \ + struct fscrypt_completion_result ecr = { \ + COMPLETION_INITIALIZER((ecr).completion), 0 } + +#define FS_FNAME_NUM_SCATTER_ENTRIES 4 +#define FS_CRYPTO_BLOCK_SIZE 16 +#define FS_FNAME_CRYPTO_DIGEST_SIZE 32 + +/** + * For encrypted symlinks, the ciphertext length is stored at the beginning + * of the string in little-endian format. + */ +struct fscrypt_symlink_data { + __le16 len; + char encrypted_path[1]; +} __packed; + +/** + * This function is used to calculate the disk space required to + * store a filename of length l in encrypted symlink format. 
+ */ +static inline u32 fscrypt_symlink_data_len(u32 l) +{ + if (l < FS_CRYPTO_BLOCK_SIZE) + l = FS_CRYPTO_BLOCK_SIZE; + return (l + sizeof(struct fscrypt_symlink_data) - 1); +} + +struct fscrypt_str { + unsigned char *name; + u32 len; +}; + +struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; +}; + +#define FSTR_INIT(n, l) { .name = n, .len = l } +#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len) +#define fname_name(p) ((p)->disk_name.name) +#define fname_len(p) ((p)->disk_name.len) + +/* + * crypto opertions for filesystems + */ +struct fscrypt_operations { + int (*get_context)(struct inode *, void *, size_t); + int (*key_prefix)(struct inode *, u8 **); + int (*prepare_context)(struct inode *); + int (*set_context)(struct inode *, const void *, size_t, void *); + int (*dummy_context)(struct inode *); + bool (*is_encrypted)(struct inode *); + bool (*empty_dir)(struct inode *); + unsigned (*max_namelen)(struct inode *); +}; + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + if (inode->i_sb->s_cop->dummy_context && + inode->i_sb->s_cop->dummy_context(inode)) + return true; + return false; +} + +static inline bool fscrypt_valid_contents_enc_mode(u32 mode) +{ + return (mode == FS_ENCRYPTION_MODE_AES_256_XTS); +} + +static inline bool fscrypt_valid_filenames_enc_mode(u32 mode) +{ + return (mode == FS_ENCRYPTION_MODE_AES_256_CTS); +} + +static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) +{ + if (str->len == 1 && str->name[0] == '.') + return true; + + if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') + return true; + + return false; +} + +static inline struct page *fscrypt_control_page(struct page *page) +{ +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + return ((struct fscrypt_ctx *)page_private(page))->w.control_page; +#else + WARN_ON_ONCE(1); + return ERR_PTR(-EINVAL); +#endif +} + +static inline int fscrypt_has_encryption_key(struct inode *inode) +{ +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + return (inode->i_crypt_info != NULL); +#else + return 0; +#endif +} + +static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry) +{ +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY; + spin_unlock(&dentry->d_lock); +#endif +} + +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) +extern const struct dentry_operations fscrypt_d_ops; +#endif + +static inline void fscrypt_set_d_op(struct dentry *dentry) +{ +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + d_set_d_op(dentry, &fscrypt_d_ops); +#endif +} + +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) +/* crypto.c */ +extern struct kmem_cache *fscrypt_info_cachep; +int fscrypt_initialize(void); + +extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t); +extern void fscrypt_release_ctx(struct fscrypt_ctx *); +extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t); +extern int fscrypt_decrypt_page(struct page *); +extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *); +extern void fscrypt_pullback_bio_page(struct page **, bool); +extern void fscrypt_restore_control_page(struct page *); +extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t, + unsigned int); +/* policy.c */ +extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); +extern int fscrypt_ioctl_get_policy(struct file *, void __user *); +extern int fscrypt_has_permitted_context(struct inode *, struct inode *); +extern int 
fscrypt_inherit_context(struct inode *, struct inode *, + void *, bool); +/* keyinfo.c */ +extern int get_crypt_info(struct inode *); +extern int fscrypt_get_encryption_info(struct inode *); +extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); + +/* fname.c */ +extern int fscrypt_setup_filename(struct inode *, const struct qstr *, + int lookup, struct fscrypt_name *); +extern void fscrypt_free_filename(struct fscrypt_name *); +extern u32 fscrypt_fname_encrypted_size(struct inode *, u32); +extern int fscrypt_fname_alloc_buffer(struct inode *, u32, + struct fscrypt_str *); +extern void fscrypt_fname_free_buffer(struct fscrypt_str *); +extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, + const struct fscrypt_str *, struct fscrypt_str *); +extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, + struct fscrypt_str *); +#endif + +/* crypto.c */ +static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i, + gfp_t f) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c) +{ + return; +} + +static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i, + struct page *p, gfp_t f) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int fscrypt_notsupp_decrypt_page(struct page *p) +{ + return -EOPNOTSUPP; +} + +static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c, + struct bio *b) +{ + return; +} + +static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b) +{ + return; +} + +static inline void fscrypt_notsupp_restore_control_page(struct page *p) +{ + return; +} + +static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p, + sector_t s, unsigned int f) +{ + return -EOPNOTSUPP; +} + +/* policy.c */ +static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f, + const void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f, + void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_notsupp_has_permitted_context(struct inode *p, + struct inode *i) +{ + return 0; +} + +static inline int fscrypt_notsupp_inherit_context(struct inode *p, + struct inode *i, void *v, bool b) +{ + return -EOPNOTSUPP; +} + +/* keyinfo.c */ +static inline int fscrypt_notsupp_get_encryption_info(struct inode *i) +{ + return -EOPNOTSUPP; +} + +static inline void fscrypt_notsupp_put_encryption_info(struct inode *i, + struct fscrypt_info *f) +{ + return; +} + + /* fname.c */ +static inline int fscrypt_notsupp_setup_filename(struct inode *dir, + const struct qstr *iname, + int lookup, struct fscrypt_name *fname) +{ + if (dir->i_sb->s_cop->is_encrypted(dir)) + return -EOPNOTSUPP; + + memset(fname, 0, sizeof(struct fscrypt_name)); + fname->usr_fname = iname; + fname->disk_name.name = (unsigned char *)iname->name; + fname->disk_name.len = iname->len; + return 0; +} + +static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname) +{ + return; +} + +static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s) +{ + /* never happens */ + WARN_ON(1); + return 0; +} + +static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode, + u32 ilen, struct fscrypt_str *crypto_str) +{ + return -EOPNOTSUPP; +} + +static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c) +{ + return; +} + +static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str 
*iname, + struct fscrypt_str *oname) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode, + const struct qstr *iname, + struct fscrypt_str *oname) +{ + return -EOPNOTSUPP; +} +#endif /* _LINUX_FSCRYPTO_H */ diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 84d971ff3fba..5d06e838e650 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h @@ -16,6 +16,7 @@ #define __FSL_GUTS_H__ #include <linux/types.h> +#include <linux/io.h> /** * Global Utility Registers. diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 1600c55828e0..93a774ce4922 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -49,7 +49,7 @@ struct iio_buffer_access_funcs { int (*request_update)(struct iio_buffer *buffer); int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); - int (*set_length)(struct iio_buffer *buffer, int length); + int (*set_length)(struct iio_buffer *buffer, unsigned int length); void (*release)(struct iio_buffer *buffer); @@ -78,8 +78,8 @@ struct iio_buffer_access_funcs { * @watermark: [INTERN] number of datums to wait for poll/read. */ struct iio_buffer { - int length; - int bytes_per_datum; + unsigned int length; + size_t bytes_per_datum; struct attribute_group *scan_el_attrs; long *scan_mask; bool scan_timestamp; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 23e129ef6726..e353f6600b0b 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -125,6 +125,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) * Extended Capability Register */ +#define ecap_dit(e) ((e >> 41) & 0x1) #define ecap_pasid(e) ((e >> 40) & 0x1) #define ecap_pss(e) ((e >> 35) & 0x1f) #define ecap_eafs(e) ((e >> 34) & 0x1) @@ -294,6 +295,7 @@ enum { #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) #define QI_DEV_IOTLB_SIZE 1 #define QI_DEV_IOTLB_MAX_INVS 32 @@ -318,6 +320,7 @@ enum { #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) +#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) #define QI_DEV_EIOTLB_MAX_INVS 32 #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) @@ -463,9 +466,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, u64 type); extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type); -extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, - u64 addr, unsigned mask); - +extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, + u16 qdep, u64 addr, unsigned mask); extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); diff --git a/include/linux/io.h b/include/linux/io.h index de64c1e53612..8ab45611fc35 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -154,4 +154,26 @@ enum { void *memremap(resource_size_t offset, size_t size, unsigned long flags); void memunmap(void *addr); +/* + * On x86 PAT systems we have memory tracking that keeps track of + * the allowed mappings on memory ranges. 
This tracking works for + * all the in-kernel mapping APIs (ioremap*), but where the user + * wishes to map a range from a physical device into user memory + * the tracking won't be updated. This API is to be used by + * drivers which remap physical device pages into userspace, + * and wants to make sure they are mapped WC and not UC. + */ +#ifndef arch_io_reserve_memtype_wc +static inline int arch_io_reserve_memtype_wc(resource_size_t base, + resource_size_t size) +{ + return 0; +} + +static inline void arch_io_free_memtype_wc(resource_size_t base, + resource_size_t size) +{ +} +#endif + #endif /* _LINUX_IO_H */ diff --git a/include/linux/ipa.h b/include/linux/ipa.h index a4b817c5e4fc..623d0f08cdf9 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1226,11 +1226,13 @@ int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); */ int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs); +int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only); + int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls); int ipa_commit_hdr(void); -int ipa_reset_hdr(void); +int ipa_reset_hdr(bool user_only); int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup); @@ -1241,7 +1243,8 @@ int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy); /* * Header Processing Context */ -int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); +int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); @@ -1250,11 +1253,13 @@ int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); */ int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); +int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only); + int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); int ipa_commit_rt(enum ipa_ip_type ip); -int ipa_reset_rt(enum ipa_ip_type ip); +int ipa_reset_rt(enum ipa_ip_type ip, bool user_only); int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); @@ -1269,13 +1274,15 @@ int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); */ int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); +int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only); + int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); int ipa_commit_flt(enum ipa_ip_type ip); -int ipa_reset_flt(enum ipa_ip_type ip); +int ipa_reset_flt(enum ipa_ip_type ip, bool user_only); /* * NAT @@ -1648,6 +1655,12 @@ static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) return -EPERM; } +static inline int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, + bool user_only) +{ + return -EPERM; +} + static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls) { return -EPERM; @@ -1658,7 +1671,7 @@ static inline int ipa_commit_hdr(void) return -EPERM; } -static inline int ipa_reset_hdr(void) +static inline int ipa_reset_hdr(bool user_only) { return -EPERM; } @@ -1682,7 +1695,8 @@ static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy) * Header Processing Context */ static inline int ipa_add_hdr_proc_ctx( - struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) + struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) { return -EPERM; } @@ -1699,6 +1713,12 @@ static inline 
int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) return -EPERM; } +static inline int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only) +{ + return -EPERM; +} + static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) { return -EPERM; @@ -1709,7 +1729,7 @@ static inline int ipa_commit_rt(enum ipa_ip_type ip) return -EPERM; } -static inline int ipa_reset_rt(enum ipa_ip_type ip) +static inline int ipa_reset_rt(enum ipa_ip_type ip, bool user_only) { return -EPERM; } @@ -1742,6 +1762,12 @@ static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) return -EPERM; } +static inline int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only) +{ + return -EPERM; +} + static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) { return -EPERM; @@ -1757,7 +1783,7 @@ static inline int ipa_commit_flt(enum ipa_ip_type ip) return -EPERM; } -static inline int ipa_reset_flt(enum ipa_ip_type ip) +static inline int ipa_reset_flt(enum ipa_ip_type ip, bool user_only) { return -EPERM; } diff --git a/include/linux/libata.h b/include/linux/libata.h index b20a2752f934..6428ac4746de 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -210,6 +210,7 @@ enum { ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ /* (doesn't imply presence) */ ATA_FLAG_SATA = (1 << 1), + ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */ ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ diff --git a/include/linux/list.h b/include/linux/list.h index 993395a2e55c..d5750f2f1c36 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -28,27 +28,42 @@ static inline void INIT_LIST_HEAD(struct list_head *list) list->prev = list; } +#ifdef CONFIG_DEBUG_LIST +extern bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next); +extern bool __list_del_entry_valid(struct list_head *entry); +#else +static inline bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + return true; +} +static inline bool __list_del_entry_valid(struct list_head *entry) +{ + return true; +} +#endif + /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! */ -#ifndef CONFIG_DEBUG_LIST static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { + if (!__list_add_valid(new, prev, next)) + return; + next->prev = new; new->next = next; new->prev = prev; prev->next = new; } -#else -extern void __list_add(struct list_head *new, - struct list_head *prev, - struct list_head *next); -#endif /** * list_add - add a new entry @@ -96,22 +111,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. 
*/ -#ifndef CONFIG_DEBUG_LIST static inline void __list_del_entry(struct list_head *entry) { + if (!__list_del_entry_valid(entry)) + return; + __list_del(entry->prev, entry->next); } static inline void list_del(struct list_head *entry) { - __list_del(entry->prev, entry->next); + __list_del_entry(entry); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } -#else -extern void __list_del_entry(struct list_head *entry); -extern void list_del(struct list_head *entry); -#endif /** * list_replace - replace old entry by new one diff --git a/include/linux/mdio.h b/include/linux/mdio.h index b42963bc81dd..a0d6dadd787e 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -11,6 +11,53 @@ #include <uapi/linux/mdio.h> +struct mdio_device { + struct device dev; + + const struct dev_pm_ops *pm_ops; + struct mii_bus *bus; + + int (*bus_match)(struct device *dev, struct device_driver *drv); + void (*device_free)(struct mdio_device *mdiodev); + void (*device_remove)(struct mdio_device *mdiodev); + + /* Bus address of the MDIO device (0-31) */ + int addr; + int flags; +}; +#define to_mdio_device(d) container_of(d, struct mdio_device, dev) + +/* struct mdio_driver_common: Common to all MDIO drivers */ +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; +#define MDIO_DEVICE_FLAG_PHY 1 +#define to_mdio_common_driver(d) \ + container_of(d, struct mdio_driver_common, driver) + +/* struct mdio_driver: Generic MDIO driver */ +struct mdio_driver { + struct mdio_driver_common mdiodrv; + + /* + * Called during discovery. Used to set + * up device-specific structures, if any + */ + int (*probe)(struct mdio_device *mdiodev); + + /* Clears up any memory if needed */ + void (*remove)(struct mdio_device *mdiodev); +}; +#define to_mdio_driver(d) \ + container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv) + +void mdio_device_free(struct mdio_device *mdiodev); +struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); +int mdio_device_register(struct mdio_device *mdiodev); +void mdio_device_remove(struct mdio_device *mdiodev); +int mdio_driver_register(struct mdio_driver *drv); +void mdio_driver_unregister(struct mdio_driver *drv); static inline bool mdio_phy_id_is_c45(int phy_id) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 34fcdede4604..6faba4f28ab4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2103,6 +2103,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr, int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); +int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, pgprot_t pgprot); int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fe0a5de1eda5..29c17fae9bbf 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -404,7 +404,7 @@ struct kioctx_table; struct mm_struct { struct vm_area_struct *mmap; /* list of VMAs */ struct rb_root mm_rb; - u32 vmacache_seqnum; /* per-thread vmacache */ + u64 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 862d8d1bae8f..ef3e628388cf 100644 --- 
a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -131,6 +131,8 @@ struct mmc_bus_ops { int (*shutdown)(struct mmc_host *); int (*reset)(struct mmc_host *); int (*change_bus_speed)(struct mmc_host *, unsigned long *); + int (*pre_hibernate)(struct mmc_host *); + int (*post_hibernate)(struct mmc_host *); }; struct mmc_card; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f564303a28f9..48849acf34ff 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -419,6 +419,7 @@ struct mmc_host { #define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */ u32 caps2; /* More host capabilities */ + u32 cached_caps2; #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ @@ -642,6 +643,7 @@ static inline void *mmc_cmdq_private(struct mmc_host *host) #define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & \ MMC_BUSRESUME_MANUAL_RESUME) +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual) { if (manual) @@ -649,6 +651,11 @@ static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual) else host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME; } +#else +static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual) +{ +} +#endif extern int mmc_resume_bus(struct mmc_host *host); diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 83430f2ea757..e0325706b76d 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -33,6 +33,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 +#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 772362adf471..053824b0a412 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -56,4 +56,10 @@ void dump_mm(const struct mm_struct *mm); #define VIRTUAL_BUG_ON(cond) do { } while (0) #endif +#ifdef CONFIG_DEBUG_VM_PGFLAGS +#define VM_BUG_ON_PGFLAGS(cond, page) VM_BUG_ON_PAGE(cond, page) +#else +#define VM_BUG_ON_PGFLAGS(cond, page) BUILD_BUG_ON_INVALID(cond) +#endif + #endif diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 1d6a935c1ac5..8793f5a7b820 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -65,8 +65,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value) static inline u32 ip_set_timeout_get(unsigned long *timeout) { - return *timeout == IPSET_ELEM_PERMANENT ? 0 : - jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; + u32 t; + + if (*timeout == IPSET_ELEM_PERMANENT) + return 0; + + t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; + /* Zero value in userspace means no timeout */ + return t == 0 ? 
1 : t; } #endif /* __KERNEL__ */ diff --git a/include/linux/nospec.h b/include/linux/nospec.h index e791ebc65c9c..0c5ef54fd416 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h @@ -7,6 +7,8 @@ #define _LINUX_NOSPEC_H #include <asm/barrier.h> +struct task_struct; + /** * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise * @index: array element index @@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, \ (typeof(_i)) (_i & _mask); \ }) + +/* Speculation control prctl */ +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which); +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl); +/* Speculation control for seccomp enforced mitigation */ +void arch_seccomp_spec_mitigate(struct task_struct *task); + #endif /* _LINUX_NOSPEC_H */ diff --git a/include/linux/overflow.h b/include/linux/overflow.h new file mode 100644 index 000000000000..8712ff70995f --- /dev/null +++ b/include/linux/overflow.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include <linux/compiler.h> + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +#define is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. 
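Whichever branch is compiled in, the checkers require a, b and *d to share one type, and every variant signals overflow through its return value while still storing the (possibly wrapped) result in *d. A minimal caller sketch, not part of the patch; the function is hypothetical:

#include <linux/overflow.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Compute count * elem_size, refusing to proceed if it would wrap. */
static int scaled_length(u32 count, u32 elem_size, u32 *out_bytes)
{
        u32 bytes;

        if (check_mul_overflow(count, elem_size, &bytes))
                return -EOVERFLOW;

        *out_bytes = bytes;
        return 0;
}

The same pattern works for check_add_overflow() and check_sub_overflow(), since all three share the return-true-on-overflow convention.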
+ */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. + */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. 
This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. 
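A minimal usage sketch for struct_size(), not part of the patch (the structure is hypothetical): because the helper saturates to SIZE_MAX on overflow, the allocation below simply fails instead of being silently undersized.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical header followed by a flexible array of samples. */
struct report {
        u32 nr;
        u64 samples[];
};

static struct report *alloc_report(u32 n)
{
        struct report *r;

        /* struct_size(r, samples, n) == sizeof(*r) + n * sizeof(u64), or SIZE_MAX */
        r = kzalloc(struct_size(r, samples, n), GFP_KERNEL);
        if (r)
                r->nr = n;
        return r;
}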
+ */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 86c233be1cfc..985fb2c63cb7 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -139,49 +139,101 @@ enum pageflags { #ifndef __GENERATING_BOUNDS_H +struct page; /* forward declaration */ + +static inline struct page *compound_head(struct page *page) +{ + unsigned long head = READ_ONCE(page->compound_head); + + if (unlikely(head & 1)) + return (struct page *) (head - 1); + return page; +} + +static inline int PageTail(struct page *page) +{ + return READ_ONCE(page->compound_head) & 1; +} + +static inline int PageCompound(struct page *page) +{ + return test_bit(PG_head, &page->flags) || PageTail(page); +} + +/* + * Page flags policies wrt compound pages + * + * PF_ANY: + * the page flag is relevant for small, head and tail pages. + * + * PF_HEAD: + * for compound page all operations related to the page flag applied to + * head page. + * + * PF_NO_TAIL: + * modifications of the page flag must be done on small or head pages, + * checks can be done on tail pages too. + * + * PF_NO_COMPOUND: + * the page flag is not relevant for compound pages. + */ +#define PF_ANY(page, enforce) page +#define PF_HEAD(page, enforce) compound_head(page) +#define PF_NO_TAIL(page, enforce) ({ \ + VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ + compound_head(page);}) +#define PF_NO_COMPOUND(page, enforce) ({ \ + VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \ + page;}) + /* * Macros to create function definitions for page flags */ -#define TESTPAGEFLAG(uname, lname) \ -static inline int Page##uname(const struct page *page) \ - { return test_bit(PG_##lname, &page->flags); } +#define TESTPAGEFLAG(uname, lname, policy) \ +static inline int Page##uname(struct page *page) \ + { return test_bit(PG_##lname, &policy(page, 0)->flags); } -#define SETPAGEFLAG(uname, lname) \ +#define SETPAGEFLAG(uname, lname, policy) \ static inline void SetPage##uname(struct page *page) \ - { set_bit(PG_##lname, &page->flags); } + { set_bit(PG_##lname, &policy(page, 1)->flags); } -#define CLEARPAGEFLAG(uname, lname) \ +#define CLEARPAGEFLAG(uname, lname, policy) \ static inline void ClearPage##uname(struct page *page) \ - { clear_bit(PG_##lname, &page->flags); } + { clear_bit(PG_##lname, &policy(page, 1)->flags); } -#define __SETPAGEFLAG(uname, lname) \ +#define __SETPAGEFLAG(uname, lname, policy) \ static inline void __SetPage##uname(struct page *page) \ - { __set_bit(PG_##lname, &page->flags); } + { __set_bit(PG_##lname, &policy(page, 1)->flags); } -#define __CLEARPAGEFLAG(uname, lname) \ +#define __CLEARPAGEFLAG(uname, lname, policy) \ static inline void __ClearPage##uname(struct page *page) \ - { __clear_bit(PG_##lname, &page->flags); } + { __clear_bit(PG_##lname, &policy(page, 1)->flags); } -#define TESTSETFLAG(uname, lname) \ +#define TESTSETFLAG(uname, lname, policy) \ static inline int TestSetPage##uname(struct page *page) \ - { return test_and_set_bit(PG_##lname, &page->flags); } + { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } -#define TESTCLEARFLAG(uname, lname) \ +#define TESTCLEARFLAG(uname, lname, policy) \ static inline int TestClearPage##uname(struct page *page) \ - { return test_and_clear_bit(PG_##lname, &page->flags); } + { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } -#define __TESTCLEARFLAG(uname, lname) \ 
+#define __TESTCLEARFLAG(uname, lname, policy) \ static inline int __TestClearPage##uname(struct page *page) \ - { return __test_and_clear_bit(PG_##lname, &page->flags); } + { return __test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } -#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ - SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) +#define PAGEFLAG(uname, lname, policy) \ + TESTPAGEFLAG(uname, lname, policy) \ + SETPAGEFLAG(uname, lname, policy) \ + CLEARPAGEFLAG(uname, lname, policy) -#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ - __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname) +#define __PAGEFLAG(uname, lname, policy) \ + TESTPAGEFLAG(uname, lname, policy) \ + __SETPAGEFLAG(uname, lname, policy) \ + __CLEARPAGEFLAG(uname, lname, policy) -#define TESTSCFLAG(uname, lname) \ - TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname) +#define TESTSCFLAG(uname, lname, policy) \ + TESTSETFLAG(uname, lname, policy) \ + TESTCLEARFLAG(uname, lname, policy) #define TESTPAGEFLAG_FALSE(uname) \ static inline int Page##uname(const struct page *page) { return 0; } @@ -210,28 +262,28 @@ static inline int __TestClearPage##uname(struct page *page) { return 0; } #define TESTSCFLAG_FALSE(uname) \ TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname) -struct page; /* forward declaration */ - -TESTPAGEFLAG(Locked, locked) -PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error) -PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) - __SETPAGEFLAG(Referenced, referenced) -PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) -PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) -PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) - TESTCLEARFLAG(Active, active) -__PAGEFLAG(Slab, slab) -PAGEFLAG(Checked, checked) /* Used by some filesystems */ -PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ -PAGEFLAG(SavePinned, savepinned); /* Xen */ -PAGEFLAG(Foreign, foreign); /* Xen */ -PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) -PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) - __SETPAGEFLAG(SwapBacked, swapbacked) - -__PAGEFLAG(SlobFree, slob_free) +__PAGEFLAG(Locked, locked, PF_NO_TAIL) +PAGEFLAG(Error, error, PF_ANY) TESTCLEARFLAG(Error, error, PF_ANY) +PAGEFLAG(Referenced, referenced, PF_ANY) TESTCLEARFLAG(Referenced, referenced, PF_ANY) + __SETPAGEFLAG(Referenced, referenced, PF_ANY) +PAGEFLAG(Dirty, dirty, PF_ANY) TESTSCFLAG(Dirty, dirty, PF_ANY) + __CLEARPAGEFLAG(Dirty, dirty, PF_ANY) +PAGEFLAG(LRU, lru, PF_ANY) __CLEARPAGEFLAG(LRU, lru, PF_ANY) +PAGEFLAG(Active, active, PF_ANY) __CLEARPAGEFLAG(Active, active, PF_ANY) + TESTCLEARFLAG(Active, active, PF_ANY) +__PAGEFLAG(Slab, slab, PF_ANY) +PAGEFLAG(Checked, checked, PF_ANY) /* Used by some filesystems */ +PAGEFLAG(Pinned, pinned, PF_ANY) TESTSCFLAG(Pinned, pinned, PF_ANY) /* Xen */ +PAGEFLAG(SavePinned, savepinned, PF_ANY); /* Xen */ +PAGEFLAG(Foreign, foreign, PF_ANY); /* Xen */ +PAGEFLAG(Reserved, reserved, PF_ANY) __CLEARPAGEFLAG(Reserved, reserved, PF_ANY) +PAGEFLAG(SwapBacked, swapbacked, PF_ANY) + __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_ANY) + __SETPAGEFLAG(SwapBacked, swapbacked, PF_ANY) + +__PAGEFLAG(SlobFree, slob_free, PF_ANY) #ifdef CONFIG_ZCACHE -PAGEFLAG(WasActive, was_active) +PAGEFLAG(WasActive, was_active, PF_ANY) #else PAGEFLAG_FALSE(WasActive) #endif @@ -241,21 +293,22 @@ PAGEFLAG_FALSE(WasActive) * for its own purposes. 
* - PG_private and PG_private_2 cause releasepage() and co to be invoked */ -PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) - __CLEARPAGEFLAG(Private, private) -PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2) -PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) +PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY) + __CLEARPAGEFLAG(Private, private, PF_ANY) +PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) +PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) + TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY) /* * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. */ -TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback) -PAGEFLAG(MappedToDisk, mappedtodisk) +TESTPAGEFLAG(Writeback, writeback, PF_ANY) TESTSCFLAG(Writeback, writeback, PF_ANY) +PAGEFLAG(MappedToDisk, mappedtodisk, PF_ANY) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ -PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim) -PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim) +PAGEFLAG(Reclaim, reclaim, PF_ANY) TESTCLEARFLAG(Reclaim, reclaim, PF_ANY) +PAGEFLAG(Readahead, reclaim, PF_ANY) TESTCLEARFLAG(Readahead, reclaim, PF_ANY) #ifdef CONFIG_HIGHMEM /* @@ -268,31 +321,32 @@ PAGEFLAG_FALSE(HighMem) #endif #ifdef CONFIG_SWAP -PAGEFLAG(SwapCache, swapcache) +PAGEFLAG(SwapCache, swapcache, PF_ANY) #else PAGEFLAG_FALSE(SwapCache) #endif -PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) - TESTCLEARFLAG(Unevictable, unevictable) +PAGEFLAG(Unevictable, unevictable, PF_ANY) + __CLEARPAGEFLAG(Unevictable, unevictable, PF_ANY) + TESTCLEARFLAG(Unevictable, unevictable, PF_ANY) #ifdef CONFIG_MMU -PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) - TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) +PAGEFLAG(Mlocked, mlocked, PF_ANY) __CLEARPAGEFLAG(Mlocked, mlocked, PF_ANY) + TESTSCFLAG(Mlocked, mlocked, PF_ANY) __TESTCLEARFLAG(Mlocked, mlocked, PF_ANY) #else PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked) TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED -PAGEFLAG(Uncached, uncached) +PAGEFLAG(Uncached, uncached, PF_ANY) #else PAGEFLAG_FALSE(Uncached) #endif #ifdef CONFIG_MEMORY_FAILURE -PAGEFLAG(HWPoison, hwpoison) -TESTSCFLAG(HWPoison, hwpoison) +PAGEFLAG(HWPoison, hwpoison, PF_ANY) +TESTSCFLAG(HWPoison, hwpoison, PF_ANY) #define __PG_HWPOISON (1UL << PG_hwpoison) #else PAGEFLAG_FALSE(HWPoison) @@ -300,10 +354,10 @@ PAGEFLAG_FALSE(HWPoison) #endif #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) -TESTPAGEFLAG(Young, young) -SETPAGEFLAG(Young, young) -TESTCLEARFLAG(Young, young) -PAGEFLAG(Idle, idle) +TESTPAGEFLAG(Young, young, PF_ANY) +SETPAGEFLAG(Young, young, PF_ANY) +TESTCLEARFLAG(Young, young, PF_ANY) +PAGEFLAG(Idle, idle, PF_ANY) #endif /* @@ -397,7 +451,7 @@ static inline void SetPageUptodate(struct page *page) set_bit(PG_uptodate, &(page)->flags); } -CLEARPAGEFLAG(Uptodate, uptodate) +CLEARPAGEFLAG(Uptodate, uptodate, PF_ANY) int test_clear_page_writeback(struct page *page); int __test_set_page_writeback(struct page *page, bool keep_write); @@ -417,12 +471,7 @@ static inline void set_page_writeback_keepwrite(struct page *page) test_set_page_writeback_keepwrite(page); } -__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) - -static inline int PageTail(struct page *page) -{ - return 
READ_ONCE(page->compound_head) & 1; -} +__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) static inline void set_compound_head(struct page *page, struct page *head) { @@ -434,20 +483,6 @@ static inline void clear_compound_head(struct page *page) WRITE_ONCE(page->compound_head, 0); } -static inline struct page *compound_head(struct page *page) -{ - unsigned long head = READ_ONCE(page->compound_head); - - if (unlikely(head & 1)) - return (struct page *) (head - 1); - return page; -} - -static inline int PageCompound(struct page *page) -{ - return PageHead(page) || PageTail(page); - -} #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void ClearPageCompound(struct page *page) { @@ -573,7 +608,7 @@ static inline void __ClearPageBalloon(struct page *page) atomic_set(&page->_mapcount, -1); } -__PAGEFLAG(Isolated, isolated); +__PAGEFLAG(Isolated, isolated, PF_ANY); /* * If network-based swap is enabled, sl*b must keep track of whether pages @@ -652,6 +687,10 @@ static inline int page_has_private(struct page *page) return !!(page->flags & PAGE_FLAGS_PRIVATE); } +#undef PF_ANY +#undef PF_HEAD +#undef PF_NO_TAIL +#undef PF_NO_COMPOUND #endif /* !__GENERATING_BOUNDS_H */ #endif /* PAGE_FLAGS_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 771774e13f10..d2f4a732b3e8 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -359,8 +359,16 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); -unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, - int tag, unsigned int nr_pages, struct page **pages); +unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, + pgoff_t end, int tag, unsigned int nr_pages, + struct page **pages); +static inline unsigned find_get_pages_tag(struct address_space *mapping, + pgoff_t *index, int tag, unsigned int nr_pages, + struct page **pages) +{ + return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, + nr_pages, pages); +} struct page *grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index, unsigned flags); @@ -433,18 +441,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); -static inline void __set_page_locked(struct page *page) -{ - __set_bit(PG_locked, &page->flags); -} - -static inline void __clear_page_locked(struct page *page) -{ - __clear_bit(PG_locked, &page->flags); -} - static inline int trylock_page(struct page *page) { + page = compound_head(page); return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); } @@ -497,9 +496,9 @@ extern int wait_on_page_bit_killable_timeout(struct page *page, static inline int wait_on_page_locked_killable(struct page *page) { - if (PageLocked(page)) - return wait_on_page_bit_killable(page, PG_locked); - return 0; + if (!PageLocked(page)) + return 0; + return wait_on_page_bit_killable(compound_head(page), PG_locked); } extern wait_queue_head_t *page_waitqueue(struct page *page); @@ -518,7 +517,7 @@ static inline void wake_up_page(struct page *page, int bit) static inline void wait_on_page_locked(struct page *page) { if (PageLocked(page)) - wait_on_page_bit(page, PG_locked); + wait_on_page_bit(compound_head(page), PG_locked); } /* @@ -664,17 +663,17 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t 
gfp_mask); /* * Like add_to_page_cache_locked, but used to add newly allocated pages: - * the page is new, so we can just run __set_page_locked() against it. + * the page is new, so we can just run __SetPageLocked() against it. */ static inline int add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { int error; - __set_page_locked(page); + __SetPageLocked(page); error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); if (unlikely(error)) - __clear_page_locked(page); + __ClearPageLocked(page); return error; } diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index b45d391b4540..cead4419f933 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -29,9 +29,17 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec, void pagevec_remove_exceptionals(struct pagevec *pvec); unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages); -unsigned pagevec_lookup_tag(struct pagevec *pvec, - struct address_space *mapping, pgoff_t *index, int tag, - unsigned nr_pages); +unsigned pagevec_lookup_range_tag(struct pagevec *pvec, + struct address_space *mapping, pgoff_t *index, pgoff_t end, + int tag); +unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec, + struct address_space *mapping, pgoff_t *index, pgoff_t end, + int tag, unsigned max_pages); +static inline unsigned pagevec_lookup_tag(struct pagevec *pvec, + struct address_space *mapping, pgoff_t *index, int tag) +{ + return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); +} static inline void pagevec_init(struct pagevec *pvec, int cold) { diff --git a/include/linux/phy.h b/include/linux/phy.h index dbfd5ce9350f..c175610c8fdb 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -18,6 +18,7 @@ #include <linux/spinlock.h> #include <linux/ethtool.h> +#include <linux/mdio.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/timer.h> @@ -357,6 +358,8 @@ struct phy_c45_device_ids { * handling, as well as handling shifts in PHY hardware state */ struct phy_device { + struct mdio_device mdio; + /* Information about the PHY type */ /* And management functions */ struct phy_driver *drv; diff --git a/include/linux/qdsp6v2/audio-anc-dev-mgr.h b/include/linux/qdsp6v2/audio-anc-dev-mgr.h index dfa6752bc31b..b0ece2dbb239 100644 --- a/include/linux/qdsp6v2/audio-anc-dev-mgr.h +++ b/include/linux/qdsp6v2/audio-anc-dev-mgr.h @@ -39,6 +39,8 @@ int msm_anc_dev_stop(void); int msm_anc_dev_set_info(void *info_p, int32_t anc_cmd); +int msm_anc_dev_get_info(void *info_p, int32_t anc_cmd); + int msm_anc_dev_create(struct platform_device *pdev); int msm_anc_dev_destroy(struct platform_device *pdev); diff --git a/include/linux/qdsp6v2/audio_notifier.h b/include/linux/qdsp6v2/audio_notifier.h index 3587b49a05c6..0d7f84613107 100644 --- a/include/linux/qdsp6v2/audio_notifier.h +++ b/include/linux/qdsp6v2/audio_notifier.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -92,7 +92,7 @@ int audio_notifier_deregister(char *client_name); static inline int audio_notifier_register(char *client_name, int domain, struct notifier_block *nb) { - return -ENODEV; + return 0; } static inline int audio_notifier_deregister(char *client_name) diff --git a/include/linux/qdsp6v2/sdsp_anc.h b/include/linux/qdsp6v2/sdsp_anc.h index 3b236e827e3d..5c1b7055c1d5 100644 --- a/include/linux/qdsp6v2/sdsp_anc.h +++ b/include/linux/qdsp6v2/sdsp_anc.h @@ -15,7 +15,6 @@ #include <sound/q6afe-v2.h> #include <sound/apr_audio-v2.h> - #define AUD_MSVC_MODULE_AUDIO_DEV_RESOURCE_SHARE 0x0001028A #define AUD_MSVC_PARAM_ID_PORT_SHARE_RESOURCE_CONFIG 0x00010297 #define AUD_MSVC_API_VERSION_SHARE_RESOURCE_CONFIG 0x1 @@ -23,8 +22,6 @@ #define AUD_MSVC_PARAM_ID_DEV_ANC_REFS_CONFIG 0x00010286 #define AUD_MSVC_API_VERSION_DEV_ANC_REFS_CONFIG 0x1 #define AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO 0x00010234 -#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM 0x00010235 -#define AUD_MSVC_API_VERSION_DEV_ANC_ALGO_RPM 0x1 struct aud_msvc_port_param_data_v2 { /* ID of the module to be configured. @@ -148,7 +145,7 @@ struct aud_msvc_port_cmd_get_param_v2 { } __packed; struct aud_audioif_config_command { - struct apr_hdr hdr; + struct apr_hdr hdr; struct aud_msvc_port_cmd_set_param_v2 param; struct aud_msvc_port_param_data_v2 pdata; union afe_port_config port; @@ -162,13 +159,6 @@ struct aud_msvc_param_id_dev_share_resource_cfg { u32 lpm_length; } __packed; - -struct aud_msvc_param_id_dev_anc_algo_rpm { - u32 minor_version; - u32 rpm; -} __packed; - - struct aud_msvc_param_id_dev_anc_refs_cfg { u32 minor_version; u16 port_id; @@ -177,65 +167,20 @@ struct aud_msvc_param_id_dev_anc_refs_cfg { u32 bit_width; } __packed; - struct anc_share_resource_command { - struct apr_hdr hdr; + struct apr_hdr hdr; struct aud_msvc_port_cmd_set_param_v2 param; struct aud_msvc_port_param_data_v2 pdata; struct aud_msvc_param_id_dev_share_resource_cfg resource; } __packed; - struct anc_config_ref_command { - struct apr_hdr hdr; + struct apr_hdr hdr; struct aud_msvc_port_cmd_set_param_v2 param; struct aud_msvc_port_param_data_v2 pdata; struct aud_msvc_param_id_dev_anc_refs_cfg refs; } __packed; - - -struct anc_set_rpm_command { - struct apr_hdr hdr; - struct aud_msvc_port_cmd_set_param_v2 param; - struct aud_msvc_port_param_data_v2 pdata; - struct aud_msvc_param_id_dev_anc_algo_rpm set_rpm; -} __packed; - -struct anc_get_rpm_command { - struct apr_hdr hdr; - struct aud_msvc_port_cmd_get_param_v2 param; - struct aud_msvc_port_param_data_v2 pdata; - struct aud_msvc_param_id_dev_anc_algo_rpm get_rpm; -} __packed; - -struct anc_get_rpm_resp { - uint32_t status; - struct aud_msvc_port_param_data_v2 pdata; - struct aud_msvc_param_id_dev_anc_algo_rpm res_rpm; -} __packed; - -#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_BYPASS_MODE 0x0001029B - -#define AUD_MSVC_API_VERSION_DEV_ANC_ALGO_BYPASS_MODE 0x1 - -#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_NO 0x0 -#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_REFS_TO_ANC_SPKR 0x1 -#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_ANC_MIC_TO_ANC_SPKR 0x2 -#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_REFS_MIXED_ANC_MIC_TO_ANC_SPKR 0x3 - -struct aud_msvc_param_id_dev_anc_algo_bypass_mode { - uint32_t minor_version; - uint32_t bypass_mode; -} __packed; - -struct anc_set_bypass_mode_command { - struct apr_hdr hdr; - struct aud_msvc_port_cmd_set_param_v2 param; - struct aud_msvc_port_param_data_v2 pdata; - 
struct aud_msvc_param_id_dev_anc_algo_bypass_mode set_bypass_mode; -} __packed; - #define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID 0x0001023A struct aud_msvc_param_id_dev_anc_algo_module_id { @@ -244,7 +189,7 @@ struct aud_msvc_param_id_dev_anc_algo_module_id { } __packed; struct anc_set_algo_module_id_command { - struct apr_hdr hdr; + struct apr_hdr hdr; struct aud_msvc_port_cmd_set_param_v2 param; struct aud_msvc_port_param_data_v2 pdata; struct aud_msvc_param_id_dev_anc_algo_module_id set_algo_module_id; @@ -269,13 +214,37 @@ struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info { } __packed; struct anc_set_mic_spkr_layout_info_command { - struct apr_hdr hdr; + struct apr_hdr hdr; struct aud_msvc_port_cmd_set_param_v2 param; struct aud_msvc_port_param_data_v2 pdata; struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info set_mic_spkr_layout; } __packed; +struct anc_set_algo_module_cali_data_command { + struct apr_hdr hdr; + struct aud_msvc_port_cmd_set_param_v2 param; + struct aud_msvc_port_param_data_v2 pdata; + /* + * calibration data payload followed + */ +} __packed; + +struct anc_get_algo_module_cali_data_command { + struct apr_hdr hdr; + struct aud_msvc_port_cmd_get_param_v2 param; + struct aud_msvc_port_param_data_v2 pdata; + /* + * calibration data payload followed + */ +} __packed; + +struct anc_get_algo_module_cali_data_resp { + uint32_t status; + struct aud_msvc_port_param_data_v2 pdata; + uint32_t payload[128]; +} __packed; + int anc_if_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port); int anc_if_tdm_port_stop(u16 port_id); @@ -286,15 +255,15 @@ int anc_if_share_resource(u16 port_id, u16 rddma_idx, u16 wrdma_idx, int anc_if_config_ref(u16 port_id, u32 sample_rate, u32 bit_width, u16 num_channel); -int anc_if_set_rpm(u16 port_id, u32 rpm); - -int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode); - int anc_if_set_algo_module_id(u16 port_id, u32 module_id); int anc_if_set_anc_mic_spkr_layout(u16 port_id, struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p); +int anc_if_set_algo_module_cali_data(u16 port_id, void *data_p); + +int anc_if_get_algo_module_cali_data(u16 port_id, void *data_p); + int anc_if_shared_mem_map(void); int anc_if_shared_mem_unmap(void); diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 5ed540986019..0c94d17a4642 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -45,19 +45,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) * This is only for internal list manipulation where we know * the prev/next entries already! 
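The internal helper here is what the public list_add_rcu() builds on; a hedged sketch of the usual writer/reader pairing, not part of the patch and with hypothetical names throughout:

#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct ent {
        int id;
        struct list_head node;
};

static LIST_HEAD(ent_list);
static DEFINE_SPINLOCK(ent_lock);       /* serializes writers only */

static void ent_insert(struct ent *e)
{
        spin_lock(&ent_lock);
        list_add_rcu(&e->node, &ent_list);      /* publish for lockless readers */
        spin_unlock(&ent_lock);
}

static bool ent_present(int id)
{
        struct ent *e;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(e, &ent_list, node) {
                if (e->id == id) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}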
*/ -#ifndef CONFIG_DEBUG_LIST static inline void __list_add_rcu(struct list_head *new, struct list_head *prev, struct list_head *next) { + if (!__list_add_valid(new, prev, next)) + return; + new->next = next; new->prev = prev; rcu_assign_pointer(list_next_rcu(prev), new); next->prev = new; } -#else -void __list_add_rcu(struct list_head *new, - struct list_head *prev, struct list_head *next); -#endif /** * list_add_rcu - add a new entry to rcu-protected list diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 4acc552e9279..19d0778ec382 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -162,6 +162,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer); void ring_buffer_record_off(struct ring_buffer *buffer); void ring_buffer_record_on(struct ring_buffer *buffer); int ring_buffer_record_is_on(struct ring_buffer *buffer); +int ring_buffer_record_is_set_on(struct ring_buffer *buffer); void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); diff --git a/include/linux/sched.h b/include/linux/sched.h index 81f422018aa1..8f6894c6e83c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1726,7 +1726,7 @@ struct task_struct { struct mm_struct *mm, *active_mm; /* per-thread vma caching */ - u32 vmacache_seqnum; + u64 vmacache_seqnum; struct vm_area_struct *vmacache[VMACACHE_SIZE]; #if defined(SPLIT_RSS_COUNTING) struct task_rss_stat rss_stat; @@ -2457,6 +2457,8 @@ static inline void memalloc_noio_restore(unsigned int flags) #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_SPEC_SSB_DISABLE 4 /* Speculative Store Bypass disabled */ +#define PFA_SPEC_SSB_FORCE_DISABLE 5 /* Speculative Store Bypass force disabled*/ #define TASK_PFA_TEST(name, func) \ @@ -2480,6 +2482,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab) TASK_PFA_SET(SPREAD_SLAB, spread_slab) TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) +TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) +TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) +TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) + +TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) +TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) + /* * task->jobctl flags */ diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 2296e6b2f690..5a53d34bba26 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -3,7 +3,8 @@ #include <uapi/linux/seccomp.h> -#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC) +#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ + SECCOMP_FILTER_FLAG_SPEC_ALLOW) #ifdef CONFIG_SECCOMP diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a6da214d0584..c28bd8be290a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -514,6 +514,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue + * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -594,8 +595,8 @@ struct sk_buff { fclone:2, peeked:1, head_frag:1, - xmit_more:1; - /* one bit 
hole */ + xmit_more:1, + pfmemalloc:1; kmemcheck_bitfield_end(flags1); /* fields enclosed in headers_start/headers_end are copied @@ -615,19 +616,18 @@ struct sk_buff { __u8 __pkt_type_offset[0]; __u8 pkt_type:3; - __u8 pfmemalloc:1; __u8 ignore_df:1; __u8 nfctinfo:3; - __u8 nf_trace:1; + __u8 ip_summed:2; __u8 ooo_okay:1; __u8 l4_hash:1; __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; - __u8 no_fcs:1; + /* Indicates the inner headers are valid in the skbuff. */ __u8 encapsulation:1; __u8 encap_hdr_csum:1; @@ -635,11 +635,11 @@ struct sk_buff { __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 csum_bad:1; - #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif __u8 ipvs_property:1; + __u8 inner_protocol_type:1; __u8 remcsum_offload:1; /* 3 or 5 bit hole */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index c59803dc68de..be1ab158ad1a 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -377,6 +377,8 @@ extern int swsusp_page_is_forbidden(struct page *); extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); extern unsigned long get_safe_page(gfp_t gfp_mask); +extern asmlinkage int swsusp_arch_suspend(void); +extern asmlinkage int swsusp_arch_resume(void); extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); extern int hibernate(void); diff --git a/include/linux/swap.h b/include/linux/swap.h index d5ec583b47f5..16b4a657fa6e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -150,9 +150,10 @@ enum { SWP_FILE = (1 << 7), /* set after swap_activate success */ SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */ SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */ + SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */ /* add others here before... */ - SWP_FAST = (1 << 10), /* blkdev access is fast and cheap */ - SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */ + SWP_FAST = (1 << 11), /* blkdev access is fast and cheap */ + SWP_SCANNING = (1 << 12), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32UL diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h index ed2a9c9c2b43..9d995cd9009b 100644 --- a/include/linux/swapfile.h +++ b/include/linux/swapfile.h @@ -14,5 +14,7 @@ extern int try_to_unuse(unsigned int, bool, unsigned long); extern int swap_ratio(struct swap_info_struct **si); extern void setup_swap_ratio(struct swap_info_struct *p, int prio); extern bool is_swap_ratio_group(int prio); +extern unsigned long generic_max_swapfile_size(void); +extern unsigned long max_swapfile_size(void); #endif /* _LINUX_SWAPFILE_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index c2b66a277e98..5d2779aa4bbe 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -205,6 +205,26 @@ extern struct trace_event_functions exit_syscall_print_funcs; } \ static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) +/* + * Called before coming back to user-mode. Returning to user-mode with an + * address limit different than USER_DS can allow to overwrite kernel memory. 
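A purely hypothetical sketch of the intended call site, not part of the patch: an architecture would run the check from its return-to-user work path, once all other pending work has been handled. The function name below is invented for illustration.

#include <linux/syscalls.h>

static void example_exit_to_user_work(void)
{
        /* ... signal delivery, rescheduling, etc. would happen here ... */

        /* Kill the task if it is about to return with a non-USER_DS limit. */
        addr_limit_user_check();
}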
+ */ +static inline void addr_limit_user_check(void) +{ +#ifdef TIF_FSCHECK + if (!test_thread_flag(TIF_FSCHECK)) + return; +#endif + + if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS), + "Invalid address limit on user-mode return")) + force_sig(SIGKILL, current); + +#ifdef TIF_FSCHECK + clear_thread_flag(TIF_FSCHECK); +#endif +} + asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr); asmlinkage long sys_time(time_t __user *tloc); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 9c452f6db438..2839d624d5ee 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -238,6 +238,9 @@ int __must_check sysfs_create_files(struct kobject *kobj, const struct attribute **attr); int __must_check sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode); +struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, + const struct attribute *attr); +void sysfs_unbreak_active_protection(struct kernfs_node *kn); void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); @@ -351,6 +354,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj, return 0; } +static inline struct kernfs_node * +sysfs_break_active_protection(struct kobject *kobj, + const struct attribute *attr) +{ + return NULL; +} + +static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) +{ +} + static inline void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 2260f92f1492..5b6df1a8dc74 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -324,7 +324,7 @@ struct tcp_sock { /* Receiver queue space */ struct { - int space; + u32 space; u32 seq; u32 time; } rcvq_space; diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 8933ecc2bc9f..25f861ecb195 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -26,11 +26,7 @@ #ifdef __KERNEL__ -#ifdef CONFIG_DEBUG_STACK_USAGE -# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) -#else -# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK) -#endif +#define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) /* * flag set/clear/test wrappers diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 4cde40dac778..dc35fa77b3d2 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -57,6 +57,9 @@ /* big enough to hold our biggest descriptor */ #define USB_COMP_EP0_BUFSIZ 4096 +/* OS feature descriptor length <= 4kB */ +#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096 + #define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1) struct usb_configuration; diff --git a/include/linux/usb/diag_bridge.h b/include/linux/usb/diag_bridge.h new file mode 100644 index 000000000000..e82c65330722 --- /dev/null +++ b/include/linux/usb/diag_bridge.h @@ -0,0 +1,54 @@ +/* Copyright (c) 2011, 2013, 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_USB_DIAG_BRIDGE_H__ +#define __LINUX_USB_DIAG_BRIDGE_H__ + +struct diag_bridge_ops { + void *ctxt; + void (*read_complete_cb)(void *ctxt, char *buf, + int buf_size, int actual); + void (*write_complete_cb)(void *ctxt, char *buf, + int buf_size, int actual); + int (*suspend)(void *ctxt); + void (*resume)(void *ctxt); +}; + +#if IS_ENABLED(CONFIG_USB_QCOM_DIAG_BRIDGE) + +extern int diag_bridge_read(int id, char *data, int size); +extern int diag_bridge_write(int id, char *data, int size); +extern int diag_bridge_open(int id, struct diag_bridge_ops *ops); +extern void diag_bridge_close(int id); + +#else + +static int __maybe_unused diag_bridge_read(int id, char *data, int size) +{ + return -ENODEV; +} + +static int __maybe_unused diag_bridge_write(int id, char *data, int size) +{ + return -ENODEV; +} + +static int __maybe_unused diag_bridge_open(int id, struct diag_bridge_ops *ops) +{ + return -ENODEV; +} + +static void __maybe_unused diag_bridge_close(int id) { } + +#endif + +#endif diff --git a/include/linux/usb/ipc_bridge.h b/include/linux/usb/ipc_bridge.h new file mode 100644 index 000000000000..a0e12d6f9af5 --- /dev/null +++ b/include/linux/usb/ipc_bridge.h @@ -0,0 +1,65 @@ +/* Copyright (c) 2013 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MSM_IPC_BRIDGE_H__ +#define __MSM_IPC_BRIDGE_H__ + +#include <linux/platform_device.h> + +/* + * The IPC bridge driver adds a IPC bridge platform device when the + * underlying transport is ready. The IPC transport driver acts as a + * platform driver for this device. The platform data is populated by + * IPC bridge driver to facilitate I/O. The callback functions are + * passed in platform data to avoid export functions. This would allow + * different bridge drivers to exist in the kernel. The IPC bridge driver + * removes the platform device when the underly transport is no longer + * available. It typically happens during shutdown and remote processor's + * subsystem restart. + */ + +/** + * struct ipc_bridge_platform_data - platform device data for IPC + * transport driver. + * @max_read_size: The maximum possible read size. + * @max_write_size: The maximum possible write size. + * @open: The open must be called before starting I/O. The IPC bridge + * driver use the platform device pointer to identify the + * underlying transport channel. The IPC bridge driver may + * notify that remote processor that it is ready to receive + * data. Returns 0 upon success and appropriate error code + * upon failure. + * @read: The read is done synchronously and should be called from process + * context. Returns the number of bytes read from remote + * processor or error code upon failure. The IPC transport + * driver may pass the buffer of max_read_size length if the + * available data size is not known in advance. + * @write: The write is done synchronously and should be called from process + * context. The IPC bridge driver uses the same buffer for DMA + * to avoid additional memcpy. 
So it must be physically contiguous. + * Returns the number of bytes written or error code upon failure. + * @close: The close must be called when the IPC bridge platform device + * is removed. The IPC transport driver may call close when + * it is no longer required to communicate with remote processor. + */ +struct ipc_bridge_platform_data { + unsigned int max_read_size; + unsigned int max_write_size; + int (*open)(struct platform_device *pdev); + int (*read)(struct platform_device *pdev, char *buf, + unsigned int count); + int (*write)(struct platform_device *pdev, char *buf, + unsigned int count); + void (*close)(struct platform_device *pdev); +}; + +#endif diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 056b4e5830a6..d31afe5d790d 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -89,7 +89,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT, #ifdef CONFIG_DEBUG_VM_VMACACHE VMACACHE_FIND_CALLS, VMACACHE_FIND_HITS, - VMACACHE_FULL_FLUSHES, #endif NR_VM_EVENT_ITEMS }; diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index c3fa0fd43949..4f58ff2dacd6 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h @@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk) memset(tsk->vmacache, 0, sizeof(tsk->vmacache)); } -extern void vmacache_flush_all(struct mm_struct *mm); extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma); extern struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr); @@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, static inline void vmacache_invalidate(struct mm_struct *mm) { mm->vmacache_seqnum++; - - /* deal with overflows */ - if (unlikely(mm->vmacache_seqnum == 0)) - vmacache_flush_all(mm); } #endif /* __LINUX_VMACACHE_H */ diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h index 7389fff7da51..06c652cfb6af 100644 --- a/include/linux/wcnss_wlan.h +++ b/include/linux/wcnss_wlan.h @@ -119,7 +119,7 @@ int wcnss_get_wlan_mac_address(char mac_addr[WLAN_MAC_ADDR_SIZE]); void wcnss_allow_suspend(void); void wcnss_prevent_suspend(void); int wcnss_hardware_type(void); -void *wcnss_prealloc_get(unsigned int size); +void *wcnss_prealloc_get(size_t size); int wcnss_prealloc_put(void *ptr); void wcnss_reset_fiq(bool clk_chk_en); void wcnss_suspend_notify(void); diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h new file mode 100644 index 000000000000..9e1f42cb57e9 --- /dev/null +++ b/include/linux/xxhash.h @@ -0,0 +1,236 @@ +/* + * xxHash - Extremely Fast Hash algorithm + * Copyright (C) 2012-2016, Yann Collet. + * + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. This program is dual-licensed; you may select + * either version 2 of the GNU General Public License ("GPL") or BSD license + * ("BSD"). + * + * You can contact the author at: + * - xxHash homepage: http://cyan4973.github.io/xxHash/ + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ + +/* + * Notice extracted from xxHash homepage: + * + * xxHash is an extremely fast Hash algorithm, running at RAM speed limits. + * It also successfully passes all tests from the SMHasher suite. + * + * Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 + * Duo @3GHz) + * + * Name Speed Q.Score Author + * xxHash 5.4 GB/s 10 + * CrapWow 3.2 GB/s 2 Andrew + * MumurHash 3a 2.7 GB/s 10 Austin Appleby + * SpookyHash 2.0 GB/s 10 Bob Jenkins + * SBox 1.4 GB/s 9 Bret Mulvey + * Lookup3 1.2 GB/s 9 Bob Jenkins + * SuperFastHash 1.2 GB/s 1 Paul Hsieh + * CityHash64 1.05 GB/s 10 Pike & Alakuijala + * FNV 0.55 GB/s 5 Fowler, Noll, Vo + * CRC32 0.43 GB/s 9 + * MD5-32 0.33 GB/s 10 Ronald L. Rivest + * SHA1-32 0.28 GB/s 10 + * + * Q.Score is a measure of quality of the hash function. + * It depends on successfully passing SMHasher test set. + * 10 is a perfect score. + * + * A 64-bits version, named xxh64 offers much better speed, + * but for 64-bits applications only. + * Name Speed on 64 bits Speed on 32 bits + * xxh64 13.8 GB/s 1.9 GB/s + * xxh32 6.8 GB/s 6.0 GB/s + */ + +#ifndef XXHASH_H +#define XXHASH_H + +#include <linux/types.h> + +/*-**************************** + * Simple Hash Functions + *****************************/ + +/** + * xxh32() - calculate the 32-bit hash of the input with a given seed. + * + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s + * + * Return: The 32-bit hash of the data. + */ +uint32_t xxh32(const void *input, size_t length, uint32_t seed); + +/** + * xxh64() - calculate the 64-bit hash of the input with a given seed. + * + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * This function runs 2x faster on 64-bit systems, but slower on 32-bit systems. + * + * Return: The 64-bit hash of the data. 
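A minimal sketch of the one-shot interface, not part of the patch: the whole buffer is hashed in a single call, and the seed (zero here) only alters the result predictably.

#include <linux/xxhash.h>
#include <linux/types.h>

static u64 digest_buf(const void *buf, size_t len)
{
        /* One-shot 64-bit hash; xxh32() is the choice on 32-bit-heavy paths. */
        return xxh64(buf, len, 0);
}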
+ */ +uint64_t xxh64(const void *input, size_t length, uint64_t seed); + +/*-**************************** + * Streaming Hash Functions + *****************************/ + +/* + * These definitions are only meant to allow allocation of XXH state + * statically, on stack, or in a struct for example. + * Do not use members directly. + */ + +/** + * struct xxh32_state - private xxh32 state, do not use members directly + */ +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +/** + * struct xxh32_state - private xxh64 state, do not use members directly + */ +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; +}; + +/** + * xxh32_reset() - reset the xxh32 state to start a new hashing operation + * + * @state: The xxh32 state to reset. + * @seed: Initialize the hash state with this seed. + * + * Call this function on any xxh32_state to prepare for a new hashing operation. + */ +void xxh32_reset(struct xxh32_state *state, uint32_t seed); + +/** + * xxh32_update() - hash the data given and update the xxh32 state + * + * @state: The xxh32 state to update. + * @input: The data to hash. + * @length: The length of the data to hash. + * + * After calling xxh32_reset() call xxh32_update() as many times as necessary. + * + * Return: Zero on success, otherwise an error code. + */ +int xxh32_update(struct xxh32_state *state, const void *input, size_t length); + +/** + * xxh32_digest() - produce the current xxh32 hash + * + * @state: Produce the current xxh32 hash of this state. + * + * A hash value can be produced at any time. It is still possible to continue + * inserting input into the hash state after a call to xxh32_digest(), and + * generate new hashes later on, by calling xxh32_digest() again. + * + * Return: The xxh32 hash stored in the state. + */ +uint32_t xxh32_digest(const struct xxh32_state *state); + +/** + * xxh64_reset() - reset the xxh64 state to start a new hashing operation + * + * @state: The xxh64 state to reset. + * @seed: Initialize the hash state with this seed. + */ +void xxh64_reset(struct xxh64_state *state, uint64_t seed); + +/** + * xxh64_update() - hash the data given and update the xxh64 state + * @state: The xxh64 state to update. + * @input: The data to hash. + * @length: The length of the data to hash. + * + * After calling xxh64_reset() call xxh64_update() as many times as necessary. + * + * Return: Zero on success, otherwise an error code. + */ +int xxh64_update(struct xxh64_state *state, const void *input, size_t length); + +/** + * xxh64_digest() - produce the current xxh64 hash + * + * @state: Produce the current xxh64 hash of this state. + * + * A hash value can be produced at any time. It is still possible to continue + * inserting input into the hash state after a call to xxh64_digest(), and + * generate new hashes later on, by calling xxh64_digest() again. + * + * Return: The xxh64 hash stored in the state. + */ +uint64_t xxh64_digest(const struct xxh64_state *state); + +/*-************************** + * Utils + ***************************/ + +/** + * xxh32_copy_state() - copy the source state into the destination state + * + * @src: The source xxh32 state. + * @dst: The destination xxh32 state. 
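When the data arrives in pieces, the streaming state documented above is used instead of the one-shot calls; a minimal sketch, not part of the patch, with a hypothetical helper:

#include <linux/xxhash.h>
#include <linux/types.h>

static int digest_two_chunks(const void *a, size_t alen,
                             const void *b, size_t blen, u32 *out)
{
        struct xxh32_state state;
        int ret;

        xxh32_reset(&state, 0);                 /* arbitrary seed */

        ret = xxh32_update(&state, a, alen);
        if (ret)
                return ret;
        ret = xxh32_update(&state, b, blen);
        if (ret)
                return ret;

        *out = xxh32_digest(&state);            /* state may keep accepting input */
        return 0;
}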
+ */ +void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src); + +/** + * xxh64_copy_state() - copy the source state into the destination state + * + * @src: The source xxh64 state. + * @dst: The destination xxh64 state. + */ +void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src); + +#endif /* XXHASH_H */ diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 57a8e98f2708..2219cce81ca4 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -47,6 +47,8 @@ void zs_destroy_pool(struct zs_pool *pool); unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags); void zs_free(struct zs_pool *pool, unsigned long obj); +size_t zs_huge_class_size(struct zs_pool *pool); + void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm); void zs_unmap_object(struct zs_pool *pool, unsigned long handle); diff --git a/include/linux/zstd.h b/include/linux/zstd.h new file mode 100644 index 000000000000..249575e2485f --- /dev/null +++ b/include/linux/zstd.h @@ -0,0 +1,1157 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of https://github.com/facebook/zstd. + * An additional grant of patent rights can be found in the PATENTS file in the + * same directory. + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. This program is dual-licensed; you may select + * either version 2 of the GNU General Public License ("GPL") or BSD license + * ("BSD"). + */ + +#ifndef ZSTD_H +#define ZSTD_H + +/* ====== Dependency ======*/ +#include <linux/types.h> /* size_t */ + + +/*-***************************************************************************** + * Introduction + * + * zstd, short for Zstandard, is a fast lossless compression algorithm, + * targeting real-time compression scenarios at zlib-level and better + * compression ratios. The zstd compression library provides in-memory + * compression and decompression functions. The library supports compression + * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled + * ultra, should be used with caution, as they require more memory. + * Compression can be done in: + * - a single step, reusing a context (described as Explicit memory management) + * - unbounded multiple steps (described as Streaming compression) + * The compression ratio achievable on small data can be highly improved using + * compression with a dictionary in: + * - a single step (described as Simple dictionary API) + * - a single step, reusing a dictionary (described as Fast dictionary API) + ******************************************************************************/ + +/*====== Helper functions ======*/ + +/** + * enum ZSTD_ErrorCode - zstd error codes + * + * Functions that return size_t can be checked for errors using ZSTD_isError() + * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode(). 
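+ *
+ * A typical check looks roughly like the sketch below (the compression
+ * call, its arguments and the pr_err() reporting are illustrative
+ * assumptions, not part of this API):
+ *
+ *	size_t ret = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, params);
+ *
+ *	if (ZSTD_isError(ret))
+ *		pr_err("zstd error: %d\n", ZSTD_getErrorCode(ret));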
+ */ +typedef enum { + ZSTD_error_no_error, + ZSTD_error_GENERIC, + ZSTD_error_prefix_unknown, + ZSTD_error_version_unsupported, + ZSTD_error_parameter_unknown, + ZSTD_error_frameParameter_unsupported, + ZSTD_error_frameParameter_unsupportedBy32bits, + ZSTD_error_frameParameter_windowTooLarge, + ZSTD_error_compressionParameter_unsupported, + ZSTD_error_init_missing, + ZSTD_error_memory_allocation, + ZSTD_error_stage_wrong, + ZSTD_error_dstSize_tooSmall, + ZSTD_error_srcSize_wrong, + ZSTD_error_corruption_detected, + ZSTD_error_checksum_wrong, + ZSTD_error_tableLog_tooLarge, + ZSTD_error_maxSymbolValue_tooLarge, + ZSTD_error_maxSymbolValue_tooSmall, + ZSTD_error_dictionary_corrupted, + ZSTD_error_dictionary_wrong, + ZSTD_error_dictionaryCreation_failed, + ZSTD_error_maxCode +} ZSTD_ErrorCode; + +/** + * ZSTD_maxCLevel() - maximum compression level available + * + * Return: Maximum compression level available. + */ +int ZSTD_maxCLevel(void); +/** + * ZSTD_compressBound() - maximum compressed size in worst case scenario + * @srcSize: The size of the data to compress. + * + * Return: The maximum compressed size in the worst case scenario. + */ +size_t ZSTD_compressBound(size_t srcSize); +/** + * ZSTD_isError() - tells if a size_t function result is an error code + * @code: The function result to check for error. + * + * Return: Non-zero iff the code is an error. + */ +static __attribute__((unused)) unsigned int ZSTD_isError(size_t code) +{ + return code > (size_t)-ZSTD_error_maxCode; +} +/** + * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode + * @functionResult: The result of a function for which ZSTD_isError() is true. + * + * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0 + * if the functionResult isn't an error. + */ +static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode( + size_t functionResult) +{ + if (!ZSTD_isError(functionResult)) + return (ZSTD_ErrorCode)0; + return (ZSTD_ErrorCode)(0 - functionResult); +} + +/** + * enum ZSTD_strategy - zstd compression search strategy + * + * From faster to stronger. + */ +typedef enum { + ZSTD_fast, + ZSTD_dfast, + ZSTD_greedy, + ZSTD_lazy, + ZSTD_lazy2, + ZSTD_btlazy2, + ZSTD_btopt, + ZSTD_btopt2 +} ZSTD_strategy; + +/** + * struct ZSTD_compressionParameters - zstd compression parameters + * @windowLog: Log of the largest match distance. Larger means more + * compression, and more memory needed during decompression. + * @chainLog: Fully searched segment. Larger means more compression, slower, + * and more memory (useless for fast). + * @hashLog: Dispatch table. Larger means more compression, + * slower, and more memory. + * @searchLog: Number of searches. Larger means more compression and slower. + * @searchLength: Match length searched. Larger means faster decompression, + * sometimes less compression. + * @targetLength: Acceptable match size for optimal parser (only). Larger means + * more compression, and slower. + * @strategy: The zstd compression strategy. + */ +typedef struct { + unsigned int windowLog; + unsigned int chainLog; + unsigned int hashLog; + unsigned int searchLog; + unsigned int searchLength; + unsigned int targetLength; + ZSTD_strategy strategy; +} ZSTD_compressionParameters; + +/** + * struct ZSTD_frameParameters - zstd frame parameters + * @contentSizeFlag: Controls whether content size will be present in the frame + * header (when known). + * @checksumFlag: Controls whether a 32-bit checksum is generated at the end + * of the frame for error detection. 
+ * @noDictIDFlag: Controls whether dictID will be saved into the frame header + * when using dictionary compression. + * + * The default value is all fields set to 0. + */ +typedef struct { + unsigned int contentSizeFlag; + unsigned int checksumFlag; + unsigned int noDictIDFlag; +} ZSTD_frameParameters; + +/** + * struct ZSTD_parameters - zstd parameters + * @cParams: The compression parameters. + * @fParams: The frame parameters. + */ +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +/** + * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level + * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). + * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. + * @dictSize: The dictionary size or 0 if a dictionary isn't being used. + * + * Return: The selected ZSTD_compressionParameters. + */ +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, + unsigned long long estimatedSrcSize, size_t dictSize); + +/** + * ZSTD_getParams() - returns ZSTD_parameters for selected level + * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). + * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. + * @dictSize: The dictionary size or 0 if a dictionary isn't being used. + * + * The same as ZSTD_getCParams() except also selects the default frame + * parameters (all zero). + * + * Return: The selected ZSTD_parameters. + */ +ZSTD_parameters ZSTD_getParams(int compressionLevel, + unsigned long long estimatedSrcSize, size_t dictSize); + +/*-************************************* + * Explicit memory management + **************************************/ + +/** + * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx + * @cParams: The compression parameters to be used for compression. + * + * If multiple compression parameters might be used, the caller must call + * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum + * size. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initCCtx(). + */ +size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams); + +/** + * struct ZSTD_CCtx - the zstd compression context + * + * When compressing many times it is recommended to allocate a context just once + * and reuse it for each successive compression operation. + */ +typedef struct ZSTD_CCtx_s ZSTD_CCtx; +/** + * ZSTD_initCCtx() - initialize a zstd compression context + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to + * determine how large the workspace must be. + * + * Return: A compression context emplaced into workspace. + */ +ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize); + +/** + * ZSTD_compressCCtx() - compress src into dst + * @ctx: The context. Must have been initialized with a workspace at + * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). + * @dst: The buffer to compress src into. + * @dstCapacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @srcSize: The size of the data to compress. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * + * Return: The compressed size or an error, which can be checked using + * ZSTD_isError(). 
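+ *
+ * Minimal one-shot sketch (the vmalloc()/vfree() workspace handling,
+ * compression level 3 and the src/dst buffers are illustrative
+ * assumptions, not requirements of this API):
+ *
+ *	ZSTD_parameters params = ZSTD_getParams(3, srcSize, 0);
+ *	size_t wkspSize = ZSTD_CCtxWorkspaceBound(params.cParams);
+ *	void *wksp = vmalloc(wkspSize);
+ *	ZSTD_CCtx *cctx = ZSTD_initCCtx(wksp, wkspSize);
+ *	size_t ret = ZSTD_compressCCtx(cctx, dst, dstCapacity,
+ *				       src, srcSize, params);
+ *	vfree(wksp);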
+ */ +size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, ZSTD_parameters params); + +/** + * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initDCtx(). + */ +size_t ZSTD_DCtxWorkspaceBound(void); + +/** + * struct ZSTD_DCtx - the zstd decompression context + * + * When decompressing many times it is recommended to allocate a context just + * once and reuse it for each successive decompression operation. + */ +typedef struct ZSTD_DCtx_s ZSTD_DCtx; +/** + * ZSTD_initDCtx() - initialize a zstd decompression context + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to + * determine how large the workspace must be. + * + * Return: A decompression context emplaced into workspace. + */ +ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize); + +/** + * ZSTD_decompressDCtx() - decompress zstd compressed src into dst + * @ctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dstCapacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @srcSize: The exact size of the data to decompress. + * + * Return: The decompressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); + +/*-************************ + * Simple dictionary API + **************************/ + +/** + * ZSTD_compress_usingDict() - compress src into dst using a dictionary + * @ctx: The context. Must have been initialized with a workspace at + * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). + * @dst: The buffer to compress src into. + * @dstCapacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @srcSize: The size of the data to compress. + * @dict: The dictionary to use for compression. + * @dictSize: The size of the dictionary. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * + * Compression using a predefined dictionary. The same dictionary must be used + * during decompression. + * + * Return: The compressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, const void *dict, size_t dictSize, + ZSTD_parameters params); + +/** + * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary + * @ctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dstCapacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @srcSize: The exact size of the data to decompress. + * @dict: The dictionary to use for decompression. 
The same dictionary + * must've been used to compress the data. + * @dictSize: The size of the dictionary. + * + * Return: The decompressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, const void *dict, size_t dictSize); + +/*-************************** + * Fast dictionary API + ***************************/ + +/** + * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict + * @cParams: The compression parameters to be used for compression. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initCDict(). + */ +size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams); + +/** + * struct ZSTD_CDict - a digested dictionary to be used for compression + */ +typedef struct ZSTD_CDict_s ZSTD_CDict; + +/** + * ZSTD_initCDict() - initialize a digested dictionary for compression + * @dictBuffer: The dictionary to digest. The buffer is referenced by the + * ZSTD_CDict so it must outlive the returned ZSTD_CDict. + * @dictSize: The size of the dictionary. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * @workspace: The workspace. It must outlive the returned ZSTD_CDict. + * @workspaceSize: The workspace size. Must be at least + * ZSTD_CDictWorkspaceBound(params.cParams). + * + * When compressing multiple messages / blocks with the same dictionary it is + * recommended to load it just once. The ZSTD_CDict merely references the + * dictBuffer, so it must outlive the returned ZSTD_CDict. + * + * Return: The digested dictionary emplaced into workspace. + */ +ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize, + ZSTD_parameters params, void *workspace, size_t workspaceSize); + +/** + * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict + * @ctx: The context. Must have been initialized with a workspace at + * least as large as ZSTD_CCtxWorkspaceBound(cParams) where + * cParams are the compression parameters used to initialize the + * cdict. + * @dst: The buffer to compress src into. + * @dstCapacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @srcSize: The size of the data to compress. + * @cdict: The digested dictionary to use for compression. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * + * Compression using a digested dictionary. The same dictionary must be used + * during decompression. + * + * Return: The compressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, const ZSTD_CDict *cdict); + + +/** + * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initDDict(). + */ +size_t ZSTD_DDictWorkspaceBound(void); + +/** + * struct ZSTD_DDict - a digested dictionary to be used for decompression + */ +typedef struct ZSTD_DDict_s ZSTD_DDict; + +/** + * ZSTD_initDDict() - initialize a digested dictionary for decompression + * @dictBuffer: The dictionary to digest. The buffer is referenced by the + * ZSTD_DDict so it must outlive the returned ZSTD_DDict. + * @dictSize: The size of the dictionary. + * @workspace: The workspace. It must outlive the returned ZSTD_DDict. 
+ * @workspaceSize: The workspace size. Must be at least + * ZSTD_DDictWorkspaceBound(). + * + * When decompressing multiple messages / blocks with the same dictionary it is + * recommended to load it just once. The ZSTD_DDict merely references the + * dictBuffer, so it must outlive the returned ZSTD_DDict. + * + * Return: The digested dictionary emplaced into workspace. + */ +ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize, + void *workspace, size_t workspaceSize); + +/** + * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict + * @ctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dstCapacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @srcSize: The exact size of the data to decompress. + * @ddict: The digested dictionary to use for decompression. The same + * dictionary must've been used to compress the data. + * + * Return: The decompressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, + size_t dstCapacity, const void *src, size_t srcSize, + const ZSTD_DDict *ddict); + + +/*-************************** + * Streaming + ***************************/ + +/** + * struct ZSTD_inBuffer - input buffer for streaming + * @src: Start of the input buffer. + * @size: Size of the input buffer. + * @pos: Position where reading stopped. Will be updated. + * Necessarily 0 <= pos <= size. + */ +typedef struct ZSTD_inBuffer_s { + const void *src; + size_t size; + size_t pos; +} ZSTD_inBuffer; + +/** + * struct ZSTD_outBuffer - output buffer for streaming + * @dst: Start of the output buffer. + * @size: Size of the output buffer. + * @pos: Position where writing stopped. Will be updated. + * Necessarily 0 <= pos <= size. + */ +typedef struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +} ZSTD_outBuffer; + + + +/*-***************************************************************************** + * Streaming compression - HowTo + * + * A ZSTD_CStream object is required to track streaming operation. + * Use ZSTD_initCStream() to initialize a ZSTD_CStream object. + * ZSTD_CStream objects can be reused multiple times on consecutive compression + * operations. It is recommended to re-use ZSTD_CStream in situations where many + * streaming operations will be achieved consecutively. Use one separate + * ZSTD_CStream per thread for parallel execution. + * + * Use ZSTD_compressStream() repetitively to consume input stream. + * The function will automatically update both `pos` fields. + * Note that it may not consume the entire input, in which case `pos < size`, + * and it's up to the caller to present again remaining data. + * It returns a hint for the preferred number of bytes to use as an input for + * the next function call. + * + * At any moment, it's possible to flush whatever data remains within internal + * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might + * still be some content left within the internal buffer if `output->size` is + * too small. It returns the number of bytes left in the internal buffer and + * must be called until it returns 0. + * + * ZSTD_endStream() instructs to finish a frame. 
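+ *
+ * A rough single-buffer sketch of the loop described above (the zcs
+ * context, the src/dst buffers and the write_output() helper are
+ * hypothetical assumptions, not part of this API):
+ *
+ *	ZSTD_inBuffer in = { src, srcSize, 0 };
+ *	ZSTD_outBuffer out = { dst, dstCapacity, 0 };
+ *	size_t ret;
+ *
+ *	while (in.pos < in.size) {
+ *		ret = ZSTD_compressStream(zcs, &out, &in);
+ *		if (ZSTD_isError(ret))
+ *			break;
+ *		write_output(out.dst, out.pos);
+ *		out.pos = 0;
+ *	}
+ *	do {
+ *		ret = ZSTD_endStream(zcs, &out);
+ *		write_output(out.dst, out.pos);
+ *		out.pos = 0;
+ *	} while (ret != 0 && !ZSTD_isError(ret));
+ *
+ * As the final step above, ZSTD_endStream() finishes the frame.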
It will perform a flush and + * write frame epilogue. The epilogue is required for decoders to consider a + * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush + * the full content if `output->size` is too small. In which case, call again + * ZSTD_endStream() to complete the flush. It returns the number of bytes left + * in the internal buffer and must be called until it returns 0. + ******************************************************************************/ + +/** + * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream + * @cParams: The compression parameters to be used for compression. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initCStream() and ZSTD_initCStream_usingCDict(). + */ +size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams); + +/** + * struct ZSTD_CStream - the zstd streaming compression context + */ +typedef struct ZSTD_CStream_s ZSTD_CStream; + +/*===== ZSTD_CStream management functions =====*/ +/** + * ZSTD_initCStream() - initialize a zstd streaming compression context + * @params: The zstd compression parameters. + * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must + * pass the source size (zero means empty source). Otherwise, + * the caller may optionally pass the source size, or zero if + * unknown. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. + * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine + * how large the workspace must be. + * + * Return: The zstd streaming compression context. + */ +ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, + unsigned long long pledgedSrcSize, void *workspace, + size_t workspaceSize); + +/** + * ZSTD_initCStream_usingCDict() - initialize a streaming compression context + * @cdict: The digested dictionary to use for compression. + * @pledgedSrcSize: Optionally the source size, or zero if unknown. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound() + * with the cParams used to initialize the cdict to determine + * how large the workspace must be. + * + * Return: The zstd streaming compression context. + */ +ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, + unsigned long long pledgedSrcSize, void *workspace, + size_t workspaceSize); + +/*===== Streaming compression functions =====*/ +/** + * ZSTD_resetCStream() - reset the context using parameters from creation + * @zcs: The zstd streaming compression context to reset. + * @pledgedSrcSize: Optionally the source size, or zero if unknown. + * + * Resets the context using the parameters from creation. Skips dictionary + * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame + * content size is always written into the frame header. + * + * Return: Zero or an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize); +/** + * ZSTD_compressStream() - streaming compress some of input into output + * @zcs: The zstd streaming compression context. + * @output: Destination buffer. `output->pos` is updated to indicate how much + * compressed data was written. + * @input: Source buffer. `input->pos` is updated to indicate how much data was + * read. 
Note that it may not consume the entire input, in which case + * `input->pos < input->size`, and it's up to the caller to present + * remaining data again. + * + * The `input` and `output` buffers may be any size. Guaranteed to make some + * forward progress if `input` and `output` are not empty. + * + * Return: A hint for the number of bytes to use as the input for the next + * function call or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, + ZSTD_inBuffer *input); +/** + * ZSTD_flushStream() - flush internal buffers into output + * @zcs: The zstd streaming compression context. + * @output: Destination buffer. `output->pos` is updated to indicate how much + * compressed data was written. + * + * ZSTD_flushStream() must be called until it returns 0, meaning all the data + * has been flushed. Since ZSTD_flushStream() causes a block to be ended, + * calling it too often will degrade the compression ratio. + * + * Return: The number of bytes still present within internal buffers or an + * error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); +/** + * ZSTD_endStream() - flush internal buffers into output and end the frame + * @zcs: The zstd streaming compression context. + * @output: Destination buffer. `output->pos` is updated to indicate how much + * compressed data was written. + * + * ZSTD_endStream() must be called until it returns 0, meaning all the data has + * been flushed and the frame epilogue has been written. + * + * Return: The number of bytes still present within internal buffers or an + * error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); + +/** + * ZSTD_CStreamInSize() - recommended size for the input buffer + * + * Return: The recommended size for the input buffer. + */ +size_t ZSTD_CStreamInSize(void); +/** + * ZSTD_CStreamOutSize() - recommended size for the output buffer + * + * When the output buffer is at least this large, it is guaranteed to be large + * enough to flush at least one complete compressed block. + * + * Return: The recommended size for the output buffer. + */ +size_t ZSTD_CStreamOutSize(void); + + + +/*-***************************************************************************** + * Streaming decompression - HowTo + * + * A ZSTD_DStream object is required to track streaming operations. + * Use ZSTD_initDStream() to initialize a ZSTD_DStream object. + * ZSTD_DStream objects can be re-used multiple times. + * + * Use ZSTD_decompressStream() repetitively to consume your input. + * The function will update both `pos` fields. + * If `input->pos < input->size`, some input has not been consumed. + * It's up to the caller to present again remaining data. + * If `output->pos < output->size`, decoder has flushed everything it could. + * Returns 0 iff a frame is completely decoded and fully flushed. + * Otherwise it returns a suggested next input size that will never load more + * than the current frame. + ******************************************************************************/ + +/** + * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream + * @maxWindowSize: The maximum window size allowed for compressed frames. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initDStream() and ZSTD_initDStream_usingDDict(). 
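+ *
+ * A minimal decompression sketch built on this bound (the vmalloc()/vfree()
+ * workspace handling, the chosen window size limit and the src/dst buffers
+ * are illustrative assumptions):
+ *
+ *	size_t maxWindow = 1ULL << ZSTD_WINDOWLOG_MAX;
+ *	size_t wkspSize = ZSTD_DStreamWorkspaceBound(maxWindow);
+ *	void *wksp = vmalloc(wkspSize);
+ *	ZSTD_DStream *zds = ZSTD_initDStream(maxWindow, wksp, wkspSize);
+ *	ZSTD_inBuffer in = { src, srcSize, 0 };
+ *	ZSTD_outBuffer out = { dst, dstCapacity, 0 };
+ *
+ *	while (in.pos < in.size) {
+ *		size_t ret = ZSTD_decompressStream(zds, &out, &in);
+ *		if (ZSTD_isError(ret) || ret == 0)
+ *			break;
+ *	}
+ *	vfree(wksp);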
+ */ +size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize); + +/** + * struct ZSTD_DStream - the zstd streaming decompression context + */ +typedef struct ZSTD_DStream_s ZSTD_DStream; +/*===== ZSTD_DStream management functions =====*/ +/** + * ZSTD_initDStream() - initialize a zstd streaming decompression context + * @maxWindowSize: The maximum window size allowed for compressed frames. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. + * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine + * how large the workspace must be. + * + * Return: The zstd streaming decompression context. + */ +ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, + size_t workspaceSize); +/** + * ZSTD_initDStream_usingDDict() - initialize streaming decompression context + * @maxWindowSize: The maximum window size allowed for compressed frames. + * @ddict: The digested dictionary to use for decompression. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. + * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine + * how large the workspace must be. + * + * Return: The zstd streaming decompression context. + */ +ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, + const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize); + +/*===== Streaming decompression functions =====*/ +/** + * ZSTD_resetDStream() - reset the context using parameters from creation + * @zds: The zstd streaming decompression context to reset. + * + * Resets the context using the parameters from creation. Skips dictionary + * loading, since it can be reused. + * + * Return: Zero or an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_resetDStream(ZSTD_DStream *zds); +/** + * ZSTD_decompressStream() - streaming decompress some of input into output + * @zds: The zstd streaming decompression context. + * @output: Destination buffer. `output.pos` is updated to indicate how much + * decompressed data was written. + * @input: Source buffer. `input.pos` is updated to indicate how much data was + * read. Note that it may not consume the entire input, in which case + * `input.pos < input.size`, and it's up to the caller to present + * remaining data again. + * + * The `input` and `output` buffers may be any size. Guaranteed to make some + * forward progress if `input` and `output` are not empty. + * ZSTD_decompressStream() will not consume the last byte of the frame until + * the entire frame is flushed. + * + * Return: Returns 0 iff a frame is completely decoded and fully flushed. + * Otherwise returns a hint for the number of bytes to use as the input + * for the next function call or an error, which can be checked using + * ZSTD_isError(). The size hint will never load more than the frame. + */ +size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, + ZSTD_inBuffer *input); + +/** + * ZSTD_DStreamInSize() - recommended size for the input buffer + * + * Return: The recommended size for the input buffer. + */ +size_t ZSTD_DStreamInSize(void); +/** + * ZSTD_DStreamOutSize() - recommended size for the output buffer + * + * When the output buffer is at least this large, it is guaranteed to be large + * enough to flush at least one complete decompressed block. + * + * Return: The recommended size for the output buffer. 
+ */
+size_t ZSTD_DStreamOutSize(void);
+
+
+/* --- Constants ---*/
+#define ZSTD_MAGICNUMBER 0xFD2FB528   /* >= v0.8.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
+
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
+#define ZSTD_WINDOWLOG_MAX_32 27
+#define ZSTD_WINDOWLOG_MAX_64 27
+#define ZSTD_WINDOWLOG_MAX \
+	((unsigned int)(sizeof(size_t) == 4 \
+		? ZSTD_WINDOWLOG_MAX_32 \
+		: ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
+#define ZSTD_HASHLOG_MIN 6
+#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_HASHLOG3_MAX 17
+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+/* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_SEARCHLENGTH_MAX 7
+/* only for ZSTD_btopt, other strategies are limited to 4 */
+#define ZSTD_SEARCHLENGTH_MIN 3
+#define ZSTD_TARGETLENGTH_MIN 4
+#define ZSTD_TARGETLENGTH_MAX 999
+
+/* for static allocation */
+#define ZSTD_FRAMEHEADERSIZE_MAX 18
+#define ZSTD_FRAMEHEADERSIZE_MIN 6
+static const size_t ZSTD_frameHeaderSize_prefix = 5;
+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
+static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
+/* magic number + skippable frame length */
+static const size_t ZSTD_skippableHeaderSize = 8;
+
+
+/*-*************************************
+ * Compressed size functions
+ **************************************/
+
+/**
+ * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
+ * @src: Source buffer. It should point to the start of a zstd encoded frame
+ *       or a skippable frame.
+ * @srcSize: The size of the source buffer. It must be at least as large as the
+ *           size of the frame.
+ *
+ * Return: The compressed size of the frame pointed to by `src` or an error,
+ *         which can be checked with ZSTD_isError().
+ *         Suitable to pass to ZSTD_decompress() or similar functions.
+ */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
+
+/*-*************************************
+ * Decompressed size functions
+ **************************************/
+/**
+ * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
+ * @src: It should point to the start of a zstd encoded frame.
+ * @srcSize: The size of the source buffer. It must be at least as large as the
+ *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
+ *
+ * Return: The frame content size stored in the frame header if known.
+ *         `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
+ *         frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
+ */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+
+/**
+ * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
+ * @src: It should point to the start of a series of zstd encoded and/or
+ *       skippable frames.
+ * @srcSize: The exact size of the series of frames.
+ *
+ * If any zstd encoded frame in the series doesn't have the frame content size
+ * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
+ * set when using ZSTD_compress(). The decompressed size can be very large.
+ * If the source is untrusted, the decompressed size could be wrong or
+ * intentionally modified. Always ensure the result fits within the
+ * application's authorized limits.
ZSTD_findDecompressedSize() handles multiple + * frames, and so it must traverse the input to read each frame header. This is + * efficient as most of the data is skipped, however it does mean that all frame + * data must be present and valid. + * + * Return: Decompressed size of all the data contained in the frames if known. + * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown. + * `ZSTD_CONTENTSIZE_ERROR` if an error occurred. + */ +unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize); + +/*-************************************* + * Advanced compression functions + **************************************/ +/** + * ZSTD_checkCParams() - ensure parameter values remain within authorized range + * @cParams: The zstd compression parameters. + * + * Return: Zero or an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams); + +/** + * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize + * @srcSize: Optionally the estimated source size, or zero if unknown. + * @dictSize: Optionally the estimated dictionary size, or zero if unknown. + * + * Return: The optimized parameters. + */ +ZSTD_compressionParameters ZSTD_adjustCParams( + ZSTD_compressionParameters cParams, unsigned long long srcSize, + size_t dictSize); + +/*--- Advanced decompression functions ---*/ + +/** + * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame + * @buffer: The source buffer to check. + * @size: The size of the source buffer, must be at least 4 bytes. + * + * Return: True iff the buffer starts with a zstd or skippable frame identifier. + */ +unsigned int ZSTD_isFrame(const void *buffer, size_t size); + +/** + * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary + * @dict: The dictionary buffer. + * @dictSize: The size of the dictionary buffer. + * + * Return: The dictionary id stored within the dictionary or 0 if the + * dictionary is not a zstd dictionary. If it returns 0 the + * dictionary can still be loaded as a content-only dictionary. + */ +unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize); + +/** + * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict + * @ddict: The ddict to find the id of. + * + * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not + * a zstd dictionary. If it returns 0 `ddict` will be loaded as a + * content-only dictionary. + */ +unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict); + +/** + * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame + * @src: Source buffer. It must be a zstd encoded frame. + * @srcSize: The size of the source buffer. It must be at least as large as the + * frame header. `ZSTD_frameHeaderSize_max` is always large enough. + * + * Return: The dictionary id required to decompress the frame stored within + * `src` or 0 if the dictionary id could not be decoded. It can return + * 0 if the frame does not require a dictionary, the dictionary id + * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize` + * is too small. + */ +unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize); + +/** + * struct ZSTD_frameParams - zstd frame parameters stored in the frame header + * @frameContentSize: The frame content size, or 0 if not present. + * @windowSize: The window size, or 0 if the frame is a skippable frame. + * @dictID: The dictionary id, or 0 if not present. 
+ * @checksumFlag: Whether a checksum was used.
+ */
+typedef struct {
+	unsigned long long frameContentSize;
+	unsigned int windowSize;
+	unsigned int dictID;
+	unsigned int checksumFlag;
+} ZSTD_frameParams;
+
+/**
+ * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
+ * @fparamsPtr: On success the frame parameters are written here.
+ * @src: The source buffer. It must point to a zstd or skippable frame.
+ * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is
+ *           always large enough to succeed.
+ *
+ * Return: 0 on success. If more data is required it returns how many bytes
+ *         must be provided to make forward progress. Otherwise it returns
+ *         an error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
+	size_t srcSize);
+
+/*-*****************************************************************************
+ * Buffer-less and synchronous inner streaming functions
+ *
+ * This is an advanced API, giving full control over buffer management, for
+ * users who need direct control over memory.
+ * But it's also a complex one, with many restrictions (documented below).
+ * Prefer the normal streaming API for an easier experience.
+ ******************************************************************************/
+
+/*-*****************************************************************************
+ * Buffer-less streaming compression (synchronous mode)
+ *
+ * A ZSTD_CCtx object is required to track streaming operations.
+ * Use ZSTD_initCCtx() to initialize a context.
+ * A ZSTD_CCtx object can be re-used multiple times within successive
+ * compression operations.
+ *
+ * Start by initializing a context.
+ * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
+ * compression, or ZSTD_compressBegin_advanced() for finer parameter control.
+ * It's also possible to duplicate a reference context which has already been
+ * initialized, using ZSTD_copyCCtx().
+ *
+ * Then, consume your input using ZSTD_compressContinue().
+ * There are some important considerations to keep in mind when using this
+ * advanced function:
+ * - ZSTD_compressContinue() has no internal buffer. It uses the externally
+ *   provided buffer only.
+ * - The interface is synchronous: input is consumed entirely and produces one
+ *   or more compressed blocks.
+ * - The caller must ensure there is enough space in `dst` to store the
+ *   compressed data under the worst case scenario. Worst case evaluation is
+ *   provided by ZSTD_compressBound().
+ * - ZSTD_compressContinue() doesn't guarantee recovery after a failed
+ *   compression.
+ * - ZSTD_compressContinue() presumes prior input ***is still accessible and
+ *   unmodified*** (up to maximum distance size, see WindowLog).
+ *   It remembers all previous contiguous blocks, plus one separated memory
+ *   segment (which can itself consist of multiple contiguous blocks).
+ * - ZSTD_compressContinue() detects that prior input has been overwritten when
+ *   the `src` buffer overlaps; in that case, it will "discard" the relevant
+ *   memory section from its history.
+ *
+ * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
+ * and optional checksum. It's possible to use srcSize==0, in which case it
+ * will write a final empty block to end the frame. Without the last block
+ * mark, frames will be considered unfinished (corrupted) by decoders.
+ *
+ * A `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress a new
+ * frame.
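+ *
+ * Rough shape of a buffer-less pass over two source chunks (illustrative
+ * only; the cctx, dst and chunk variables are assumptions and error
+ * handling is omitted):
+ *
+ *	size_t pos = 0;
+ *
+ *	ZSTD_compressBegin(cctx, 3);
+ *	pos += ZSTD_compressContinue(cctx, dst + pos, dstCapacity - pos,
+ *				     chunk0, chunk0Size);
+ *	pos += ZSTD_compressEnd(cctx, dst + pos, dstCapacity - pos,
+ *				chunk1, chunk1Size);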
+ ******************************************************************************/ + +/*===== Buffer-less streaming compression functions =====*/ +size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel); +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, + size_t dictSize, int compressionLevel); +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, + size_t dictSize, ZSTD_parameters params, + unsigned long long pledgedSrcSize); +size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx, + unsigned long long pledgedSrcSize); +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, + unsigned long long pledgedSrcSize); +size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); + + + +/*-***************************************************************************** + * Buffer-less streaming decompression (synchronous mode) + * + * A ZSTD_DCtx object is required to track streaming operations. + * Use ZSTD_initDCtx() to initialize a context. + * A ZSTD_DCtx object can be re-used multiple times. + * + * First typical operation is to retrieve frame parameters, using + * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide + * important information to correctly decode the frame, such as the minimum + * rolling buffer size to allocate to decompress data (`windowSize`), and the + * dictionary ID used. + * Note: content size is optional, it may not be present. 0 means unknown. + * Note that these values could be wrong, either because of data malformation, + * or because an attacker is spoofing deliberate false information. As a + * consequence, check that values remain within valid application range, + * especially `windowSize`, before allocation. Each application can set its own + * limit, depending on local restrictions. For extended interoperability, it is + * recommended to support at least 8 MB. + * Frame parameters are extracted from the beginning of the compressed frame. + * Data fragment must be large enough to ensure successful decoding, typically + * `ZSTD_frameHeaderSize_max` bytes. + * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled. + * >0: `srcSize` is too small, provide at least this many bytes. + * errorCode, which can be tested using ZSTD_isError(). + * + * Start decompression, with ZSTD_decompressBegin() or + * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared + * context, using ZSTD_copyDCtx(). + * + * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() + * alternatively. + * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' + * to ZSTD_decompressContinue(). + * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will + * fail. + * + * The result of ZSTD_decompressContinue() is the number of bytes regenerated + * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an + * error; it just means ZSTD_decompressContinue() has decoded some metadata + * item. It can also be an error code, which can be tested with ZSTD_isError(). + * + * ZSTD_decompressContinue() needs previous data blocks during decompression, up + * to `windowSize`. They should preferably be located contiguously, prior to + * current block. Alternatively, a round buffer of sufficient size is also + * possible. 
Sufficient size is determined by the frame parameters.
+ * ZSTD_decompressContinue() is very sensitive to contiguity; if two blocks
+ * don't follow each other, make sure that either the compressor breaks
+ * contiguity at the same place, or that the previous contiguous segment is
+ * large enough to properly handle the maximum back-reference.
+ *
+ * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ * The context can then be reset to start a new decompression.
+ *
+ * Note: it's possible to know whether the next input to present is a header
+ * or a block, using ZSTD_nextInputType(). This information is not required to
+ * properly decode a frame.
+ *
+ * == Special case: skippable frames ==
+ *
+ * Skippable frames allow integration of user-defined data into a flow of
+ * concatenated frames. Skippable frames will be ignored (skipped) by a
+ * decompressor. The format of skippable frames is as follows:
+ * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
+ *    0x184D2A50 to 0x184D2A5F
+ * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ * c) Frame Content - any content (User Data) of length equal to Frame Size
+ * For skippable frames ZSTD_decompressContinue() always returns 0.
+ * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowSize==0,
+ * which means that the frame is skippable.
+ * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
+ * actually be a zstd encoded frame with no content. For purposes of
+ * decompression, it is valid in both cases to skip the frame using
+ * ZSTD_findFrameCompressedSize() to find its size in bytes.
+ * ZSTD_getFrameParams() also returns the skippable frame size as
+ * fparamsPtr->frameContentSize.
+ ******************************************************************************/
+
+/*===== Buffer-less streaming decompression functions =====*/
+size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
+	size_t dictSize);
+void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
+size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize);
+typedef enum {
+	ZSTDnit_frameHeader,
+	ZSTDnit_blockHeader,
+	ZSTDnit_block,
+	ZSTDnit_lastBlock,
+	ZSTDnit_checksum,
+	ZSTDnit_skippableFrame
+} ZSTD_nextInputType_e;
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
+
+/*-*****************************************************************************
+ * Block functions
+ *
+ * Block functions produce and decode raw zstd blocks, without frame metadata.
+ * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
+ * very small blocks (< 100 bytes). The user is responsible for keeping track
+ * of the information required to regenerate the data, such as the compressed
+ * and content sizes.
+ *
+ * A few rules to respect:
+ * - Compressing and decompressing require a context structure
+ *   + Use ZSTD_initCCtx() and ZSTD_initDCtx()
+ * - It is necessary to initialize the context before starting
+ *   + compression: ZSTD_compressBegin()
+ *   + decompression: ZSTD_decompressBegin()
+ *   + variants _usingDict() are also allowed
+ *   + copyCCtx() and copyDCtx() work too
+ * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
+ *   + If you need to compress more, cut data into multiple blocks
+ *   + Consider using the regular ZSTD_compress() instead, as frame metadata
+ *     costs become negligible when source size is large.
+ * - When a block is considered not compressible enough, ZSTD_compressBlock() + * result will be zero. In which case, nothing is produced into `dst`. + * + User must test for such outcome and deal directly with uncompressed data + * + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!! + * + In case of multiple successive blocks, decoder must be informed of + * uncompressed block existence to follow proper history. Use + * ZSTD_insertBlock() in such a case. + ******************************************************************************/ + +/* Define for static allocation */ +#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) +/*===== Raw zstd block functions =====*/ +size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx); +size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, + size_t blockSize); + +#endif /* ZSTD_H */ diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index e9eb2d6791b3..f7a35fcaaaf6 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -62,7 +62,8 @@ struct vsock_sock { struct list_head pending_links; struct list_head accept_queue; bool rejected; - struct delayed_work dwork; + struct delayed_work connect_work; + struct delayed_work pending_work; u32 peer_shutdown; bool sent_request; bool ignore_connecting_rst; @@ -73,7 +74,6 @@ struct vsock_sock { s64 vsock_stream_has_data(struct vsock_sock *vsk); s64 vsock_stream_has_space(struct vsock_sock *vsk); -void vsock_pending_work(struct work_struct *work); struct sock *__vsock_create(struct net *net, struct socket *sock, struct sock *parent, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 1878d0a96333..876688b5a356 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -878,7 +878,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u16 conn_timeout, u8 role); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, - u8 role); + u8 role, bdaddr_t *direct_rpa); struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, u8 sec_level, u8 auth_type); struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, diff --git a/include/net/cnss.h b/include/net/cnss.h index d6f27759af17..ea0082dc1219 100644 --- a/include/net/cnss.h +++ b/include/net/cnss.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -55,6 +55,7 @@ struct cnss_wlan_driver { int (*suspend)(struct pci_dev *pdev, pm_message_t state); int (*resume)(struct pci_dev *pdev); void (*modem_status)(struct pci_dev *, int state); + void (*update_status)(struct pci_dev *pdev, uint32_t status); struct cnss_wlan_runtime_ops *runtime_ops; const struct pci_device_id *id_table; }; @@ -93,11 +94,14 @@ struct cnss_platform_cap { u32 cap_flag; }; -/* WLAN driver status */ +/* WLAN driver status, keep it aligned with cnss2 */ enum cnss_driver_status { CNSS_UNINITIALIZED, CNSS_INITIALIZED, - CNSS_LOAD_UNLOAD + CNSS_LOAD_UNLOAD, + CNSS_RECOVERY, + CNSS_FW_DOWN, + CNSS_SSR_FAIL, }; enum cnss_runtime_request { diff --git a/include/net/cnss2.h b/include/net/cnss2.h index f80d87533a99..7ca407f6b606 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -115,6 +115,11 @@ struct cnss_shadow_reg_v2_cfg { u32 addr; }; +struct cnss_rri_over_ddr_cfg { + u32 base_addr_low; + u32 base_addr_high; +}; + struct cnss_wlan_enable_cfg { u32 num_ce_tgt_cfg; struct cnss_ce_tgt_pipe_cfg *ce_tgt_cfg; @@ -124,6 +129,8 @@ struct cnss_wlan_enable_cfg { struct cnss_shadow_reg_cfg *shadow_reg_cfg; u32 num_shadow_reg_v2_cfg; struct cnss_shadow_reg_v2_cfg *shadow_reg_v2_cfg; + bool rri_over_ddr_cfg_valid; + struct cnss_rri_over_ddr_cfg rri_over_ddr_cfg; }; enum cnss_driver_mode { @@ -160,6 +167,7 @@ static inline int cnss_wlan_pm_control(struct device *dev, bool vote) return 0; } #endif /* CONFIG_PCI_MSM */ +extern int cnss_pci_is_device_down(struct device *dev); extern void cnss_schedule_recovery(struct device *dev, enum cnss_recovery_reason reason); extern int cnss_self_recovery(struct device *dev, @@ -171,6 +179,9 @@ extern int cnss_get_fw_files_for_target(struct device *dev, u32 target_type, u32 target_version); extern int cnss_get_platform_cap(struct device *dev, struct cnss_platform_cap *cap); +extern struct dma_iommu_mapping *cnss_smmu_get_mapping(struct device *dev); +extern int cnss_smmu_map(struct device *dev, + phys_addr_t paddr, uint32_t *iova_addr, size_t size); extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info); extern int cnss_request_bus_bandwidth(struct device *dev, int bandwidth); extern int cnss_power_up(struct device *dev); @@ -181,6 +192,9 @@ extern void cnss_lock_pm_sem(struct device *dev); extern void cnss_release_pm_sem(struct device *dev); extern int cnss_auto_suspend(struct device *dev); extern int cnss_auto_resume(struct device *dev); +extern int cnss_pci_force_wake_request(struct device *dev); +extern int cnss_pci_is_device_awake(struct device *dev); +extern int cnss_pci_force_wake_release(struct device *dev); extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name, int *num_vectors, uint32_t *user_base_data, diff --git a/include/net/cnss_prealloc.h b/include/net/cnss_prealloc.h index 734b4b69ecbb..39d080a8b184 100644 --- a/include/net/cnss_prealloc.h +++ b/include/net/cnss_prealloc.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,7 +15,7 @@ #define WCNSS_PRE_ALLOC_GET_THRESHOLD (4*1024) -extern void *wcnss_prealloc_get(unsigned int size); +extern void *wcnss_prealloc_get(size_t size); extern int wcnss_prealloc_put(void *ptr); extern int wcnss_pre_alloc_reset(void); void wcnss_prealloc_check_memory_leak(void); diff --git a/include/net/ip.h b/include/net/ip.h index 17997b48102d..81c7408deb83 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -282,6 +282,13 @@ int ip_decrease_ttl(struct iphdr *iph) return --iph->ttl; } +static inline int ip_mtu_locked(const struct dst_entry *dst) +{ + const struct rtable *rt = (const struct rtable *)dst; + + return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU); +} + static inline int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) { @@ -289,7 +296,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) return pmtudisc == IP_PMTUDISC_DO || (pmtudisc == IP_PMTUDISC_WANT && - !(dst_metric_locked(dst, RTAX_MTU))); + !ip_mtu_locked(dst)); } static inline bool ip_sk_accept_pmtu(const struct sock *sk) @@ -315,7 +322,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, struct net *net = dev_net(dst->dev); if (net->ipv4.sysctl_ip_fwd_use_pmtu || - dst_metric_locked(dst, RTAX_MTU) || + ip_mtu_locked(dst) || !forwarding) return dst_mtu(dst); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index bda1721e9622..3afb7c4c7098 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -56,6 +56,7 @@ struct fib_nh_exception { int fnhe_genid; __be32 fnhe_daddr; u32 fnhe_pmtu; + bool fnhe_mtu_locked; __be32 fnhe_gw; unsigned long fnhe_expires; struct rtable __rcu *fnhe_rth_input; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 84f0d0602433..0e01d570fa22 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -762,7 +762,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, * to minimize possbility that any useful information to an * attacker is leaked. Only lower 20 bits are relevant. 
*/ - rol32(hash, 16); + hash = rol32(hash, 16); flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; diff --git a/include/net/llc.h b/include/net/llc.h index e8e61d4fb458..82d989995d18 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap) atomic_inc(&sap->refcnt); } +static inline bool llc_sap_hold_safe(struct llc_sap *sap) +{ + return atomic_inc_not_zero(&sap->refcnt); +} + void llc_sap_close(struct llc_sap *sap); static inline void llc_sap_put(struct llc_sap *sap) diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index ea985aa7a6c5..df528a623548 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk); /* Access to a connection */ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); -void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); +int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit); void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 4ca1c04a6f1f..c1744c2f8aca 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3910,7 +3910,7 @@ static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta, * The TX headroom reserved by mac80211 for its own tx_status functions. * This is enough for the radiotap header. */ -#define IEEE80211_TX_STATUS_HEADROOM 14 +#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4) /** * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 93328c61934a..6965dfe7e88b 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -115,6 +115,7 @@ struct net { #endif #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct netns_nf_frag nf_frag; + struct ctl_table_header *nf_frag_frags_hdr; #endif struct sock *nfnl; struct sock *nfnl_stash; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index c0368db6df54..d235722c0d92 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -86,7 +86,6 @@ struct netns_ipv6 { #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct netns_nf_frag { - struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; }; #endif diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 316694dafa5b..008f466d1da7 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -87,7 +87,7 @@ struct nfc_hci_pipe { * According to specification 102 622 chapter 4.4 Pipes, * the pipe identifier is 7 bits long. 
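The llc.h hunk above adds llc_sap_hold_safe(), an atomic_inc_not_zero() variant of llc_sap_hold(). A sketch of the lookup pattern this enables: take a reference only if the SAP's refcount has not already dropped to zero. The llc_sap_find_rcu() helper is hypothetical; only llc_sap_hold_safe() and llc_sap_put() come from the header.

    #include <linux/rcupdate.h>
    #include <linux/types.h>
    #include <net/llc.h>

    /* Hedged sketch: under RCU, only keep a SAP whose refcount is still
     * nonzero; otherwise it is already being freed and must be skipped.
     */
    static struct llc_sap *llc_sap_get(u8 sap_value)
    {
            struct llc_sap *sap;

            rcu_read_lock();
            sap = llc_sap_find_rcu(sap_value);   /* hypothetical lookup helper */
            if (sap && !llc_sap_hold_safe(sap))
                    sap = NULL;                  /* lost the race with free */
            rcu_read_unlock();

            return sap;                          /* drop with llc_sap_put() */
    }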
*/ -#define NFC_HCI_MAX_PIPES 127 +#define NFC_HCI_MAX_PIPES 128 struct nfc_hci_init_data { u8 gate_count; struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES]; diff --git a/include/net/regulatory.h b/include/net/regulatory.h index ebc5a2ed8631..f83cacce3308 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -78,7 +78,7 @@ struct regulatory_request { int wiphy_idx; enum nl80211_reg_initiator initiator; enum nl80211_user_reg_hint_type user_reg_hint_type; - char alpha2[2]; + char alpha2[3]; enum nl80211_dfs_regions dfs_region; bool intersect; bool processed; diff --git a/include/net/route.h b/include/net/route.h index 3adb9c724818..11dfd0df0e67 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -64,7 +64,8 @@ struct rtable { __be32 rt_gateway; /* Miscellaneous cached information */ - u32 rt_pmtu; + u32 rt_mtu_locked:1, + rt_pmtu:31; u32 rt_table_id; diff --git a/include/net/tcp.h b/include/net/tcp.h index e4f8cde33789..cbc97fc45ca5 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -391,6 +391,8 @@ extern int tcp_use_userconfig_sysctl_handler(struct ctl_table *, int, extern int tcp_proc_delayed_ack_control(struct ctl_table *, int, void __user *, size_t *, loff_t *); +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); + static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) { @@ -574,6 +576,7 @@ void tcp_send_fin(struct sock *sk); void tcp_send_active_reset(struct sock *sk, gfp_t priority); int tcp_send_synack(struct sock *); void tcp_push_one(struct sock *, unsigned int mss_now); +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt); void tcp_send_ack(struct sock *sk); void tcp_send_delayed_ack(struct sock *sk); void tcp_send_loss_probe(struct sock *sk); @@ -834,8 +837,6 @@ enum tcp_ca_event { CA_EVENT_LOSS, /* loss timeout */ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ - CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */ - CA_EVENT_NON_DELAYED_ACK, }; /* Information about inbound ACK, passed to cong_ops->in_ack_event() */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 120da1d7f57e..10fefb0dc640 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -3007,6 +3007,20 @@ static inline int ib_check_mr_access(int flags) return 0; } +static inline bool ib_access_writable(int access_flags) +{ + /* + * We have writable memory backing the MR if any of the following + * access flags are set. "Local write" and "remote write" obviously + * require write access. "Remote atomic" can do things like fetch and + * add, which will modify memory, and "MW bind" can change permissions + * by binding a window. + */ + return access_flags & + (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND); +} + /** * ib_check_mr_status: lightweight check of MR status. * This routine may provide status checks on a selected diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h index 4fff429dc0b2..85ec8beb3157 100644 --- a/include/soc/qcom/icnss.h +++ b/include/soc/qcom/icnss.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
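The ib_verbs.h hunk above adds ib_access_writable(), which reports whether any write-capable MR access flag is set. A sketch of one way a driver could use it when choosing how to map MR pages; the DMA-direction decision is an example assumption, not taken from the header.

    #include <rdma/ib_verbs.h>

    /* Hedged sketch: pick a DMA direction for MR pages based on whether the
     * requested access flags imply the device may write to them.
     */
    static enum dma_data_direction mr_dma_direction(int access_flags)
    {
            return ib_access_writable(access_flags) ? DMA_BIDIRECTIONAL
                                                    : DMA_TO_DEVICE;
    }

    /* Usage example: a remote-read-only registration needs no writable pages. */
    static bool mr_needs_writable_pages(void)
    {
            return ib_access_writable(IB_ACCESS_REMOTE_READ);   /* false */
    }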
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -23,7 +23,6 @@ #endif enum icnss_uevent { - ICNSS_UEVENT_FW_READY, ICNSS_UEVENT_FW_CRASHED, ICNSS_UEVENT_FW_DOWN, }; @@ -154,6 +153,7 @@ extern int icnss_wlan_get_dfs_nol(void *info, u16 info_len); extern bool icnss_is_qmi_disable(struct device *dev); extern bool icnss_is_fw_ready(void); extern bool icnss_is_fw_down(void); +extern bool icnss_is_rejuvenate(void); extern int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len); extern u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num); extern int icnss_trigger_recovery(struct device *dev); diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h index 9110963d0e9f..6b11533685a5 100644 --- a/include/soc/qcom/socinfo.h +++ b/include/soc/qcom/socinfo.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2009-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -96,6 +96,8 @@ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm660") #define early_machine_is_sda660() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda660") +#define early_machine_is_sdm455() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm455") #define early_machine_is_sdm636() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm636") #define early_machine_is_sda636() \ @@ -146,6 +148,7 @@ #define early_machine_is_msmhamster() 0 #define early_machine_is_sdm660() 0 #define early_machine_is_sda660() 0 +#define early_machine_is_sdm455() 0 #define early_machine_is_sdm636() 0 #define early_machine_is_sda636() 0 #define early_machine_is_sdm658() 0 @@ -211,6 +214,7 @@ enum msm_cpu { MSM_CPU_8998, MSM_CPU_HAMSTER, MSM_CPU_660, + MSM_CPU_455, MSM_CPU_630, MSM_CPU_636, }; diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index 44202ff897fd..f759e0918037 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -99,6 +99,8 @@ struct tegra_mc_soc { u8 client_id_mask; const struct tegra_smmu_soc *smmu; + + u32 intmask; }; struct tegra_mc { diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h index 4545f2cd3826..f04daf310182 100644 --- a/include/sound/q6adm-v2.h +++ b/include/sound/q6adm-v2.h @@ -128,7 +128,7 @@ int adm_pack_and_set_one_pp_param(int port_id, int copp_idx, int adm_open(int port, int path, int rate, int mode, int topology, int perf_mode, uint16_t bits_per_sample, - int app_type, int acdbdev_id); + int app_type, int acdbdev_id, u32 copp_token); int adm_map_rtac_block(struct rtac_cal_block_data *cal_block); diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h index 3523fac586ce..8af49bb6a9fa 100644 --- a/include/sound/q6asm-v2.h +++ b/include/sound/q6asm-v2.h @@ -292,6 +292,9 @@ int q6asm_open_read_v4(struct audio_client *ac, uint32_t format, int q6asm_open_read_v5(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample, bool ts_mode); +int q6asm_open_read_with_retry(struct audio_client *ac, uint32_t format, + uint16_t bits_per_sample, bool ts_mode); + int q6asm_open_write(struct audio_client *ac, uint32_t format /*, uint16_t bits_per_sample*/); @@ -340,6 +343,9 @@ int q6asm_open_read_write_v2(struct audio_client *ac, uint32_t rd_format, int q6asm_open_loopback_v2(struct audio_client *ac, uint16_t bits_per_sample); +int 
q6asm_open_loopback_with_retry(struct audio_client *ac, + uint16_t bits_per_sample); + int q6asm_open_transcode_loopback(struct audio_client *ac, uint16_t bits_per_sample, uint32_t source_format, uint32_t sink_format); diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 073b9ac245ba..e844556794dc 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -125,6 +125,20 @@ DEFINE_EVENT(timer_class, timer_cancel, TP_ARGS(timer) ); +#define decode_clockid(type) \ + __print_symbolic(type, \ + { CLOCK_REALTIME, "CLOCK_REALTIME" }, \ + { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \ + { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \ + { CLOCK_TAI, "CLOCK_TAI" }) + +#define decode_hrtimer_mode(mode) \ + __print_symbolic(mode, \ + { HRTIMER_MODE_ABS, "ABS" }, \ + { HRTIMER_MODE_REL, "REL" }, \ + { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \ + { HRTIMER_MODE_REL_PINNED, "REL|PINNED" }) + /** * hrtimer_init - called when the hrtimer is initialized * @hrtimer: pointer to struct hrtimer @@ -151,10 +165,8 @@ TRACE_EVENT(hrtimer_init, ), TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, - __entry->clockid == CLOCK_REALTIME ? - "CLOCK_REALTIME" : "CLOCK_MONOTONIC", - __entry->mode == HRTIMER_MODE_ABS ? - "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL") + decode_clockid(__entry->clockid), + decode_hrtimer_mode(__entry->mode)) ); /** diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 24c76d138317..e127ac05cdb0 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -639,6 +639,7 @@ struct drm_gem_open { #define DRM_CAP_CURSOR_WIDTH 0x8 #define DRM_CAP_CURSOR_HEIGHT 0x9 #define DRM_CAP_ADDFB2_MODIFIERS 0x10 +#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12 /** DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { @@ -842,7 +843,7 @@ struct drm_event_vblank { __u32 tv_sec; __u32 tv_usec; __u32 sequence; - __u32 reserved; + __u32 crtc_id; /* 0 on older kernels that do not support this */ }; /* typedef area */ diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 30ae8c3c1c85..583ebc4616e9 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -62,6 +62,15 @@ struct drm_msm_timespec { __s64 tv_nsec; /* nanoseconds */ }; +/* From CEA.861.3 */ +#define HDR_EOTF_SMTPE_ST2084 0x2 +#define HDR_EOTF_HLG 0x3 + +/* hdr hdmi state takes possible values of 0, 1 and 2 respectively */ +#define DRM_MSM_HDR_DISABLE 0 +#define DRM_MSM_HDR_ENABLE 1 +#define DRM_MSM_HDR_RESET 2 + /* * HDR Metadata * These are defined as per EDID spec and shall be used by the sink @@ -466,6 +475,7 @@ struct drm_msm_submitqueue_query { #define DRM_MSM_COUNTER_PUT 0x44 #define DRM_MSM_COUNTER_READ 0x45 #define DRM_MSM_GEM_SYNC 0x46 +#define DRM_MSM_RMFB2 0x47 /** * Currently DRM framework supports only VSYNC event. 
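The drm.h hunk above adds DRM_CAP_CRTC_IN_VBLANK_EVENT and turns the reserved field of struct drm_event_vblank into crtc_id (0 on older kernels). A userspace sketch, assuming libdrm's drmGetCap() is available, that checks the capability before trusting crtc_id:

    #include <stdint.h>
    #include <stdio.h>
    #include <xf86drm.h>
    #include <drm/drm.h>

    /* Hedged sketch: only rely on drm_event_vblank.crtc_id when the kernel
     * advertises DRM_CAP_CRTC_IN_VBLANK_EVENT; otherwise the field is 0.
     */
    static void handle_vblank(int fd, const struct drm_event_vblank *ev)
    {
            uint64_t has_crtc_id = 0;

            if (drmGetCap(fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &has_crtc_id) == 0 &&
                has_crtc_id)
                    printf("vblank on crtc %u at %u.%06u\n",
                           ev->crtc_id, ev->tv_sec, ev->tv_usec);
            else
                    printf("vblank (crtc unknown) at %u.%06u\n",
                           ev->tv_sec, ev->tv_usec);
    }

In practice the capability would be queried once at startup rather than per event; it is checked inline here only to keep the sketch self-contained.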
@@ -509,6 +519,8 @@ struct drm_msm_submitqueue_query { #define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY \ DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, \ struct drm_msm_submitqueue_query) +#define DRM_IOCTL_MSM_RMFB2 DRM_IOW((DRM_COMMAND_BASE + \ + DRM_MSM_RMFB2), unsigned int) #if defined(__cplusplus) } diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h index 9ed3a13953ef..5f5ca0345140 100644 --- a/include/uapi/drm/msm_drm_pp.h +++ b/include/uapi/drm/msm_drm_pp.h @@ -52,6 +52,34 @@ struct drm_msm_pa_vlut { __u32 val[PA_VLUT_SIZE]; }; +#define PA_HSIC_HUE_ENABLE (1 << 0) +#define PA_HSIC_SAT_ENABLE (1 << 1) +#define PA_HSIC_VAL_ENABLE (1 << 2) +#define PA_HSIC_CONT_ENABLE (1 << 3) +#define PA_HSIC_LEFT_DISPLAY_ONLY (1 << 4) +#define PA_HSIC_RIGHT_DISPLAY_ONLY (1 << 5) +/** + * struct drm_msm_pa_hsic - pa hsic feature structure + * @flags: flags for the feature customization, values can be: + * - PA_HSIC_HUE_ENABLE: Enable hue adjustment + * - PA_HSIC_SAT_ENABLE: Enable saturation adjustment + * - PA_HSIC_VAL_ENABLE: Enable value adjustment + * - PA_HSIC_CONT_ENABLE: Enable contrast adjustment + * + * @hue: hue setting + * @saturation: saturation setting + * @value: value setting + * @contrast: contrast setting + */ +#define DRM_MSM_PA_HSIC +struct drm_msm_pa_hsic { + __u64 flags; + __u32 hue; + __u32 saturation; + __u32 value; + __u32 contrast; +}; + /* struct drm_msm_memcol - Memory color feature strucuture. * Skin, sky, foliage features are supported. * @prot_flags: Bit mask for enabling protection feature. @@ -79,4 +107,94 @@ struct drm_msm_memcol { __u32 val_region; }; +#define GAMUT_3D_MODE_17 1 +#define GAMUT_3D_MODE_5 2 +#define GAMUT_3D_MODE_13 3 + +#define GAMUT_3D_MODE17_TBL_SZ 1229 +#define GAMUT_3D_MODE5_TBL_SZ 32 +#define GAMUT_3D_MODE13_TBL_SZ 550 +#define GAMUT_3D_SCALE_OFF_SZ 16 +#define GAMUT_3D_SCALEB_OFF_SZ 12 +#define GAMUT_3D_TBL_NUM 4 +#define GAMUT_3D_SCALE_OFF_TBL_NUM 3 +#define GAMUT_3D_MAP_EN (1 << 0) + +/** + * struct drm_msm_3d_col - 3d gamut color component structure + * @c0: Holds c0 value + * @c2_c1: Holds c2/c1 values + */ +struct drm_msm_3d_col { + __u32 c2_c1; + __u32 c0; +}; +/** + * struct drm_msm_3d_gamut - 3d gamut feature structure + * @flags: flags for the feature values are: + * 0 - no map + * GAMUT_3D_MAP_EN - enable map + * @mode: lut mode can take following values: + * - GAMUT_3D_MODE_17 + * - GAMUT_3D_MODE_5 + * - GAMUT_3D_MODE_13 + * @scale_off: Scale offset table + * @col: Color component tables + */ +struct drm_msm_3d_gamut { + __u64 flags; + __u32 mode; + __u32 scale_off[GAMUT_3D_SCALE_OFF_TBL_NUM][GAMUT_3D_SCALE_OFF_SZ]; + struct drm_msm_3d_col col[GAMUT_3D_TBL_NUM][GAMUT_3D_MODE17_TBL_SZ]; +}; + +#define PGC_TBL_LEN 512 +#define PGC_8B_ROUND (1 << 0) +/** + * struct drm_msm_pgc_lut - pgc lut feature structure + * @flags: flags for the featue values can be: + * - PGC_8B_ROUND + * @c0: color0 component lut + * @c1: color1 component lut + * @c2: color2 component lut + */ +struct drm_msm_pgc_lut { + __u64 flags; + __u32 c0[PGC_TBL_LEN]; + __u32 c1[PGC_TBL_LEN]; + __u32 c2[PGC_TBL_LEN]; +}; + + +#define IGC_TBL_LEN 256 +#define IGC_DITHER_ENABLE (1 << 0) +/** + * struct drm_msm_igc_lut - igc lut feature structure + * @flags: flags for the feature customization, values can be: + * - IGC_DITHER_ENABLE: Enable dither functionality + * @c0: color0 component lut + * @c1: color1 component lut + * @c2: color2 component lut + * @strength: dither strength, considered valid when IGC_DITHER_ENABLE + * is set in flags. 
Strength value based on source bit width. + */ +struct drm_msm_igc_lut { + __u64 flags; + __u32 c0[IGC_TBL_LEN]; + __u32 c1[IGC_TBL_LEN]; + __u32 c2[IGC_TBL_LEN]; + __u32 strength; +}; + +#define HIST_V_SIZE 256 +/** + * struct drm_msm_hist - histogram feature structure + * @flags: for customizing operations + * @data: histogram data + */ +struct drm_msm_hist { + __u64 flags; + __u32 data[HIST_V_SIZE]; +}; + #endif /* _MSM_DRM_PP_H_ */ diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index fc9e2d6e5e2f..232367124712 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -60,6 +60,7 @@ struct drm_virtgpu_execbuffer { }; #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ +#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */ struct drm_virtgpu_getparam { uint64_t param; diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 5539933b3491..bd0da0e992b8 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -246,6 +246,15 @@ struct binder_node_debug_info { __u32 has_weak_ref; }; +struct binder_node_info_for_ref { + __u32 handle; + __u32 strong_count; + __u32 weak_count; + __u32 reserved1; + __u32 reserved2; + __u32 reserved3; +}; + #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) @@ -254,6 +263,7 @@ struct binder_node_debug_info { #define BINDER_THREAD_EXIT _IOW('b', 8, __s32) #define BINDER_VERSION _IOWR('b', 9, struct binder_version) #define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) +#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) /* * NOTE: Two special error codes you should check for when calling diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index cd1629170103..08f47e0e9f8d 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -819,13 +819,13 @@ struct ethtool_rx_flow_spec { static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) { return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; -}; +} static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) { return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; -}; +} /** * struct ethtool_rxnfc - command to get or set RX flow classification rules diff --git a/include/uapi/linux/hab_ioctl.h b/include/uapi/linux/hab_ioctl.h index 83b5da236888..70e16433044e 100644 --- a/include/uapi/linux/hab_ioctl.h +++ b/include/uapi/linux/hab_ioctl.h @@ -69,7 +69,6 @@ struct hab_info { }; #define HAB_IOC_TYPE 0x0A -#define HAB_MAX_MSG_SIZEBYTES 0x1000 #define IOCTL_HAB_SEND \ _IOW(HAB_IOC_TYPE, 0x2, struct hab_send) diff --git a/include/uapi/linux/habmmid.h b/include/uapi/linux/habmmid.h index 4f79506dd971..1845c72c9e60 100644 --- a/include/uapi/linux/habmmid.h +++ b/include/uapi/linux/habmmid.h @@ -29,7 +29,8 @@ #define MM_VID_START 500 #define MM_VID 501 -#define MM_VID_END 502 +#define MM_VID_2 502 +#define MM_VID_END 503 #define MM_MISC_START 600 #define MM_MISC 601 @@ -37,10 +38,7 @@ #define MM_QCPE_START 700 #define MM_QCPE_VM1 701 -#define MM_QCPE_VM2 702 -#define MM_QCPE_VM3 703 -#define MM_QCPE_VM4 704 -#define MM_QCPE_END 705 +#define MM_QCPE_END 702 #define MM_CLK_START 800 #define MM_CLK_VM1 801 diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 
a3e44bc9889b..602aca385a27 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -29,6 +29,7 @@ */ #define ETH_ALEN 6 /* Octets in one ethernet addr */ +#define ETH_TLEN 2 /* Octets in ethernet type field */ #define ETH_HLEN 14 /* Total octets in header. */ #define ETH_ZLEN 60 /* Min. octets in frame sans FCS */ #define ETH_DATA_LEN 1500 /* Max. octets in payload */ diff --git a/include/uapi/linux/msm_audio_anc.h b/include/uapi/linux/msm_audio_anc.h index d628f7ce9267..87701fd8ee3a 100644 --- a/include/uapi/linux/msm_audio_anc.h +++ b/include/uapi/linux/msm_audio_anc.h @@ -21,6 +21,8 @@ /* room for ANC_CMD define extend */ #define ANC_CMD_MAX 0xFF +#define ANC_CALIBRATION_PAYLOAD_SIZE_MAX 100 + struct audio_anc_header { int32_t data_size; int32_t version; @@ -35,14 +37,23 @@ struct audio_anc_rpm_info { struct audio_anc_bypass_mode { int32_t mode; }; - struct audio_anc_algo_module_info { int32_t module_id; }; +struct audio_anc_algo_calibration_header { + uint32_t module_id; + uint32_t param_id; + uint32_t payload_size; +}; + +struct audio_anc_algo_calibration_body { + int32_t payload[ANC_CALIBRATION_PAYLOAD_SIZE_MAX]; +}; + struct audio_anc_algo_calibration_info { - int32_t payload_size; - /* num bytes of payload specificed in payload_size followed */ + struct audio_anc_algo_calibration_header cali_header; + struct audio_anc_algo_calibration_body cali_body; }; union audio_anc_data { diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 6a471c955d91..be3c68aaf23c 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -95,7 +95,9 @@ #define IPA_IOCTL_DEL_VLAN_IFACE 53 #define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 54 #define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 55 -#define IPA_IOCTL_MAX 56 +#define IPA_IOCTL_CLEANUP 56 +#define IPA_IOCTL_QUERY_WLAN_CLIENT 57 +#define IPA_IOCTL_MAX 58 /** * max size of the header to be inserted @@ -261,6 +263,11 @@ enum ipa_client_type { ((client) == IPA_CLIENT_APPS_LAN_CONS || \ (client) == IPA_CLIENT_APPS_WAN_CONS) +#define IPA_CLIENT_IS_APPS_PROD(client) \ + ((client) == IPA_CLIENT_APPS_LAN_PROD || \ + (client) == IPA_CLIENT_APPS_WAN_PROD || \ + (client) == IPA_CLIENT_APPS_CMD_PROD) + #define IPA_CLIENT_IS_USB_CONS(client) \ ((client) == IPA_CLIENT_USB_CONS || \ (client) == IPA_CLIENT_USB2_CONS || \ @@ -466,10 +473,15 @@ enum ipa_per_client_stats_event { IPA_PER_CLIENT_STATS_CONNECT_EVENT = IPA_VLAN_L2TP_EVENT_MAX, IPA_PER_CLIENT_STATS_DISCONNECT_EVENT, IPA_PER_CLIENT_STATS_EVENT_MAX, - IPA_EVENT_MAX_NUM = IPA_PER_CLIENT_STATS_EVENT_MAX, }; -#define IPA_EVENT_MAX_NUM ((int)IPA_PER_CLIENT_STATS_EVENT_MAX) +enum ipa_wlan_fw_ssr_event { + WLAN_FWR_SSR_BEFORE_SHUTDOWN = IPA_PER_CLIENT_STATS_EVENT_MAX, + IPA_WLAN_FW_SSR_EVENT_MAX, +#define IPA_WLAN_FW_SSR_EVENT_MAX IPA_WLAN_FW_SSR_EVENT_MAX +}; + +#define IPA_EVENT_MAX_NUM ((int)IPA_WLAN_FW_SSR_EVENT_MAX) #define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM) /** @@ -1907,6 +1919,10 @@ struct ipa_tether_device_info { #define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \ struct ipa_ioc_l2tp_vlan_mapping_info *) +#define IPA_IOC_CLEANUP _IO(IPA_IOC_MAGIC,\ + IPA_IOCTL_CLEANUP) +#define IPA_IOC_QUERY_WLAN_CLIENT _IO(IPA_IOC_MAGIC,\ + IPA_IOCTL_QUERY_WLAN_CLIENT) /* * unique magic number of the Tethering bridge ioctls */ diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h index 13bb8b79359a..005fb8284524 100644 --- a/include/uapi/linux/msm_kgsl.h +++ b/include/uapi/linux/msm_kgsl.h @@ -322,6 
+322,8 @@ enum kgsl_timestamp_type { #define KGSL_PROP_DEVICE_QDSS_STM 0x19 #define KGSL_PROP_DEVICE_QTIMER 0x20 #define KGSL_PROP_IB_TIMEOUT 0x21 +#define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23 +#define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24 struct kgsl_shadowprop { unsigned long gpuaddr; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index dd57d8bba233..03c9d5aef5c7 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2492,7 +2492,7 @@ enum nl80211_attrs { #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS -#define NL80211_WIPHY_NAME_MAXLEN 128 +#define NL80211_WIPHY_NAME_MAXLEN 64 #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_HT_RATES 77 diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index c1af9b3c27c4..f0e9f0460ee7 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -206,4 +206,16 @@ struct prctl_mm_map { #define PR_SET_VMA 0x53564d41 # define PR_SET_VMA_ANON_NAME 0 +/* Per task speculation control */ +#define PR_GET_SPECULATION_CTRL 52 +#define PR_SET_SPECULATION_CTRL 53 +/* Speculation control variants */ +# define PR_SPEC_STORE_BYPASS 0 +/* Return and control values for PR_SET/GET_SPECULATION_CTRL */ +# define PR_SPEC_NOT_AFFECTED 0 +# define PR_SPEC_PRCTL (1UL << 0) +# define PR_SPEC_ENABLE (1UL << 1) +# define PR_SPEC_DISABLE (1UL << 2) +# define PR_SPEC_FORCE_DISABLE (1UL << 3) + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 0f238a43ff1e..e4acb615792b 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -15,7 +15,9 @@ #define SECCOMP_SET_MODE_FILTER 1 /* Valid flags for SECCOMP_SET_MODE_FILTER */ -#define SECCOMP_FILTER_FLAG_TSYNC 1 +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) +/* In v4.14+ SECCOMP_FILTER_FLAG_LOG is (1UL << 1) */ +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) /* * All BPF programs must return a 32-bit value. 
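The prctl.h hunk above adds the per-task speculation control interface (PR_GET/SET_SPECULATION_CTRL with PR_SPEC_STORE_BYPASS), and the seccomp.h hunk adds SECCOMP_FILTER_FLAG_SPEC_ALLOW so a filter load can opt out of the implicit store-bypass disable. A minimal userspace sketch that queries the current task's speculative-store-bypass state and force-disables it when per-task control is available; the fallback defines are only for older userspace headers and mirror the values in the diff.

    #include <errno.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    #define PR_GET_SPECULATION_CTRL 52
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS    0
    #define PR_SPEC_PRCTL           (1UL << 0)
    #define PR_SPEC_FORCE_DISABLE   (1UL << 3)
    #endif

    int main(void)
    {
            long state = prctl(PR_GET_SPECULATION_CTRL,
                               PR_SPEC_STORE_BYPASS, 0, 0, 0);

            if (state < 0) {
                    perror("PR_GET_SPECULATION_CTRL");
                    return 1;
            }

            /* Force-disable SSB for this task and its children if the
             * kernel allows per-task control (PR_SPEC_PRCTL bit set).
             */
            if ((state & PR_SPEC_PRCTL) &&
                prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_FORCE_DISABLE, 0, 0))
                    perror("PR_SET_SPECULATION_CTRL");

            printf("ssb state: 0x%lx\n", state);
            return 0;
    }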
diff --git a/include/uapi/media/ais/msm_ais_isp.h b/include/uapi/media/ais/msm_ais_isp.h index 2b4f0bfeb8c2..649173dd4404 100644 --- a/include/uapi/media/ais/msm_ais_isp.h +++ b/include/uapi/media/ais/msm_ais_isp.h @@ -936,6 +936,13 @@ struct msm_vfe_axi_output_plane_cfg { uint32_t frame_increment; }; +struct msm_vfe_axi_framedrop_update { + enum msm_vfe_axi_stream_src stream_src; + + uint8_t framedrop_period; + uint32_t framedrop_pattern; +}; + struct msm_vfe_axi_output_path_cfg { uint8_t enable; @@ -1003,6 +1010,7 @@ enum msm_isp_ioctl_cmd_code { MSM_ISP_SET_CLK_STATUS, MSM_ISP_CMD_EXT, + MSM_ISP_FRAMEDROP_UPDATE, }; @@ -1130,6 +1138,10 @@ enum msm_isp_ioctl_cmd_code { _IOWR('V', MSM_ISP_AXI_OUTPUT_CFG, \ struct msm_vfe_axi_output_cfg) +#define VIDIOC_MSM_ISP_FRAMEDROP_UPDATE \ + _IOWR('V', MSM_ISP_FRAMEDROP_UPDATE, \ + struct msm_vfe_axi_output_cfg) + #define VIDIOC_MSM_ISP_CAMIF_CFG \ _IOWR('V', MSM_ISP_CAMIF_CFG, \ struct msm_vfe_camif_cfg) diff --git a/include/uapi/media/msm_camera.h b/include/uapi/media/msm_camera.h index fd0937ffb1e5..81e350ede6eb 100644 --- a/include/uapi/media/msm_camera.h +++ b/include/uapi/media/msm_camera.h @@ -263,7 +263,7 @@ struct msm_mctl_post_proc_cmd { #define MSM_CAMERA_STROBE_FLASH_NONE 0 #define MSM_CAMERA_STROBE_FLASH_XENON 1 -#define MSM_MAX_CAMERA_SENSORS 5 +#define MSM_MAX_CAMERA_SENSORS 6 #define MAX_SENSOR_NAME 32 #define MAX_CAM_NAME_SIZE 32 #define MAX_ACT_MOD_NAME_SIZE 32 diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h index ac454ca9a7fc..40731a927ce5 100644 --- a/include/uapi/media/msm_camsensor_sdk.h +++ b/include/uapi/media/msm_camsensor_sdk.h @@ -50,6 +50,8 @@ #define MSM_SENSOR_BYPASS_VIDEO_NODE 1 +#define FRONT_AUX_SENSOR_SUPPORT + enum msm_sensor_camera_id_t { CAMERA_0, CAMERA_1, @@ -70,6 +72,7 @@ enum camb_position_t { BACK_CAMERA_B, FRONT_CAMERA_B, AUX_CAMERA_B = 0x100, + FRONT_AUX_CAMERA_B, INVALID_CAMERA_B, }; diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h index e9370cb660b2..95b6bab382bb 100644 --- a/include/uapi/media/msm_vidc.h +++ b/include/uapi/media/msm_vidc.h @@ -345,6 +345,15 @@ enum msm_vidc_h264_transfer_chars_values { MSM_VIDC_TRANSFER_SRGB = 13, MSM_VIDC_TRANSFER_BT_2020_10 = 14, MSM_VIDC_TRANSFER_BT_2020_12 = 15, +#define MSM_VIDC_TRANSFER_SMPTE_ST2084 \ + MSM_VIDC_TRANSFER_SMPTE_ST2084 + MSM_VIDC_TRANSFER_SMPTE_ST2084 = 16, +#define MSM_VIDC_TRANSFER_SMPTE_ST428_1 \ + MSM_VIDC_TRANSFER_SMPTE_ST428_1 + MSM_VIDC_TRANSFER_SMPTE_ST428_1 = 17, +#define MSM_VIDC_TRANSFER_HLG \ + MSM_VIDC_TRANSFER_HLG + MSM_VIDC_TRANSFER_HLG = 18, }; enum msm_vidc_pixel_depth { diff --git a/include/uapi/media/msmb_camera.h b/include/uapi/media/msmb_camera.h index 47d5a998e139..0a2dd446ccc4 100644 --- a/include/uapi/media/msmb_camera.h +++ b/include/uapi/media/msmb_camera.h @@ -53,7 +53,7 @@ #define MSM_CAMERA_SUBDEV_EXT 19 #define MSM_CAMERA_SUBDEV_TOF 20 #define MSM_CAMERA_SUBDEV_LASER_LED 21 -#define MSM_MAX_CAMERA_SENSORS 5 +#define MSM_MAX_CAMERA_SENSORS 6 /* The below macro is defined to put an upper limit on maximum * number of buffer requested per stream. In case of extremely diff --git a/include/video/msm_dba.h b/include/video/msm_dba.h index 3d20fd8d65eb..8ce2138044c3 100644 --- a/include/video/msm_dba.h +++ b/include/video/msm_dba.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2015,2018, The Linux Foundation. All rights reserved. 
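The msm_ais_isp.h hunk above introduces struct msm_vfe_axi_framedrop_update with a framedrop_period and framedrop_pattern. A small helper showing one plausible reading of those fields, as a bitmask over a repeating window of frames; the exact VFE hardware semantics are an assumption, not something the UAPI header states.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hedged sketch: interpret framedrop_pattern as a bitmask over a window
     * of framedrop_period frames, where a set bit means "deliver this frame".
     * This interpretation is an assumption about the programming model.
     */
    static bool frame_is_delivered(uint32_t pattern, uint8_t period,
                                   uint32_t frame_idx)
    {
            if (period == 0)
                    return true;    /* no drop pattern programmed */

            return (pattern >> (frame_idx % period)) & 1u;
    }

    /* Example: pattern 0x1 with period 4 would keep one frame in every four. */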
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,9 @@
 * @MSM_DBA_CB_POST_RESET: This callback is called after device reset is
 *                         complete and the driver has applied back all the
 *                         properties.
+ * @MSM_DBA_CB_DDC_I2C_ERROR: Detected a failure in DDC block for i2c error.
+ * @MSM_DBA_CB_DDC_TIMEOUT: Detected a failure in DDC block for timed out
+ *                          waiting for downstream receiver.
 *
 * Clients for this driver can register for receiving callbacks for specific
 * events. This enum defines the type of events supported by the driver. An
@@ -71,6 +74,8 @@ enum msm_dba_callback_event {
 	MSM_DBA_CB_CEC_READ_PENDING = BIT(9),
 	MSM_DBA_CB_PRE_RESET = BIT(10),
 	MSM_DBA_CB_POST_RESET = BIT(11),
+	MSM_DBA_CB_DDC_I2C_ERROR = BIT(12),
+	MSM_DBA_CB_DDC_TIMEOUT = BIT(13),
 };

 /**
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index f9466fa54ba4..2ad9a6d37ff4 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -87,7 +87,7 @@ struct dlfb_data {
 #define MIN_RAW_PIX_BYTES 2
 #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)

-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
 #define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */

 /* remove these once align.h patch is taken into kernel */
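The msm_dba.h hunk above adds MSM_DBA_CB_DDC_I2C_ERROR and MSM_DBA_CB_DDC_TIMEOUT to the callback event mask. A sketch of a client callback reacting to the new DDC failure bits; the (void *data, u32 event) prototype and the surrounding registration flow are assumptions about the msm_dba API, which this hunk does not show, and only the event bit values come from the header.

    #include <linux/printk.h>
    #include <linux/types.h>
    #include <video/msm_dba.h>

    /* Hedged sketch: react to the two new DDC failure events in a client
     * callback. A real client would OR these bits into the event mask it
     * passes at registration so it is only notified for events it handles.
     */
    static void dba_client_cb(void *data, u32 event)
    {
            if (event & MSM_DBA_CB_DDC_I2C_ERROR)
                    pr_err("msm_dba: DDC i2c error reported by bridge\n");

            if (event & MSM_DBA_CB_DDC_TIMEOUT)
                    pr_err("msm_dba: DDC timed out waiting for downstream receiver\n");
    }

The udlfb hunk in the same range replaces a raw 5-jiffy deferred-io delay with msecs_to_jiffies(HZ <= 300 ? 4 : 10), which keeps the write delay in the 4-10 ms range regardless of the configured CONFIG_HZ instead of scaling with the tick length.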
