Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/cgroup.h        |  20
-rw-r--r--  include/linux/cpufreq.h       |  52
-rw-r--r--  include/linux/diagchar.h      |  21
-rw-r--r--  include/linux/fs.h            |   5
-rw-r--r--  include/linux/if_vlan.h       |  19
-rw-r--r--  include/linux/interrupt.h     |   7
-rw-r--r--  include/linux/kthread.h       |   4
-rw-r--r--  include/linux/memblock.h      |   8
-rw-r--r--  include/linux/mmzone.h        |   1
-rw-r--r--  include/linux/msm_gsi.h       |  18
-rw-r--r--  include/linux/power_supply.h  |   1
-rw-r--r--  include/linux/ptrace.h        |   7
-rw-r--r--  include/linux/sched.h         |  80
-rw-r--r--  include/linux/sched/sysctl.h  |   1
-rw-r--r--  include/linux/skbuff.h        |   3
-rw-r--r--  include/linux/usb.h           |   2
16 files changed, 230 insertions, 19 deletions
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ad2bcf647b9a..210ccc4ea44b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -340,6 +340,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
 }
 
 /**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline.  In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal.  cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed.  If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
+
+/**
  * css_put - put a css reference
  * @css: target css
  *
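A minimal sketch of how a controller might use the new css_is_dying() to stay synchronous with cgroup removal; some_controller_update() is a hypothetical caller, only css_is_dying() itself comes from the hunk above:

static void some_controller_update(struct cgroup_subsys_state *css)
{
	/*
	 * Offlining is RCU-delayed, so a removed cgroup's css can still
	 * look alive here.  Bail out early so no user-visible state is
	 * changed for a css that is already on its way out.
	 */
	if (css_is_dying(css))
		return;

	/* ... apply the update to a live css ... */
}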
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index fe865e627528..5daa9e78584c 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -107,6 +107,22 @@ struct cpufreq_policy {
 	 */
 	struct rw_semaphore	rwsem;
 
+
+	/*
+	 * Fast switch flags:
+	 * - fast_switch_possible should be set by the driver if it can
+	 *   guarantee that frequency can be changed on any CPU sharing the
+	 *   policy and that the change will affect all of the policy CPUs then.
+	 * - fast_switch_enabled is to be set by governors that support fast
+	 *   frequency switching with the help of cpufreq_enable_fast_switch().
+	 */
+	bool			fast_switch_possible;
+	bool			fast_switch_enabled;
+
+	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
+	unsigned int		cached_target_freq;
+	int			cached_resolved_idx;
+
 	/* Synchronization for frequency transitions */
 	bool			transition_ongoing; /* Tracks transition status */
 	spinlock_t		transition_lock;
@@ -485,6 +501,8 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 				   unsigned int target_freq,
 				   unsigned int relation);
+unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+					 unsigned int target_freq);
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 
@@ -516,8 +534,42 @@ extern struct cpufreq_governor cpufreq_gov_interactive;
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
 extern struct cpufreq_governor cpufreq_gov_sched;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_sched)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL)
+extern struct cpufreq_governor cpufreq_gov_schedutil;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_schedutil)
 #endif
+static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
+{
+	if (policy->max < policy->cur)
+		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+	else if (policy->min > policy->cur)
+		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+}
+
+/* Governor attribute set */
+struct gov_attr_set {
+	struct kobject kobj;
+	struct list_head policy_list;
+	struct mutex update_lock;
+	int usage_count;
+};
+
+/* sysfs ops for cpufreq governors */
+extern const struct sysfs_ops governor_sysfs_ops;
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
+
+/* Governor sysfs attribute */
+struct governor_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
+	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
+			 size_t count);
+};
+
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
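The gov_attr_set/governor_attr pair is the shared sysfs plumbing that governors such as schedutil are built on. A sketch of how a governor might expose one tunable through it; struct my_tunables and rate_limit_us are illustrative stand-ins, not part of this header:

struct my_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

static inline struct my_tunables *to_my_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct my_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	return sprintf(buf, "%u\n", to_my_tunables(attr_set)->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set,
				   const char *buf, size_t count)
{
	struct my_tunables *tunables = to_my_tunables(attr_set);

	if (kstrtouint(buf, 10, &tunables->rate_limit_us))
		return -EINVAL;
	return count;
}

/* governor_attr's show/store fields line up with what __ATTR() fills in. */
static struct governor_attr rate_limit_us =
	__ATTR(rate_limit_us, 0644, rate_limit_us_show, rate_limit_us_store);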
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 21a0917119ce..7c92113e20c3 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -144,7 +144,7 @@ the appropriate macros. */
 
 /* This needs to be modified manually now, when we add
    a new RANGE of SSIDs to the msg_mask_tbl */
-#define MSG_MASK_TBL_CNT		25
+#define MSG_MASK_TBL_CNT		26
 #define APPS_EVENT_LAST_ID		0x0B3F
 
 #define MSG_SSID_0			0
@@ -195,8 +195,10 @@ the appropriate macros. */
 #define MSG_SSID_22_LAST		10377
 #define MSG_SSID_23			10400
 #define MSG_SSID_23_LAST		10416
-#define MSG_SSID_24			0xC000
-#define MSG_SSID_24_LAST		0xC063
+#define MSG_SSID_24			10500
+#define MSG_SSID_24_LAST		10505
+#define MSG_SSID_25			0xC000
+#define MSG_SSID_25_LAST		0xC063
 
 static const uint32_t msg_bld_masks_0[] = {
 	MSG_LVL_LOW,
@@ -857,6 +859,19 @@ static const uint32_t msg_bld_masks_23[] = {
 	MSG_LVL_LOW
 };
 
+static const uint32_t msg_bld_masks_24[] = {
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_25[] = {
+	MSG_LVL_LOW
+};
+
 /* LOG CODES */
 static const uint32_t log_code_last_tbl[] = {
 	0x0,	/* EQUIP ID 0 */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 28c8f7038ae0..4b27be2038e3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -689,6 +689,11 @@ struct inode {
 	void			*i_private; /* fs or device private pointer */
 };
 
+static inline unsigned int i_blocksize(const struct inode *node)
+{
+	return (1 << node->i_blkbits);
+}
+
 static inline int inode_unhashed(struct inode *inode)
 {
 	return hlist_unhashed(&inode->i_hash);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 67ce5bd3b56a..19db03dbbd00 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -616,15 +616,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
 static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
 						    netdev_features_t features)
 {
-	if (skb_vlan_tagged_multi(skb))
-		features = netdev_intersect_features(features,
-						     NETIF_F_SG |
-						     NETIF_F_HIGHDMA |
-						     NETIF_F_FRAGLIST |
-						     NETIF_F_GEN_CSUM |
-						     NETIF_F_HW_VLAN_CTAG_TX |
-						     NETIF_F_HW_VLAN_STAG_TX);
-
+	if (skb_vlan_tagged_multi(skb)) {
+		/* In the case of multi-tagged packets, use a direct mask
+		 * instead of using netdev_intersect_features(), to make
+		 * sure that only devices supporting NETIF_F_HW_CSUM will
+		 * have checksum offloading support.
+		 */
+		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_STAG_TX;
+	}
 	return features;
 }
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index ad16809c8596..b3b1af8a8f8c 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -423,6 +423,12 @@ enum
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirqs whose handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)       | \
+			   (1 << NET_RX_SOFTIRQ)       | \
+			   (1 << BLOCK_SOFTIRQ)        | \
+			   (1 << BLOCK_IOPOLL_SOFTIRQ) | \
+			   (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -458,6 +464,7 @@ extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index e691b6a23f72..4289343ba1f9 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -75,6 +75,8 @@ struct kthread_work {
 	struct list_head	node;
 	kthread_work_func_t	func;
 	struct kthread_worker	*worker;
+	/* Number of canceling calls that are running at the moment. */
+	int			canceling;
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
@@ -129,4 +131,6 @@ bool queue_kthread_work(struct kthread_worker *worker,
 void flush_kthread_work(struct kthread_work *work);
 void flush_kthread_worker(struct kthread_worker *worker);
 
+bool kthread_cancel_work_sync(struct kthread_work *work);
+
 #endif /* _LINUX_KTHREAD_H */
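kthread_cancel_work_sync() fills the role for kthread workers that cancel_work_sync() plays for workqueues; the new ->canceling counter is what lets a cancel in progress block re-queueing. A sketch of the intended teardown pattern, with my_worker/my_work as illustrative names:

static struct kthread_worker my_worker;
static struct kthread_work my_work;

static void my_driver_teardown(void)
{
	/*
	 * Dequeue my_work if it is pending, or wait for it to finish if
	 * it is already running, so the data it touches can be freed.
	 */
	kthread_cancel_work_sync(&my_work);

	/* Drain anything else still queued on the worker. */
	flush_kthread_worker(&my_worker);
}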
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ab93174fa639..d4b56351027b 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -424,12 +424,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
 }
 #endif
 
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+		phys_addr_t end_addr);
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return 0;
 }
 
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+		phys_addr_t end_addr)
+{
+	return 0;
+}
+
 #endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ad7f915ddf76..9d1161a8d6b7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -712,6 +712,7 @@ typedef struct pglist_data {
 	 * is the first PFN that needs to be initialised.
 	 */
 	unsigned long first_deferred_pfn;
+	unsigned long static_init_size;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 } pg_data_t;
 
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index d4b4cc7f8737..b95ea88c2424 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -749,6 +749,18 @@ int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
 		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
 
 /**
+ * gsi_ring_evt_ring_db - Peripheral should call this function for
+ * ringing the event ring doorbell with given value
+ *
+ * @evt_ring_hdl:  Client handle previously obtained from
+ *		   gsi_alloc_evt_ring
+ * @value:	   The value to be used for ringing the doorbell
+ *
+ * @Return gsi_status
+ */
+int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value);
+
+/**
  * gsi_reset_evt_ring - Peripheral should call this function to
  * reset an event ring to recover from error state
  *
@@ -1138,6 +1150,12 @@ static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
 	return -GSI_STATUS_UNSUPPORTED_OP;
 }
 
+static inline int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl,
+		uint64_t value)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
 static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
 {
 	return -GSI_STATUS_UNSUPPORTED_OP;
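The new gsi_ring_evt_ring_db() follows the same client-handle pattern as the rest of the GSI API, with a stub that returns -GSI_STATUS_UNSUPPORTED_OP when GSI support is compiled out. A sketch of a peripheral driver ringing the doorbell after advancing its event ring write pointer; the my_* names are illustrative:

static int my_update_event_ring(unsigned long my_evt_ring_hdl, uint64_t my_wp)
{
	int ret;

	/* Publish the new write pointer to the GSI hardware. */
	ret = gsi_ring_evt_ring_db(my_evt_ring_hdl, my_wp);
	if (ret != GSI_STATUS_SUCCESS)
		pr_err("ringing event ring doorbell failed: %d\n", ret);

	return ret;
}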
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 5cc13e9fbd8f..b584e353306d 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -271,6 +271,7 @@ enum power_supply_type {
 	POWER_SUPPLY_TYPE_USB_HVDCP_3,	/* Efficient High Voltage DCP */
 	POWER_SUPPLY_TYPE_USB_PD,	/* Power Delivery */
 	POWER_SUPPLY_TYPE_WIRELESS,	/* Accessory Charger Adapters */
+	POWER_SUPPLY_TYPE_USB_FLOAT,	/* Floating charger */
 	POWER_SUPPLY_TYPE_BMS,		/* Battery Monitor System */
 	POWER_SUPPLY_TYPE_PARALLEL,	/* Parallel Path */
 	POWER_SUPPLY_TYPE_MAIN,		/* Main Path */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e13bfdf7f314..81fdf4b8aba4 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -50,7 +50,8 @@ extern int ptrace_request(struct task_struct *child, long request,
 			  unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
 extern void __ptrace_link(struct task_struct *child,
-			  struct task_struct *new_parent);
+			  struct task_struct *new_parent,
+			  const struct cred *ptracer_cred);
 extern void __ptrace_unlink(struct task_struct *child);
 extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
 #define PTRACE_MODE_READ	0x01
@@ -202,7 +203,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
 
 	if (unlikely(ptrace) && current->ptrace) {
 		child->ptrace = current->ptrace;
-		__ptrace_link(child, current->parent);
+		__ptrace_link(child, current->parent, current->ptracer_cred);
 
 		if (child->ptrace & PT_SEIZED)
 			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
@@ -211,6 +212,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
 
 		set_tsk_thread_flag(child, TIF_SIGPENDING);
 	}
+	else
+		child->ptracer_cred = NULL;
 }
 
 /**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 138fcf72508a..57042d91ae9c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1072,7 +1072,8 @@ extern void wake_up_q(struct wake_q_head *head);
 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
+#define SD_ASYM_CPUCAPACITY	0x0040  /* Groups have different max cpu capacities */
+#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu capacity */
 #define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
@@ -1133,6 +1134,37 @@ unsigned long capacity_curr_of(int cpu);
 
 struct sched_group;
 
+struct eas_stats {
+	/* select_idle_sibling() stats */
+	u64 sis_attempts;
+	u64 sis_idle;
+	u64 sis_cache_affine;
+	u64 sis_suff_cap;
+	u64 sis_idle_cpu;
+	u64 sis_count;
+
+	/* select_energy_cpu_brute() stats */
+	u64 secb_attempts;
+	u64 secb_sync;
+	u64 secb_idle_bt;
+	u64 secb_insuff_cap;
+	u64 secb_no_nrg_sav;
+	u64 secb_nrg_sav;
+	u64 secb_count;
+
+	/* find_best_target() stats */
+	u64 fbt_attempts;
+	u64 fbt_no_cpu;
+	u64 fbt_no_sd;
+	u64 fbt_pref_idle;
+	u64 fbt_count;
+
+	/* cas */
+	/* select_task_rq_fair() stats */
+	u64 cas_attempts;
+	u64 cas_count;
+};
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -1193,6 +1225,8 @@ struct sched_domain {
 	unsigned int ttwu_wake_remote;
 	unsigned int ttwu_move_affine;
 	unsigned int ttwu_move_balance;
+
+	struct eas_stats eas_stats;
 #endif
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
@@ -1351,6 +1385,35 @@ struct sched_statistics {
 	u64			nr_wakeups_affine_attempts;
 	u64			nr_wakeups_passive;
 	u64			nr_wakeups_idle;
+
+	/* select_idle_sibling() */
+	u64			nr_wakeups_sis_attempts;
+	u64			nr_wakeups_sis_idle;
+	u64			nr_wakeups_sis_cache_affine;
+	u64			nr_wakeups_sis_suff_cap;
+	u64			nr_wakeups_sis_idle_cpu;
+	u64			nr_wakeups_sis_count;
+
+	/* energy_aware_wake_cpu() */
+	u64			nr_wakeups_secb_attempts;
+	u64			nr_wakeups_secb_sync;
+	u64			nr_wakeups_secb_idle_bt;
+	u64			nr_wakeups_secb_insuff_cap;
+	u64			nr_wakeups_secb_no_nrg_sav;
+	u64			nr_wakeups_secb_nrg_sav;
+	u64			nr_wakeups_secb_count;
+
+	/* find_best_target() */
+	u64			nr_wakeups_fbt_attempts;
+	u64			nr_wakeups_fbt_no_cpu;
+	u64			nr_wakeups_fbt_no_sd;
+	u64			nr_wakeups_fbt_pref_idle;
+	u64			nr_wakeups_fbt_count;
+
+	/* cas */
+	/* select_task_rq_fair() */
+	u64			nr_wakeups_cas_attempts;
+	u64			nr_wakeups_cas_count;
 };
 #endif
 
@@ -3512,4 +3575,19 @@ static inline unsigned long rlimit_max(unsigned int limit)
 {
 	return task_rlimit_max(current, limit);
 }
+
+#define SCHED_CPUFREQ_RT	(1U << 0)
+#define SCHED_CPUFREQ_DL	(1U << 1)
+#define SCHED_CPUFREQ_IOWAIT	(1U << 2)
+
+#ifdef CONFIG_CPU_FREQ
+struct update_util_data {
+	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
+};
+
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+		       void (*func)(struct update_util_data *data, u64 time,
+				    unsigned int flags));
+void cpufreq_remove_update_util_hook(int cpu);
+#endif /* CONFIG_CPU_FREQ */
+
 #endif
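The SCHED_CPUFREQ_* flags and update_util hooks are the scheduler-to-cpufreq channel that governors like schedutil consume. A sketch of how a governor might attach to them; the my_* names are illustrative stand-ins:

static DEFINE_PER_CPU(struct update_util_data, my_update_util);

static void my_update_hook(struct update_util_data *data, u64 time,
			   unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		/* A task woke from I/O wait: consider boosting frequency. */
	}
	/*
	 * Called from scheduler paths with the runqueue lock held:
	 * no sleeping, keep the work short.
	 */
}

static void my_governor_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(my_update_util, cpu),
				     my_update_hook);
}

static void my_governor_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
	synchronize_sched(); /* wait for in-flight callbacks to finish */
}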
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index ef8a092251aa..128c4a8c9979 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -39,7 +39,6 @@ extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
-extern unsigned int sysctl_sched_is_big_little;
 extern unsigned int sysctl_sched_sync_hint_enable;
 extern unsigned int sysctl_sched_initial_task_util;
 extern unsigned int sysctl_sched_cstate_aware;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d443d9ab0236..3f61c647fc5c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1084,9 +1084,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 
 static inline void skb_sender_cpu_clear(struct sk_buff *skb)
 {
-#ifdef CONFIG_XPS
-	skb->sender_cpu = 0;
-#endif
 }
 
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a55f127d6836..e8b2ed4ad851 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -330,6 +330,8 @@ struct usb_host_bos {
 	struct usb_ss_cap_descriptor	*ss_cap;
 	struct usb_ssp_cap_descriptor	*ssp_cap;
 	struct usb_ss_container_id_descriptor	*ss_id;
+	struct usb_config_summary_descriptor	*config_summary;
+	unsigned int num_config_summary_desc;
 };
 
 int __usb_get_extra_descriptor(char *buffer, unsigned size,
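The two new usb_host_bos fields form a pointer/count pair, so a host stack that has parsed the BOS would walk them roughly as below. The descriptor layout itself is vendor-specific and defined elsewhere; dump_config_summaries() is purely illustrative:

static void dump_config_summaries(struct usb_device *udev)
{
	struct usb_host_bos *bos = udev->bos;
	unsigned int i;

	if (!bos || !bos->config_summary)
		return;

	for (i = 0; i < bos->num_config_summary_desc; i++)
		dev_dbg(&udev->dev, "config summary descriptor %u\n", i);
}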
