From ee8209fd026b074bb8eb75bece516a338a281b1b Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 8 May 2013 11:55:48 +0300 Subject: dma: acpi-dma: parse CSRT to extract additional resources Since we have CSRT only to get additional DMA controller resources, let's get rid of drivers/acpi/csrt.c and move its logic inside ACPI DMA helpers code. Signed-off-by: Andy Shevchenko Signed-off-by: Mika Westerberg Acked-by: Rafael J. Wysocki Signed-off-by: Vinod Koul --- include/linux/acpi_dma.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h index d09deabc7bf6..fb0298082916 100644 --- a/include/linux/acpi_dma.h +++ b/include/linux/acpi_dma.h @@ -37,6 +37,8 @@ struct acpi_dma_spec { * @dev: struct device of this controller * @acpi_dma_xlate: callback function to find a suitable channel * @data: private data used by a callback function + * @base_request_line: first supported request line (CSRT) + * @end_request_line: last supported request line (CSRT) */ struct acpi_dma { struct list_head dma_controllers; @@ -44,6 +46,8 @@ struct acpi_dma { struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *); void *data; + unsigned short base_request_line; + unsigned short end_request_line; }; /* Used with acpi_dma_simple_xlate() */ -- cgit v1.2.3 From b59cc200ac025aca597fb21862c1c9e667f2eff2 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 8 May 2013 11:55:49 +0300 Subject: ACPI / LPSS: register clock device for Lynxpoint DMA properly The DMA controller in Lynxpoint is enumerated as a regular ACPI device now. To work properly it is using the LPSS root clock as a functional clock. That's why we have to register the clock device accordingly to the ACPI ID of the DMA controller. The acpi_lpss.c module is responsible to do the job. This patch also removes hardcoded name of the DMA device in clk-lpt.c and the name of the root clock in acpi_lpss.c. Signed-off-by: Rafael J. Wysocki Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- include/linux/platform_data/clk-lpss.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/clk-lpss.h index 528e73ce46d2..23901992b9dd 100644 --- a/include/linux/platform_data/clk-lpss.h +++ b/include/linux/platform_data/clk-lpss.h @@ -13,6 +13,11 @@ #ifndef __CLK_LPSS_H #define __CLK_LPSS_H +struct lpss_clk_data { + const char *name; + struct clk *clk; +}; + extern int lpt_clk_init(void); #endif /* __CLK_LPSS_H */ -- cgit v1.2.3 From 755ccb9d577b95b43537cfeb36da59140412f858 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 15 May 2013 08:00:25 +0000 Subject: broadcom: add include guards to include/linux/brcmphy.h include/linux/brcmphy.h is currently not protected against double inclusion, add ifdefs guard to fix that. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/linux/brcmphy.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index b840a4960282..677b4f01b2d0 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -1,3 +1,6 @@ +#ifndef _LINUX_BRCMPHY_H +#define _LINUX_BRCMPHY_H + #define PHY_ID_BCM50610 0x0143bd60 #define PHY_ID_BCM50610M 0x0143bd70 #define PHY_ID_BCM5241 0x0143bc30 @@ -29,3 +32,5 @@ #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 #define PHY_BCM_FLAGS_VALID 0x80000000 + +#endif /* _LINUX_BRCMPHY_H */ -- cgit v1.2.3 From b0ce3508b25ea6fa10ae3ca254de1d695b521702 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 May 2013 07:34:53 +0000 Subject: bonding: allow TSO being set on bonding master MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some situations, we need to disable TSO on bonding slaves. bonding device automatically unset TSO in bond_fix_features(), and performance is not good because : 1) We consume more cpu cycles. 2) GSO segmentation has some bugs leading to out of order TCP packets if this segmentation is done before virtual device. This particular problem will be addressed in a separate patch. This patch allows TSO being set/unset on the bonding master, so that GSO segmentation is done after bonding layer. Signed-off-by: Eric Dumazet Cc: Michał Mirosław Cc: Jay Vosburgh Cc: Andy Gospodarek Cc: Maciej Żenczykowski Cc: Tom Herbert Cc: Neal Cardwell Cc: Yuchung Cheng Signed-off-by: David S. Miller --- include/linux/netdevice.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a94a5a0ab122..60584b185a0c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2733,6 +2733,17 @@ static inline netdev_features_t netdev_get_wanted_features( } netdev_features_t netdev_increment_features(netdev_features_t all, netdev_features_t one, netdev_features_t mask); + +/* Allow TSO being used on stacked device : + * Performing the GSO segmentation before last device + * is a performance improvement. + */ +static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, + netdev_features_t mask) +{ + return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); +} + int __netdev_update_features(struct net_device *dev); void netdev_update_features(struct net_device *dev); void netdev_change_features(struct net_device *dev); -- cgit v1.2.3 From d4988d4c733ba0b61cb372edd3d1992d26dd10d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Thu, 9 May 2013 21:24:24 +0200 Subject: bcma: add more core IDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PCIe and ARM CR4 cores were found on 14e4:43b1 AKA BCM4352. Reported-by: Gabriel Thörnblad Signed-off-by: Rafał Miłecki Signed-off-by: John W. 
Linville --- include/linux/bcma/bcma.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index f14a98a79c9d..2e34db82a643 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -134,7 +134,10 @@ struct bcma_host_ops { #define BCMA_CORE_I2S 0x834 #define BCMA_CORE_SDR_DDR1_MEM_CTL 0x835 /* SDR/DDR1 memory controller core */ #define BCMA_CORE_SHIM 0x837 /* SHIM component in ubus/6362 */ -#define BCMA_CORE_ARM_CR4 0x83e +#define BCMA_CORE_PHY_AC 0x83B +#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ +#define BCMA_CORE_USB30_DEV 0x83D +#define BCMA_CORE_ARM_CR4 0x83E #define BCMA_CORE_DEFAULT 0xFFF #define BCMA_MAX_NR_CORES 16 -- cgit v1.2.3 From 7f18d05a1af75142d4856a91ffd2f1d8eb553c12 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 15 May 2013 20:18:41 +0530 Subject: SERIAL: OMAP: Remove the slave idle handling from the driver UART IP slave idle handling now taken care by runtime pm backend(hwmod layer) so remove the hackery from the driver. As discussed on the list, in future if dma mode needs to be brought back to this driver, UART sysc handling needs to be updated in framework such a way that no-idle/force idle profile can be supported. Given the broken dma mode for OMAP uarts, its very unlikely. Acked-by: Greg Kroah-Hartman Tested-by: Vaibhav Bedia Tested-by: Sourav Poddar Signed-off-by: Rajendra nayak Signed-off-by: Santosh Shilimkar Reviewed-by: Kevin Hilman Tested-by: Kevin Hilman # OMAP4/Panda Signed-off-by: Paul Walmsley --- include/linux/platform_data/serial-omap.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h index ff9b0aab5281..c860c1b314c0 100644 --- a/include/linux/platform_data/serial-omap.h +++ b/include/linux/platform_data/serial-omap.h @@ -43,8 +43,6 @@ struct omap_uart_port_info { int DTR_present; int (*get_context_loss_count)(struct device *); - void (*set_forceidle)(struct device *); - void (*set_noidle)(struct device *); void (*enable_wakeup)(struct device *, bool); }; -- cgit v1.2.3 From 2a7851bffb008ff4882eee673da74718997b4265 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 17 May 2013 03:56:10 +0000 Subject: netfilter: add nf_ipv6_ops hook to fix xt_addrtype with IPv6 Quoting https://bugzilla.netfilter.org/show_bug.cgi?id=812: [ ip6tables -m addrtype ] When I tried to use in the nat/PREROUTING it messes up the routing cache even if the rule didn't matched at all. [..] If I remove the --limit-iface-in from the non-working scenario, so just use the -m addrtype --dst-type LOCAL it works! This happens when LOCAL type matching is requested with --limit-iface-in, and the default ipv6 route is via the interface the packet we test arrived on. Because xt_addrtype uses ip6_route_output, the ipv6 routing implementation creates an unwanted cached entry, and the packet won't make it to the real/expected destination. Silently ignoring --limit-iface-in makes the routing work but it breaks rule matching (--dst-type LOCAL with limit-iface-in is supposed to only match if the dst address is configured on the incoming interface; without --limit-iface-in it will match if the address is reachable via lo). The test should call ipv6_chk_addr() instead. However, this would add a link-time dependency on ipv6. 
There are two possible solutions: 1) Revert the commit that moved ipt_addrtype to xt_addrtype, and put ipv6 specific code into ip6t_addrtype. 2) add new "nf_ipv6_ops" struct to register pointers to ipv6 functions. While the former might seem preferable, Pablo pointed out that there are more xt modules with link-time dependeny issues regarding ipv6, so lets go for 2). Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_ipv6.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 98ffb54988b6..2d4df6ce043e 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -17,6 +17,22 @@ extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, extern int ipv6_netfilter_init(void); extern void ipv6_netfilter_fini(void); + +/* + * Hook functions for ipv6 to allow xt_* modules to be built-in even + * if IPv6 is a module. + */ +struct nf_ipv6_ops { + int (*chk_addr)(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, int strict); +}; + +extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; +static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) +{ + return rcu_dereference(nf_ipv6_ops); +} + #else /* CONFIG_NETFILTER */ static inline int ipv6_netfilter_init(void) { return 0; } static inline void ipv6_netfilter_fini(void) { return; } -- cgit v1.2.3 From 7805d000db30a3787a4c969bab6ae4d8a5fd8ce6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 24 May 2013 10:50:24 +0900 Subject: cgroup: fix a subtle bug in descendant pre-order walk When cgroup_next_descendant_pre() initiates a walk, it checks whether the subtree root doesn't have any children and if not returns NULL. Later code assumes that the subtree isn't empty. This is broken because the subtree may become empty inbetween, which can lead to the traversal escaping the subtree by walking to the sibling of the subtree root. There's no reason to have the early exit path. Remove it along with the later assumption that the subtree isn't empty. This simplifies the code a bit and fixes the subtle bug. While at it, fix the comment of cgroup_for_each_descendant_pre() which was incorrectly referring to ->css_offline() instead of ->css_online(). Signed-off-by: Tejun Heo Reviewed-by: Michal Hocko Cc: stable@vger.kernel.org --- include/linux/cgroup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 5047355b9a0f..8bda1294c035 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -707,7 +707,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos); * * If a subsystem synchronizes against the parent in its ->css_online() and * before starting iterating, and synchronizes against @pos on each - * iteration, any descendant cgroup which finished ->css_offline() is + * iteration, any descendant cgroup which finished ->css_online() is * guaranteed to be visible in the future iterations. * * In other words, the following guarantees that a descendant can't escape -- cgit v1.2.3 From a11650e11093ed57dca78bf16e7836517c599098 Mon Sep 17 00:00:00 2001 From: Alexandre Bounine Date: Fri, 24 May 2013 15:55:05 -0700 Subject: rapidio: make enumeration/discovery configurable Systems that use RapidIO fabric may need to implement their own enumeration and discovery methods which are better suitable for needs of a target application. 
The following set of patches is intended to simplify process of introduction of new RapidIO fabric enumeration/discovery methods. The first patch offers ability to add new RapidIO enumeration/discovery methods using kernel configuration options. This new configuration option mechanism allows to select statically linked or modular enumeration/discovery method(s) from the list of existing methods or use external module(s). This patch also updates the currently existing enumeration/discovery code to be used as a statically linked or modular method. The corresponding configuration option is named "Basic enumeration/discovery" method. This is the only one configuration option available today but new methods are expected to be introduced after adoption of provided patches. The second patch address a long time complaint of RapidIO subsystem users regarding fabric enumeration/discovery start sequence. Existing implementation offers only a boot-time enumeration/discovery start which requires synchronized boot of all endpoints in RapidIO network. While it works for small closed configurations with limited number of endpoints, using this approach in systems with large number of endpoints is quite challenging. To eliminate requirement for synchronized start the second patch introduces RapidIO enumeration/discovery start from user space. For compatibility with the existing RapidIO subsystem implementation, automatic boot time enumeration/discovery start can be configured in by specifying "rio-scan.scan=1" command line parameter if statically linked basic enumeration method is selected. This patch: Rework to implement RapidIO enumeration/discovery method selection combined with ability to use enumeration/discovery as a kernel module. This patch adds ability to introduce new RapidIO enumeration/discovery methods using kernel configuration options. Configuration option mechanism allows to select statically linked or modular enumeration/discovery method from the list of existing methods or use external modules. If a modular enumeration/discovery is selected each RapidIO mport device can have its own method attached to it. The existing enumeration/discovery code was updated to be used as statically linked or modular method. This configuration option is named "Basic enumeration/discovery" method. Several common routines have been moved from rio-scan.c to make them available to other enumeration methods and reduce number of exported symbols. 
Signed-off-by: Alexandre Bounine Cc: Matt Porter Cc: Li Yang Cc: Kumar Gala Cc: Andre van Herk Cc: Micha Nelissen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rio.h | 13 ++++++++++++- include/linux/rio_drv.h | 1 + 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rio.h b/include/linux/rio.h index a3e784278667..cd3796ee7410 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -83,7 +83,6 @@ extern struct bus_type rio_bus_type; extern struct device rio_bus; -extern struct list_head rio_devices; /* list of all devices */ struct rio_mport; struct rio_dev; @@ -237,6 +236,7 @@ enum rio_phy_type { * @name: Port name string * @priv: Master port private data * @dma: DMA device associated with mport + * @nscan: RapidIO network enumeration/discovery operations */ struct rio_mport { struct list_head dbells; /* list of doorbell events */ @@ -262,6 +262,7 @@ struct rio_mport { #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct dma_device dma; #endif + struct rio_scan *nscan; }; struct rio_id_table { @@ -460,6 +461,16 @@ static inline struct rio_mport *dma_to_mport(struct dma_device *ddev) } #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ +/** + * struct rio_scan - RIO enumeration and discovery operations + * @enumerate: Callback to perform RapidIO fabric enumeration. + * @discover: Callback to perform RapidIO fabric discovery. + */ +struct rio_scan { + int (*enumerate)(struct rio_mport *mport); + int (*discover)(struct rio_mport *mport); +}; + /* Architecture and hardware-specific functions */ extern int rio_register_mport(struct rio_mport *); extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h index b75c05920ab5..5059994fe297 100644 --- a/include/linux/rio_drv.h +++ b/include/linux/rio_drv.h @@ -433,5 +433,6 @@ extern u16 rio_local_get_device_id(struct rio_mport *port); extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did, struct rio_dev *from); +extern int rio_init_mports(void); #endif /* LINUX_RIO_DRV_H */ -- cgit v1.2.3 From bc8fcfea18249640f2728c46d70999dcb7e4dc49 Mon Sep 17 00:00:00 2001 From: Alexandre Bounine Date: Fri, 24 May 2013 15:55:06 -0700 Subject: rapidio: add enumeration/discovery start from user space Add RapidIO enumeration/discovery start from user space. User space start allows to defer RapidIO fabric scan until the moment when all participating endpoints are initialized avoiding mandatory synchronized start of all endpoints (which may be challenging in systems with large number of RapidIO endpoints). 
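(For illustration only: a minimal sketch of what an alternative enumeration/discovery method's callbacks might look like once this series is applied, using the struct rio_scan and the RIO_SCAN_ENUM_NO_WAIT flag shown in the diffs around this message. The names and callback bodies are hypothetical, and attaching the method to an mport is outside the include/linux changes shown here.)

#include <linux/rio.h>
#include <linux/delay.h>

static int my_enumerate(struct rio_mport *mport, u32 flags)
{
	/* walk the fabric from this mport, assign destids, program routes */
	return 0;
}

static int my_discover(struct rio_mport *mport, u32 flags)
{
	/*
	 * A discovering endpoint would normally wait here for the remote
	 * enumerator to finish before reading back the configuration;
	 * RIO_SCAN_ENUM_NO_WAIT asks it not to block for that.
	 */
	if (!(flags & RIO_SCAN_ENUM_NO_WAIT))
		msleep(100);	/* placeholder for a real completion wait */
	/* ... read back destids, routes, etc. set up by the enumerator ... */
	return 0;
}

/* handed to the subsystem's registration hook (not part of these header diffs) */
static struct rio_scan my_scan_ops = {
	.enumerate = my_enumerate,
	.discover  = my_discover,
};
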
Signed-off-by: Alexandre Bounine Cc: Matt Porter Cc: Li Yang Cc: Kumar Gala Cc: Andre van Herk Cc: Micha Nelissen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rio.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rio.h b/include/linux/rio.h index cd3796ee7410..18e099342e6f 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -265,6 +265,11 @@ struct rio_mport { struct rio_scan *nscan; }; +/* + * Enumeration/discovery control flags + */ +#define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */ + struct rio_id_table { u16 start; /* logical minimal id */ u32 max; /* max number of IDs in table */ @@ -467,8 +472,8 @@ static inline struct rio_mport *dma_to_mport(struct dma_device *ddev) * @discover: Callback to perform RapidIO fabric discovery. */ struct rio_scan { - int (*enumerate)(struct rio_mport *mport); - int (*discover)(struct rio_mport *mport); + int (*enumerate)(struct rio_mport *mport, u32 flags); + int (*discover)(struct rio_mport *mport, u32 flags); }; /* Architecture and hardware-specific functions */ -- cgit v1.2.3 From 4c663cfc523a88d97a8309b04a089c27dc57fd7e Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 24 May 2013 15:55:09 -0700 Subject: wait: fix false timeouts when using wait_event_timeout() Many callers of the wait_event_timeout() and wait_event_interruptible_timeout() expect that the return value will be positive if the specified condition becomes true before the timeout elapses. However, at the moment this isn't guaranteed. If the wake-up handler is delayed enough, the time remaining until timeout will be calculated as 0 - and passed back as a return value - even if the condition became true before the timeout has passed. Fix this by returning at least 1 if the condition becomes true. This semantic is in line with what wait_for_condition_timeout() does; see commit bb10ed09 ("sched: fix wait_for_completion_timeout() spurious failure under heavy load"). Daniel said "We have 3 instances of this bug in drm/i915. One case even where we switch between the interruptible and not interruptible wait_event_timeout variants, foolishly presuming they have the same semantics. I very much like this." One such bug is reported at https://bugs.freedesktop.org/show_bug.cgi?id=64133 Signed-off-by: Imre Deak Acked-by: Daniel Vetter Acked-by: David Howells Acked-by: Jens Axboe Cc: "Paul E. McKenney" Cc: Dave Jones Cc: Lukas Czerner Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/wait.h | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/wait.h b/include/linux/wait.h index ac38be2692d8..1133695eb067 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -217,6 +217,8 @@ do { \ if (!ret) \ break; \ } \ + if (!ret && (condition)) \ + ret = 1; \ finish_wait(&wq, &__wait); \ } while (0) @@ -233,8 +235,9 @@ do { \ * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * - * The function returns 0 if the @timeout elapsed, and the remaining - * jiffies if the condition evaluated to true before the timeout elapsed. + * The function returns 0 if the @timeout elapsed, or the remaining + * jiffies (at least 1) if the @condition evaluated to %true before + * the @timeout elapsed. 
*/ #define wait_event_timeout(wq, condition, timeout) \ ({ \ @@ -302,6 +305,8 @@ do { \ ret = -ERESTARTSYS; \ break; \ } \ + if (!ret && (condition)) \ + ret = 1; \ finish_wait(&wq, &__wait); \ } while (0) @@ -318,9 +323,10 @@ do { \ * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * - * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it - * was interrupted by a signal, and the remaining jiffies otherwise - * if the condition evaluated to true before the timeout elapsed. + * Returns: + * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by + * a signal, or the remaining jiffies (at least 1) if the @condition + * evaluated to %true before the @timeout elapsed. */ #define wait_event_interruptible_timeout(wq, condition, timeout) \ ({ \ -- cgit v1.2.3 From 7450231fb35492951e78a91b833fd935171f4e66 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 24 May 2013 15:55:20 -0700 Subject: linux/kernel.h: fix kernel-doc warning Fix kernel-doc warning in : Warning(include/linux/kernel.h:590): No description found for parameter 'ip' scripts/kernel-doc cannot handle macros, functions, or function prototypes between the function or macro that is being documented and its definition, so move these prototypes above the function that is being documented. Signed-off-by: Randy Dunlap Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e96329ceb28c..e9ef6d6b51d5 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -562,6 +562,9 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...); extern __printf(2, 3) int __trace_printk(unsigned long ip, const char *fmt, ...); +extern int __trace_bputs(unsigned long ip, const char *str); +extern int __trace_puts(unsigned long ip, const char *str, int size); + /** * trace_puts - write a string into the ftrace buffer * @str: the string to record @@ -587,8 +590,6 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) */ -extern int __trace_bputs(unsigned long ip, const char *str); -extern int __trace_puts(unsigned long ip, const char *str, int size); #define trace_puts(str) ({ \ static const char *trace_printk_fmt \ __attribute__((section("__trace_printk_fmt"))) = \ -- cgit v1.2.3 From 26cb63ad11e04047a64309362674bcbbd6a6f246 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 28 May 2013 10:55:48 +0200 Subject: perf: Fix perf mmap bugs Vince reported a problem found by his perf specific trinity fuzzer. Al noticed 2 problems with perf's mmap(): - it has issues against fork() since we use vma->vm_mm for accounting. - it has an rb refcount leak on double mmap(). We fix the issues against fork() by using VM_DONTCOPY; I don't think there's code out there that uses this; we didn't hear about weird accounting problems/crashes. If we do need this to work, the previously proposed VM_PINNED could make this work. Aside from the rb reference leak spotted by Al, Vince's example prog was indeed doing a double mmap() through the use of perf_event_set_output(). This exposes another problem, since we now have 2 events with one buffer, the accounting gets screwy because we account per event. Fix this by making the buffer responsible for its own accounting. 
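(For illustration only: a rough userspace sketch of the double-mmap pattern described above, where two events end up sharing one ring buffer via PERF_EVENT_IOC_SET_OUTPUT. This is a hypothetical reconstruction of the scenario, not the actual fuzzer or test program, and error handling is omitted.)

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_open(struct perf_event_attr *attr)
{
	/* monitor the calling process on any CPU */
	return syscall(__NR_perf_event_open, attr, 0, -1, -1, 0);
}

int main(void)
{
	struct perf_event_attr attr;
	size_t len = 9 * sysconf(_SC_PAGESIZE);	/* 1 header page + 8 data pages */
	int fd1, fd2;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	fd1 = perf_open(&attr);
	fd2 = perf_open(&attr);

	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0);
	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1);	/* fd2 logs into fd1's buffer */
	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, 0);

	/* two events, one ring buffer: the old per-event mmap accounting got confused here */
	return 0;
}
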
Reported-by: Vince Weaver Signed-off-by: Peter Zijlstra Cc: Al Viro Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Link: http://lkml.kernel.org/r/20130528085548.GA12193@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f463a46424e2..c5b6dbf9c2fc 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -389,8 +389,7 @@ struct perf_event { /* mmap bits */ struct mutex mmap_mutex; atomic_t mmap_count; - int mmap_locked; - struct user_struct *mmap_user; + struct ring_buffer *rb; struct list_head rb_entry; -- cgit v1.2.3 From 12bcbe66d7b3cc9f9f86cd02f925666eaa3c2107 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 28 May 2013 14:38:42 -0400 Subject: rcu: Add _notrace variation of rcu_dereference_raw() and hlist_for_each_entry_rcu() As rcu_dereference_raw() under RCU debug config options can add quite a bit of checks, and that tracing uses rcu_dereference_raw(), these checks happen with the function tracer. The function tracer also happens to trace these debug checks too. This added overhead can livelock the system. Add a new interface to RCU for both rcu_dereference_raw_notrace() as well as hlist_for_each_entry_rcu_notrace() as the hlist iterator uses the rcu_dereference_raw() as well, and is used a bit with the function tracer. Link: http://lkml.kernel.org/r/20130528184209.304356745@goodmis.org Acked-by: Paul E. McKenney Signed-off-by: Steven Rostedt --- include/linux/rculist.h | 20 ++++++++++++++++++++ include/linux/rcupdate.h | 9 +++++++++ 2 files changed, 29 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 8089e35d47ac..f4b1001a4676 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -460,6 +460,26 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) +/** + * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing) + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + * + * This is the same as hlist_for_each_entry_rcu() except that it does + * not do any RCU debugging or tracing. + */ +#define hlist_for_each_entry_rcu_notrace(pos, head, member) \ + for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\ + typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\ + &(pos)->member)), typeof(*(pos)), member)) + /** * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 4ccd68e49b00..ddcc7826d907 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -640,6 +640,15 @@ static inline void rcu_preempt_sleep_check(void) #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ +/* + * The tracing infrastructure traces RCU (we want that), but unfortunately + * some of the RCU checks causes tracing to lock up the system. 
+ * + * The tracing version of rcu_dereference_raw() must not call + * rcu_read_lock_held(). + */ +#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) + /** * rcu_access_index() - fetch RCU index with no dereferencing * @p: The index to read -- cgit v1.2.3 From ac4e97abce9b80c020e7113325f49e58b7b15e3f Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 30 May 2013 09:19:35 +0200 Subject: scatterlist: sg_set_buf() argument must be in linear mapping Add a check behind CONFIG_DEBUG_SG to verify this. Signed-off-by: Rusty Russell Signed-off-by: Jens Axboe --- include/linux/scatterlist.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 5951e3f38878..26806775b11b 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -111,6 +111,9 @@ static inline struct page *sg_page(struct scatterlist *sg) static inline void sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen) { +#ifdef CONFIG_DEBUG_SG + BUG_ON(!virt_addr_valid(buf)); +#endif sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); } -- cgit v1.2.3 From 37448adfc7ce0d6d5892b87aa8d57edde4126f49 Mon Sep 17 00:00:00 2001 From: Lance Ortiz Date: Thu, 30 May 2013 08:25:12 -0600 Subject: aerdrv: Move cper_print_aer() call out of interrupt context The following warning was seen on 3.9 when a corrected PCIe error was being handled by the AER subsystem. WARNING: at .../drivers/pci/search.c:214 pci_get_dev_by_id+0x8a/0x90() This occurred because a call to pci_get_domain_bus_and_slot() was added to cper_print_pcie() to setup for the call to cper_print_aer(). The warning showed up because cper_print_pcie() is called in an interrupt context and pci_get* functions are not supposed to be called in that context. The solution is to move the cper_print_aer() call out of the interrupt context and into aer_recover_work_func() to avoid any warnings when calling pci_get* functions. Signed-off-by: Lance Ortiz Acked-by: Borislav Petkov Acked-by: Rafael J. Wysocki Signed-off-by: Tony Luck --- include/linux/aer.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/aer.h b/include/linux/aer.h index ec10e1b24c1c..737f90ab4b62 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -49,10 +49,11 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) } #endif -extern void cper_print_aer(const char *prefix, struct pci_dev *dev, +extern void cper_print_aer(struct pci_dev *dev, int cper_severity, struct aer_capability_regs *aer); extern int cper_severity_to_aer(int cper_severity); extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, - int severity); + int severity, + struct aer_capability_regs *aer_regs); #endif //_AER_H_ -- cgit v1.2.3 From 45eacc692771bd2b1ea3d384e6345cab3da10861 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 15 May 2013 22:16:32 +0200 Subject: vtime: Use consistent clocks among nohz accounting While computing the cputime delta of dynticks CPUs, we are mixing up clocks of differents natures: * local_clock() which takes care of unstable clock sources and fix these if needed. * sched_clock() which is the weaker version of local_clock(). It doesn't compute any fixup in case of unstable source. If the clock source is stable, those two clocks are the same and we can safely compute the difference against two random points. 
Otherwise it results in random deltas as sched_clock() can randomly drift away, back or forward, from local_clock(). As a consequence, some strange behaviour with unstable tsc has been observed such as non progressing constant zero cputime. (The 'top' command showing no load). Fix this by only using local_clock(), or its irq safe/remote equivalent, in vtime code. Reported-by: Mike Galbraith Suggested-by: Mike Galbraith Cc: Steven Rostedt Cc: Paul E. McKenney Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Borislav Petkov Cc: Li Zhong Cc: Mike Galbraith Signed-off-by: Frederic Weisbecker Signed-off-by: Ingo Molnar --- include/linux/vtime.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 71a5782d8c59..b1dd2db80076 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -34,7 +34,7 @@ static inline void vtime_user_exit(struct task_struct *tsk) } extern void vtime_guest_enter(struct task_struct *tsk); extern void vtime_guest_exit(struct task_struct *tsk); -extern void vtime_init_idle(struct task_struct *tsk); +extern void vtime_init_idle(struct task_struct *tsk, int cpu); #else static inline void vtime_account_irq_exit(struct task_struct *tsk) { @@ -45,7 +45,7 @@ static inline void vtime_user_enter(struct task_struct *tsk) { } static inline void vtime_user_exit(struct task_struct *tsk) { } static inline void vtime_guest_enter(struct task_struct *tsk) { } static inline void vtime_guest_exit(struct task_struct *tsk) { } -static inline void vtime_init_idle(struct task_struct *tsk) { } +static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING -- cgit v1.2.3 From 521921bad1192fb1b8f9b6a5aa673635848b8b5f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 16 May 2013 01:21:38 +0200 Subject: kvm: Move guest entry/exit APIs to context_tracking The kvm_host.h header file doesn't handle well inclusion when archs don't support KVM. This results in build crashes for such archs when they want to implement context tracking because this subsystem includes kvm_host.h in order to implement the guest_enter/exit APIs but it doesn't handle KVM off case. To fix this, move the guest_enter()/guest_exit() declarations and generic implementation to the context tracking headers. These generic APIs actually belong to this subsystem, besides other domains boundary tracking like user_enter() et al. KVM now properly becomes a user of this library, not the other buggy way around. Reported-by: Kevin Hilman Reviewed-by: Kevin Hilman Tested-by: Kevin Hilman Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Paul E. 
McKenney Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Kevin Hilman Cc: Marcelo Tosatti Cc: Gleb Natapov Signed-off-by: Ingo Molnar --- include/linux/context_tracking.h | 35 +++++++++++++++++++++++++++++++++++ include/linux/kvm_host.h | 37 +------------------------------------ 2 files changed, 36 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 365f4a61bf04..fc09d7b0dacf 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -3,6 +3,7 @@ #include #include +#include #include struct context_tracking { @@ -19,6 +20,26 @@ struct context_tracking { } state; }; +static inline void __guest_enter(void) +{ + /* + * This is running in ioctl context so we can avoid + * the call to vtime_account() with its unnecessary idle check. + */ + vtime_account_system(current); + current->flags |= PF_VCPU; +} + +static inline void __guest_exit(void) +{ + /* + * This is running in ioctl context so we can avoid + * the call to vtime_account() with its unnecessary idle check. + */ + vtime_account_system(current); + current->flags &= ~PF_VCPU; +} + #ifdef CONFIG_CONTEXT_TRACKING DECLARE_PER_CPU(struct context_tracking, context_tracking); @@ -35,6 +56,9 @@ static inline bool context_tracking_active(void) extern void user_enter(void); extern void user_exit(void); +extern void guest_enter(void); +extern void guest_exit(void); + static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; @@ -57,6 +81,17 @@ extern void context_tracking_task_switch(struct task_struct *prev, static inline bool context_tracking_in_user(void) { return false; } static inline void user_enter(void) { } static inline void user_exit(void) { } + +static inline void guest_enter(void) +{ + __guest_enter(); +} + +static inline void guest_exit(void) +{ + __guest_exit(); +} + static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline void context_tracking_task_switch(struct task_struct *prev, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f0eea07d2c2b..8db53cfaccdb 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -760,42 +761,6 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm) } #endif -static inline void __guest_enter(void) -{ - /* - * This is running in ioctl context so we can avoid - * the call to vtime_account() with its unnecessary idle check. - */ - vtime_account_system(current); - current->flags |= PF_VCPU; -} - -static inline void __guest_exit(void) -{ - /* - * This is running in ioctl context so we can avoid - * the call to vtime_account() with its unnecessary idle check. - */ - vtime_account_system(current); - current->flags &= ~PF_VCPU; -} - -#ifdef CONFIG_CONTEXT_TRACKING -extern void guest_enter(void); -extern void guest_exit(void); - -#else /* !CONFIG_CONTEXT_TRACKING */ -static inline void guest_enter(void) -{ - __guest_enter(); -} - -static inline void guest_exit(void) -{ - __guest_exit(); -} -#endif /* !CONFIG_CONTEXT_TRACKING */ - static inline void kvm_guest_enter(void) { unsigned long flags; -- cgit v1.2.3 From 1e2bd517c108816220f262d7954b697af03b5f9c Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Thu, 30 May 2013 06:45:27 +0000 Subject: udp6: Fix udp fragmentation for tunnel traffic. udp6 over GRE tunnel does not work after to GRE tso changes. 
GRE tso handler passes inner packet but keeps track of outer header start in SKB_GSO_CB(skb)->mac_offset. udp6 fragment need to take care of outer header, which start at the mac_offset, while adding fragment header. This bug is introduced by commit 68c3316311 (GRE: Add TCP segmentation offload for GRE). Reported-by: Dmitry Kravkov Signed-off-by: Pravin B Shelar Tested-by: Dmitry Kravkov Signed-off-by: David S. Miller --- include/linux/skbuff.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2e0ced1af3b1..9c676eae3968 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2852,6 +2852,21 @@ static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) SKB_GSO_CB(inner_skb)->mac_offset; } +static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) +{ + int new_headroom, headroom; + int ret; + + headroom = skb_headroom(skb); + ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); + if (ret) + return ret; + + new_headroom = skb_headroom(skb); + SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); + return 0; +} + static inline bool skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->gso_size; -- cgit v1.2.3 From 6d7581e62f8be462440d7b22c6361f7c9fa4902b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 29 May 2013 05:02:56 +0000 Subject: list: introduce list_first_entry_or_null non-rcu variant of list_first_or_null_rcu Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/linux/list.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/list.h b/include/linux/list.h index 6a1f8df9144b..b83e5657365a 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -361,6 +361,17 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) +/** + * list_first_entry_or_null - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note that if the list is empty, it returns NULL. + */ +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) + /** * list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. -- cgit v1.2.3 From c87a124a5d5e8cf8e21c4363c3372bcaf53ea190 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 29 May 2013 09:06:27 +0000 Subject: net: force a reload of first item in hlist_nulls_for_each_entry_rcu Roman Gushchin discovered that udp4_lib_lookup2() was not reloading first item in the rcu protected list, in case the loop was restarted. This produced soft lockups as in https://lkml.org/lkml/2013/4/16/37 rcu_dereference(X)/ACCESS_ONCE(X) seem to not work as intended if X is ptr->field : In some cases, gcc caches the value or ptr->field in a register. Use a barrier() to disallow such caching, as documented in Documentation/atomic_ops.txt line 114 Thanks a lot to Roman for providing analysis and numerous patches. Diagnosed-by: Roman Gushchin Signed-off-by: Eric Dumazet Reported-by: Boris Zhmurov Signed-off-by: Roman Gushchin Acked-by: Paul E. McKenney Signed-off-by: David S. 
Miller --- include/linux/rculist_nulls.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index 2ae13714828b..1c33dd7da4a7 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, * @head: the head for your list. * @member: the name of the hlist_nulls_node within the struct. * + * The barrier() is needed to make sure compiler doesn't cache first element [1], + * as this loop can be restarted [2] + * [1] Documentation/atomic_ops.txt around line 114 + * [2] Documentation/RCU/rculist_nulls.txt around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ - for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ + for (({barrier();}), \ + pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) -- cgit v1.2.3 From a7526eb5d06b0084ef12d7b168d008fcf516caab Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 5 Jun 2013 19:38:26 +0000 Subject: net: Unbreak compat_sys_{send,recv}msg I broke them in this commit: commit 1be374a0518a288147c6a7398792583200a67261 Author: Andy Lutomirski Date: Wed May 22 14:07:44 2013 -0700 net: Block MSG_CMSG_COMPAT in send(m)msg and recv(m)msg This patch adds __sys_sendmsg and __sys_sendmsg as common helpers that accept MSG_CMSG_COMPAT and blocks MSG_CMSG_COMPAT at the syscall entrypoints. It also reverts some unnecessary checks in sys_socketcall. Apparently I was suffering from underscore blindness the first time around. Signed-off-by: Andy Lutomirski Tested-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/socket.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/socket.h b/include/linux/socket.h index 33bf2dfab19d..b10ce4b341ea 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -320,6 +320,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); struct timespec; +/* The __sys_...msg variants allow MSG_CMSG_COMPAT */ +extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); +extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, -- cgit v1.2.3 From d62840995a99c9766803d54e9d7923f247a1c1db Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 22 May 2013 02:41:36 -0700 Subject: trace: Allow idle-safe tracepoints to be called from irq __DECLARE_TRACE_RCU() currently creates an _rcuidle() tracepoint which may safely be invoked from what RCU considers to be an idle CPU. However, these _rcuidle() tracepoints may -not- be invoked from the handler of an irq taken from idle, because rcu_idle_enter() zeroes RCU's nesting-level counter, so that the rcu_irq_exit() returning to idle will trigger a WARN_ON_ONCE(). This commit therefore substitutes rcu_irq_enter() for rcu_idle_exit() and rcu_irq_exit() for rcu_idle_enter() in order to make the _rcuidle() tracepoints usable from irq handlers as well as from process context. Reported-by: Dave Jones Signed-off-by: Paul E. 
McKenney Cc: Steven Rostedt --- include/linux/tracepoint.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 2f322c38bd4d..f8e084d0fc77 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -145,8 +145,8 @@ static inline void tracepoint_synchronize_unregister(void) TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond), \ - rcu_idle_exit(), \ - rcu_idle_enter()); \ + rcu_irq_enter(), \ + rcu_irq_exit()); \ } #else #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) -- cgit v1.2.3 From ed13998c319b050fc9abdb73915859dfdbe1fb38 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Wed, 5 Jun 2013 15:30:55 +0200 Subject: sock_diag: fix filter code sent to userspace Filters need to be translated to real BPF code for userland, like SO_GETFILTER. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/linux/filter.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index c050dcc322a4..f65f5a69db8f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -46,6 +46,7 @@ extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); extern int sk_detach_filter(struct sock *sk); extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len); +extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to); #ifdef CONFIG_BPF_JIT #include -- cgit v1.2.3 From b79462a8b9f9a452edc20c64a70a89ba3b0a6a88 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 8 Jun 2013 15:00:55 +0200 Subject: team: fix checks in team_get_first_port_txable_rcu() should be checked if "cur" is txable, not "port". Introduced by commit 6e88e1357c "team: use function team_port_txable() for determing enabled and up port" Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/linux/if_team.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 4474557904f6..16fae6436d0e 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -249,12 +249,12 @@ team_get_first_port_txable_rcu(struct team *team, struct team_port *port) return port; cur = port; list_for_each_entry_continue_rcu(cur, &team->port_list, list) - if (team_port_txable(port)) + if (team_port_txable(cur)) return cur; list_for_each_entry_rcu(cur, &team->port_list, list) { if (cur == port) break; - if (team_port_txable(port)) + if (team_port_txable(cur)) return cur; } return NULL; -- cgit v1.2.3 From 16e53dbf10a2d7e228709a7286310e629ede5e45 Mon Sep 17 00:00:00 2001 From: "Srivatsa S. Bhat" Date: Wed, 12 Jun 2013 14:04:36 -0700 Subject: CPU hotplug: provide a generic helper to disable/enable CPU hotplug There are instances in the kernel where we would like to disable CPU hotplug (from sysfs) during some important operation. Today the freezer code depends on this and the code to do it was kinda tailor-made for that. Restructure the code and make it generic enough to be useful for other usecases too. Signed-off-by: Srivatsa S. Bhat Signed-off-by: Robin Holt Cc: H. 
Peter Anvin Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Russ Anderson Cc: Robin Holt Cc: Russell King Cc: Guan Xuetao Cc: Shawn Guo Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpu.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index c6f6e0839b61..9f3c7e81270a 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -175,6 +175,8 @@ extern struct bus_type cpu_subsys; extern void get_online_cpus(void); extern void put_online_cpus(void); +extern void cpu_hotplug_disable(void); +extern void cpu_hotplug_enable(void); #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) @@ -198,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void) #define get_online_cpus() do { } while (0) #define put_online_cpus() do { } while (0) +#define cpu_hotplug_disable() do { } while (0) +#define cpu_hotplug_enable() do { } while (0) #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) /* These aren't inline functions due to a GCC bug. */ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) -- cgit v1.2.3 From 637241a900cbd982f744d44646b48a273d609b34 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jun 2013 14:04:39 -0700 Subject: kmsg: honor dmesg_restrict sysctl on /dev/kmsg The dmesg_restrict sysctl currently covers the syslog method for access dmesg, however /dev/kmsg isn't covered by the same protections. Most people haven't noticed because util-linux dmesg(1) defaults to using the syslog method for access in older versions. With util-linux dmesg(1) defaults to reading directly from /dev/kmsg. To fix /dev/kmsg, let's compare the existing interfaces and what they allow: - /proc/kmsg allows: - open (SYSLOG_ACTION_OPEN) if CAP_SYSLOG since it uses a destructive single-reader interface (SYSLOG_ACTION_READ). - everything, after an open. - syslog syscall allows: - anything, if CAP_SYSLOG. - SYSLOG_ACTION_READ_ALL and SYSLOG_ACTION_SIZE_BUFFER, if dmesg_restrict==0. - nothing else (EPERM). The use-cases were: - dmesg(1) needs to do non-destructive SYSLOG_ACTION_READ_ALLs. - sysklog(1) needs to open /proc/kmsg, drop privs, and still issue the destructive SYSLOG_ACTION_READs. AIUI, dmesg(1) is moving to /dev/kmsg, and systemd-journald doesn't clear the ring buffer. Based on the comments in devkmsg_llseek, it sounds like actions besides reading aren't going to be supported by /dev/kmsg (i.e. SYSLOG_ACTION_CLEAR), so we have a strict subset of the non-destructive syslog syscall actions. To this end, move the check as Josh had done, but also rename the constants to reflect their new uses (SYSLOG_FROM_CALL becomes SYSLOG_FROM_READER, and SYSLOG_FROM_FILE becomes SYSLOG_FROM_PROC). SYSLOG_FROM_READER allows non-destructive actions, and SYSLOG_FROM_PROC allows destructive actions after a capabilities-constrained SYSLOG_ACTION_OPEN check. 
- /dev/kmsg allows: - open if CAP_SYSLOG or dmesg_restrict==0 - reading/polling, after open Addresses https://bugzilla.redhat.com/show_bug.cgi?id=903192 [akpm@linux-foundation.org: use pr_warn_once()] Signed-off-by: Kees Cook Reported-by: Christian Kujau Tested-by: Josh Boyer Cc: Kay Sievers Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/syslog.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/syslog.h b/include/linux/syslog.h index 38911391a139..98a3153c0f96 100644 --- a/include/linux/syslog.h +++ b/include/linux/syslog.h @@ -44,8 +44,8 @@ /* Return size of the log buffer */ #define SYSLOG_ACTION_SIZE_BUFFER 10 -#define SYSLOG_FROM_CALL 0 -#define SYSLOG_FROM_FILE 1 +#define SYSLOG_FROM_READER 0 +#define SYSLOG_FROM_PROC 1 int do_syslog(int type, char __user *buf, int count, bool from_file); -- cgit v1.2.3 From 30dad30922ccc733cfdbfe232090cf674dc374dc Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 12 Jun 2013 14:05:04 -0700 Subject: mm: migration: add migrate_entry_wait_huge() When we have a page fault for the address which is backed by a hugepage under migration, the kernel can't wait correctly and do busy looping on hugepage fault until the migration finishes. As a result, users who try to kick hugepage migration (via soft offlining, for example) occasionally experience long delay or soft lockup. This is because pte_offset_map_lock() can't get a correct migration entry or a correct page table lock for hugepage. This patch introduces migration_entry_wait_huge() to solve this. Signed-off-by: Naoya Horiguchi Reviewed-by: Rik van Riel Reviewed-by: Wanpeng Li Reviewed-by: Michal Hocko Cc: Mel Gorman Cc: Andi Kleen Cc: KOSAKI Motohiro Cc: [2.6.35+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swapops.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 47ead515c811..c5fd30d2a415 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry) extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); +extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte); #else #define make_migration_entry(page, write) swp_entry(0, 0) @@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp) static inline void make_migration_entry_read(swp_entry_t *entryp) { } static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } +static inline void migration_entry_wait_huge(struct mm_struct *mm, + pte_t *pte) { } static inline int is_write_migration_entry(swp_entry_t entry) { return 0; -- cgit v1.2.3 From c2853c8df57f49620d26f317d7d43347c29bfc2e Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Wed, 12 Jun 2013 14:05:10 -0700 Subject: include/linux/math64.h: add div64_ul() There is div64_long() to handle the s64/long division, but no mocro do u64/ul division. It is necessary in some scenarios, so add this function. 
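(For illustration, a call site would look something like the sketch below; the function and variable names are made up. The point of the macro is that the same line is correct on both 32-bit and 64-bit kernels, mapping to div64_u64() or div_u64() as appropriate.)

#include <linux/math64.h>

/* average nanoseconds per sample: u64 dividend, unsigned long divisor */
static u64 avg_sample_ns(u64 total_ns, unsigned long nr_samples)
{
	if (!nr_samples)
		return 0;
	return div64_ul(total_ns, nr_samples);
}
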
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Alex Shi Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/math64.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/math64.h b/include/linux/math64.h index b8ba85544721..2913b86eb12a 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -6,7 +6,8 @@ #if BITS_PER_LONG == 64 -#define div64_long(x,y) div64_s64((x),(y)) +#define div64_long(x, y) div64_s64((x), (y)) +#define div64_ul(x, y) div64_u64((x), (y)) /** * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder @@ -47,7 +48,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor) #elif BITS_PER_LONG == 32 -#define div64_long(x,y) div_s64((x),(y)) +#define div64_long(x, y) div_s64((x), (y)) +#define div64_ul(x, y) div_u64((x), (y)) #ifndef div_u64_rem static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) -- cgit v1.2.3 From f21afc25f9ed45b8ffe200d0f071b0caec3ed2ef Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 14 Jun 2013 11:13:59 -0700 Subject: smp.h: Use local_irq_{save,restore}() in !SMP version of on_each_cpu(). Thanks to commit f91eb62f71b3 ("init: scream bloody murder if interrupts are enabled too early"), "bloody murder" is now being screamed. With a MIPS OCTEON config, we use on_each_cpu() in our irq_chip.irq_bus_sync_unlock() function. This gets called in early as a result of the time_init() call. Because the !SMP version of on_each_cpu() unconditionally enables irqs, we get: WARNING: at init/main.c:560 start_kernel+0x250/0x410() Interrupts were enabled early CPU: 0 PID: 0 Comm: swapper Not tainted 3.10.0-rc5-Cavium-Octeon+ #801 Call Trace: show_stack+0x68/0x80 warn_slowpath_common+0x78/0xb0 warn_slowpath_fmt+0x38/0x48 start_kernel+0x250/0x410 Suggested fix: Do what we already do in the SMP version of on_each_cpu(), and use local_irq_save/local_irq_restore. Because we need a flags variable, make it a static inline to avoid name space issues. [ Change from v1: Convert on_each_cpu to a static inline function, add #include to avoid build breakage on some files. on_each_cpu_mask() and on_each_cpu_cond() suffer the same problem as on_each_cpu(), but they are not causing !SMP bugs for me, so I will defer changing them to a less urgent patch. 
] Signed-off-by: David Daney Cc: Ralf Baechle Cc: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/smp.h | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/smp.h b/include/linux/smp.h index e6564c1dc552..c8488763277f 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -11,6 +11,7 @@ #include #include #include +#include extern void cpu_idle(void); @@ -139,13 +140,17 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info) } #define smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) -#define on_each_cpu(func,info,wait) \ - ({ \ - local_irq_disable(); \ - func(info); \ - local_irq_enable(); \ - 0; \ - }) + +static inline int on_each_cpu(smp_call_func_t func, void *info, int wait) +{ + unsigned long flags; + + local_irq_save(flags); + func(info); + local_irq_restore(flags); + return 0; +} + /* * Note we still need to test the mask even for UP * because we actually can get an empty mask from -- cgit v1.2.3 From 29bb9e5a75684106a37593ad75ec75ff8312731b Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 24 May 2013 15:23:40 -0400 Subject: tracing/context-tracking: Add preempt_schedule_context() for tracing Dave Jones hit the following bug report: =============================== [ INFO: suspicious RCU usage. ] 3.10.0-rc2+ #1 Not tainted ------------------------------- include/linux/rcupdate.h:771 rcu_read_lock() used illegally while idle! other info that might help us debug this: RCU used illegally from idle CPU! rcu_scheduler_active = 1, debug_locks = 0 RCU used illegally from extended quiescent state! 2 locks held by cc1/63645: #0: (&rq->lock){-.-.-.}, at: [] __schedule+0xed/0x9b0 #1: (rcu_read_lock){.+.+..}, at: [] cpuacct_charge+0x5/0x1f0 CPU: 1 PID: 63645 Comm: cc1 Not tainted 3.10.0-rc2+ #1 [loadavg: 40.57 27.55 13.39 25/277 64369] Hardware name: Gigabyte Technology Co., Ltd. GA-MA78GM-S2H/GA-MA78GM-S2H, BIOS F12a 04/23/2010 0000000000000000 ffff88010f78fcf8 ffffffff816ae383 ffff88010f78fd28 ffffffff810b698d ffff88011c092548 000000000023d073 ffff88011c092500 0000000000000001 ffff88010f78fd60 ffffffff8109d7c5 ffffffff8109d645 Call Trace: [] dump_stack+0x19/0x1b [] lockdep_rcu_suspicious+0xfd/0x130 [] cpuacct_charge+0x185/0x1f0 [] ? cpuacct_charge+0x5/0x1f0 [] update_curr+0xec/0x240 [] put_prev_task_fair+0x228/0x480 [] __schedule+0x161/0x9b0 [] preempt_schedule+0x51/0x80 [] ? __cond_resched_softirq+0x60/0x60 [] ? retint_careful+0x12/0x2e [] ftrace_ops_control_func+0x1dc/0x210 [] ftrace_call+0x5/0x2f [] ? retint_careful+0xb/0x2e [] ? schedule_user+0x5/0x70 [] ? schedule_user+0x5/0x70 [] ? retint_careful+0x12/0x2e ------------[ cut here ]------------ What happened was that the function tracer traced the schedule_user() code that tells RCU that the system is coming back from userspace, and to add the CPU back to the RCU monitoring. Because the function tracer does a preempt_disable/enable_notrace() calls the preempt_enable_notrace() checks the NEED_RESCHED flag. If it is set, then preempt_schedule() is called. But this is called before the user_exit() function can inform the kernel that the CPU is no longer in user mode and needs to be accounted for by RCU. The fix is to create a new preempt_schedule_context() that checks if the kernel is still in user mode and if so to switch it to kernel mode before calling schedule. It also switches back to user mode coming back from schedule in need be. 
The only user of this currently is the preempt_enable_notrace(), which is only used by the tracing subsystem. Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1369423420.6828.226.camel@gandalf.local.home Signed-off-by: Ingo Molnar --- include/linux/preempt.h | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 87a03c746f17..f5d4723cdb3d 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -33,9 +33,25 @@ do { \ preempt_schedule(); \ } while (0) +#ifdef CONFIG_CONTEXT_TRACKING + +void preempt_schedule_context(void); + +#define preempt_check_resched_context() \ +do { \ + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ + preempt_schedule_context(); \ +} while (0) +#else + +#define preempt_check_resched_context() preempt_check_resched() + +#endif /* CONFIG_CONTEXT_TRACKING */ + #else /* !CONFIG_PREEMPT */ #define preempt_check_resched() do { } while (0) +#define preempt_check_resched_context() do { } while (0) #endif /* CONFIG_PREEMPT */ @@ -88,7 +104,7 @@ do { \ do { \ preempt_enable_no_resched_notrace(); \ barrier(); \ - preempt_check_resched(); \ + preempt_check_resched_context(); \ } while (0) #else /* !CONFIG_PREEMPT_COUNT */ -- cgit v1.2.3 From 7995bd287134f6c8f80d94bebe7396f05a9bc42b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 20 Jun 2013 18:58:36 +0400 Subject: splice: don't pass the address of ->f_pos to methods Signed-off-by: Al Viro --- include/linux/fs.h | 2 -- include/linux/splice.h | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 43db02e9c9fa..65c2be22b601 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2414,8 +2414,6 @@ extern ssize_t generic_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *, size_t len, unsigned int flags); -extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, - size_t len, unsigned int flags); extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); diff --git a/include/linux/splice.h b/include/linux/splice.h index 09a545a7dfa3..74575cbf2d6f 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -35,6 +35,7 @@ struct splice_desc { void *data; /* cookie */ } u; loff_t pos; /* file position */ + loff_t *opos; /* sendfile: output position */ size_t num_spliced; /* number of bytes already spliced */ bool need_wakeup; /* need to wake up writer */ }; -- cgit v1.2.3