From 1c8ddae09f4c102b97c9086cc70347e89468a547 Mon Sep 17 00:00:00 2001
From: Ben Hutchings
Date: Fri, 15 Nov 2013 03:03:25 +0000
Subject: deb-pkg: Inhibit initramfs builders if CONFIG_BLK_DEV_INITRD is not set

The kernel postinst hook for initramfs-tools will build an initramfs on
installation unless $INITRD is set to 'No'. make-kpkg generates a postinst
script that sets this variable appropriately, but we don't. Set it based on
CONFIG_BLK_DEV_INITRD. This should also work with dracut when is fixed.

Signed-off-by: Ben Hutchings
Signed-off-by: Michal Marek
---
 scripts/package/builddeb | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
(limited to 'scripts')

diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 90e521fde35f..65014e1495bf 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -172,8 +172,15 @@ fi
 # Install the maintainer scripts
 # Note: hook scripts under /etc/kernel are also executed by official Debian
-# kernel packages, as well as kernel packages built using make-kpkg
+# kernel packages, as well as kernel packages built using make-kpkg.
+# make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and
+# so do we; recent versions of dracut and initramfs-tools will obey this.
 debhookdir=${KDEB_HOOKDIR:-/etc/kernel}
+if grep -q '^CONFIG_BLK_DEV_INITRD=y' $KCONFIG_CONFIG; then
+	want_initrd=Yes
+else
+	want_initrd=No
+fi
 for script in postinst postrm preinst prerm ; do
 	mkdir -p "$tmpdir$debhookdir/$script.d"
 	cat <<EOF > "$tmpdir/DEBIAN/$script"
@@ -184,6 +191,9 @@ set -e
 # Pass maintainer script parameters to hook scripts
 export DEB_MAINT_PARAMS="\$*"
+# Tell initramfs builder whether it's wanted
+export INITRD=$want_initrd
+
 test -d $debhookdir/$script.d && run-parts --arg="$version" --arg="/$installed_image_path" $debhookdir/$script.d
 exit 0
EOF
-- cgit v1.2.3

From 92e112fdbb3cb55b43390426501a7efacd893b96 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Fri, 13 Dec 2013 11:36:22 -0700
Subject: PCI/checkpatch: Deprecate DEFINE_PCI_DEVICE_TABLE

Prefer use of the direct definition of struct pci_device_id instead of
indirection via macro DEFINE_PCI_DEVICE_TABLE.

Update the PCI documentation to deprecate DEFINE_PCI_DEVICE_TABLE.
Update checkpatch adding --fix option.

Signed-off-by: Joe Perches
Signed-off-by: Bjorn Helgaas
Reviewed-by: Jingoo Han
---
 scripts/checkpatch.pl | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
(limited to 'scripts')

diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 9c9810030377..9fb30b15c9dc 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2634,10 +2634,13 @@ sub process {
 			$herecurr);
 		}
-# check for declarations of struct pci_device_id
-	if ($line =~ /\bstruct\s+pci_device_id\s+\w+\s*\[\s*\]\s*\=\s*\{/) {
-		WARN("DEFINE_PCI_DEVICE_TABLE",
-		     "Use DEFINE_PCI_DEVICE_TABLE for struct pci_device_id\n" . $herecurr);
+# check for uses of DEFINE_PCI_DEVICE_TABLE
+	if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
+		if (WARN("DEFINE_PCI_DEVICE_TABLE",
+			 "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" .
$herecurr) &&
+		    $fix) {
+			$fixed[$linenr - 1] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /;
+		}
 	}
 # check for new typedefs, only function parameters and sparse annotations
-- cgit v1.2.3

From 95edca5c523c4b404dd60baa0a1bea0e4c38fd72 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Wed, 18 Dec 2013 12:35:20 -0500
Subject: localmodconfig: Add config depends by default settings

Currently localmodconfig will miss dependencies that come from a config's
'default' option. For example:

  config FOO
          default y if BAR || ZOO

If FOO is needed for a module and is set to '=m', and so is BAR or ZOO,
localmodconfig will not see that BAR or ZOO is also needed for the foo
module, and will incorrectly disable them.

Link: http://lkml.kernel.org/r/20131218175137.162937350@goodmis.org
Signed-off-by: Steven Rostedt
---
 scripts/kconfig/streamline_config.pl | 7 +++++++
 1 file changed, 7 insertions(+)
(limited to 'scripts')

diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 4606cdfb859d..31331723e810 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -219,6 +219,13 @@ sub read_kconfig {
 		$depends{$config} = $1;
 	    } elsif ($state eq "DEP" && /^\s*depends\s+on\s+(.*)$/) {
 		$depends{$config} .= " " . $1;
+	    } elsif ($state eq "DEP" && /^\s*def(_(bool|tristate)|ault)\s+(\S.*)$/) {
+		my $dep = $3;
+		if ($dep !~ /^\s*(y|m|n)\s*$/) {
+		    $dep =~ s/.*\sif\s+//;
+		    $depends{$config} .= " " . $dep;
+		    dprint "Added default depends $dep to $config\n";
+		}
 	    # Get the configs that select this config
 	    } elsif ($state ne "NONE" && /^\s*select\s+(\S+)/) {
-- cgit v1.2.3

From 79f0345fefaafb7cde301a830471edd21a37989b Mon Sep 17 00:00:00 2001
From: Nishanth Menon
Date: Mon, 2 Dec 2013 07:39:41 -0600
Subject: scripts: Coccinelle script for pm_runtime_* return checks with IS_ERR_VALUE

As indicated by Sekhar in [1], there seems to be a tendency to use
IS_ERR_VALUE to check the error result of pm_runtime_* functions, which
makes no sense considering commit c48cd65 (ARM: OMAP: use consistent error
checking) - the return value is either < 0 on error, or 0 or 1 on success.
So, set up a Coccinelle script to help identify such usage.

[1] http://marc.info/?t=138472678100003&r=1&w=2

Cc: Russell King
Reported-by: Sekhar Nori
Signed-off-by: Nishanth Menon
Acked-by: Julia Lawall
Signed-off-by: Michal Marek
---
 scripts/coccinelle/api/pm_runtime.cocci | 109 ++++++++++++++++++++++++++++++++
 1 file changed, 109 insertions(+)
 create mode 100644 scripts/coccinelle/api/pm_runtime.cocci
(limited to 'scripts')

diff --git a/scripts/coccinelle/api/pm_runtime.cocci b/scripts/coccinelle/api/pm_runtime.cocci
new file mode 100644
index 000000000000..f01789e967ec
--- /dev/null
+++ b/scripts/coccinelle/api/pm_runtime.cocci
@@ -0,0 +1,109 @@
+/// Make sure pm_runtime_* calls does not use unnecessary IS_ERR_VALUE
+//
+// Keywords: pm_runtime
+// Confidence: Medium
+// Copyright (C) 2013 Texas Instruments Incorporated - GPLv2.
+// URL: http://coccinelle.lip6.fr/ +// Options: --include-headers + +virtual patch +virtual context +virtual org +virtual report + +//---------------------------------------------------------- +// Detection +//---------------------------------------------------------- + +@runtime_bad_err_handle exists@ +expression ret; +@@ +( +ret = \(pm_runtime_idle\| + pm_runtime_suspend\| + pm_runtime_autosuspend\| + pm_runtime_resume\| + pm_request_idle\| + pm_request_resume\| + pm_request_autosuspend\| + pm_runtime_get\| + pm_runtime_get_sync\| + pm_runtime_put\| + pm_runtime_put_autosuspend\| + pm_runtime_put_sync\| + pm_runtime_put_sync_suspend\| + pm_runtime_put_sync_autosuspend\| + pm_runtime_set_active\| + pm_schedule_suspend\| + pm_runtime_barrier\| + pm_generic_runtime_suspend\| + pm_generic_runtime_resume\)(...); +... +IS_ERR_VALUE(ret) +... +) + +//---------------------------------------------------------- +// For context mode +//---------------------------------------------------------- + +@depends on runtime_bad_err_handle && context@ +identifier pm_runtime_api; +expression ret; +@@ +( +ret = pm_runtime_api(...); +... +* IS_ERR_VALUE(ret) +... +) + +//---------------------------------------------------------- +// For patch mode +//---------------------------------------------------------- + +@depends on runtime_bad_err_handle && patch@ +identifier pm_runtime_api; +expression ret; +@@ +( +ret = pm_runtime_api(...); +... +- IS_ERR_VALUE(ret) ++ ret < 0 +... +) + +//---------------------------------------------------------- +// For org and report mode +//---------------------------------------------------------- + +@r depends on runtime_bad_err_handle exists@ +position p1, p2; +identifier pm_runtime_api; +expression ret; +@@ +( +ret = pm_runtime_api@p1(...); +... +IS_ERR_VALUE@p2(ret) +... +) + +@script:python depends on org@ +p1 << r.p1; +p2 << r.p2; +pm_runtime_api << r.pm_runtime_api; +@@ + +cocci.print_main(pm_runtime_api,p1) +cocci.print_secs("IS_ERR_VALUE",p2) + +@script:python depends on report@ +p1 << r.p1; +p2 << r.p2; +pm_runtime_api << r.pm_runtime_api; +@@ + +msg = "%s returns < 0 as error. Unecessary IS_ERR_VALUE at line %s" % (pm_runtime_api, p2[0].line) +coccilib.report.print_report(p1[0],msg) -- cgit v1.2.3 From 7593e0902bc41392315316f1b5f4ba15feead842 Mon Sep 17 00:00:00 2001 From: Franck Bui-Huu Date: Mon, 2 Dec 2013 16:34:29 +0100 Subject: Fix detectition of kernel git repository in setlocalversion script [take #2] setlocalversion script was testing the presence of .git directory in order to find out if git is used as SCM to track the current kernel project. However in some cases, .git is not a directory but can be a file: when the kernel is a git submodule part of a git super project for example. This patch just fixes this by using 'git rev-parse --show-cdup' to check that the current directory is the kernel git topdir. This has the advantage to not test and rely on git internal infrastructure directly. Signed-off-by: Franck Bui-Huu Signed-off-by: Michal Marek --- scripts/setlocalversion | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/setlocalversion b/scripts/setlocalversion index d105a44b68f6..63d91e22ed7c 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -43,7 +43,8 @@ scm_version() fi # Check for git and a git repo. 
-	if test -d .git && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+	if test -z "$(git rev-parse --show-cdup 2>/dev/null)" &&
+	   head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
 		# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
 		# it, because this version is defined in the top level Makefile.
-- cgit v1.2.3

From f8ce239dfc7ba9add41d9ecdc5e7810738f839fa Mon Sep 17 00:00:00 2001
From: Ben Hutchings
Date: Thu, 5 Dec 2013 14:37:35 +0000
Subject: deb-pkg: Fix cross-building linux-headers package

builddeb generates a control file that says the linux-headers package can
only be built for the build system primary architecture. This breaks
cross-building configurations. We should use $debarch for this instead.

Since $debarch is not yet set when generating the control file, set
Architecture: any and use control file variables to fill in the
description.

Fixes: cd8d60a20a45 ('kbuild: create linux-headers package in deb-pkg')
Reported-and-tested-by: "Niew, Sh."
Signed-off-by: Ben Hutchings
Signed-off-by: Michal Marek
---
 scripts/package/builddeb | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
(limited to 'scripts')

diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 65014e1495bf..5fcfc33dd5ba 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -62,7 +62,7 @@ create_package() {
 	fi
 	# Create the package
-	dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
+	dpkg-gencontrol -isp $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
 	dpkg --build "$pdir" ..
 }
@@ -298,15 +298,14 @@ mkdir -p "$destdir"
 (cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
 ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
 rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
-arch=$(dpkg --print-architecture)
 cat <<EOF >> debian/control
 Package: $kernel_headers_packagename
 Provides: linux-headers, linux-headers-2.6
-Architecture: $arch
-Description: Linux kernel headers for $KERNELRELEASE on $arch
- This package provides kernel header files for $KERNELRELEASE on $arch
+Architecture: any
+Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
+ This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
 .
 This is useful for people who need to build external modules
EOF
-- cgit v1.2.3

From c5e318f67eebbad491615a752c51dbfde7dc3d78 Mon Sep 17 00:00:00 2001
From: Ben Hutchings
Date: Thu, 5 Dec 2013 14:39:11 +0000
Subject: deb-pkg: Fix building for MIPS big-endian or ARM OABI

These commands will mysteriously fail:

  $ make ARCH=arm versatile_defconfig
  [...]
  $ make ARCH=arm deb-pkg
  [...]
  make[1]: *** [deb-pkg] Error 1
  make: *** [deb-pkg] Error 2

The Debian architecture selection for these kernel architectures does
'grep FOO=y $KCONFIG_CONFIG && echo bar', and after 'set -e' this aborts
the script if grep does not find the given config symbol.
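A minimal shell sketch of the failure mode (hypothetical .config contents,
not part of the original patch description): under 'set -e', an assignment
whose command substitution exits non-zero terminates the whole script,
which is exactly what happens when the symbol is absent:

  set -e
  # .config has no CONFIG_AEABI=y, so grep exits 1 and the script
  # dies on this assignment:
  debarch=arm$(grep -q CONFIG_AEABI=y .config && echo el)
  # with the fix, '|| true' makes the substitution exit 0 and
  # $debarch is simply left as "arm":
  debarch=arm$(grep -q CONFIG_AEABI=y .config && echo el || true)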
Fixes: 10f26fa64200 ('build, deb-pkg: select userland architecture based on UTS_MACHINE') Signed-off-by: Ben Hutchings Signed-off-by: Michal Marek --- scripts/package/builddeb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/package/builddeb b/scripts/package/builddeb index 5fcfc33dd5ba..f46e4dd0558d 100644 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -41,9 +41,9 @@ create_package() { parisc*) debarch=hppa ;; mips*) - debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el) ;; + debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;; arm*) - debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el) ;; + debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el || true) ;; *) echo "" >&2 echo "** ** ** WARNING ** ** **" >&2 -- cgit v1.2.3 From f3c003f72dfb2497056bcbb864885837a1968ed5 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Tue, 7 Jan 2014 22:17:12 +0800 Subject: arm64, jump label: detect %c support for ARM64 As commit a9468f30b5eac6 "ARM: 7333/2: jump label: detect %c support for ARM", this patch detects the same thing for ARM64 because some ARM64 GCC versions have the same issue. Some versions of ARM64 GCC which do support asm goto, do not support the %c specifier. Since we need the %c to support jump labels on ARM64, detect that too in the asm goto detection script to avoid build errors with these versions. Acked-by: Will Deacon Signed-off-by: Jiang Liu Signed-off-by: Catalin Marinas --- scripts/gcc-goto.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh index a2af2e88daf3..c9469d34ecc6 100644 --- a/scripts/gcc-goto.sh +++ b/scripts/gcc-goto.sh @@ -5,7 +5,7 @@ cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y" int main(void) { -#ifdef __arm__ +#if defined(__arm__) || defined(__aarch64__) /* * Not related to asm goto, but used by jump label * and broken on some ARM GCC versions (see GCC Bug 48637). -- cgit v1.2.3 From 8fe9c93e7453e67b8bd09f263ec1bb0783c733fc Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Mon, 30 Dec 2013 15:31:17 +0100 Subject: powerpc: Add vr save/restore functions GCC 4.8 now generates out-of-line vr save/restore functions when optimizing for size. They are needed for the raid6 altivec support. 
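For illustration (the object path and cross-tool prefix below are
assumptions, not from the original changelog): such a compiler emits the
helpers as undefined symbols in module objects, e.g.

  $ powerpc64-linux-gnu-nm lib/raid6/altivec8.o | grep vr_
           U _restvr_20
           U _savevr_20

Without whitelisting them in modpost, each such symbol would be reported
as undefined, even though the same patch adds the functions to
arch/powerpc/lib/crtsavres.S, from which they are resolved at the final
link of the .ko.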
Signed-off-by: Andreas Schwab Signed-off-by: Benjamin Herrenschmidt --- scripts/mod/modpost.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 17855761e6b7..40610984a1b5 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -584,12 +584,16 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname) if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 || strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 || strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 || - strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0) + strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 || + strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || + strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0) return 1; if (info->hdr->e_machine == EM_PPC64) /* Special register function linked on all modules during final link of .ko */ if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 || - strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0) + strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 || + strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || + strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0) return 1; /* Do not ignore this symbol */ return 0; -- cgit v1.2.3 From ee8b09cd60bfe45d856e7c3bef8742835686bf4e Mon Sep 17 00:00:00 2001 From: Todd E Brandt Date: Thu, 16 Jan 2014 16:18:22 -0800 Subject: PM / tools: new tool for suspend/resume performance optimization This tool is designed to assist kernel and OS developers in optimizing their linux stack's suspend/resume time. Using a kernel image built with a few extra options enabled, the tool will execute a suspend and will capture dmesg and ftrace data until resume is complete. This data is transformed into a device timeline and a callgraph to give a quick and detailed view of which devices and callbacks are taking the most time in suspend/resume. The output is a single html file which can be viewed in firefox or chrome. References: https://01.org/suspendresume Signed-off-by: Todd Brandt Signed-off-by: Rafael J. Wysocki --- scripts/analyze_suspend.py | 1446 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1446 insertions(+) create mode 100755 scripts/analyze_suspend.py (limited to 'scripts') diff --git a/scripts/analyze_suspend.py b/scripts/analyze_suspend.py new file mode 100755 index 000000000000..4f2cc12dc7c7 --- /dev/null +++ b/scripts/analyze_suspend.py @@ -0,0 +1,1446 @@ +#!/usr/bin/python +# +# Tool for analyzing suspend/resume timing +# Copyright (c) 2013, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# Authors: +# Todd Brandt +# +# Description: +# This tool is designed to assist kernel and OS developers in optimizing +# their linux stack's suspend/resume time. 
Using a kernel image built +# with a few extra options enabled, the tool will execute a suspend and +# will capture dmesg and ftrace data until resume is complete. This data +# is transformed into a device timeline and a callgraph to give a quick +# and detailed view of which devices and callbacks are taking the most +# time in suspend/resume. The output is a single html file which can be +# viewed in firefox or chrome. +# +# The following kernel build options are required: +# CONFIG_PM_DEBUG=y +# CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER=y +# CONFIG_FUNCTION_GRAPH_TRACER=y +# +# The following additional kernel parameters are required: +# (e.g. in file /etc/default/grub) +# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..." +# + +import sys +import time +import os +import string +import re +import array +import platform +import datetime +import struct + +# -- classes -- + +class SystemValues: + testdir = "." + tpath = "/sys/kernel/debug/tracing/" + mempath = "/dev/mem" + powerfile = "/sys/power/state" + suspendmode = "mem" + prefix = "test" + teststamp = "" + dmesgfile = "" + ftracefile = "" + htmlfile = "" + rtcwake = False + def setOutputFile(self): + if((self.htmlfile == "") and (self.dmesgfile != "")): + m = re.match(r"(?P.*)_dmesg\.txt$", self.dmesgfile) + if(m): + self.htmlfile = m.group("name")+".html" + if((self.htmlfile == "") and (self.ftracefile != "")): + m = re.match(r"(?P.*)_ftrace\.txt$", self.ftracefile) + if(m): + self.htmlfile = m.group("name")+".html" + if(self.htmlfile == ""): + self.htmlfile = "output.html" + def initTestOutput(self): + hostname = platform.node() + if(hostname != ""): + self.prefix = hostname + v = os.popen("cat /proc/version").read().strip() + kver = string.split(v)[2] + self.testdir = os.popen("date \"+suspend-%m%d%y-%H%M%S\"").read().strip() + self.teststamp = "# "+self.testdir+" "+self.prefix+" "+self.suspendmode+" "+kver + self.dmesgfile = self.testdir+"/"+self.prefix+"_"+self.suspendmode+"_dmesg.txt" + self.ftracefile = self.testdir+"/"+self.prefix+"_"+self.suspendmode+"_ftrace.txt" + self.htmlfile = self.testdir+"/"+self.prefix+"_"+self.suspendmode+".html" + os.mkdir(self.testdir) + +class Data: + altdevname = dict() + usedmesg = False + useftrace = False + notestrun = False + verbose = False + phases = [] + dmesg = {} # root data structure + start = 0.0 + end = 0.0 + stamp = {'time': "", 'host': "", 'mode': ""} + id = 0 + tSuspended = 0.0 + fwValid = False + fwSuspend = 0 + fwResume = 0 + def initialize(self): + self.dmesg = { # dmesg log data + 'suspend_general': {'list': dict(), 'start': -1.0, 'end': -1.0, + 'row': 0, 'color': "#CCFFCC", 'order': 0}, + 'suspend_early': {'list': dict(), 'start': -1.0, 'end': -1.0, + 'row': 0, 'color': "green", 'order': 1}, + 'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0, + 'row': 0, 'color': "#00FFFF", 'order': 2}, + 'suspend_cpu': {'list': dict(), 'start': -1.0, 'end': -1.0, + 'row': 0, 'color': "blue", 'order': 3}, + 'resume_cpu': {'list': dict(), 'start': -1.0, 'end': -1.0, + 'row': 0, 'color': "red", 'order': 4}, + 'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0, + 'row': 0, 'color': "orange", 'order': 5}, + 'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0, + 'row': 0, 'color': "yellow", 'order': 6}, + 'resume_general': {'list': dict(), 'start': -1.0, 'end': -1.0, + 'row': 0, 'color': "#FFFFCC", 'order': 7} + } + self.phases = self.sortedPhases() + def normalizeTime(self): + tSus = tRes = self.tSuspended + if self.fwValid: + 
tSus -= -self.fwSuspend / 1000000000.0 + tRes -= self.fwResume / 1000000000.0 + self.tSuspended = 0.0 + self.start -= tSus + self.end -= tRes + for phase in self.phases: + zero = tRes + if "suspend" in phase: + zero = tSus + p = self.dmesg[phase] + p['start'] -= zero + p['end'] -= zero + list = p['list'] + for name in list: + d = list[name] + d['start'] -= zero + d['end'] -= zero + if('ftrace' in d): + cg = d['ftrace'] + cg.start -= zero + cg.end -= zero + for line in cg.list: + line.time -= zero + if self.fwValid: + fws = -self.fwSuspend / 1000000000.0 + fwr = self.fwResume / 1000000000.0 + list = dict() + self.id += 1 + devid = "dc%d" % self.id + list["firmware-suspend"] = \ + {'start': fws, 'end': 0, 'pid': 0, 'par': "", + 'length': -fws, 'row': 0, 'id': devid }; + self.id += 1 + devid = "dc%d" % self.id + list["firmware-resume"] = \ + {'start': 0, 'end': fwr, 'pid': 0, 'par': "", + 'length': fwr, 'row': 0, 'id': devid }; + self.dmesg['BIOS'] = \ + {'list': list, 'start': fws, 'end': fwr, + 'row': 0, 'color': "purple", 'order': 4} + self.dmesg['resume_cpu']['order'] += 1 + self.dmesg['resume_noirq']['order'] += 1 + self.dmesg['resume_early']['order'] += 1 + self.dmesg['resume_general']['order'] += 1 + self.phases = self.sortedPhases() + def vprint(self, msg): + if(self.verbose): + print(msg) + def dmesgSortVal(self, phase): + return self.dmesg[phase]['order'] + def sortedPhases(self): + return sorted(self.dmesg, key=self.dmesgSortVal) + def sortedDevices(self, phase): + list = self.dmesg[phase]['list'] + slist = [] + tmp = dict() + for devname in list: + dev = list[devname] + tmp[dev['start']] = devname + for t in sorted(tmp): + slist.append(tmp[t]) + return slist + def fixupInitcalls(self, phase, end): + # if any calls never returned, clip them at system resume end + phaselist = self.dmesg[phase]['list'] + for devname in phaselist: + dev = phaselist[devname] + if(dev['end'] < 0): + dev['end'] = end + self.vprint("%s (%s): callback didn't return" % (devname, phase)) + def fixupInitcallsThatDidntReturn(self): + # if any calls never returned, clip them at system resume end + for phase in self.phases: + self.fixupInitcalls(phase, self.dmesg['resume_general']['end']) + if(phase == "resume_general"): + break + def newAction(self, phase, name, pid, parent, start, end): + self.id += 1 + devid = "dc%d" % self.id + list = self.dmesg[phase]['list'] + length = -1.0 + if(start >= 0 and end >= 0): + length = end - start + list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent, + 'length': length, 'row': 0, 'id': devid } + def deviceIDs(self, devlist, phase): + idlist = [] + for p in self.phases: + if(p[0] != phase[0]): + continue + list = data.dmesg[p]['list'] + for devname in list: + if devname in devlist: + idlist.append(list[devname]['id']) + return idlist + def deviceParentID(self, devname, phase): + pdev = "" + pdevid = "" + for p in self.phases: + if(p[0] != phase[0]): + continue + list = data.dmesg[p]['list'] + if devname in list: + pdev = list[devname]['par'] + for p in self.phases: + if(p[0] != phase[0]): + continue + list = data.dmesg[p]['list'] + if pdev in list: + return list[pdev]['id'] + return pdev + def deviceChildrenIDs(self, devname, phase): + devlist = [] + for p in self.phases: + if(p[0] != phase[0]): + continue + list = data.dmesg[p]['list'] + for child in list: + if(list[child]['par'] == devname): + devlist.append(child) + return self.deviceIDs(devlist, phase) + +class FTraceLine: + time = 0.0 + length = 0.0 + fcall = False + freturn = False + fevent = False + 
depth = 0 + name = "" + def __init__(self, t, m, d): + self.time = float(t) + # check to see if this is a trace event + em = re.match(r"^ *\/\* *(?P.*) \*\/ *$", m) + if(em): + self.name = em.group("msg") + self.fevent = True + return + # convert the duration to seconds + if(d): + self.length = float(d)/1000000 + # the indentation determines the depth + match = re.match(r"^(?P *)(?P.*)$", m) + if(not match): + return + self.depth = self.getDepth(match.group('d')) + m = match.group('o') + # function return + if(m[0] == '}'): + self.freturn = True + if(len(m) > 1): + # includes comment with function name + match = re.match(r"^} *\/\* *(?P.*) *\*\/$", m) + if(match): + self.name = match.group('n') + # function call + else: + self.fcall = True + # function call with children + if(m[-1] == '{'): + match = re.match(r"^(?P.*) *\(.*", m) + if(match): + self.name = match.group('n') + # function call with no children (leaf) + elif(m[-1] == ';'): + self.freturn = True + match = re.match(r"^(?P.*) *\(.*", m) + if(match): + self.name = match.group('n') + # something else (possibly a trace marker) + else: + self.name = m + def getDepth(self, str): + return len(str)/2 + +class FTraceCallGraph: + start = -1.0 + end = -1.0 + list = [] + invalid = False + depth = 0 + def __init__(self): + self.start = -1.0 + self.end = -1.0 + self.list = [] + self.depth = 0 + def setDepth(self, line): + if(line.fcall and not line.freturn): + line.depth = self.depth + self.depth += 1 + elif(line.freturn and not line.fcall): + self.depth -= 1 + line.depth = self.depth + else: + line.depth = self.depth + def addLine(self, line, match): + if(not self.invalid): + self.setDepth(line) + if(line.depth == 0 and line.freturn): + self.end = line.time + self.list.append(line) + return True + if(self.invalid): + return False + if(len(self.list) >= 1000000 or self.depth < 0): + first = self.list[0] + self.list = [] + self.list.append(first) + self.invalid = True + id = "task %s cpu %s" % (match.group("pid"), match.group("cpu")) + window = "(%f - %f)" % (self.start, line.time) + data.vprint("Too much data for "+id+" "+window+", ignoring this callback") + return False + self.list.append(line) + if(self.start < 0): + self.start = line.time + return False + def sanityCheck(self): + stack = dict() + cnt = 0 + for l in self.list: + if(l.fcall and not l.freturn): + stack[l.depth] = l + cnt += 1 + elif(l.freturn and not l.fcall): + if(not stack[l.depth]): + return False + stack[l.depth].length = l.length + stack[l.depth] = 0 + l.length = 0 + cnt -= 1 + if(cnt == 0): + return True + return False + def debugPrint(self, filename): + if(filename == "stdout"): + print("[%f - %f]") % (self.start, self.end) + for l in self.list: + if(l.freturn and l.fcall): + print("%f (%02d): %s(); (%.3f us)" % (l.time, l.depth, l.name, l.length*1000000)) + elif(l.freturn): + print("%f (%02d): %s} (%.3f us)" % (l.time, l.depth, l.name, l.length*1000000)) + else: + print("%f (%02d): %s() { (%.3f us)" % (l.time, l.depth, l.name, l.length*1000000)) + print(" ") + else: + fp = open(filename, 'w') + print(filename) + for l in self.list: + if(l.freturn and l.fcall): + fp.write("%f (%02d): %s(); (%.3f us)\n" % (l.time, l.depth, l.name, l.length*1000000)) + elif(l.freturn): + fp.write("%f (%02d): %s} (%.3f us)\n" % (l.time, l.depth, l.name, l.length*1000000)) + else: + fp.write("%f (%02d): %s() { (%.3f us)\n" % (l.time, l.depth, l.name, l.length*1000000)) + fp.close() + +class Timeline: + html = {} + scaleH = 0.0 # height of the timescale row as a percent of the timeline 
height + rowH = 0.0 # height of each row in percent of the timeline height + row_height_pixels = 30 + maxrows = 0 + height = 0 + def __init__(self): + self.html = { + 'timeline': "", + 'legend': "", + 'scale': "" + } + def setRows(self, rows): + self.maxrows = int(rows) + self.scaleH = 100.0/float(self.maxrows) + self.height = self.maxrows*self.row_height_pixels + r = float(self.maxrows - 1) + if(r < 1.0): + r = 1.0 + self.rowH = (100.0 - self.scaleH)/r + +# -- global objects -- + +sysvals = SystemValues() +data = Data() + +# -- functions -- + +# Function: initFtrace +# Description: +# Configure ftrace to capture a function trace during suspend/resume +def initFtrace(): + global sysvals + + print("INITIALIZING FTRACE...") + # turn trace off + os.system("echo 0 > "+sysvals.tpath+"tracing_on") + # set the trace clock to global + os.system("echo global > "+sysvals.tpath+"trace_clock") + # set trace buffer to a huge value + os.system("echo nop > "+sysvals.tpath+"current_tracer") + os.system("echo 100000 > "+sysvals.tpath+"buffer_size_kb") + # clear the trace buffer + os.system("echo \"\" > "+sysvals.tpath+"trace") + # set trace type + os.system("echo function_graph > "+sysvals.tpath+"current_tracer") + os.system("echo \"\" > "+sysvals.tpath+"set_ftrace_filter") + # set trace format options + os.system("echo funcgraph-abstime > "+sysvals.tpath+"trace_options") + os.system("echo funcgraph-proc > "+sysvals.tpath+"trace_options") + # focus only on device suspend and resume + os.system("cat "+sysvals.tpath+"available_filter_functions | grep dpm_run_callback > "+sysvals.tpath+"set_graph_function") + +# Function: verifyFtrace +# Description: +# Check that ftrace is working on the system +def verifyFtrace(): + global sysvals + files = ["available_filter_functions", "buffer_size_kb", + "current_tracer", "set_ftrace_filter", + "trace", "trace_marker"] + for f in files: + if(os.path.exists(sysvals.tpath+f) == False): + return False + return True + +def parseStamp(line): + global data, sysvals + stampfmt = r"# suspend-(?P[0-9]{2})(?P[0-9]{2})(?P[0-9]{2})-"+\ + "(?P[0-9]{2})(?P[0-9]{2})(?P[0-9]{2})"+\ + " (?P.*) (?P.*) (?P.*)$" + m = re.match(stampfmt, line) + if(m): + dt = datetime.datetime(int(m.group("y"))+2000, int(m.group("m")), + int(m.group("d")), int(m.group("H")), int(m.group("M")), + int(m.group("S"))) + data.stamp['time'] = dt.strftime("%B %d %Y, %I:%M:%S %p") + data.stamp['host'] = m.group("host") + data.stamp['mode'] = m.group("mode") + data.stamp['kernel'] = m.group("kernel") + sysvals.suspendmode = data.stamp['mode'] + +# Function: analyzeTraceLog +# Description: +# Analyse an ftrace log output file generated from this app during +# the execution phase. Create an "ftrace" structure in memory for +# subsequent formatting in the html output file +def analyzeTraceLog(): + global sysvals, data + + # the ftrace data is tied to the dmesg data + if(not data.usedmesg): + return + + # read through the ftrace and parse the data + data.vprint("Analyzing the ftrace data...") + ftrace_line_fmt = r"^ *(?P