Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig.debug          | 14 |
| -rw-r--r-- | lib/Makefile               |  1 |
| -rw-r--r-- | lib/crc32.c                |  2 |
| -rw-r--r-- | lib/extable.c              |  6 |
| -rw-r--r-- | lib/iommu-helper.c         | 80 |
| -rw-r--r-- | lib/kobject.c              |  2 |
| -rw-r--r-- | lib/radix-tree.c           | 15 |
| -rw-r--r-- | lib/smp_processor_id.c     |  4 |
| -rw-r--r-- | lib/swiotlb.c              | 41 |
| -rw-r--r-- | lib/zlib_deflate/defutil.h |  2 |
10 files changed, 143 insertions, 24 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0d8a5a4a789d..0d385be682db 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -81,7 +81,7 @@ config HEADERS_CHECK
 
 config DEBUG_SECTION_MISMATCH
         bool "Enable full Section mismatch analysis"
-        default n
+        depends on UNDEFINED
         help
           The section mismatch analysis checks if there are illegal
           references from one section to another section.
@@ -90,19 +90,19 @@ config DEBUG_SECTION_MISMATCH
           most likely result in an oops.
           In the code functions and variables are annotated with
           __init, __devinit etc. (see full list in include/linux/init.h)
-          which result in the code/data being placed in specific sections.
-          The section mismatch anaylsis are always done after a full
-          kernel build but enabling this options will in addition
+          which results in the code/data being placed in specific sections.
+          The section mismatch analysis is always done after a full
+          kernel build but enabling this option will in addition
           do the following:
           - Add the option -fno-inline-functions-called-once to gcc
             When inlining a function annotated __init in a non-init
-            function we would loose the section information and thus
+            function we would lose the section information and thus
             the analysis would not catch the illegal reference.
-            This options tell gcc to inline less but will also
+            This option tells gcc to inline less but will also
             result in a larger kernel.
           - Run the section mismatch analysis for each module/built-in.o
             When we run the section mismatch analysis on vmlinux.o we
-            looses valueable information about where the mismatch was
+            lose valuable information about where the mismatch was
             introduced.
             Running the analysis for each module/built-in.o file
             will tell where the mismatch happens much closer to the
diff --git a/lib/Makefile b/lib/Makefile
index 543f2502b60a..a18062e4633f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_SMP) += pcounter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
diff --git a/lib/crc32.c b/lib/crc32.c
index d2c2f257bedd..49d1c9e3ce38 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -348,7 +348,7 @@ EXPORT_SYMBOL(crc32_be);
  * but again the multiple of the polynomial to subtract depends only on
  * the high bits, the high 8 bits in this case.
  *
- * The multile we need in that case is the low 32 bits of a 40-bit
+ * The multiple we need in that case is the low 32 bits of a 40-bit
  * value whose high 8 bits are given, and which is a multiple of the
  * generator polynomial.  This is simply the CRC-32 of the given
  * one-byte message.
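The crc32.c comment above describes the classic table-driven formulation: since the multiple of the polynomial to subtract depends only on the high 8 bits, each table entry can simply be the CRC-32 of a one-byte message, and eight message bits are folded per lookup. A minimal stand-alone sketch of the little-endian variant with the usual reflected polynomial 0xedb88320; the table generator and names here are illustrative, not the kernel's own:

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t crc_table[256];

    /* Table entry for byte b: the CRC-32 of the one-byte message b,
     * i.e. the low 32 bits of the 40-bit multiple of the polynomial
     * whose high 8 bits are b, computed here one bit at a time. */
    static void crc32_make_table(void)
    {
            for (uint32_t i = 0; i < 256; i++) {
                    uint32_t crc = i;
                    for (int j = 0; j < 8; j++)
                            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
                    crc_table[i] = crc;
            }
    }

    /* Fold eight message bits per iteration with one table lookup. */
    static uint32_t crc32_update(uint32_t crc, const unsigned char *p,
                                 size_t len)
    {
            while (len--)
                    crc = (crc >> 8) ^ crc_table[(crc ^ *p++) & 0xff];
            return crc;
    }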
diff --git a/lib/extable.c b/lib/extable.c
index 463f4560f16d..179c08745595 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -57,10 +57,10 @@ search_extable(const struct exception_table_entry *first,
         while (first <= last) {
                 const struct exception_table_entry *mid;
 
-                mid = (last - first) / 2 + first;
+                mid = ((last - first) >> 1) + first;
                 /*
-                 * careful, the distance between entries can be
-                 * larger than 2GB:
+                 * careful, the distance between value and insn
+                 * can be larger than MAX_LONG:
                  */
                 if (mid->insn < value)
                         first = mid + 1;
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
new file mode 100644
index 000000000000..495575a59ca6
--- /dev/null
+++ b/lib/iommu-helper.c
@@ -0,0 +1,80 @@
+/*
+ * IOMMU helper functions for the free area management
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+static unsigned long find_next_zero_area(unsigned long *map,
+                                         unsigned long size,
+                                         unsigned long start,
+                                         unsigned int nr,
+                                         unsigned long align_mask)
+{
+        unsigned long index, end, i;
+again:
+        index = find_next_zero_bit(map, size, start);
+
+        /* Align allocation */
+        index = (index + align_mask) & ~align_mask;
+
+        end = index + nr;
+        if (end >= size)
+                return -1;
+        for (i = index; i < end; i++) {
+                if (test_bit(i, map)) {
+                        start = i+1;
+                        goto again;
+                }
+        }
+        return index;
+}
+
+static inline void set_bit_area(unsigned long *map, unsigned long i,
+                                int len)
+{
+        unsigned long end = i + len;
+        while (i < end) {
+                __set_bit(i, map);
+                i++;
+        }
+}
+
+static inline int is_span_boundary(unsigned int index, unsigned int nr,
+                                   unsigned long shift,
+                                   unsigned long boundary_size)
+{
+        shift = (shift + index) & (boundary_size - 1);
+        return shift + nr > boundary_size;
+}
+
+unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
+                               unsigned long start, unsigned int nr,
+                               unsigned long shift, unsigned long boundary_size,
+                               unsigned long align_mask)
+{
+        unsigned long index;
+again:
+        index = find_next_zero_area(map, size, start, nr, align_mask);
+        if (index != -1) {
+                if (is_span_boundary(index, nr, shift, boundary_size)) {
+                        /* we could do more effectively */
+                        start = index + 1;
+                        goto again;
+                }
+                set_bit_area(map, index, nr);
+        }
+        return index;
+}
+EXPORT_SYMBOL(iommu_area_alloc);
+
+void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
+{
+        unsigned long end = start + nr;
+
+        while (start < end) {
+                __clear_bit(start, map);
+                start++;
+        }
+}
+EXPORT_SYMBOL(iommu_area_free);
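As a usage sketch for the new lib/iommu-helper.c above: an IOMMU driver keeps a bitmap of allocation slots and asks iommu_area_alloc() for a run of free bits that honors an alignment mask and never crosses a power-of-two boundary. The fragment below assumes the matching include/linux/iommu-helper.h from this series declares the two exported functions; the bitmap size, the single-caller setup without locking, and the 16-slot alignment / 256-slot boundary numbers are made-up example values, not taken from any real driver:

    #include <linux/bitmap.h>
    #include <linux/iommu-helper.h>

    #define MY_NR_SLOTS 1024

    static DECLARE_BITMAP(my_map, MY_NR_SLOTS);
    static unsigned long my_next_slot;

    static long my_alloc_slots(unsigned int nr)
    {
            unsigned long index;

            /* nr free slots, 16-slot aligned (align_mask is
             * "alignment - 1"), never straddling a 256-slot boundary;
             * shift is 0 because we assume slot 0 itself sits on a
             * boundary. */
            index = iommu_area_alloc(my_map, MY_NR_SLOTS, my_next_slot, nr,
                                     0, 256, 15);
            if (index != -1)
                    my_next_slot = index + nr;      /* next search hint */
            return index;
    }

    static void my_free_slots(unsigned long index, unsigned int nr)
    {
            iommu_area_free(my_map, index, nr);
    }

Note that boundary_size is in slots and must be a power of two, since is_span_boundary() reduces the slot offset with "& (boundary_size - 1)".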
diff --git a/lib/kobject.c b/lib/kobject.c
index 1d63ead1815e..d784daeb8571 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -637,7 +637,7 @@ struct kobject *kobject_create(void)
  * @name: the name for the kset
  * @parent: the parent kobject of this kobject, if any.
  *
- * This function creates a kset structure dynamically and registers it
+ * This function creates a kobject structure dynamically and registers it
  * with sysfs. When you are finished with this structure, call
  * kobject_put() and the structure will be dynamically freed when
  * it is no longer being used.
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 48c250fe2233..65f0e758ec38 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -95,14 +95,17 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 static struct radix_tree_node *
 radix_tree_node_alloc(struct radix_tree_root *root)
 {
-        struct radix_tree_node *ret;
+        struct radix_tree_node *ret = NULL;
         gfp_t gfp_mask = root_gfp_mask(root);
 
-        ret = kmem_cache_alloc(radix_tree_node_cachep,
-                                set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
-        if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
+        if (!(gfp_mask & __GFP_WAIT)) {
                 struct radix_tree_preload *rtp;
 
+                /*
+                 * Provided the caller has preloaded here, we will always
+                 * succeed in getting a node here (and never reach
+                 * kmem_cache_alloc)
+                 */
                 rtp = &__get_cpu_var(radix_tree_preloads);
                 if (rtp->nr) {
                         ret = rtp->nodes[rtp->nr - 1];
@@ -110,6 +113,10 @@ radix_tree_node_alloc(struct radix_tree_root *root)
                         rtp->nr--;
                 }
         }
+        if (ret == NULL)
+                ret = kmem_cache_alloc(radix_tree_node_cachep,
+                                set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+
         BUG_ON(radix_tree_is_indirect_ptr(ret));
         return ret;
 }
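The reordering in radix_tree_node_alloc() above means a !__GFP_WAIT allocation now tries the per-CPU preload pool first and only falls back to kmem_cache_alloc(). What it relies on is the long-standing caller contract, sketched below with illustrative names (my_tree, my_lock, my_insert): preload while sleeping is still allowed, then insert from atomic context:

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>
    #include <linux/gfp.h>

    static RADIX_TREE(my_tree, GFP_ATOMIC);
    static DEFINE_SPINLOCK(my_lock);

    static int my_insert(unsigned long index, void *item)
    {
            int err;

            /* May sleep: stock the per-CPU node pool while we can. */
            err = radix_tree_preload(GFP_KERNEL);
            if (err)
                    return err;

            /* Atomic from here on; with the change above, node
             * allocation is served from the preloaded nodes before
             * kmem_cache_alloc() is ever tried. */
            spin_lock(&my_lock);
            err = radix_tree_insert(&my_tree, index, item);
            spin_unlock(&my_lock);

            radix_tree_preload_end();       /* re-enables preemption */
            return err;
    }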
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index eddc9b3d3876..6c90fb90e19c 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -42,7 +42,9 @@ unsigned int debug_smp_processor_id(void)
         if (!printk_ratelimit())
                 goto out_enable;
 
-        printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+        printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
+                        "code: %s/%d\n",
+                preempt_count() - 1, current->comm, current->pid);
         print_symbol("caller is %s\n", (long)__builtin_return_address(0));
         dump_stack();
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1a8050ade861..4bb5a11e18a2 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -282,6 +282,15 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
         return (addr & ~mask) != 0;
 }
 
+static inline unsigned int is_span_boundary(unsigned int index,
+                                            unsigned int nslots,
+                                            unsigned long offset_slots,
+                                            unsigned long max_slots)
+{
+        unsigned long offset = (offset_slots + index) & (max_slots - 1);
+        return offset + nslots > max_slots;
+}
+
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
@@ -292,6 +301,16 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
         char *dma_addr;
         unsigned int nslots, stride, index, wrap;
         int i;
+        unsigned long start_dma_addr;
+        unsigned long mask;
+        unsigned long offset_slots;
+        unsigned long max_slots;
+
+        mask = dma_get_seg_boundary(hwdev);
+        start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+
+        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+        max_slots = ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
         /*
          * For mappings greater than a page, we limit the stride (and
@@ -311,10 +330,17 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
          */
         spin_lock_irqsave(&io_tlb_lock, flags);
         {
-                wrap = index = ALIGN(io_tlb_index, stride);
-
+                index = ALIGN(io_tlb_index, stride);
                 if (index >= io_tlb_nslabs)
-                        wrap = index = 0;
+                        index = 0;
+
+                while (is_span_boundary(index, nslots, offset_slots,
+                                        max_slots)) {
+                        index += stride;
+                        if (index >= io_tlb_nslabs)
+                                index = 0;
+                }
+                wrap = index;
 
                 do {
                         /*
@@ -341,9 +367,12 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 
                                 goto found;
                         }
-                        index += stride;
-                        if (index >= io_tlb_nslabs)
-                                index = 0;
+                        do {
+                                index += stride;
+                                if (index >= io_tlb_nslabs)
+                                        index = 0;
+                        } while (is_span_boundary(index, nslots, offset_slots,
+                                                  max_slots));
                 } while (index != wrap);
 
         spin_unlock_irqrestore(&io_tlb_lock, flags);
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
index d9feaf638608..6b15a909ca3f 100644
--- a/lib/zlib_deflate/defutil.h
+++ b/lib/zlib_deflate/defutil.h
@@ -164,7 +164,7 @@ typedef struct deflate_state {
     int nice_match; /* Stop searching when current match exceeds this */
 
 /* used by trees.c: */
-    /* Didn't use ct_data typedef below to supress compiler warning */
+    /* Didn't use ct_data typedef below to suppress compiler warning */
     struct ct_data_s dyn_ltree[HEAP_SIZE];   /* literal and length tree */
     struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
     struct ct_data_s bl_tree[2*BL_CODES+1];  /* Huffman tree for bit lengths */
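To make the arithmetic behind the lib/swiotlb.c change concrete, here is a stand-alone sketch of the new boundary check. IO_TLB_SHIFT = 11 (2 KiB slabs) matches the kernel's definition; the 64 KiB dma_get_seg_boundary() mask is an assumed example value:

    #include <stdio.h>

    #define IO_TLB_SHIFT 11
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static unsigned int is_span_boundary(unsigned int index,
                                         unsigned int nslots,
                                         unsigned long offset_slots,
                                         unsigned long max_slots)
    {
            unsigned long offset = (offset_slots + index) & (max_slots - 1);
            return offset + nslots > max_slots;
    }

    int main(void)
    {
            unsigned long mask = 0xffff;    /* 64 KiB segment boundary */
            unsigned long max_slots = ALIGN(mask + 1, 1UL << IO_TLB_SHIFT)
                                            >> IO_TLB_SHIFT;    /* = 32 */

            /* Four 2 KiB slots starting at slot 30 would cross slot 32. */
            printf("index 30: %s\n",
                   is_span_boundary(30, 4, 0, max_slots) ? "crosses" : "fits");
            /* Starting at slot 28 they end exactly on the boundary. */
            printf("index 28: %s\n",
                   is_span_boundary(28, 4, 0, max_slots) ? "crosses" : "fits");
            return 0;
    }

map_single() applies this same test both when picking the starting index and when advancing inside the search loop, so no bounce buffer it hands out ever straddles the device's segment boundary.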
