diff --git a/Documentation/coccinelle.txt b/Documentation/coccinelle.txt index 18de785..7f773d5 100644 --- a/Documentation/coccinelle.txt +++ b/Documentation/coccinelle.txt @@ -6,15 +6,17 @@ Copyright 2010 Gilles Muller Getting Coccinelle ~~~~~~~~~~~~~~~~~~~~ -The semantic patches included in the kernel use the 'virtual rule' -feature which was introduced in Coccinelle version 0.1.11. +The semantic patches included in the kernel use features and options +which are provided by Coccinelle version 1.0.0-rc11 and above. +Using earlier versions will fail as the option names used by +the Coccinelle files and coccicheck have been updated. -Coccinelle (>=0.2.0) is available through the package manager +Coccinelle is available through the package manager of many distributions, e.g. : - - Debian (>=squeeze) - - Fedora (>=13) - - Ubuntu (>=10.04 Lucid Lynx) + - Debian + - Fedora + - Ubuntu - OpenSUSE - Arch Linux - NetBSD @@ -36,11 +38,6 @@ as a regular user, and install it with sudo make install -The semantic patches in the kernel will work best with Coccinelle version -0.2.4 or later. Using earlier versions may incur some parse errors in the -semantic patch code, but any results that are obtained should still be -correct. - Using Coccinelle on the Linux kernel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -48,7 +45,7 @@ A Coccinelle-specific target is defined in the top level Makefile. This target is named 'coccicheck' and calls the 'coccicheck' front-end in the 'scripts' directory. -Four modes are defined: patch, report, context, and org. The mode to +Four basic modes are defined: patch, report, context, and org. The mode to use is specified by setting the MODE variable with 'MODE='. 'patch' proposes a fix, when possible. @@ -62,18 +59,24 @@ diff-like style.Lines of interest are indicated with '-'. 'org' generates a report in the Org mode format of Emacs. Note that not all semantic patches implement all modes. For easy use -of Coccinelle, the default mode is "chain" which tries the previous -modes in the order above until one succeeds. +of Coccinelle, the default mode is "report". + +Two other modes provide some common combinations of these modes. -To make a report for every semantic patch, run the following command: +'chain' tries the previous modes in the order above until one succeeds. - make coccicheck MODE=report +'rep+ctxt' runs the report mode and the context mode successively. + It should be used with the C option (described later) + which checks the code on a file basis. -NB: The 'report' mode is the default one. +Examples: + To make a report for every semantic patch, run the following command: -To produce patches, run: + make coccicheck MODE=report - make coccicheck MODE=patch + To produce patches, run: + + make coccicheck MODE=patch The coccicheck target applies every semantic patch available in the @@ -91,6 +94,11 @@ To enable verbose messages set the V= variable, for example: make coccicheck MODE=report V=1 +By default, coccicheck tries to run as parallel as possible. To change +the parallelism, set the J= variable. For example, to run across 4 CPUs: + + make coccicheck MODE=report J=4 + Using Coccinelle with a single semantic patch ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -124,26 +132,33 @@ To check only newly edited code, use the value 2 for the C flag, i.e. make C=2 CHECK="scripts/coccicheck" +In these modes, which work on a file basis, there is no information +about semantic patches displayed, and no commit message proposed.
+ This runs every semantic patch in scripts/coccinelle by default. The COCCI variable may additionally be used to only apply a single semantic patch as shown in the previous section. -The "chain" mode is the default. You can select another one with the +The "report" mode is the default. You can select another one with the MODE variable explained above. -In this mode, there is no information about semantic patches -displayed, and no commit message proposed. - Additional flags ~~~~~~~~~~~~~~~~~~ Additional flags can be passed to spatch through the SPFLAGS variable. - make SPFLAGS=--use_glimpse coccicheck + make SPFLAGS=--use-glimpse coccicheck + make SPFLAGS=--use-idutils coccicheck See spatch --help to learn more about spatch options. +Note that the '--use-glimpse' and '--use-idutils' options +require external tools for indexing the code. None of them is +thus active by default. However, by indexing the code with +one of these tools, and according to the cocci file used, +spatch could process the entire code base more quickly. + Proposing new semantic patches ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt index 213859e..e349f29 100644 --- a/Documentation/kbuild/kconfig.txt +++ b/Documentation/kbuild/kconfig.txt @@ -174,6 +174,19 @@ Searching in menuconfig: /^hotplug + When searching, symbols are sorted thus: + - exact match first: an exact match is when the search matches + the complete symbol name; + - alphabetical order: when two symbols do not match exactly, + they are sorted in alphabetical order (in the user's current + locale). + For example: ^ATH.K matches: + ATH5K ATH9K ATH5K_AHB ATH5K_DEBUG [...] ATH6KL ATH6KL_DEBUG + [...] ATH9K_AHB ATH9K_BTCOEX_SUPPORT ATH9K_COMMON [...] + of which only ATH5K and ATH9K match exactly and so are sorted + first (and in alphabetical order), then come all other symbols, + sorted in alphabetical order. + ______________________________________________________________________ User interface options for 'menuconfig' diff --git a/Documentation/vm/zswap.txt b/Documentation/vm/zswap.txt new file mode 100644 index 0000000..7e492d8 --- /dev/null +++ b/Documentation/vm/zswap.txt @@ -0,0 +1,68 @@ +Overview: + +Zswap is a lightweight compressed cache for swap pages. It takes pages that are +in the process of being swapped out and attempts to compress them into a +dynamically allocated RAM-based memory pool. zswap basically trades CPU cycles +for potentially reduced swap I/O. This trade-off can also result in a +significant performance improvement if reads from the compressed cache are +faster than reads from a swap device. + +NOTE: Zswap is a new feature as of v3.11 and interacts heavily with memory +reclaim. This interaction has not been fully explored on the large set of +potential configurations and workloads that exist. For this reason, zswap +is a work in progress and should be considered experimental. + +Some potential benefits: +* Desktop/laptop users with limited RAM capacities can mitigate the + performance impact of swapping. +* Overcommitted guests that share a common I/O resource can + dramatically reduce their swap I/O pressure, avoiding heavy-handed I/O + throttling by the hypervisor. This allows more work to get done with less + impact to the guest workload and guests sharing the I/O subsystem. +* Users with SSDs as swap devices can extend the life of the device by + drastically reducing life-shortening writes.
+ +Zswap evicts pages from the compressed cache on an LRU basis to the backing swap +device when the compressed pool reaches its size limit. This requirement had +been identified in prior community discussions. + +To enable zswap, the "enabled" attribute must be set to 1 at boot time. e.g. +zswap.enabled=1 + +Design: + +Zswap receives pages for compression through the Frontswap API and is able to +evict pages from its own compressed pool on an LRU basis and write them back to +the backing swap device in the case that the compressed pool is full. + +Zswap makes use of zbud for managing the compressed memory pool. Each +allocation in zbud is not directly accessible by address. Rather, a handle is +returned by the allocation routine and that handle must be mapped before being +accessed. The compressed memory pool grows on demand and shrinks as compressed +pages are freed. The pool is not preallocated. + +When a swap page is passed from frontswap to zswap, zswap maintains a mapping +of the swap entry, a combination of the swap type and swap offset, to the zbud +handle that references that compressed swap page. This mapping is achieved +with a red-black tree per swap type. The swap offset is the search key for the +tree nodes. + +During a page fault on a PTE that is a swap entry, frontswap calls the zswap +load function to decompress the page into the page allocated by the page fault +handler. + +Once there are no PTEs referencing a swap page stored in zswap (i.e. the count +in the swap_map goes to 0) the swap code calls the zswap invalidate function, +via frontswap, to free the compressed entry. + +Zswap seeks to be simple in its policies. Sysfs attributes allow for one user-controlled +policy: +* max_pool_percent - The maximum percentage of memory that the compressed + pool can occupy. + +Zswap allows the compressor to be selected at kernel boot time by setting the +"compressor" attribute. The default compressor is lzo. e.g. +zswap.compressor=deflate + +A debugfs interface is provided for various statistics about pool size, number +of pages stored, and various counters for the reasons pages are rejected. diff --git a/MAINTAINERS b/MAINTAINERS index e51d018..cbd4f66 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2135,9 +2135,12 @@ COCCINELLE/Semantic Patches (SmPL) M: Julia Lawall M: Gilles Muller M: Nicolas Palix +M: Michal Marek L: cocci@systeme.lip6.fr (moderated for non-subscribers) +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc W: http://coccinelle.lip6.fr/ S: Supported +F: Documentation/coccinelle.txt F: scripts/coccinelle/ F: scripts/coccicheck diff --git a/Makefile b/Makefile index 170ed7c..4e3575c 100644 --- a/Makefile +++ b/Makefile @@ -1116,6 +1116,7 @@ help: @echo ' gtags - Generate GNU GLOBAL index' @echo ' kernelrelease - Output the release version string' @echo ' kernelversion - Output the version stored in Makefile' + @echo ' image_name - Output the image name' @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \ echo ' (default: $(INSTALL_HDR_PATH))'; \ echo '' @@ -1310,7 +1311,7 @@ export_report: endif #ifeq ($(config-targets),1) endif #ifeq ($(mixed-targets),1) -PHONY += checkstack kernelrelease kernelversion +PHONY += checkstack kernelrelease kernelversion image_name # UML needs a little special treatment here. It wants to use the host # toolchain, so needs $(SUBARCH) passed to checkstack.pl.
Everyone @@ -1331,6 +1332,9 @@ kernelrelease: kernelversion: @echo $(KERNELVERSION) +image_name: + @echo $(KBUILD_IMAGE) + # Clear a bunch of variables before executing the submake tools/: FORCE $(Q)mkdir -p $(objtree)/tools diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 10062ce..0c63562 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4143d9b..9737e97 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -270,6 +270,8 @@ source "drivers/Kconfig" source "fs/Kconfig" +source "arch/arm64/kvm/Kconfig" + source "arch/arm64/Kconfig.debug" source "security/Kconfig" diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 49c162c..666e231 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig new file mode 100644 index 0000000..21e9082 --- /dev/null +++ b/arch/arm64/kvm/Kconfig @@ -0,0 +1,51 @@ +# +# KVM configuration +# + +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + ---help--- + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. + +if VIRTUALIZATION + +config KVM + bool "Kernel-based Virtual Machine (KVM) support" + select MMU_NOTIFIER + select PREEMPT_NOTIFIERS + select ANON_INODES + select KVM_MMIO + select KVM_ARM_HOST + select KVM_ARM_VGIC + select KVM_ARM_TIMER + ---help--- + Support hosting virtualized guest machines. + + If unsure, say N. + +config KVM_ARM_HOST + bool + ---help--- + Provides host support for ARM processors. + +config KVM_ARM_VGIC + bool + depends on KVM_ARM_HOST && OF + select HAVE_KVM_IRQCHIP + ---help--- + Adds support for a hardware assisted, in-kernel GIC emulation. + +config KVM_ARM_TIMER + bool + depends on KVM_ARM_VGIC + ---help--- + Adds support for the Architected Timers in virtual machines. 
+ +endif # VIRTUALIZATION diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 7c7be78..8ed6cb1 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -90,11 +90,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 7e5fe27..f1baadd 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -158,11 +158,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 67a42ed..cb8bdbe 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -92,10 +92,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 06bafec..4002329 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -91,11 +91,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } @@ -176,11 +174,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = s390_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = s390_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index b836e92..c2f6ff6 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h @@ -108,7 +108,7 @@ static inline int sparc_leon3_snooping_enabled(void) { u32 cctrl; __asm__ __volatile__("lda [%%g0] 2, %0\n\t" : "=r"(cctrl)); - return (cctrl >> 23) & 1; + return ((cctrl >> 23) & 1) && ((cctrl >> 17) & 1); }; static inline void sparc_leon3_disable_cache(void) diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index 961b87f..f76389a 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c @@ -49,6 +49,8 @@ int foo(void) DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread)); BLANK(); DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context)); + BLANK(); + DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */ return 0; diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 5ef48da..11d460f 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -783,6 +783,16 @@ void 
ldom_set_var(const char *var, const char *value) char *base, *p; int msg_len, loops; + if (strlen(var) + strlen(value) + 2 > + sizeof(pkt) - sizeof(pkt.header)) { + printk(KERN_ERR PFX + "contents length: %zu, which more than max: %lu," + "so could not set (%s) variable to (%s).\n", + strlen(var) + strlen(value) + 2, + sizeof(pkt) - sizeof(pkt.header), var, value); + return; + } + memset(&pkt, 0, sizeof(pkt)); pkt.header.data.tag.type = DS_DATA; pkt.header.data.handle = cp->handle; diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 2daaaa6..51561b8 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -290,7 +290,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { /* We know it's 32-bit */ unsigned long task_size = STACK_TOP32; @@ -302,7 +301,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S index 44aad32..969f964 100644 --- a/arch/sparc/mm/hypersparc.S +++ b/arch/sparc/mm/hypersparc.S @@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out: /* The things we do for performance... */ hypersparc_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 @@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out: */ /* Verified, my ass... */ hypersparc_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %g2 #ifndef CONFIG_SMP cmp %g2, -1 @@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S index c801c39..5d2b88d 100644 --- a/arch/sparc/mm/swift.S +++ b/arch/sparc/mm/swift.S @@ -105,7 +105,7 @@ swift_flush_cache_mm_out: .globl swift_flush_cache_range swift_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 sub %o2, %o1, %o2 sethi %hi(4096), %o3 cmp %o2, %o3 @@ -116,7 +116,7 @@ swift_flush_cache_range: .globl swift_flush_cache_page swift_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 70: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -219,7 +219,7 @@ swift_flush_sig_insns: .globl swift_flush_tlb_range .globl swift_flush_tlb_all swift_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 swift_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -233,7 +233,7 @@ swift_flush_tlb_all_out: .globl swift_flush_tlb_page swift_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + 
AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S index 4e55e8f..bf10a34 100644 --- a/arch/sparc/mm/tsunami.S +++ b/arch/sparc/mm/tsunami.S @@ -24,7 +24,7 @@ /* Sliiick... */ tsunami_flush_cache_page: tsunami_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_cache_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -46,7 +46,7 @@ tsunami_flush_sig_insns: /* More slick stuff... */ tsunami_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -65,7 +65,7 @@ tsunami_flush_tlb_out: /* This one can be done in a fine grained manner... */ tsunami_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S index bf8ee06..852257f 100644 --- a/arch/sparc/mm/viking.S +++ b/arch/sparc/mm/viking.S @@ -108,7 +108,7 @@ viking_mxcc_flush_page: viking_flush_cache_page: viking_flush_cache_range: #ifndef CONFIG_SMP - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 #endif viking_flush_cache_mm: #ifndef CONFIG_SMP @@ -148,7 +148,7 @@ viking_flush_tlb_mm: #endif viking_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -173,7 +173,7 @@ viking_flush_tlb_range: #endif viking_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -239,7 +239,7 @@ sun4dsmp_flush_tlb_range: tst %g5 bne 3f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 @@ -265,7 +265,7 @@ sun4dsmp_flush_tlb_page: tst %g5 bne 2f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 and %o1, PAGE_MASK, %o1 diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c index f96f4ce..d67d91e 100644 --- a/arch/tile/mm/mmap.c +++ b/arch/tile/mm/mmap.c @@ -66,10 +66,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(mm); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 52ff81c..bae3aba 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -308,8 +308,6 @@ static int load_aout_binary(struct linux_binprm *bprm) (current->mm->start_data = N_DATADDR(ex)); current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); - current->mm->free_area_cache = TASK_UNMAPPED_BASE; - current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT); if (retval < 0) { diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c index 0db655e..639d128 100644 
--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c +++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c @@ -491,10 +491,8 @@ static struct perf_amd_iommu __perf_iommu = { static __init int amd_iommu_pc_init(void) { /* Make sure the IOMMU PC resource is available */ - if (!amd_iommu_pc_supported()) { - pr_err("perf: amd_iommu PMU not installed. No support!\n"); + if (!amd_iommu_pc_supported()) return -ENODEV; - } _init_perf_amd_iommu(&__perf_iommu, "amd_iommu"); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a7e1855..064d0be 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3404,15 +3404,22 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu, var->limit = vmx_read_guest_seg_limit(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); ar = vmx_read_guest_seg_ar(vmx, seg); + var->unusable = (ar >> 16) & 1; var->type = ar & 15; var->s = (ar >> 4) & 1; var->dpl = (ar >> 5) & 3; - var->present = (ar >> 7) & 1; + /* + * Some userspaces do not preserve unusable property. Since usable + * segment has to be present according to VMX spec we can use present + * property to amend userspace bug by making unusable segment always + * nonpresent. vmx_segment_access_rights() already marks nonpresent + * segment as unusable. + */ + var->present = !var->unusable; var->avl = (ar >> 12) & 1; var->l = (ar >> 13) & 1; var->db = (ar >> 14) & 1; var->g = (ar >> 15) & 1; - var->unusable = (ar >> 16) & 1; } static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 845df68..62c29a5 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -115,10 +115,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = mmap_legacy_base(); mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c index 7e27d32..300daab 100644 --- a/drivers/ide/delkin_cb.c +++ b/drivers/ide/delkin_cb.c @@ -173,18 +173,7 @@ static struct pci_driver delkin_cb_pci_driver = { .resume = delkin_cb_resume, }; -static int __init delkin_cb_init(void) -{ - return pci_register_driver(&delkin_cb_pci_driver); -} - -static void __exit delkin_cb_exit(void) -{ - pci_unregister_driver(&delkin_cb_pci_driver); -} - -module_init(delkin_cb_init); -module_exit(delkin_cb_exit); +module_pci_driver(delkin_cb_pci_driver); MODULE_AUTHOR("Mark Lord"); MODULE_DESCRIPTION("Basic support for Delkin/ASKA/Workbit Cardbus IDE"); diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c index 51beb85..0a8440a 100644 --- a/drivers/ide/gayle.c +++ b/drivers/ide/gayle.c @@ -183,20 +183,7 @@ static struct platform_driver amiga_gayle_ide_driver = { }, }; -static int __init amiga_gayle_ide_init(void) -{ - return platform_driver_probe(&amiga_gayle_ide_driver, - amiga_gayle_ide_probe); -} - -module_init(amiga_gayle_ide_init); - -static void __exit amiga_gayle_ide_exit(void) -{ - platform_driver_unregister(&amiga_gayle_ide_driver); -} - -module_exit(amiga_gayle_ide_exit); +module_platform_driver_probe(amiga_gayle_ide_driver, amiga_gayle_ide_probe); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:amiga-gayle-ide"); diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 729428e..dabb88b 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -239,9 +239,6 @@ void ide_pio_bytes(ide_drive_t *drive, 
struct ide_cmd *cmd, unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs); int page_is_high; - if (nr_bytes > PAGE_SIZE) - nr_bytes = PAGE_SIZE; - page = sg_page(cursg); offset = cursg->offset + cmd->cursg_ofs; @@ -249,6 +246,8 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, page = nth_page(page, (offset >> PAGE_SHIFT)); offset %= PAGE_SIZE; + nr_bytes = min_t(unsigned, nr_bytes, (PAGE_SIZE - offset)); + page_is_high = PageHighMem(page); if (page_is_high) local_irq_save(flags); diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c index 91d49dd..ede8575 100644 --- a/drivers/ide/tx4938ide.c +++ b/drivers/ide/tx4938ide.c @@ -203,18 +203,7 @@ static struct platform_driver tx4938ide_driver = { .remove = __exit_p(tx4938ide_remove), }; -static int __init tx4938ide_init(void) -{ - return platform_driver_probe(&tx4938ide_driver, tx4938ide_probe); -} - -static void __exit tx4938ide_exit(void) -{ - platform_driver_unregister(&tx4938ide_driver); -} - -module_init(tx4938ide_init); -module_exit(tx4938ide_exit); +module_platform_driver_probe(tx4938ide_driver, tx4938ide_probe); MODULE_DESCRIPTION("TX4938 internal IDE driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c index c0ab800..4ecdee5 100644 --- a/drivers/ide/tx4939ide.c +++ b/drivers/ide/tx4939ide.c @@ -624,18 +624,7 @@ static struct platform_driver tx4939ide_driver = { .resume = tx4939ide_resume, }; -static int __init tx4939ide_init(void) -{ - return platform_driver_probe(&tx4939ide_driver, tx4939ide_probe); -} - -static void __exit tx4939ide_exit(void) -{ - platform_driver_unregister(&tx4939ide_driver); -} - -module_init(tx4939ide_init); -module_exit(tx4939ide_exit); +module_platform_driver_probe(tx4939ide_driver, tx4939ide_probe); MODULE_DESCRIPTION("TX4939 internal IDE driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 1df0ff3..3df5684 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1239,6 +1239,8 @@ static int vnet_port_remove(struct vio_dev *vdev) dev_set_drvdata(&vdev->dev, NULL); kfree(port); + + unregister_netdev(vp->dev); } return 0; } diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index bce8769..89dec7f 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -255,8 +255,6 @@ static int load_aout_binary(struct linux_binprm * bprm) (current->mm->start_data = N_DATADDR(ex)); current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); - current->mm->free_area_cache = current->mm->mmap_base; - current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); if (retval < 0) { diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index f8a0b0e..100edcc 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -738,8 +738,6 @@ static int load_elf_binary(struct linux_binprm *bprm) /* Do this so that we can load the interpreter, if need be. 
We will change some of these later */ - current->mm->free_area_cache = current->mm->mmap_base; - current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), executable_stack); if (retval < 0) { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ace9a5f..fb425aa 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -330,12 +330,9 @@ struct mm_struct { unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); - void (*unmap_area) (struct mm_struct *mm, unsigned long addr); #endif unsigned long mmap_base; /* base of mmap area */ unsigned long task_size; /* size of task vm space */ - unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ - unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; atomic_t mm_users; /* How many users with user space? */ diff --git a/include/linux/sched.h b/include/linux/sched.h index f99d57e..50d04b9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -322,8 +322,6 @@ extern unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -extern void arch_unmap_area(struct mm_struct *, unsigned long); -extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); #else static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} #endif diff --git a/include/linux/zbud.h b/include/linux/zbud.h new file mode 100644 index 0000000..2571a5c --- /dev/null +++ b/include/linux/zbud.h @@ -0,0 +1,22 @@ +#ifndef _ZBUD_H_ +#define _ZBUD_H_ + +#include + +struct zbud_pool; + +struct zbud_ops { + int (*evict)(struct zbud_pool *pool, unsigned long handle); +}; + +struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops); +void zbud_destroy_pool(struct zbud_pool *pool); +int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp, + unsigned long *handle); +void zbud_free(struct zbud_pool *pool, unsigned long handle); +int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries); +void *zbud_map(struct zbud_pool *pool, unsigned long handle); +void zbud_unmap(struct zbud_pool *pool, unsigned long handle); +u64 zbud_get_pool_size(struct zbud_pool *pool); + +#endif /* _ZBUD_H_ */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 1db3af9..1833bc5 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -182,7 +182,7 @@ void update_perf_cpu_limits(void) u64 tmp = perf_sample_period_ns; tmp *= sysctl_perf_cpu_time_max_percent; - tmp = do_div(tmp, 100); + do_div(tmp, 100); atomic_set(&perf_sample_allowed_ns, tmp); } @@ -232,7 +232,7 @@ DEFINE_PER_CPU(u64, running_sample_length); void perf_sample_event_took(u64 sample_len_ns) { u64 avg_local_sample_len; - u64 local_samples_len = __get_cpu_var(running_sample_length); + u64 local_samples_len; if (atomic_read(&perf_sample_allowed_ns) == 0) return; diff --git a/kernel/fork.c b/kernel/fork.c index 6e6a1c1..66635c8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -365,8 +365,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) mm->locked_vm = 0; mm->mmap = NULL; mm->mmap_cache = NULL; - mm->free_area_cache = oldmm->mmap_base; - mm->cached_hole_size = ~0UL; mm->map_count = 0; cpumask_clear(mm_cpumask(mm)); mm->mm_rb = RB_ROOT; @@ -540,8 +538,6 @@ static struct mm_struct *mm_init(struct mm_struct 
*mm, struct task_struct *p) mm->nr_ptes = 0; memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); spin_lock_init(&mm->page_table_lock); - mm->free_area_cache = TASK_UNMAPPED_BASE; - mm->cached_hole_size = ~0UL; mm_init_aio(mm); mm_init_owner(mm, p); diff --git a/kernel/printk.c b/kernel/printk.c index 8212c1a..d37d45c 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1369,9 +1369,9 @@ static int console_trylock_for_printk(unsigned int cpu) } } logbuf_cpu = UINT_MAX; + raw_spin_unlock(&logbuf_lock); if (wake) up(&console_sem); - raw_spin_unlock(&logbuf_lock); return retval; } diff --git a/mm/Kconfig b/mm/Kconfig index 7e28ecf..8028dcc 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -478,6 +478,36 @@ config FRONTSWAP If unsure, say Y to enable frontswap. +config ZBUD + tristate + default n + help + A special purpose allocator for storing compressed pages. + It is designed to store up to two compressed pages per physical + page. While this design limits storage density, it has simple and + deterministic reclaim properties that make it preferable to a higher + density approach when reclaim will be used. + +config ZSWAP + bool "Compressed cache for swap pages (EXPERIMENTAL)" + depends on FRONTSWAP && CRYPTO=y + select CRYPTO_LZO + select ZBUD + default n + help + A lightweight compressed cache for swap pages. It takes + pages that are in the process of being swapped out and attempts to + compress them into a dynamically allocated RAM-based memory pool. + This can result in a significant I/O reduction on swap device and, + in the case where decompressing from RAM is faster that swap device + reads, can also improve workload performance. + + This is marked experimental because it is a new feature (as of + v3.11) that interacts heavily with memory reclaim. While these + interactions don't cause any known issues on simple memory setups, + they have not be fully explored on the large set of potential + configurations and workloads that exist. + config MEM_SOFT_DIRTY bool "Track memory changes" depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY diff --git a/mm/Makefile b/mm/Makefile index 72c5acb..f008033 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o obj-$(CONFIG_FRONTSWAP) += frontswap.o +obj-$(CONFIG_ZSWAP) += zswap.o obj-$(CONFIG_HAS_DMA) += dmapool.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o obj-$(CONFIG_NUMA) += mempolicy.o @@ -58,3 +59,4 @@ obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o obj-$(CONFIG_CLEANCACHE) += cleancache.o obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o +obj-$(CONFIG_ZBUD) += zbud.o diff --git a/mm/mmap.c b/mm/mmap.c index f813111..fbad7b0 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1878,15 +1878,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, } #endif -void arch_unmap_area(struct mm_struct *mm, unsigned long addr) -{ - /* - * Is this a new hole at the lowest possible address? - */ - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) - mm->free_area_cache = addr; -} - /* * This mmap-allocator allocates new areas top-down from below the * stack's low limit (the base): @@ -1943,19 +1934,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, } #endif -void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) -{ - /* - * Is this a new hole at the highest possible address? 
- */ - if (addr > mm->free_area_cache) - mm->free_area_cache = addr; - - /* dont allow allocations above current base */ - if (mm->free_area_cache > mm->mmap_base) - mm->free_area_cache = mm->mmap_base; -} - unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) @@ -2376,7 +2354,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, { struct vm_area_struct **insertion_point; struct vm_area_struct *tail_vma = NULL; - unsigned long addr; insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; @@ -2393,11 +2370,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, } else mm->highest_vm_end = prev ? prev->vm_end : 0; tail_vma->vm_next = NULL; - if (mm->unmap_area == arch_unmap_area) - addr = prev ? prev->vm_end : mm->mmap_base; - else - addr = vma ? vma->vm_start : mm->mmap_base; - mm->unmap_area(mm, addr); mm->mmap_cache = NULL; /* Kill the cache. */ } diff --git a/mm/nommu.c b/mm/nommu.c index e44e6e0..ecd1f15 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1871,10 +1871,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr, return -ENOMEM; } -void arch_unmap_area(struct mm_struct *mm, unsigned long addr) -{ -} - void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) diff --git a/mm/util.c b/mm/util.c index ab1424d..7441c41 100644 --- a/mm/util.c +++ b/mm/util.c @@ -295,7 +295,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } #endif diff --git a/mm/zbud.c b/mm/zbud.c new file mode 100644 index 0000000..9bb4710 --- /dev/null +++ b/mm/zbud.c @@ -0,0 +1,527 @@ +/* + * zbud.c + * + * Copyright (C) 2013, Seth Jennings, IBM + * + * Concepts based on zcache internal zbud allocator by Dan Magenheimer. + * + * zbud is a special purpose allocator for storing compressed pages. Contrary + * to what its name may suggest, zbud is not a buddy allocator, but rather an + * allocator that "buddies" two compressed pages together in a single memory + * page. + * + * While this design limits storage density, it has simple and deterministic + * reclaim properties that make it preferable to a higher density approach when + * reclaim will be used. + * + * zbud works by storing compressed pages, or "zpages", together in pairs in a + * single memory page called a "zbud page". The first buddy is "left + * justified" at the beginning of the zbud page, and the last buddy is "right + * justified" at the end of the zbud page. The benefit is that if either + * buddy is freed, the freed buddy space, coalesced with whatever slack space + * that existed between the buddies, results in the largest possible free region + * within the zbud page. + * + * zbud also provides an attractive lower bound on density. The ratio of zpages + * to zbud pages cannot be less than 1. This ensures that zbud can never "do + * harm" by using more pages to store zpages than the uncompressed zpages would + * have used on their own. + * + * zbud pages are divided into "chunks". The size of the chunks is fixed at + * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages + * into chunks allows organizing unbuddied zbud pages into a manageable number + * of unbuddied lists according to the number of free chunks available in the + * zbud page.
+ * + * The zbud API differs from that of conventional allocators in that the + * allocation function, zbud_alloc(), returns an opaque handle to the user, + * not a dereferenceable pointer. The user must map the handle using + * zbud_map() in order to get a usable pointer by which to access the + * allocation data and unmap the handle with zbud_unmap() when operations + * on the allocation data are complete. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +/***************** + * Structures +*****************/ +/* + * NCHUNKS_ORDER determines the internal allocation granularity, effectively + * adjusting internal fragmentation. It also determines the number of + * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the + * allocation granularity will be in chunks of size PAGE_SIZE/64, and there + * will be 64 freelists per pool. + */ +#define NCHUNKS_ORDER 6 + +#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) +#define CHUNK_SIZE (1 << CHUNK_SHIFT) +#define NCHUNKS (PAGE_SIZE >> CHUNK_SHIFT) +#define ZHDR_SIZE_ALIGNED CHUNK_SIZE + +/** + * struct zbud_pool - stores metadata for each zbud pool + * @lock: protects all pool fields and first|last_chunk fields of any + * zbud page in the pool + * @unbuddied: array of lists tracking zbud pages that only contain one buddy; + * the lists each zbud page is added to depends on the size of + * its free region. + * @buddied: list tracking the zbud pages that contain two buddies; + * these zbud pages are full + * @lru: list tracking the zbud pages in LRU order by most recently + * added buddy. + * @pages_nr: number of zbud pages in the pool. + * @ops: pointer to a structure of user defined operations specified at + * pool creation time. + * + * This structure is allocated at pool creation time and maintains metadata + * pertaining to a particular zbud pool. + */ +struct zbud_pool { + spinlock_t lock; + struct list_head unbuddied[NCHUNKS]; + struct list_head buddied; + struct list_head lru; + u64 pages_nr; + struct zbud_ops *ops; +}; + +/* + * struct zbud_header - zbud page metadata occupying the first chunk of each + * zbud page. 
+ * @buddy: links the zbud page into the unbuddied/buddied lists in the pool + * @lru: links the zbud page into the lru list in the pool + * @first_chunks: the size of the first buddy in chunks, 0 if free + * @last_chunks: the size of the last buddy in chunks, 0 if free + */ +struct zbud_header { + struct list_head buddy; + struct list_head lru; + unsigned int first_chunks; + unsigned int last_chunks; + bool under_reclaim; +}; + +/***************** + * Helpers +*****************/ +/* Just to make the code easier to read */ +enum buddy { + FIRST, + LAST +}; + +/* Converts an allocation size in bytes to size in zbud chunks */ +static int size_to_chunks(int size) +{ + return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; +} + +#define for_each_unbuddied_list(_iter, _begin) \ + for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) + +/* Initializes the zbud header of a newly allocated zbud page */ +static struct zbud_header *init_zbud_page(struct page *page) +{ + struct zbud_header *zhdr = page_address(page); + zhdr->first_chunks = 0; + zhdr->last_chunks = 0; + INIT_LIST_HEAD(&zhdr->buddy); + INIT_LIST_HEAD(&zhdr->lru); + zhdr->under_reclaim = 0; + return zhdr; +} + +/* Resets the struct page fields and frees the page */ +static void free_zbud_page(struct zbud_header *zhdr) +{ + __free_page(virt_to_page(zhdr)); +} + +/* + * Encodes the handle of a particular buddy within a zbud page + * Pool lock should be held as this function accesses first|last_chunks + */ +static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud) +{ + unsigned long handle; + + /* + * For now, the encoded handle is actually just the pointer to the data + * but this might not always be the case. A little information hiding. + * Add CHUNK_SIZE to the handle if it is the first allocation to jump + * over the zbud header in the first chunk. + */ + handle = (unsigned long)zhdr; + if (bud == FIRST) + /* skip over zbud header */ + handle += ZHDR_SIZE_ALIGNED; + else /* bud == LAST */ + handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); + return handle; +} + +/* Returns the zbud page where a given handle is stored */ +static struct zbud_header *handle_to_zbud_header(unsigned long handle) +{ + return (struct zbud_header *)(handle & PAGE_MASK); +} + +/* Returns the number of free chunks in a zbud page */ +static int num_free_chunks(struct zbud_header *zhdr) +{ + /* + * Rather than branch for different situations, just use the fact that + * free buddies have a length of zero to simplify everything. -1 at the + * end for the zbud header. + */ + return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks - 1; +} + +/***************** + * API Functions +*****************/ +/** + * zbud_create_pool() - create a new zbud pool + * @gfp: gfp flags when allocating the zbud pool structure + * @ops: user-defined operations for the zbud pool + * + * Return: pointer to the new zbud pool or NULL if the metadata allocation + * failed. + */ +struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops) +{ + struct zbud_pool *pool; + int i; + + pool = kmalloc(sizeof(struct zbud_pool), gfp); + if (!pool) + return NULL; + spin_lock_init(&pool->lock); + for_each_unbuddied_list(i, 0) + INIT_LIST_HEAD(&pool->unbuddied[i]); + INIT_LIST_HEAD(&pool->buddied); + INIT_LIST_HEAD(&pool->lru); + pool->pages_nr = 0; + pool->ops = ops; + return pool; +} + +/** + * zbud_destroy_pool() - destroys an existing zbud pool + * @pool: the zbud pool to be destroyed + * + * The pool should be emptied before this function is called. 
+ */ +void zbud_destroy_pool(struct zbud_pool *pool) +{ + kfree(pool); +} + +/** + * zbud_alloc() - allocates a region of a given size + * @pool: zbud pool from which to allocate + * @size: size in bytes of the desired allocation + * @gfp: gfp flags used if the pool needs to grow + * @handle: handle of the new allocation + * + * This function will attempt to find a free region in the pool large enough to + * satisfy the allocation request. A search of the unbuddied lists is + * performed first. If no suitable free region is found, then a new page is + * allocated and added to the pool to satisfy the request. + * + * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used + * as zbud pool pages. + * + * Return: 0 if success and handle is set, otherwise -EINVAL is the size or + * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate + * a new page. + */ +int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp, + unsigned long *handle) +{ + int chunks, i, freechunks; + struct zbud_header *zhdr = NULL; + enum buddy bud; + struct page *page; + + if (size <= 0 || gfp & __GFP_HIGHMEM) + return -EINVAL; + if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED) + return -ENOSPC; + chunks = size_to_chunks(size); + spin_lock(&pool->lock); + + /* First, try to find an unbuddied zbud page. */ + zhdr = NULL; + for_each_unbuddied_list(i, chunks) { + if (!list_empty(&pool->unbuddied[i])) { + zhdr = list_first_entry(&pool->unbuddied[i], + struct zbud_header, buddy); + list_del(&zhdr->buddy); + if (zhdr->first_chunks == 0) + bud = FIRST; + else + bud = LAST; + goto found; + } + } + + /* Couldn't find unbuddied zbud page, create new one */ + spin_unlock(&pool->lock); + page = alloc_page(gfp); + if (!page) + return -ENOMEM; + spin_lock(&pool->lock); + pool->pages_nr++; + zhdr = init_zbud_page(page); + bud = FIRST; + +found: + if (bud == FIRST) + zhdr->first_chunks = chunks; + else + zhdr->last_chunks = chunks; + + if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) { + /* Add to unbuddied list */ + freechunks = num_free_chunks(zhdr); + list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); + } else { + /* Add to buddied list */ + list_add(&zhdr->buddy, &pool->buddied); + } + + /* Add/move zbud page to beginning of LRU */ + if (!list_empty(&zhdr->lru)) + list_del(&zhdr->lru); + list_add(&zhdr->lru, &pool->lru); + + *handle = encode_handle(zhdr, bud); + spin_unlock(&pool->lock); + + return 0; +} + +/** + * zbud_free() - frees the allocation associated with the given handle + * @pool: pool in which the allocation resided + * @handle: handle associated with the allocation returned by zbud_alloc() + * + * In the case that the zbud page in which the allocation resides is under + * reclaim, as indicated by the PG_reclaim flag being set, this function + * only sets the first|last_chunks to 0. The page is actually freed + * once both buddies are evicted (see zbud_reclaim_page() below). 
+ */ +void zbud_free(struct zbud_pool *pool, unsigned long handle) +{ + struct zbud_header *zhdr; + int freechunks; + + spin_lock(&pool->lock); + zhdr = handle_to_zbud_header(handle); + + /* If first buddy, handle will be page aligned */ + if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK) + zhdr->last_chunks = 0; + else + zhdr->first_chunks = 0; + + if (zhdr->under_reclaim) { + /* zbud page is under reclaim, reclaim will free */ + spin_unlock(&pool->lock); + return; + } + + /* Remove from existing buddy list */ + list_del(&zhdr->buddy); + + if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { + /* zbud page is empty, free */ + list_del(&zhdr->lru); + free_zbud_page(zhdr); + pool->pages_nr--; + } else { + /* Add to unbuddied list */ + freechunks = num_free_chunks(zhdr); + list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); + } + + spin_unlock(&pool->lock); +} + +#define list_tail_entry(ptr, type, member) \ + list_entry((ptr)->prev, type, member) + +/** + * zbud_reclaim_page() - evicts allocations from a pool page and frees it + * @pool: pool from which a page will attempt to be evicted + * @retires: number of pages on the LRU list for which eviction will + * be attempted before failing + * + * zbud reclaim is different from normal system reclaim in that the reclaim is + * done from the bottom, up. This is because only the bottom layer, zbud, has + * information on how the allocations are organized within each zbud page. This + * has the potential to create interesting locking situations between zbud and + * the user, however. + * + * To avoid these, this is how zbud_reclaim_page() should be called: + + * The user detects a page should be reclaimed and calls zbud_reclaim_page(). + * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call + * the user-defined eviction handler with the pool and handle as arguments. + * + * If the handle can not be evicted, the eviction handler should return + * non-zero. zbud_reclaim_page() will add the zbud page back to the + * appropriate list and try the next zbud page on the LRU up to + * a user defined number of retries. + * + * If the handle is successfully evicted, the eviction handler should + * return 0 _and_ should have called zbud_free() on the handle. zbud_free() + * contains logic to delay freeing the page if the page is under reclaim, + * as indicated by the setting of the PG_reclaim flag on the underlying page. + * + * If all buddies in the zbud page are successfully evicted, then the + * zbud page can be freed. + * + * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are + * no pages to evict or an eviction handler is not registered, -EAGAIN if + * the retry limit was hit. 
+ */ +int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries) +{ + int i, ret, freechunks; + struct zbud_header *zhdr; + unsigned long first_handle = 0, last_handle = 0; + + spin_lock(&pool->lock); + if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) || + retries == 0) { + spin_unlock(&pool->lock); + return -EINVAL; + } + for (i = 0; i < retries; i++) { + zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru); + list_del(&zhdr->lru); + list_del(&zhdr->buddy); + /* Protect zbud page against free */ + zhdr->under_reclaim = true; + /* + * We need to encode the handles before unlocking, since we can + * race with free that will set (first|last)_chunks to 0 + */ + first_handle = 0; + last_handle = 0; + if (zhdr->first_chunks) + first_handle = encode_handle(zhdr, FIRST); + if (zhdr->last_chunks) + last_handle = encode_handle(zhdr, LAST); + spin_unlock(&pool->lock); + + /* Issue the eviction callback(s) */ + if (first_handle) { + ret = pool->ops->evict(pool, first_handle); + if (ret) + goto next; + } + if (last_handle) { + ret = pool->ops->evict(pool, last_handle); + if (ret) + goto next; + } +next: + spin_lock(&pool->lock); + zhdr->under_reclaim = false; + if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { + /* + * Both buddies are now free, free the zbud page and + * return success. + */ + free_zbud_page(zhdr); + pool->pages_nr--; + spin_unlock(&pool->lock); + return 0; + } else if (zhdr->first_chunks == 0 || + zhdr->last_chunks == 0) { + /* add to unbuddied list */ + freechunks = num_free_chunks(zhdr); + list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); + } else { + /* add to buddied list */ + list_add(&zhdr->buddy, &pool->buddied); + } + + /* add to beginning of LRU */ + list_add(&zhdr->lru, &pool->lru); + } + spin_unlock(&pool->lock); + return -EAGAIN; +} + +/** + * zbud_map() - maps the allocation associated with the given handle + * @pool: pool in which the allocation resides + * @handle: handle associated with the allocation to be mapped + * + * While trivial for zbud, the mapping functions for other allocators + * implementing this allocation API could have more complex information encoded + * in the handle and could create temporary mappings to make the data + * accessible to the user. + * + * Returns: a pointer to the mapped allocation + */ +void *zbud_map(struct zbud_pool *pool, unsigned long handle) +{ + return (void *)(handle); +} + +/** + * zbud_unmap() - unmaps the allocation associated with the given handle + * @pool: pool in which the allocation resides + * @handle: handle associated with the allocation to be unmapped + */ +void zbud_unmap(struct zbud_pool *pool, unsigned long handle) +{ +} + +/** + * zbud_get_pool_size() - gets the zbud pool size in pages + * @pool: pool whose size is being queried + * + * Returns: size in pages of the given pool. The pool lock need not be + * taken to access pages_nr.
+ */ +u64 zbud_get_pool_size(struct zbud_pool *pool) +{ + return pool->pages_nr; +} + +static int __init init_zbud(void) +{ + /* Make sure the zbud header will fit in one chunk */ + BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED); + pr_info("loaded\n"); + return 0; +} + +static void __exit exit_zbud(void) +{ + pr_info("unloaded\n"); +} + +module_init(init_zbud); +module_exit(exit_zbud); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Seth Jennings "); +MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages"); diff --git a/mm/zswap.c b/mm/zswap.c new file mode 100644 index 0000000..deda2b6 --- /dev/null +++ b/mm/zswap.c @@ -0,0 +1,943 @@ +/* + * zswap.c - zswap driver file + * + * zswap is a backend for frontswap that takes pages that are in the process + * of being swapped out and attempts to compress and store them in a + * RAM-based memory pool. This can result in a significant I/O reduction on + * the swap device and, in the case where decompressing from RAM is faster + * than reading from the swap device, can also improve workload performance. + * + * Copyright (C) 2012 Seth Jennings + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/********************************* +* statistics +**********************************/ +/* Number of memory pages used by the compressed pool */ +static u64 zswap_pool_pages; +/* The number of compressed pages currently stored in zswap */ +static atomic_t zswap_stored_pages = ATOMIC_INIT(0); + +/* + * The statistics below are not protected from concurrent access for + * performance reasons so they may not be a 100% accurate. However, + * they do provide useful information on roughly how many times a + * certain event is occurring. 
+*/ + +/* Pool limit was hit (see zswap_max_pool_percent) */ +static u64 zswap_pool_limit_hit; +/* Pages written back when pool limit was reached */ +static u64 zswap_written_back_pages; +/* Store failed due to a reclaim failure after pool limit was reached */ +static u64 zswap_reject_reclaim_fail; +/* Compressed page was too big for the allocator to (optimally) store */ +static u64 zswap_reject_compress_poor; +/* Store failed because underlying allocator could not get memory */ +static u64 zswap_reject_alloc_fail; +/* Store failed because the entry metadata could not be allocated (rare) */ +static u64 zswap_reject_kmemcache_fail; +/* Duplicate store was encountered (rare) */ +static u64 zswap_duplicate_entry; + +/********************************* +* tunables +**********************************/ +/* Enable/disable zswap (disabled by default, fixed at boot for now) */ +static bool zswap_enabled __read_mostly; +module_param_named(enabled, zswap_enabled, bool, 0); + +/* Compressor to be used by zswap (fixed at boot for now) */ +#define ZSWAP_COMPRESSOR_DEFAULT "lzo" +static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; +module_param_named(compressor, zswap_compressor, charp, 0); + +/* The maximum percentage of memory that the compressed pool can occupy */ +static unsigned int zswap_max_pool_percent = 20; +module_param_named(max_pool_percent, + zswap_max_pool_percent, uint, 0644); + +/********************************* +* compression functions +**********************************/ +/* per-cpu compression transforms */ +static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms; + +enum comp_op { + ZSWAP_COMPOP_COMPRESS, + ZSWAP_COMPOP_DECOMPRESS +}; + +static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + struct crypto_comp *tfm; + int ret; + + tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu()); + switch (op) { + case ZSWAP_COMPOP_COMPRESS: + ret = crypto_comp_compress(tfm, src, slen, dst, dlen); + break; + case ZSWAP_COMPOP_DECOMPRESS: + ret = crypto_comp_decompress(tfm, src, slen, dst, dlen); + break; + default: + ret = -EINVAL; + } + + put_cpu(); + return ret; +} + +static int __init zswap_comp_init(void) +{ + if (!crypto_has_comp(zswap_compressor, 0, 0)) { + pr_info("%s compressor not available\n", zswap_compressor); + /* fall back to default compressor */ + zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; + if (!crypto_has_comp(zswap_compressor, 0, 0)) + /* can't even load the default compressor */ + return -ENODEV; + } + pr_info("using %s compressor\n", zswap_compressor); + + /* alloc percpu transforms */ + zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *); + if (!zswap_comp_pcpu_tfms) + return -ENOMEM; + return 0; +} + +static void zswap_comp_exit(void) +{ + /* free percpu transforms */ + if (zswap_comp_pcpu_tfms) + free_percpu(zswap_comp_pcpu_tfms); +} + +/********************************* +* data structures +**********************************/ +/* + * struct zswap_entry + * + * This structure contains the metadata for tracking a single compressed + * page within zswap. + * + * rbnode - links the entry into red-black tree for the appropriate swap type + * refcount - the number of outstanding reference to the entry. This is needed + * to protect against premature freeing of the entry by code + * concurent calls to load, invalidate, and writeback. The lock + * for the zswap_tree structure that contains the entry must + * be held while changing the refcount. 
Since the lock must + * be held, there is no reason to also make refcount atomic. + * offset - the swap offset for the entry. Index into the red-black tree. + * handle - zsmalloc allocation handle that stores the compressed page data + * length - the length in bytes of the compressed page data. Needed during + * decompression + */ +struct zswap_entry { + struct rb_node rbnode; + pgoff_t offset; + int refcount; + unsigned int length; + unsigned long handle; +}; + +struct zswap_header { + swp_entry_t swpentry; +}; + +/* + * The tree lock in the zswap_tree struct protects a few things: + * - the rbtree + * - the refcount field of each entry in the tree + */ +struct zswap_tree { + struct rb_root rbroot; + spinlock_t lock; + struct zbud_pool *pool; +}; + +static struct zswap_tree *zswap_trees[MAX_SWAPFILES]; + +/********************************* +* zswap entry functions +**********************************/ +static struct kmem_cache *zswap_entry_cache; + +static int zswap_entry_cache_create(void) +{ + zswap_entry_cache = KMEM_CACHE(zswap_entry, 0); + return (zswap_entry_cache == NULL); +} + +static void zswap_entry_cache_destory(void) +{ + kmem_cache_destroy(zswap_entry_cache); +} + +static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp) +{ + struct zswap_entry *entry; + entry = kmem_cache_alloc(zswap_entry_cache, gfp); + if (!entry) + return NULL; + entry->refcount = 1; + return entry; +} + +static void zswap_entry_cache_free(struct zswap_entry *entry) +{ + kmem_cache_free(zswap_entry_cache, entry); +} + +/* caller must hold the tree lock */ +static void zswap_entry_get(struct zswap_entry *entry) +{ + entry->refcount++; +} + +/* caller must hold the tree lock */ +static int zswap_entry_put(struct zswap_entry *entry) +{ + entry->refcount--; + return entry->refcount; +} + +/********************************* +* rbtree functions +**********************************/ +static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset) +{ + struct rb_node *node = root->rb_node; + struct zswap_entry *entry; + + while (node) { + entry = rb_entry(node, struct zswap_entry, rbnode); + if (entry->offset > offset) + node = node->rb_left; + else if (entry->offset < offset) + node = node->rb_right; + else + return entry; + } + return NULL; +} + +/* + * In the case that a entry with the same offset is found, a pointer to + * the existing entry is stored in dupentry and the function returns -EEXIST + */ +static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry, + struct zswap_entry **dupentry) +{ + struct rb_node **link = &root->rb_node, *parent = NULL; + struct zswap_entry *myentry; + + while (*link) { + parent = *link; + myentry = rb_entry(parent, struct zswap_entry, rbnode); + if (myentry->offset > entry->offset) + link = &(*link)->rb_left; + else if (myentry->offset < entry->offset) + link = &(*link)->rb_right; + else { + *dupentry = myentry; + return -EEXIST; + } + } + rb_link_node(&entry->rbnode, parent, link); + rb_insert_color(&entry->rbnode, root); + return 0; +} + +/********************************* +* per-cpu code +**********************************/ +static DEFINE_PER_CPU(u8 *, zswap_dstmem); + +static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu) +{ + struct crypto_comp *tfm; + u8 *dst; + + switch (action) { + case CPU_UP_PREPARE: + tfm = crypto_alloc_comp(zswap_compressor, 0, 0); + if (IS_ERR(tfm)) { + pr_err("can't allocate compressor transform\n"); + return NOTIFY_BAD; + } + *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm; + dst = 
kmalloc(PAGE_SIZE * 2, GFP_KERNEL); + if (!dst) { + pr_err("can't allocate compressor buffer\n"); + crypto_free_comp(tfm); + *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; + return NOTIFY_BAD; + } + per_cpu(zswap_dstmem, cpu) = dst; + break; + case CPU_DEAD: + case CPU_UP_CANCELED: + tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu); + if (tfm) { + crypto_free_comp(tfm); + *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; + } + dst = per_cpu(zswap_dstmem, cpu); + kfree(dst); + per_cpu(zswap_dstmem, cpu) = NULL; + break; + default: + break; + } + return NOTIFY_OK; +} + +static int zswap_cpu_notifier(struct notifier_block *nb, + unsigned long action, void *pcpu) +{ + unsigned long cpu = (unsigned long)pcpu; + return __zswap_cpu_notifier(action, cpu); +} + +static struct notifier_block zswap_cpu_notifier_block = { + .notifier_call = zswap_cpu_notifier +}; + +static int zswap_cpu_init(void) +{ + unsigned long cpu; + + get_online_cpus(); + for_each_online_cpu(cpu) + if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK) + goto cleanup; + register_cpu_notifier(&zswap_cpu_notifier_block); + put_online_cpus(); + return 0; + +cleanup: + for_each_online_cpu(cpu) + __zswap_cpu_notifier(CPU_UP_CANCELED, cpu); + put_online_cpus(); + return -ENOMEM; +} + +/********************************* +* helpers +**********************************/ +static bool zswap_is_full(void) +{ + return (totalram_pages * zswap_max_pool_percent / 100 < + zswap_pool_pages); +} + +/* + * Carries out the common pattern of freeing and entry's zsmalloc allocation, + * freeing the entry itself, and decrementing the number of stored pages. + */ +static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry) +{ + zbud_free(tree->pool, entry->handle); + zswap_entry_cache_free(entry); + atomic_dec(&zswap_stored_pages); + zswap_pool_pages = zbud_get_pool_size(tree->pool); +} + +/********************************* +* writeback code +**********************************/ +/* return enum for zswap_get_swap_cache_page */ +enum zswap_get_swap_ret { + ZSWAP_SWAPCACHE_NEW, + ZSWAP_SWAPCACHE_EXIST, + ZSWAP_SWAPCACHE_NOMEM +}; + +/* + * zswap_get_swap_cache_page + * + * This is an adaption of read_swap_cache_async() + * + * This function tries to find a page with the given swap entry + * in the swapper_space address space (the swap cache). If the page + * is found, it is returned in retpage. Otherwise, a page is allocated, + * added to the swap cache, and returned in retpage. + * + * If success, the swap cache page is returned in retpage + * Returns 0 if page was already in the swap cache, page is not locked + * Returns 1 if the new page needs to be populated, page is locked + * Returns <0 on error + */ +static int zswap_get_swap_cache_page(swp_entry_t entry, + struct page **retpage) +{ + struct page *found_page, *new_page = NULL; + struct address_space *swapper_space = &swapper_spaces[swp_type(entry)]; + int err; + + *retpage = NULL; + do { + /* + * First check the swap cache. Since this is normally + * called after lookup_swap_cache() failed, re-calling + * that would confuse statistics. + */ + found_page = find_get_page(swapper_space, entry.val); + if (found_page) + break; + + /* + * Get a new page to read into from swap. + */ + if (!new_page) { + new_page = alloc_page(GFP_KERNEL); + if (!new_page) + break; /* Out of memory */ + } + + /* + * call radix_tree_preload() while we can wait. + */ + err = radix_tree_preload(GFP_KERNEL); + if (err) + break; + + /* + * Swap entry may have been freed since our caller observed it. 
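+ * swapcache_prepare() takes the SWAP_HAS_CACHE reference on the entry:
+ * -EEXIST means another task is already adding this page to the swap
+ * cache, while any other error means the entry itself is gone.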
+ */ + err = swapcache_prepare(entry); + if (err == -EEXIST) { /* seems racy */ + radix_tree_preload_end(); + continue; + } + if (err) { /* swp entry is obsolete ? */ + radix_tree_preload_end(); + break; + } + + /* May fail (-ENOMEM) if radix-tree node allocation failed. */ + __set_page_locked(new_page); + SetPageSwapBacked(new_page); + err = __add_to_swap_cache(new_page, entry); + if (likely(!err)) { + radix_tree_preload_end(); + lru_cache_add_anon(new_page); + *retpage = new_page; + return ZSWAP_SWAPCACHE_NEW; + } + radix_tree_preload_end(); + ClearPageSwapBacked(new_page); + __clear_page_locked(new_page); + /* + * add_to_swap_cache() doesn't return -EEXIST, so we can safely + * clear SWAP_HAS_CACHE flag. + */ + swapcache_free(entry, NULL); + } while (err != -ENOMEM); + + if (new_page) + page_cache_release(new_page); + if (!found_page) + return ZSWAP_SWAPCACHE_NOMEM; + *retpage = found_page; + return ZSWAP_SWAPCACHE_EXIST; +} + +/* + * Attempts to free an entry by adding a page to the swap cache, + * decompressing the entry data into the page, and issuing a + * bio write to write the page back to the swap device. + * + * This can be thought of as a "resumed writeback" of the page + * to the swap device. We are basically resuming the same swap + * writeback path that was intercepted with the frontswap_store() + * in the first place. After the page has been decompressed into + * the swap cache, the compressed version stored by zswap can be + * freed. + */ +static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle) +{ + struct zswap_header *zhdr; + swp_entry_t swpentry; + struct zswap_tree *tree; + pgoff_t offset; + struct zswap_entry *entry; + struct page *page; + u8 *src, *dst; + unsigned int dlen; + int ret, refcount; + struct writeback_control wbc = { + .sync_mode = WB_SYNC_NONE, + }; + + /* extract swpentry from data */ + zhdr = zbud_map(pool, handle); + swpentry = zhdr->swpentry; /* here */ + zbud_unmap(pool, handle); + tree = zswap_trees[swp_type(swpentry)]; + offset = swp_offset(swpentry); + BUG_ON(pool != tree->pool); + + /* find and ref zswap entry */ + spin_lock(&tree->lock); + entry = zswap_rb_search(&tree->rbroot, offset); + if (!entry) { + /* entry was invalidated */ + spin_unlock(&tree->lock); + return 0; + } + zswap_entry_get(entry); + spin_unlock(&tree->lock); + BUG_ON(offset != entry->offset); + + /* try to allocate swap cache page */ + switch (zswap_get_swap_cache_page(swpentry, &page)) { + case ZSWAP_SWAPCACHE_NOMEM: /* no memory */ + ret = -ENOMEM; + goto fail; + + case ZSWAP_SWAPCACHE_EXIST: /* page is unlocked */ + /* page is already in the swap cache, ignore for now */ + page_cache_release(page); + ret = -EEXIST; + goto fail; + + case ZSWAP_SWAPCACHE_NEW: /* page is locked */ + /* decompress */ + dlen = PAGE_SIZE; + src = (u8 *)zbud_map(tree->pool, entry->handle) + + sizeof(struct zswap_header); + dst = kmap_atomic(page); + ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, + entry->length, dst, &dlen); + kunmap_atomic(dst); + zbud_unmap(tree->pool, entry->handle); + BUG_ON(ret); + BUG_ON(dlen != PAGE_SIZE); + + /* page is up to date */ + SetPageUptodate(page); + } + + /* start writeback */ + __swap_writepage(page, &wbc, end_swap_bio_write); + page_cache_release(page); + zswap_written_back_pages++; + + spin_lock(&tree->lock); + + /* drop local reference */ + zswap_entry_put(entry); + /* drop the initial reference from entry creation */ + refcount = zswap_entry_put(entry); + + /* + * There are three possible values for refcount here: + * (1) 
refcount is 1, load is in progress, unlink from rbtree, + * load will free + * (2) refcount is 0, (normal case) entry is valid, + * remove from rbtree and free entry + * (3) refcount is -1, invalidate happened during writeback, + * free entry + */ + if (refcount >= 0) { + /* no invalidate yet, remove from rbtree */ + rb_erase(&entry->rbnode, &tree->rbroot); + } + spin_unlock(&tree->lock); + if (refcount <= 0) { + /* free the entry */ + zswap_free_entry(tree, entry); + return 0; + } + return -EAGAIN; + +fail: + spin_lock(&tree->lock); + zswap_entry_put(entry); + spin_unlock(&tree->lock); + return ret; +} + +/********************************* +* frontswap hooks +**********************************/ +/* attempts to compress and store an single page */ +static int zswap_frontswap_store(unsigned type, pgoff_t offset, + struct page *page) +{ + struct zswap_tree *tree = zswap_trees[type]; + struct zswap_entry *entry, *dupentry; + int ret; + unsigned int dlen = PAGE_SIZE, len; + unsigned long handle; + char *buf; + u8 *src, *dst; + struct zswap_header *zhdr; + + if (!tree) { + ret = -ENODEV; + goto reject; + } + + /* reclaim space if needed */ + if (zswap_is_full()) { + zswap_pool_limit_hit++; + if (zbud_reclaim_page(tree->pool, 8)) { + zswap_reject_reclaim_fail++; + ret = -ENOMEM; + goto reject; + } + } + + /* allocate entry */ + entry = zswap_entry_cache_alloc(GFP_KERNEL); + if (!entry) { + zswap_reject_kmemcache_fail++; + ret = -ENOMEM; + goto reject; + } + + /* compress */ + dst = get_cpu_var(zswap_dstmem); + src = kmap_atomic(page); + ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen); + kunmap_atomic(src); + if (ret) { + ret = -EINVAL; + goto freepage; + } + + /* store */ + len = dlen + sizeof(struct zswap_header); + ret = zbud_alloc(tree->pool, len, __GFP_NORETRY | __GFP_NOWARN, + &handle); + if (ret == -ENOSPC) { + zswap_reject_compress_poor++; + goto freepage; + } + if (ret) { + zswap_reject_alloc_fail++; + goto freepage; + } + zhdr = zbud_map(tree->pool, handle); + zhdr->swpentry = swp_entry(type, offset); + buf = (u8 *)(zhdr + 1); + memcpy(buf, dst, dlen); + zbud_unmap(tree->pool, handle); + put_cpu_var(zswap_dstmem); + + /* populate entry */ + entry->offset = offset; + entry->handle = handle; + entry->length = dlen; + + /* map */ + spin_lock(&tree->lock); + do { + ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry); + if (ret == -EEXIST) { + zswap_duplicate_entry++; + /* remove from rbtree */ + rb_erase(&dupentry->rbnode, &tree->rbroot); + if (!zswap_entry_put(dupentry)) { + /* free */ + zswap_free_entry(tree, dupentry); + } + } + } while (ret == -EEXIST); + spin_unlock(&tree->lock); + + /* update stats */ + atomic_inc(&zswap_stored_pages); + zswap_pool_pages = zbud_get_pool_size(tree->pool); + + return 0; + +freepage: + put_cpu_var(zswap_dstmem); + zswap_entry_cache_free(entry); +reject: + return ret; +} + +/* + * returns 0 if the page was successfully decompressed + * return -1 on entry not found or error +*/ +static int zswap_frontswap_load(unsigned type, pgoff_t offset, + struct page *page) +{ + struct zswap_tree *tree = zswap_trees[type]; + struct zswap_entry *entry; + u8 *src, *dst; + unsigned int dlen; + int refcount, ret; + + /* find */ + spin_lock(&tree->lock); + entry = zswap_rb_search(&tree->rbroot, offset); + if (!entry) { + /* entry was written back */ + spin_unlock(&tree->lock); + return -1; + } + zswap_entry_get(entry); + spin_unlock(&tree->lock); + + /* decompress */ + dlen = PAGE_SIZE; + src = (u8 *)zbud_map(tree->pool, entry->handle) + + 
sizeof(struct zswap_header); + dst = kmap_atomic(page); + ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length, + dst, &dlen); + kunmap_atomic(dst); + zbud_unmap(tree->pool, entry->handle); + BUG_ON(ret); + + spin_lock(&tree->lock); + refcount = zswap_entry_put(entry); + if (likely(refcount)) { + spin_unlock(&tree->lock); + return 0; + } + spin_unlock(&tree->lock); + + /* + * We don't have to unlink from the rbtree because + * zswap_writeback_entry() or zswap_frontswap_invalidate page() + * has already done this for us if we are the last reference. + */ + /* free */ + + zswap_free_entry(tree, entry); + + return 0; +} + +/* frees an entry in zswap */ +static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset) +{ + struct zswap_tree *tree = zswap_trees[type]; + struct zswap_entry *entry; + int refcount; + + /* find */ + spin_lock(&tree->lock); + entry = zswap_rb_search(&tree->rbroot, offset); + if (!entry) { + /* entry was written back */ + spin_unlock(&tree->lock); + return; + } + + /* remove from rbtree */ + rb_erase(&entry->rbnode, &tree->rbroot); + + /* drop the initial reference from entry creation */ + refcount = zswap_entry_put(entry); + + spin_unlock(&tree->lock); + + if (refcount) { + /* writeback in progress, writeback will free */ + return; + } + + /* free */ + zswap_free_entry(tree, entry); +} + +/* frees all zswap entries for the given swap type */ +static void zswap_frontswap_invalidate_area(unsigned type) +{ + struct zswap_tree *tree = zswap_trees[type]; + struct rb_node *node; + struct zswap_entry *entry; + + if (!tree) + return; + + /* walk the tree and free everything */ + spin_lock(&tree->lock); + /* + * TODO: Even though this code should not be executed because + * the try_to_unuse() in swapoff should have emptied the tree, + * it is very wasteful to rebalance the tree after every + * removal when we are freeing the whole tree. + * + * If post-order traversal code is ever added to the rbtree + * implementation, it should be used here. 
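+ * Until then, the loop below simply erases the leftmost node and frees
+ * its entry until the tree is empty.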
+ */ + while ((node = rb_first(&tree->rbroot))) { + entry = rb_entry(node, struct zswap_entry, rbnode); + rb_erase(&entry->rbnode, &tree->rbroot); + zbud_free(tree->pool, entry->handle); + zswap_entry_cache_free(entry); + atomic_dec(&zswap_stored_pages); + } + tree->rbroot = RB_ROOT; + spin_unlock(&tree->lock); +} + +static struct zbud_ops zswap_zbud_ops = { + .evict = zswap_writeback_entry +}; + +static void zswap_frontswap_init(unsigned type) +{ + struct zswap_tree *tree; + + tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL); + if (!tree) + goto err; + tree->pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops); + if (!tree->pool) + goto freetree; + tree->rbroot = RB_ROOT; + spin_lock_init(&tree->lock); + zswap_trees[type] = tree; + return; + +freetree: + kfree(tree); +err: + pr_err("alloc failed, zswap disabled for swap type %d\n", type); +} + +static struct frontswap_ops zswap_frontswap_ops = { + .store = zswap_frontswap_store, + .load = zswap_frontswap_load, + .invalidate_page = zswap_frontswap_invalidate_page, + .invalidate_area = zswap_frontswap_invalidate_area, + .init = zswap_frontswap_init +}; + +/********************************* +* debugfs functions +**********************************/ +#ifdef CONFIG_DEBUG_FS +#include + +static struct dentry *zswap_debugfs_root; + +static int __init zswap_debugfs_init(void) +{ + if (!debugfs_initialized()) + return -ENODEV; + + zswap_debugfs_root = debugfs_create_dir("zswap", NULL); + if (!zswap_debugfs_root) + return -ENOMEM; + + debugfs_create_u64("pool_limit_hit", S_IRUGO, + zswap_debugfs_root, &zswap_pool_limit_hit); + debugfs_create_u64("reject_reclaim_fail", S_IRUGO, + zswap_debugfs_root, &zswap_reject_reclaim_fail); + debugfs_create_u64("reject_alloc_fail", S_IRUGO, + zswap_debugfs_root, &zswap_reject_alloc_fail); + debugfs_create_u64("reject_kmemcache_fail", S_IRUGO, + zswap_debugfs_root, &zswap_reject_kmemcache_fail); + debugfs_create_u64("reject_compress_poor", S_IRUGO, + zswap_debugfs_root, &zswap_reject_compress_poor); + debugfs_create_u64("written_back_pages", S_IRUGO, + zswap_debugfs_root, &zswap_written_back_pages); + debugfs_create_u64("duplicate_entry", S_IRUGO, + zswap_debugfs_root, &zswap_duplicate_entry); + debugfs_create_u64("pool_pages", S_IRUGO, + zswap_debugfs_root, &zswap_pool_pages); + debugfs_create_atomic_t("stored_pages", S_IRUGO, + zswap_debugfs_root, &zswap_stored_pages); + + return 0; +} + +static void __exit zswap_debugfs_exit(void) +{ + debugfs_remove_recursive(zswap_debugfs_root); +} +#else +static int __init zswap_debugfs_init(void) +{ + return 0; +} + +static void __exit zswap_debugfs_exit(void) { } +#endif + +/********************************* +* module init and exit +**********************************/ +static int __init init_zswap(void) +{ + if (!zswap_enabled) + return 0; + + pr_info("loading zswap\n"); + if (zswap_entry_cache_create()) { + pr_err("entry cache creation failed\n"); + goto error; + } + if (zswap_comp_init()) { + pr_err("compressor initialization failed\n"); + goto compfail; + } + if (zswap_cpu_init()) { + pr_err("per-cpu initialization failed\n"); + goto pcpufail; + } + frontswap_register_ops(&zswap_frontswap_ops); + if (zswap_debugfs_init()) + pr_warn("debugfs initialization failed\n"); + return 0; +pcpufail: + zswap_comp_exit(); +compfail: + zswap_entry_cache_destory(); +error: + return -ENOMEM; +} +/* must be late so crypto has time to come up */ +late_initcall(init_zswap); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Seth Jennings "); +MODULE_DESCRIPTION("Compressed cache for swap 
pages"); diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst index 182084d..8ccf830 100644 --- a/scripts/Makefile.headersinst +++ b/scripts/Makefile.headersinst @@ -47,18 +47,24 @@ header-y := $(filter-out $(generic-y), $(header-y)) all-files := $(header-y) $(genhdr-y) $(wrapper-files) output-files := $(addprefix $(installdir)/, $(all-files)) -input-files := $(foreach hdr, $(header-y), \ +input-files1 := $(foreach hdr, $(header-y), \ $(if $(wildcard $(srcdir)/$(hdr)), \ - $(wildcard $(srcdir)/$(hdr)), \ + $(wildcard $(srcdir)/$(hdr))) \ + ) +input-files1-name := $(notdir $(input-files1)) +input-files2 := $(foreach hdr, $(header-y), \ + $(if $(wildcard $(srcdir)/$(hdr)),, \ $(if $(wildcard $(oldsrcdir)/$(hdr)), \ $(wildcard $(oldsrcdir)/$(hdr)), \ $(error Missing UAPI file $(srcdir)/$(hdr))) \ - )) \ - $(foreach hdr, $(genhdr-y), \ + )) +input-files2-name := $(notdir $(input-files2)) +input-files3 := $(foreach hdr, $(genhdr-y), \ $(if $(wildcard $(gendir)/$(hdr)), \ $(wildcard $(gendir)/$(hdr)), \ $(error Missing generated UAPI file $(gendir)/$(hdr)) \ )) +input-files3-name := $(notdir $(input-files3)) # Work out what needs to be removed oldheaders := $(patsubst $(installdir)/%,%,$(wildcard $(installdir)/*.h)) @@ -72,7 +78,9 @@ printdir = $(patsubst $(INSTALL_HDR_PATH)/%/,%,$(dir $@)) quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\ file$(if $(word 2, $(all-files)),s)) cmd_install = \ - $(CONFIG_SHELL) $< $(installdir) $(input-files); \ + $(CONFIG_SHELL) $< $(installdir) $(srcdir) $(input-files1-name); \ + $(CONFIG_SHELL) $< $(installdir) $(oldsrcdir) $(input-files2-name); \ + $(CONFIG_SHELL) $< $(installdir) $(gendir) $(input-files3-name); \ for F in $(wrapper-files); do \ echo "\#include " > $(installdir)/$$F; \ done; \ @@ -98,7 +106,7 @@ __headersinst: $(subdirs) $(install-file) @: targets += $(install-file) -$(install-file): scripts/headers_install.sh $(input-files) FORCE +$(install-file): scripts/headers_install.sh $(input-files1) $(input-files2) $(input-files3) FORCE $(if $(unwanted),$(call cmd,remove),) $(if $(wildcard $(dir $@)),,$(shell mkdir -p $(dir $@))) $(call if_changed,install) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 6031e23..49392ec 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -63,7 +63,7 @@ multi-objs := $(multi-objs-y) $(multi-objs-m) subdir-obj-y := $(filter %/built-in.o, $(obj-y)) # $(obj-dirs) is a list of directories that contain object files -obj-dirs := $(dir $(multi-objs) $(subdir-obj-y)) +obj-dirs := $(dir $(multi-objs) $(obj-y)) # Replace multi-part objects by their individual parts, look at local dir only real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y) @@ -244,7 +244,7 @@ cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \ # --------------------------------------------------------------------------- # Generate an assembly file to wrap the output of the device tree compiler -quiet_cmd_dt_S_dtb= DTB $@ +quiet_cmd_dt_S_dtb= DTB $@ cmd_dt_S_dtb= \ ( \ echo '\#include '; \ diff --git a/scripts/coccicheck b/scripts/coccicheck index 06fcb33..bbf901a 100755 --- a/scripts/coccicheck +++ b/scripts/coccicheck @@ -1,17 +1,31 @@ #!/bin/bash +# +# This script requires at least spatch +# version 1.0.0-rc11. 
+# + SPATCH="`which ${SPATCH:=spatch}`" +trap kill_running SIGTERM SIGINT +declare -a SPATCH_PID + # The verbosity may be set by the environmental parameter V= # as for example with 'make V=1 coccicheck' if [ -n "$V" -a "$V" != "0" ]; then - VERBOSE=1 + VERBOSE="$V" else VERBOSE=0 fi -FLAGS="$SPFLAGS -very_quiet" +if [ -z "$J" ]; then + NPROC=$(getconf _NPROCESSORS_ONLN) +else + NPROC="$J" +fi + +FLAGS="$SPFLAGS --very-quiet" # spatch only allows include directories with the syntax "-I include" # while gcc also allows "-Iinclude" and "-include include" @@ -27,14 +41,14 @@ if [ "$C" = "1" -o "$C" = "2" ]; then else ONLINE=0 if [ "$KBUILD_EXTMOD" = "" ] ; then - OPTIONS="-dir $srctree $COCCIINCLUDE" + OPTIONS="--dir $srctree $COCCIINCLUDE" else - OPTIONS="-dir $KBUILD_EXTMOD $COCCIINCLUDE" + OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE" fi fi if [ "$KBUILD_EXTMOD" != "" ] ; then - OPTIONS="-patch $srctree $OPTIONS" + OPTIONS="--patch $srctree $OPTIONS" fi if [ ! -x "$SPATCH" ]; then @@ -44,13 +58,21 @@ fi if [ "$MODE" = "" ] ; then if [ "$ONLINE" = "0" ] ; then - echo 'You have not explicitly specified the mode to use. Using default "chain" mode.' - echo 'All available modes will be tried (in that order): patch, report, context, org' + echo 'You have not explicitly specified the mode to use. Using default "report" mode.' + echo 'Available modes are the following: patch, report, context, org' echo 'You can specify the mode with "make coccicheck MODE="' + echo 'Note however that some modes are not implemented by some semantic patches.' + fi + MODE="report" +fi + +if [ "$MODE" = "chain" ] ; then + if [ "$ONLINE" = "0" ] ; then + echo 'You have selected the "chain" mode.' + echo 'All available modes will be tried (in that order): patch, report, context, org' fi - MODE="chain" elif [ "$MODE" = "report" -o "$MODE" = "org" ] ; then - FLAGS="$FLAGS -no_show_diff" + FLAGS="$FLAGS --no-show-diff" fi if [ "$ONLINE" = "0" ] ; then @@ -61,19 +83,35 @@ if [ "$ONLINE" = "0" ] ; then fi run_cmd() { + local i if [ $VERBOSE -ne 0 ] ; then - echo "Running: $@" + echo "Running ($NPROC in parallel): $@" fi - eval $@ + for i in $(seq 0 $(( NPROC - 1)) ); do + eval "$@ --max $NPROC --index $i &" + SPATCH_PID[$i]=$! + if [ $VERBOSE -eq 2 ] ; then + echo "${SPATCH_PID[$i]} running" + fi + done + wait } +kill_running() { + for i in $(seq $(( NPROC - 1 )) ); do + if [ $VERBOSE -eq 2 ] ; then + echo "Killing ${SPATCH_PID[$i]}" + fi + kill ${SPATCH_PID[$i]} 2>/dev/null + done +} coccinelle () { COCCI="$1" OPT=`grep "Option" $COCCI | cut -d':' -f2` -# The option '-parse_cocci' can be used to syntactically check the SmPL files. +# The option '--parse-cocci' can be used to syntactically check the SmPL files. 
# # $SPATCH -D $MODE $FLAGS -parse_cocci $COCCI $OPT > /dev/null @@ -114,20 +152,20 @@ coccinelle () { if [ "$MODE" = "chain" ] ; then run_cmd $SPATCH -D patch \ - $FLAGS -sp_file $COCCI $OPT $OPTIONS || \ + $FLAGS --cocci-file $COCCI $OPT $OPTIONS || \ run_cmd $SPATCH -D report \ - $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || \ + $FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff || \ run_cmd $SPATCH -D context \ - $FLAGS -sp_file $COCCI $OPT $OPTIONS || \ + $FLAGS --cocci-file $COCCI $OPT $OPTIONS || \ run_cmd $SPATCH -D org \ - $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || exit 1 + $FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff || exit 1 elif [ "$MODE" = "rep+ctxt" ] ; then run_cmd $SPATCH -D report \ - $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff && \ + $FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff && \ run_cmd $SPATCH -D context \ - $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1 + $FLAGS --cocci-file $COCCI $OPT $OPTIONS || exit 1 else - run_cmd $SPATCH -D $MODE $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1 + run_cmd $SPATCH -D $MODE $FLAGS --cocci-file $COCCI $OPT $OPTIONS || exit 1 fi } diff --git a/scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci b/scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci index 7d4771d..bd5d08b 100644 --- a/scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci +++ b/scripts/coccinelle/api/alloc/drop_kmalloc_cast.cocci @@ -5,7 +5,7 @@ // Confidence: High // Copyright: 2009,2010 Nicolas Palix, DIKU. GPLv2. // URL: http://coccinelle.lip6.fr/ -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers // // Keywords: kmalloc, kzalloc, kcalloc // Version min: < 2.6.12 kmalloc diff --git a/scripts/coccinelle/api/alloc/kzalloc-simple.cocci b/scripts/coccinelle/api/alloc/kzalloc-simple.cocci index 046b9b1..52c55e4 100644 --- a/scripts/coccinelle/api/alloc/kzalloc-simple.cocci +++ b/scripts/coccinelle/api/alloc/kzalloc-simple.cocci @@ -9,7 +9,7 @@ // Copyright: (C) 2009-2010 Julia Lawall, Nicolas Palix, DIKU. GPLv2. // Copyright: (C) 2009-2010 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/rules/kzalloc.html -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers // // Keywords: kmalloc, kzalloc // Version min: < 2.6.12 kmalloc diff --git a/scripts/coccinelle/api/d_find_alias.cocci b/scripts/coccinelle/api/d_find_alias.cocci index a9694a8..9594c9f 100644 --- a/scripts/coccinelle/api/d_find_alias.cocci +++ b/scripts/coccinelle/api/d_find_alias.cocci @@ -4,7 +4,7 @@ // // Confidence: Moderate // URL: http://coccinelle.lip6.fr/ -// Options: -include_headers +// Options: --include-headers virtual context virtual org diff --git a/scripts/coccinelle/api/devm_request_and_ioremap.cocci b/scripts/coccinelle/api/devm_request_and_ioremap.cocci index 46beb81..562ec88 100644 --- a/scripts/coccinelle/api/devm_request_and_ioremap.cocci +++ b/scripts/coccinelle/api/devm_request_and_ioremap.cocci @@ -10,7 +10,7 @@ // Copyright: (C) 2011 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual org diff --git a/scripts/coccinelle/api/kstrdup.cocci b/scripts/coccinelle/api/kstrdup.cocci index 07a74b2..09cba54 100644 --- a/scripts/coccinelle/api/kstrdup.cocci +++ b/scripts/coccinelle/api/kstrdup.cocci @@ -6,7 +6,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. 
// URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual context diff --git a/scripts/coccinelle/api/memdup.cocci b/scripts/coccinelle/api/memdup.cocci index 4dceab6..3d1aa71 100644 --- a/scripts/coccinelle/api/memdup.cocci +++ b/scripts/coccinelle/api/memdup.cocci @@ -6,7 +6,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual context diff --git a/scripts/coccinelle/api/memdup_user.cocci b/scripts/coccinelle/api/memdup_user.cocci index 2b131a8..c606231 100644 --- a/scripts/coccinelle/api/memdup_user.cocci +++ b/scripts/coccinelle/api/memdup_user.cocci @@ -7,7 +7,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual context diff --git a/scripts/coccinelle/api/ptr_ret.cocci b/scripts/coccinelle/api/ptr_ret.cocci index 15f076f..2274638 100644 --- a/scripts/coccinelle/api/ptr_ret.cocci +++ b/scripts/coccinelle/api/ptr_ret.cocci @@ -5,7 +5,7 @@ // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2. // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers // // Keywords: ERR_PTR, PTR_ERR, PTR_RET // Version min: 2.6.39 diff --git a/scripts/coccinelle/api/simple_open.cocci b/scripts/coccinelle/api/simple_open.cocci index 05962f7..b67e174 100644 --- a/scripts/coccinelle/api/simple_open.cocci +++ b/scripts/coccinelle/api/simple_open.cocci @@ -4,7 +4,7 @@ /// // Confidence: High // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual report diff --git a/scripts/coccinelle/free/devm_free.cocci b/scripts/coccinelle/free/devm_free.cocci index 0a1e361..3d93490 100644 --- a/scripts/coccinelle/free/devm_free.cocci +++ b/scripts/coccinelle/free/devm_free.cocci @@ -18,7 +18,7 @@ // Copyright: (C) 2011 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual org virtual report diff --git a/scripts/coccinelle/free/kfree.cocci b/scripts/coccinelle/free/kfree.cocci index d9ae6d8..577b780 100644 --- a/scripts/coccinelle/free/kfree.cocci +++ b/scripts/coccinelle/free/kfree.cocci @@ -10,7 +10,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual org virtual report diff --git a/scripts/coccinelle/free/kfreeaddr.cocci b/scripts/coccinelle/free/kfreeaddr.cocci new file mode 100644 index 0000000..ce8aacc --- /dev/null +++ b/scripts/coccinelle/free/kfreeaddr.cocci @@ -0,0 +1,32 @@ +/// Free of a structure field +/// +// Confidence: High +// Copyright: (C) 2013 Julia Lawall, INRIA/LIP6. GPLv2. 
+// URL: http://coccinelle.lip6.fr/ +// Comments: +// Options: --no-includes --include-headers + +virtual org +virtual report +virtual context + +@r depends on context || report || org @ +expression e; +identifier f; +position p; +@@ + +* kfree@p(&e->f) + +@script:python depends on org@ +p << r.p; +@@ + +cocci.print_main("kfree",p) + +@script:python depends on report@ +p << r.p; +@@ + +msg = "ERROR: kfree of structure field" +coccilib.report.print_report(p[0],msg) diff --git a/scripts/coccinelle/free/pci_free_consistent.cocci b/scripts/coccinelle/free/pci_free_consistent.cocci new file mode 100644 index 0000000..43600cc --- /dev/null +++ b/scripts/coccinelle/free/pci_free_consistent.cocci @@ -0,0 +1,52 @@ +/// Find missing pci_free_consistent for every pci_alloc_consistent. +/// +// Confidence: Moderate +// Copyright: (C) 2013 Petr Strnad. GPLv2. +// URL: http://coccinelle.lip6.fr/ +// Keywords: pci_free_consistent, pci_alloc_consistent +// Options: --no-includes --include-headers + +virtual report +virtual org + +@search@ +local idexpression id; +expression x,y,z,e; +position p1,p2; +type T; +@@ + +id = pci_alloc_consistent@p1(x,y,&z) +... when != e = id +if (id == NULL || ...) { ... return ...; } +... when != pci_free_consistent(x,y,id,z) + when != if (id) { ... pci_free_consistent(x,y,id,z) ... } + when != if (y) { ... pci_free_consistent(x,y,id,z) ... } + when != e = (T)id + when exists +( +return 0; +| +return 1; +| +return id; +| +return@p2 ...; +) + +@script:python depends on report@ +p1 << search.p1; +p2 << search.p2; +@@ + +msg = "ERROR: missing pci_free_consistent; pci_alloc_consistent on line %s and return without freeing on line %s" % (p1[0].line,p2[0].line) +coccilib.report.print_report(p2[0],msg) + +@script:python depends on org@ +p1 << search.p1; +p2 << search.p2; +@@ + +msg = "ERROR: missing pci_free_consistent; pci_alloc_consistent on line %s and return without freeing on line %s" % (p1[0].line,p2[0].line) +cocci.print_main(msg,p1) +cocci.print_secs("",p2) diff --git a/scripts/coccinelle/iterators/fen.cocci b/scripts/coccinelle/iterators/fen.cocci index 0a40af8..48c152f 100644 --- a/scripts/coccinelle/iterators/fen.cocci +++ b/scripts/coccinelle/iterators/fen.cocci @@ -7,7 +7,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual context diff --git a/scripts/coccinelle/iterators/itnull.cocci b/scripts/coccinelle/iterators/itnull.cocci index 259899f..f58732b 100644 --- a/scripts/coccinelle/iterators/itnull.cocci +++ b/scripts/coccinelle/iterators/itnull.cocci @@ -11,7 +11,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual context diff --git a/scripts/coccinelle/iterators/list_entry_update.cocci b/scripts/coccinelle/iterators/list_entry_update.cocci index b296747..873f444 100644 --- a/scripts/coccinelle/iterators/list_entry_update.cocci +++ b/scripts/coccinelle/iterators/list_entry_update.cocci @@ -9,7 +9,7 @@ // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. 
// URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual context virtual org diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci index 06284c5..f085f59 100644 --- a/scripts/coccinelle/iterators/use_after_iter.cocci +++ b/scripts/coccinelle/iterators/use_after_iter.cocci @@ -11,7 +11,7 @@ // Copyright: (C) 2012 Gilles Muller, INRIA/LIP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual context virtual org diff --git a/scripts/coccinelle/locks/call_kern.cocci b/scripts/coccinelle/locks/call_kern.cocci index 8f10b49..669b244 100644 --- a/scripts/coccinelle/locks/call_kern.cocci +++ b/scripts/coccinelle/locks/call_kern.cocci @@ -9,7 +9,7 @@ // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual context diff --git a/scripts/coccinelle/locks/double_lock.cocci b/scripts/coccinelle/locks/double_lock.cocci index 63b24e6..002752f 100644 --- a/scripts/coccinelle/locks/double_lock.cocci +++ b/scripts/coccinelle/locks/double_lock.cocci @@ -8,7 +8,7 @@ // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual org virtual report diff --git a/scripts/coccinelle/locks/flags.cocci b/scripts/coccinelle/locks/flags.cocci index 1c4ffe6..debd70e 100644 --- a/scripts/coccinelle/locks/flags.cocci +++ b/scripts/coccinelle/locks/flags.cocci @@ -6,7 +6,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual context virtual org diff --git a/scripts/coccinelle/locks/mini_lock.cocci b/scripts/coccinelle/locks/mini_lock.cocci index 3267d74..47f649b 100644 --- a/scripts/coccinelle/locks/mini_lock.cocci +++ b/scripts/coccinelle/locks/mini_lock.cocci @@ -11,7 +11,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual context virtual org diff --git a/scripts/coccinelle/misc/boolinit.cocci b/scripts/coccinelle/misc/boolinit.cocci index 97ce41c..b9abed4 100644 --- a/scripts/coccinelle/misc/boolinit.cocci +++ b/scripts/coccinelle/misc/boolinit.cocci @@ -6,7 +6,7 @@ // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2. // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ -// Options: -include_headers +// Options: --include-headers virtual patch virtual context diff --git a/scripts/coccinelle/misc/cstptr.cocci b/scripts/coccinelle/misc/cstptr.cocci index d425644..f0368b3 100644 --- a/scripts/coccinelle/misc/cstptr.cocci +++ b/scripts/coccinelle/misc/cstptr.cocci @@ -6,7 +6,7 @@ // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. 
// URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual org virtual report diff --git a/scripts/coccinelle/misc/doubleinit.cocci b/scripts/coccinelle/misc/doubleinit.cocci index cf74a00..c0c3371 100644 --- a/scripts/coccinelle/misc/doubleinit.cocci +++ b/scripts/coccinelle/misc/doubleinit.cocci @@ -8,7 +8,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: requires at least Coccinelle 0.2.4, lex or parse error otherwise -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual org virtual report diff --git a/scripts/coccinelle/misc/ifaddr.cocci b/scripts/coccinelle/misc/ifaddr.cocci index 3e4089a..8aebd18 100644 --- a/scripts/coccinelle/misc/ifaddr.cocci +++ b/scripts/coccinelle/misc/ifaddr.cocci @@ -6,7 +6,7 @@ // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual org virtual report diff --git a/scripts/coccinelle/misc/ifcol.cocci b/scripts/coccinelle/misc/ifcol.cocci index b7ed91d..d0d00ef 100644 --- a/scripts/coccinelle/misc/ifcol.cocci +++ b/scripts/coccinelle/misc/ifcol.cocci @@ -13,7 +13,7 @@ // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual org virtual report diff --git a/scripts/coccinelle/misc/noderef.cocci b/scripts/coccinelle/misc/noderef.cocci index c170721..80a831c 100644 --- a/scripts/coccinelle/misc/noderef.cocci +++ b/scripts/coccinelle/misc/noderef.cocci @@ -6,7 +6,7 @@ // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual org virtual report diff --git a/scripts/coccinelle/misc/orplus.cocci b/scripts/coccinelle/misc/orplus.cocci index 4a28cef..81fabf3 100644 --- a/scripts/coccinelle/misc/orplus.cocci +++ b/scripts/coccinelle/misc/orplus.cocci @@ -7,7 +7,7 @@ // Copyright: (C) 2013 Gilles Muller, INRIA/LIP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual org virtual report diff --git a/scripts/coccinelle/misc/warn.cocci b/scripts/coccinelle/misc/warn.cocci index fda8c35..d2e5b6c 100644 --- a/scripts/coccinelle/misc/warn.cocci +++ b/scripts/coccinelle/misc/warn.cocci @@ -5,7 +5,7 @@ // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual context diff --git a/scripts/coccinelle/null/eno.cocci b/scripts/coccinelle/null/eno.cocci index ed961a1..9bd29aa 100644 --- a/scripts/coccinelle/null/eno.cocci +++ b/scripts/coccinelle/null/eno.cocci @@ -6,7 +6,7 @@ // Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6. GPLv2. 
// URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual context diff --git a/scripts/coccinelle/null/kmerr.cocci b/scripts/coccinelle/null/kmerr.cocci index 949bf65..5354a79 100644 --- a/scripts/coccinelle/null/kmerr.cocci +++ b/scripts/coccinelle/null/kmerr.cocci @@ -10,7 +10,7 @@ // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual context virtual org diff --git a/scripts/coccinelle/tests/doublebitand.cocci b/scripts/coccinelle/tests/doublebitand.cocci index 9ba73d0..72f1572a 100644 --- a/scripts/coccinelle/tests/doublebitand.cocci +++ b/scripts/coccinelle/tests/doublebitand.cocci @@ -10,7 +10,7 @@ // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual context virtual org diff --git a/scripts/coccinelle/tests/doubletest.cocci b/scripts/coccinelle/tests/doubletest.cocci index 13a2c0e..78d74c2 100644 --- a/scripts/coccinelle/tests/doubletest.cocci +++ b/scripts/coccinelle/tests/doubletest.cocci @@ -8,7 +8,7 @@ // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual context virtual org diff --git a/scripts/coccinelle/tests/odd_ptr_err.cocci b/scripts/coccinelle/tests/odd_ptr_err.cocci index e8dd8a6..cfe0a35 100644 --- a/scripts/coccinelle/tests/odd_ptr_err.cocci +++ b/scripts/coccinelle/tests/odd_ptr_err.cocci @@ -7,7 +7,7 @@ // Copyright: (C) 2012 Gilles Muller, INRIA. GPLv2. // URL: http://coccinelle.lip6.fr/ // Comments: -// Options: -no_includes -include_headers +// Options: --no-includes --include-headers virtual patch virtual context diff --git a/scripts/config b/scripts/config index a65ecbb..567120a 100755 --- a/scripts/config +++ b/scripts/config @@ -1,6 +1,8 @@ #!/bin/bash # Manipulate options in a .config file from the command line +myname=${0##*/} + # If no prefix forced, use the default CONFIG_ CONFIG_="${CONFIG_-CONFIG_}" @@ -8,7 +10,7 @@ usage() { cat >&2 < "$OUTDIR/$FILE.sed" || exit 1 + "$SRCDIR/$i" > "$OUTDIR/$FILE.sed" || exit 1 scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__ "$OUTDIR/$FILE.sed" \ > "$OUTDIR/$FILE" [ $? 
-gt 1 ] && exit 1 diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index bde5b95..d19944f 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -527,11 +527,12 @@ int main(int ac, char **av) seed_env = getenv("KCONFIG_SEED"); if( seed_env && *seed_env ) { char *endp; - int tmp = (int)strtol(seed_env, &endp, 10); + int tmp = (int)strtol(seed_env, &endp, 0); if (*endp == '\0') { seed = tmp; } } + fprintf( stderr, "KCONFIG_SEED=0x%X\n", seed ); srand(seed); break; } @@ -653,7 +654,8 @@ int main(int ac, char **av) conf_set_all_new_symbols(def_default); break; case randconfig: - conf_set_all_new_symbols(def_random); + /* Really nothing to do in this loop */ + while (conf_set_all_new_symbols(def_random)) ; break; case defconfig: conf_set_all_new_symbols(def_default); diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 43eda40..c55c227 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -1040,7 +1040,7 @@ void conf_set_changed_callback(void (*fn)(void)) conf_changed_callback = fn; } -static void randomize_choice_values(struct symbol *csym) +static bool randomize_choice_values(struct symbol *csym) { struct property *prop; struct symbol *sym; @@ -1053,7 +1053,7 @@ static void randomize_choice_values(struct symbol *csym) * In both cases stop. */ if (csym->curr.tri != yes) - return; + return false; prop = sym_get_choice_prop(csym); @@ -1077,13 +1077,18 @@ static void randomize_choice_values(struct symbol *csym) else { sym->def[S_DEF_USER].tri = no; } + sym->flags |= SYMBOL_DEF_USER; + /* clear VALID to get value calculated */ + sym->flags &= ~SYMBOL_VALID; } csym->flags |= SYMBOL_DEF_USER; /* clear VALID to get value calculated */ csym->flags &= ~(SYMBOL_VALID); + + return true; } -static void set_all_choice_values(struct symbol *csym) +void set_all_choice_values(struct symbol *csym) { struct property *prop; struct symbol *sym; @@ -1100,10 +1105,10 @@ static void set_all_choice_values(struct symbol *csym) } csym->flags |= SYMBOL_DEF_USER; /* clear VALID to get value calculated */ - csym->flags &= ~(SYMBOL_VALID); + csym->flags &= ~(SYMBOL_VALID | SYMBOL_NEED_SET_CHOICE_VALUES); } -void conf_set_all_new_symbols(enum conf_def_mode mode) +bool conf_set_all_new_symbols(enum conf_def_mode mode) { struct symbol *sym, *csym; int i, cnt, pby, pty, ptm; /* pby: probability of boolean = y @@ -1151,6 +1156,7 @@ void conf_set_all_new_symbols(enum conf_def_mode mode) exit( 1 ); } } + bool has_changed = false; for_all_symbols(i, sym) { if (sym_has_value(sym) || (sym->flags & SYMBOL_VALID)) @@ -1158,6 +1164,7 @@ void conf_set_all_new_symbols(enum conf_def_mode mode) switch (sym_get_type(sym)) { case S_BOOLEAN: case S_TRISTATE: + has_changed = true; switch (mode) { case def_yes: sym->def[S_DEF_USER].tri = yes; @@ -1202,14 +1209,26 @@ void conf_set_all_new_symbols(enum conf_def_mode mode) * selected in a choice block and we set it to yes, * and the rest to no. 
*/ + if (mode != def_random) { + for_all_symbols(i, csym) { + if ((sym_is_choice(csym) && !sym_has_value(csym)) || + sym_is_choice_value(csym)) + csym->flags |= SYMBOL_NEED_SET_CHOICE_VALUES; + } + } + for_all_symbols(i, csym) { if (sym_has_value(csym) || !sym_is_choice(csym)) continue; sym_calc_value(csym); if (mode == def_random) - randomize_choice_values(csym); - else + has_changed = randomize_choice_values(csym); + else { set_all_choice_values(csym); + has_changed = true; + } } + + return has_changed; } diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h index cdd4860..df198a5 100644 --- a/scripts/kconfig/expr.h +++ b/scripts/kconfig/expr.h @@ -106,6 +106,9 @@ struct symbol { #define SYMBOL_DEF3 0x40000 /* symbol.def[S_DEF_3] is valid */ #define SYMBOL_DEF4 0x80000 /* symbol.def[S_DEF_4] is valid */ +/* choice values need to be set before calculating this symbol value */ +#define SYMBOL_NEED_SET_CHOICE_VALUES 0x100000 + #define SYMBOL_MAXLENGTH 256 #define SYMBOL_HASHSIZE 9973 diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h index f8aee5f..09f4edf 100644 --- a/scripts/kconfig/lkc.h +++ b/scripts/kconfig/lkc.h @@ -86,7 +86,8 @@ const char *conf_get_autoconfig_name(void); char *conf_get_default_confname(void); void sym_set_change_count(int count); void sym_add_change_count(int count); -void conf_set_all_new_symbols(enum conf_def_mode mode); +bool conf_set_all_new_symbols(enum conf_def_mode mode); +void set_all_choice_values(struct symbol *csym); struct conf_printer { void (*print_symbol)(FILE *, struct symbol *, const char *, void *); diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h index ef1a738..ecdb965 100644 --- a/scripts/kconfig/lkc_proto.h +++ b/scripts/kconfig/lkc_proto.h @@ -14,6 +14,7 @@ P(conf_set_message_callback, void,(void (*fn)(const char *fmt, va_list ap))); /* menu.c */ P(rootmenu,struct menu,); +P(menu_is_empty, bool, (struct menu *menu)); P(menu_is_visible, bool, (struct menu *menu)); P(menu_has_prompt, bool, (struct menu *menu)); P(menu_get_prompt,const char *,(struct menu *menu)); diff --git a/scripts/kconfig/lxdialog/checklist.c b/scripts/kconfig/lxdialog/checklist.c index a2eb80f..3b15c08 100644 --- a/scripts/kconfig/lxdialog/checklist.c +++ b/scripts/kconfig/lxdialog/checklist.c @@ -132,16 +132,16 @@ int dialog_checklist(const char *title, const char *prompt, int height, } do_resize: - if (getmaxy(stdscr) < (height + 6)) + if (getmaxy(stdscr) < (height + CHECKLIST_HEIGTH_MIN)) return -ERRDISPLAYTOOSMALL; - if (getmaxx(stdscr) < (width + 6)) + if (getmaxx(stdscr) < (width + CHECKLIST_WIDTH_MIN)) return -ERRDISPLAYTOOSMALL; max_choice = MIN(list_height, item_count()); /* center dialog box on screen */ - x = (COLS - width) / 2; - y = (LINES - height) / 2; + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); diff --git a/scripts/kconfig/lxdialog/dialog.h b/scripts/kconfig/lxdialog/dialog.h index 10993370..b4343d3 100644 --- a/scripts/kconfig/lxdialog/dialog.h +++ b/scripts/kconfig/lxdialog/dialog.h @@ -200,6 +200,20 @@ int item_is_tag(char tag); int on_key_esc(WINDOW *win); int on_key_resize(void); +/* minimum (re)size values */ +#define CHECKLIST_HEIGTH_MIN 6 /* For dialog_checklist() */ +#define CHECKLIST_WIDTH_MIN 6 +#define INPUTBOX_HEIGTH_MIN 2 /* For dialog_inputbox() */ +#define INPUTBOX_WIDTH_MIN 2 +#define MENUBOX_HEIGTH_MIN 15 /* For dialog_menu() */ +#define MENUBOX_WIDTH_MIN 65 +#define TEXTBOX_HEIGTH_MIN 8 /* For dialog_textbox() */ +#define 
TEXTBOX_WIDTH_MIN 8 +#define YESNO_HEIGTH_MIN 4 /* For dialog_yesno() */ +#define YESNO_WIDTH_MIN 4 +#define WINDOW_HEIGTH_MIN 19 /* For init_dialog() */ +#define WINDOW_WIDTH_MIN 80 + int init_dialog(const char *backtitle); void set_dialog_backtitle(const char *backtitle); void set_dialog_subtitles(struct subtitle_list *subtitles); diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c index 21404a0..447a582 100644 --- a/scripts/kconfig/lxdialog/inputbox.c +++ b/scripts/kconfig/lxdialog/inputbox.c @@ -56,14 +56,14 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width strcpy(instr, init); do_resize: - if (getmaxy(stdscr) <= (height - 2)) + if (getmaxy(stdscr) <= (height - INPUTBOX_HEIGTH_MIN)) return -ERRDISPLAYTOOSMALL; - if (getmaxx(stdscr) <= (width - 2)) + if (getmaxx(stdscr) <= (width - INPUTBOX_WIDTH_MIN)) return -ERRDISPLAYTOOSMALL; /* center dialog box on screen */ - x = (COLS - width) / 2; - y = (LINES - height) / 2; + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c index 38cd69c..c93de0b 100644 --- a/scripts/kconfig/lxdialog/menubox.c +++ b/scripts/kconfig/lxdialog/menubox.c @@ -193,7 +193,7 @@ int dialog_menu(const char *title, const char *prompt, do_resize: height = getmaxy(stdscr); width = getmaxx(stdscr); - if (height < 15 || width < 65) + if (height < MENUBOX_HEIGTH_MIN || width < MENUBOX_WIDTH_MIN) return -ERRDISPLAYTOOSMALL; height -= 4; @@ -203,8 +203,8 @@ do_resize: max_choice = MIN(menu_height, item_count()); /* center dialog box on screen */ - x = (COLS - width) / 2; - y = (LINES - height) / 2; + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); diff --git a/scripts/kconfig/lxdialog/textbox.c b/scripts/kconfig/lxdialog/textbox.c index a48bb93..1773319 100644 --- a/scripts/kconfig/lxdialog/textbox.c +++ b/scripts/kconfig/lxdialog/textbox.c @@ -80,7 +80,7 @@ int dialog_textbox(const char *title, char *tbuf, int initial_height, do_resize: getmaxyx(stdscr, height, width); - if (height < 8 || width < 8) + if (height < TEXTBOX_HEIGTH_MIN || width < TEXTBOX_WIDTH_MIN) return -ERRDISPLAYTOOSMALL; if (initial_height != 0) height = initial_height; @@ -98,8 +98,8 @@ do_resize: width = 0; /* center dialog box on screen */ - x = (COLS - width) / 2; - y = (LINES - height) / 2; + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); diff --git a/scripts/kconfig/lxdialog/util.c b/scripts/kconfig/lxdialog/util.c index a0e97c2..58a8289 100644 --- a/scripts/kconfig/lxdialog/util.c +++ b/scripts/kconfig/lxdialog/util.c @@ -254,7 +254,12 @@ void attr_clear(WINDOW * win, int height, int width, chtype attr) void dialog_clear(void) { - attr_clear(stdscr, LINES, COLS, dlg.screen.atr); + int lines, columns; + + lines = getmaxy(stdscr); + columns = getmaxx(stdscr); + + attr_clear(stdscr, lines, columns, dlg.screen.atr); /* Display background title if it exists ... - SLH */ if (dlg.backtitle != NULL) { int i, len = 0, skip = 0; @@ -269,10 +274,10 @@ void dialog_clear(void) } wmove(stdscr, 1, 1); - if (len > COLS - 2) { + if (len > columns - 2) { const char *ellipsis = "[...] 
"; waddstr(stdscr, ellipsis); - skip = len - (COLS - 2 - strlen(ellipsis)); + skip = len - (columns - 2 - strlen(ellipsis)); } for (pos = dlg.subtitles; pos != NULL; pos = pos->next) { @@ -298,7 +303,7 @@ void dialog_clear(void) skip--; } - for (i = len + 1; i < COLS - 1; i++) + for (i = len + 1; i < columns - 1; i++) waddch(stdscr, ACS_HLINE); } wnoutrefresh(stdscr); @@ -317,7 +322,7 @@ int init_dialog(const char *backtitle) getyx(stdscr, saved_y, saved_x); getmaxyx(stdscr, height, width); - if (height < 19 || width < 80) { + if (height < WINDOW_HEIGTH_MIN || width < WINDOW_WIDTH_MIN) { endwin(); return -ERRDISPLAYTOOSMALL; } @@ -371,27 +376,19 @@ void print_title(WINDOW *dialog, const char *title, int width) /* * Print a string of text in a window, automatically wrap around to the * next line if the string is too long to fit on one line. Newline - * characters '\n' are replaced by spaces. We start on a new line + * characters '\n' are propperly processed. We start on a new line * if there is no room for at least 4 nonblanks following a double-space. */ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) { int newl, cur_x, cur_y; - int i, prompt_len, room, wlen; - char tempstr[MAX_LEN + 1], *word, *sp, *sp2; + int prompt_len, room, wlen; + char tempstr[MAX_LEN + 1], *word, *sp, *sp2, *newline_separator = 0; strcpy(tempstr, prompt); prompt_len = strlen(tempstr); - /* - * Remove newlines - */ - for (i = 0; i < prompt_len; i++) { - if (tempstr[i] == '\n') - tempstr[i] = ' '; - } - if (prompt_len <= width - x * 2) { /* If prompt is short */ wmove(win, y, (width - prompt_len) / 2); waddstr(win, tempstr); @@ -401,7 +398,10 @@ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) newl = 1; word = tempstr; while (word && *word) { - sp = strchr(word, ' '); + sp = strpbrk(word, "\n "); + if (sp && *sp == '\n') + newline_separator = sp; + if (sp) *sp++ = 0; @@ -413,7 +413,7 @@ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) if (wlen > room || (newl && wlen < 4 && sp && wlen + 1 + strlen(sp) > room - && (!(sp2 = strchr(sp, ' ')) + && (!(sp2 = strpbrk(sp, "\n ")) || wlen + 1 + (sp2 - sp) > room))) { cur_y++; cur_x = x; @@ -421,7 +421,15 @@ void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x) wmove(win, cur_y, cur_x); waddstr(win, word); getyx(win, cur_y, cur_x); - cur_x++; + + /* Move to the next line if the word separator was a newline */ + if (newline_separator) { + cur_y++; + cur_x = x; + newline_separator = 0; + } else + cur_x++; + if (sp && *sp == ' ') { cur_x++; /* double space */ while (*++sp == ' ') ; diff --git a/scripts/kconfig/lxdialog/yesno.c b/scripts/kconfig/lxdialog/yesno.c index 4e6e809..676fb2f 100644 --- a/scripts/kconfig/lxdialog/yesno.c +++ b/scripts/kconfig/lxdialog/yesno.c @@ -45,14 +45,14 @@ int dialog_yesno(const char *title, const char *prompt, int height, int width) WINDOW *dialog; do_resize: - if (getmaxy(stdscr) < (height + 4)) + if (getmaxy(stdscr) < (height + YESNO_HEIGTH_MIN)) return -ERRDISPLAYTOOSMALL; - if (getmaxx(stdscr) < (width + 4)) + if (getmaxx(stdscr) < (width + YESNO_WIDTH_MIN)) return -ERRDISPLAYTOOSMALL; /* center dialog box on screen */ - x = (COLS - width) / 2; - y = (LINES - height) / 2; + x = (getmaxx(stdscr) - width) / 2; + y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index a69cbd7..6c9c45f 100644 --- a/scripts/kconfig/mconf.c +++ 
b/scripts/kconfig/mconf.c @@ -48,7 +48,7 @@ static const char mconf_readme[] = N_( "----------\n" "o Use the Up/Down arrow keys (cursor keys) to highlight the item\n" " you wish to change or submenu wish to select and press .\n" -" Submenus are designated by \"--->\".\n" +" Submenus are designated by \"--->\", empty ones by \"----\".\n" "\n" " Shortcut: Press the option's highlighted letter (hotkey).\n" " Pressing a hotkey more than once will sequence\n" @@ -176,7 +176,7 @@ static const char mconf_readme[] = N_( "\n"), menu_instructions[] = N_( "Arrow keys navigate the menu. " - " selects submenus --->. " + " selects submenus ---> (or empty submenus ----). " "Highlighted letters are hotkeys. " "Pressing includes, excludes, modularizes features. " "Press to exit, for Help, for Search. " @@ -401,7 +401,7 @@ static void search_conf(void) struct subtitle_part stpart; title = str_new(); - str_printf( &title, _("Enter %s (sub)string to search for " + str_printf( &title, _("Enter %s (sub)string or regexp to search for " "(with or without \"%s\")"), CONFIG_, CONFIG_); again: @@ -498,8 +498,9 @@ static void build_conf(struct menu *menu) menu->data ? "-->" : "++>", indent + 1, ' ', prompt); } else - item_make(" %*c%s --->", indent + 1, ' ', prompt); - + item_make(" %*c%s %s", + indent + 1, ' ', prompt, + menu_is_empty(menu) ? "----" : "--->"); item_set_tag('m'); item_set_data(menu); if (single_menu_mode && menu->data) @@ -630,7 +631,7 @@ static void build_conf(struct menu *menu) (sym_has_value(sym) || !sym_is_changable(sym)) ? "" : _(" (NEW)")); if (menu->prompt->type == P_MENU) { - item_add_str(" --->"); + item_add_str(" %s", menu_is_empty(menu) ? "----" : "--->"); return; } } @@ -826,7 +827,9 @@ static void conf_choice(struct menu *menu) dialog_clear(); res = dialog_checklist(prompt ? _(prompt) : _("Main Menu"), _(radiolist_instructions), - 15, 70, 6); + MENUBOX_HEIGTH_MIN, + MENUBOX_WIDTH_MIN, + CHECKLIST_HEIGTH_MIN); selected = item_activate_selected(); switch (res) { case 0: @@ -957,8 +960,8 @@ static int handle_exit(void) dialog_clear(); if (conf_get_changed()) res = dialog_yesno(NULL, - _("Do you wish to save your new configuration ?\n" - " to continue."), + _("Do you wish to save your new configuration?\n" + "(Press to continue kernel configuration.)"), 6, 60); else res = -1; diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index fd3f018..7e233a6 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c @@ -443,6 +443,22 @@ bool menu_has_prompt(struct menu *menu) return true; } +/* + * Determine if a menu is empty. + * A menu is considered empty if it contains no or only + * invisible entries. + */ +bool menu_is_empty(struct menu *menu) +{ + struct menu *child; + + for (child = menu->list; child; child = child->next) { + if (menu_is_visible(child)) + return(false); + } + return(true); +} + bool menu_is_visible(struct menu *menu) { struct menu *child; diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index dbf31ed..7975d8d 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c @@ -45,8 +45,8 @@ static const char nconf_global_help[] = N_( " to remove it. 
You may press the key to cycle through the\n" "available options.\n" "\n" -"A trailing \"--->\" designates a submenu.\n" -"\n" +"A trailing \"--->\" designates a submenu, a trailing \"----\" an\n" +"empty submenu.\n" "\n" "Menu navigation keys\n" "----------------------------------------------------------------------\n" @@ -131,7 +131,7 @@ static const char nconf_global_help[] = N_( "\n"), menu_no_f_instructions[] = N_( "Legend: [*] built-in [ ] excluded module < > module capable.\n" -"Submenus are designated by a trailing \"--->\".\n" +"Submenus are designated by a trailing \"--->\", empty ones by \"----\".\n" "\n" "Use the following keys to navigate the menus:\n" "Move up or down with and .\n" @@ -148,7 +148,7 @@ menu_no_f_instructions[] = N_( "For help related to the current menu entry press or .\n"), menu_instructions[] = N_( "Legend: [*] built-in [ ] excluded module < > module capable.\n" -"Submenus are designated by a trailing \"--->\".\n" +"Submenus are designated by a trailing \"--->\", empty ones by \"----\".\n" "\n" "Use the following keys to navigate the menus:\n" "Move up or down with or .\n" @@ -365,15 +365,16 @@ static void print_function_line(void) int i; int offset = 1; const int skip = 1; + int lines = getmaxy(stdscr); for (i = 0; i < function_keys_num; i++) { (void) wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]); - mvwprintw(main_window, LINES-3, offset, + mvwprintw(main_window, lines-3, offset, "%s", function_keys[i].key_str); (void) wattrset(main_window, attributes[FUNCTION_TEXT]); offset += strlen(function_keys[i].key_str); - mvwprintw(main_window, LINES-3, + mvwprintw(main_window, lines-3, offset, "%s", function_keys[i].func); offset += strlen(function_keys[i].func) + skip; @@ -694,7 +695,7 @@ static void search_conf(void) int dres; title = str_new(); - str_printf( &title, _("Enter %s (sub)string to search for " + str_printf( &title, _("Enter %s (sub)string or regexp to search for " "(with or without \"%s\")"), CONFIG_, CONFIG_); again: @@ -759,9 +760,9 @@ static void build_conf(struct menu *menu) indent + 1, ' ', prompt); } else item_make(menu, 'm', - " %*c%s --->", - indent + 1, - ' ', prompt); + " %*c%s %s", + indent + 1, ' ', prompt, + menu_is_empty(menu) ? "----" : "--->"); if (single_menu_mode && menu->data) goto conf_childs; @@ -903,7 +904,7 @@ static void build_conf(struct menu *menu) (sym_has_value(sym) || !sym_is_changable(sym)) ? "" : _(" (NEW)")); if (menu->prompt && menu->prompt->type == P_MENU) { - item_add_str(" --->"); + item_add_str(" %s", menu_is_empty(menu) ? 
"----" : "--->"); return; } } @@ -954,7 +955,7 @@ static void show_menu(const char *prompt, const char *instructions, clear(); (void) wattrset(main_window, attributes[NORMAL]); - print_in_middle(stdscr, 1, 0, COLS, + print_in_middle(stdscr, 1, 0, getmaxx(stdscr), menu_backtitle, attributes[MAIN_HEADING]); @@ -1455,14 +1456,18 @@ static void conf_save(void) void setup_windows(void) { + int lines, columns; + + getmaxyx(stdscr, lines, columns); + if (main_window != NULL) delwin(main_window); /* set up the menu and menu window */ - main_window = newwin(LINES-2, COLS-2, 2, 1); + main_window = newwin(lines-2, columns-2, 2, 1); keypad(main_window, TRUE); - mwin_max_lines = LINES-7; - mwin_max_cols = COLS-6; + mwin_max_lines = lines-7; + mwin_max_cols = columns-6; /* panels order is from bottom to top */ new_panel(main_window); @@ -1470,6 +1475,7 @@ void setup_windows(void) int main(int ac, char **av) { + int lines, columns; char *mode; setlocale(LC_ALL, ""); @@ -1495,7 +1501,8 @@ int main(int ac, char **av) keypad(stdscr, TRUE); curs_set(0); - if (COLS < 75 || LINES < 20) { + getmaxyx(stdscr, lines, columns); + if (columns < 75 || lines < 20) { endwin(); printf("Your terminal should have at " "least 20 lines and 75 columns\n"); diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c index 9f8c44e..8275f0e5 100644 --- a/scripts/kconfig/nconf.gui.c +++ b/scripts/kconfig/nconf.gui.c @@ -276,8 +276,8 @@ int btn_dialog(WINDOW *main_window, const char *msg, int btn_num, ...) total_width = max(msg_width, btns_width); /* place dialog in middle of screen */ - y = (LINES-(msg_lines+4))/2; - x = (COLS-(total_width+4))/2; + y = (getmaxy(stdscr)-(msg_lines+4))/2; + x = (getmaxx(stdscr)-(total_width+4))/2; /* create the windows */ @@ -387,8 +387,8 @@ int dialog_inputbox(WINDOW *main_window, prompt_width = max(prompt_width, strlen(title)); /* place dialog in middle of screen */ - y = (LINES-(prompt_lines+4))/2; - x = (COLS-(prompt_width+4))/2; + y = (getmaxy(stdscr)-(prompt_lines+4))/2; + x = (getmaxx(stdscr)-(prompt_width+4))/2; strncpy(result, init, *result_len); @@ -545,7 +545,7 @@ void show_scroll_win(WINDOW *main_window, { int res; int total_lines = get_line_no(text); - int x, y; + int x, y, lines, columns; int start_x = 0, start_y = 0; int text_lines = 0, text_cols = 0; int total_cols = 0; @@ -556,6 +556,8 @@ void show_scroll_win(WINDOW *main_window, WINDOW *pad; PANEL *panel; + getmaxyx(stdscr, lines, columns); + /* find the widest line of msg: */ total_lines = get_line_no(text); for (i = 0; i < total_lines; i++) { @@ -569,14 +571,14 @@ void show_scroll_win(WINDOW *main_window, (void) wattrset(pad, attributes[SCROLLWIN_TEXT]); fill_window(pad, text); - win_lines = min(total_lines+4, LINES-2); - win_cols = min(total_cols+2, COLS-2); + win_lines = min(total_lines+4, lines-2); + win_cols = min(total_cols+2, columns-2); text_lines = max(win_lines-4, 0); text_cols = max(win_cols-2, 0); /* place window in middle of screen */ - y = (LINES-win_lines)/2; - x = (COLS-win_cols)/2; + y = (lines-win_lines)/2; + x = (columns-win_cols)/2; win = newwin(win_lines, win_cols, y, x); keypad(win, TRUE); diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c index ecc5aa5..d550300 100644 --- a/scripts/kconfig/symbol.c +++ b/scripts/kconfig/symbol.c @@ -136,7 +136,7 @@ static struct property *sym_get_range_prop(struct symbol *sym) return NULL; } -static int sym_get_range_val(struct symbol *sym, int base) +static long sym_get_range_val(struct symbol *sym, int base) { sym_calc_value(sym); switch 
(sym->type) {
@@ -155,7 +155,7 @@ static int sym_get_range_val(struct symbol *sym, int base)
 static void sym_validate_range(struct symbol *sym)
 {
 	struct property *prop;
-	int base, val, val2;
+	long base, val, val2;
 	char str[64];

 	switch (sym->type) {
@@ -179,9 +179,9 @@ static void sym_validate_range(struct symbol *sym)
 		return;
 	}
 	if (sym->type == S_INT)
-		sprintf(str, "%d", val2);
+		sprintf(str, "%ld", val2);
 	else
-		sprintf(str, "0x%x", val2);
+		sprintf(str, "0x%lx", val2);
 	sym->curr.val = strdup(str);
 }

@@ -300,6 +300,14 @@ void sym_calc_value(struct symbol *sym)

 	if (sym->flags & SYMBOL_VALID)
 		return;
+
+	if (sym_is_choice_value(sym) &&
+	    sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) {
+		sym->flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES;
+		prop = sym_get_choice_prop(sym);
+		sym_calc_value(prop_get_symbol(prop));
+	}
+
 	sym->flags |= SYMBOL_VALID;

 	oldval = sym->curr;
@@ -425,6 +433,9 @@ void sym_calc_value(struct symbol *sym)

 	if (sym->flags & SYMBOL_AUTO)
 		sym->flags &= ~SYMBOL_WRITE;
+
+	if (sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES)
+		set_all_choice_values(sym);
 }

 void sym_clear_all_valid(void)
@@ -583,7 +594,7 @@ bool sym_string_valid(struct symbol *sym, const char *str)
 bool sym_string_within_range(struct symbol *sym, const char *str)
 {
 	struct property *prop;
-	int val;
+	long val;

 	switch (sym->type) {
 	case S_STRING:
@@ -943,38 +954,98 @@ const char *sym_escape_string_value(const char *in)
 	return res;
 }

+struct sym_match {
+	struct symbol *sym;
+	off_t so, eo;
+};
+
+/* Compare matched symbols as thus:
+ * - first, symbols that match exactly
+ * - then, alphabetical sort
+ */
+static int sym_rel_comp( const void *sym1, const void *sym2 )
+{
+	struct sym_match *s1 = *(struct sym_match **)sym1;
+	struct sym_match *s2 = *(struct sym_match **)sym2;
+	int l1, l2;
+
+	/* Exact match:
+	 * - if matched length on symbol s1 is the length of that symbol,
+	 *   then this symbol should come first;
+	 * - if matched length on symbol s2 is the length of that symbol,
+	 *   then this symbol should come first.
+	 * Note: since the search can be a regexp, both symbols may match
+	 * exactly; if this is the case, we can't decide which comes first,
+	 * and we fallback to sorting alphabetically.
+ */ + l1 = s1->eo - s1->so; + l2 = s2->eo - s2->so; + if (l1 == strlen(s1->sym->name) && l2 != strlen(s2->sym->name)) + return -1; + if (l1 != strlen(s1->sym->name) && l2 == strlen(s2->sym->name)) + return 1; + + /* As a fallback, sort symbols alphabetically */ + return strcmp(s1->sym->name, s2->sym->name); +} + struct symbol **sym_re_search(const char *pattern) { struct symbol *sym, **sym_arr = NULL; + struct sym_match **sym_match_arr = NULL; int i, cnt, size; regex_t re; + regmatch_t match[1]; cnt = size = 0; /* Skip if empty */ if (strlen(pattern) == 0) return NULL; - if (regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB|REG_ICASE)) + if (regcomp(&re, pattern, REG_EXTENDED|REG_ICASE)) return NULL; for_all_symbols(i, sym) { + struct sym_match *tmp_sym_match; if (sym->flags & SYMBOL_CONST || !sym->name) continue; - if (regexec(&re, sym->name, 0, NULL, 0)) + if (regexec(&re, sym->name, 1, match, 0)) continue; if (cnt + 1 >= size) { - void *tmp = sym_arr; + void *tmp; size += 16; - sym_arr = realloc(sym_arr, size * sizeof(struct symbol *)); - if (!sym_arr) { - free(tmp); - return NULL; + tmp = realloc(sym_match_arr, size * sizeof(struct sym_match *)); + if (!tmp) { + goto sym_re_search_free; } + sym_match_arr = tmp; } sym_calc_value(sym); - sym_arr[cnt++] = sym; + tmp_sym_match = (struct sym_match*)malloc(sizeof(struct sym_match)); + if (!tmp_sym_match) + goto sym_re_search_free; + tmp_sym_match->sym = sym; + /* As regexec return 0, we know we have a match, so + * we can use match[0].rm_[se]o without further checks + */ + tmp_sym_match->so = match[0].rm_so; + tmp_sym_match->eo = match[0].rm_eo; + sym_match_arr[cnt++] = tmp_sym_match; } - if (sym_arr) + if (sym_match_arr) { + qsort(sym_match_arr, cnt, sizeof(struct sym_match*), sym_rel_comp); + sym_arr = malloc((cnt+1) * sizeof(struct symbol)); + if (!sym_arr) + goto sym_re_search_free; + for (i = 0; i < cnt; i++) + sym_arr[i] = sym_match_arr[i]->sym; sym_arr[cnt] = NULL; + } +sym_re_search_free: + if (sym_match_arr) { + for (i = 0; i < cnt; i++) + free(sym_match_arr[i]); + free(sym_match_arr); + } regfree(&re); return sym_arr; diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile index 75d59fc..c11212f 100644 --- a/scripts/mod/Makefile +++ b/scripts/mod/Makefile @@ -15,8 +15,8 @@ endef quiet_cmd_offsets = GEN $@ define cmd_offsets (set -e; \ - echo "#ifndef __DEVICEVTABLE_OFFSETS_H__"; \ - echo "#define __DEVICEVTABLE_OFFSETS_H__"; \ + echo "#ifndef __DEVICETABLE_OFFSETS_H__"; \ + echo "#define __DEVICETABLE_OFFSETS_H__"; \ echo "/*"; \ echo " * DO NOT MODIFY."; \ echo " *"; \ @@ -29,15 +29,10 @@ define cmd_offsets echo "#endif" ) > $@ endef -# We use internal kbuild rules to avoid the "is up to date" message from make -scripts/mod/devicetable-offsets.s: scripts/mod/devicetable-offsets.c FORCE - $(Q)mkdir -p $(dir $@) - $(call if_changed_dep,cc_s_c) +$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s + $(call if_changed,offsets) -$(obj)/$(devicetable-offsets-file): scripts/mod/devicetable-offsets.s - $(call cmd,offsets) - -targets += $(devicetable-offsets-file) +targets += $(devicetable-offsets-file) devicetable-offsets.s # dependencies on generated files need to be listed explicitly diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index d9e67b7..2370863 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -79,10 +79,12 @@ struct devtable **__start___devtable, **__stop___devtable; extern struct devtable *__start___devtable[], *__stop___devtable[]; #endif /* __MACH__ */ -#if __GNUC__ == 3 && 
__GNUC_MINOR__ < 3 -# define __used __attribute__((__unused__)) -#else -# define __used __attribute__((__used__)) +#if !defined(__used) +# if __GNUC__ == 3 && __GNUC_MINOR__ < 3 +# define __used __attribute__((__unused__)) +# else +# define __used __attribute__((__used__)) +# endif #endif /* Define a variable f that holds the value of field f of struct devid diff --git a/scripts/package/mkspec b/scripts/package/mkspec index fbbfd08..fdd3fbf 100755 --- a/scripts/package/mkspec +++ b/scripts/package/mkspec @@ -74,6 +74,7 @@ echo "" fi echo "%install" +echo 'KBUILD_IMAGE=$(make image_name)' echo "%ifarch ia64" echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules' echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware' diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 84b88f1..d105a44 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -71,9 +71,6 @@ scm_version() printf -- '-svn%s' "`git svn find-rev $head`" fi - # Update index only on r/w media - [ -w . ] && git update-index --refresh --unmerged > /dev/null - # Check for uncommitted changes if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then printf '%s' -dirty diff --git a/tools/include/tools/be_byteshift.h b/tools/include/tools/be_byteshift.h index f4912e2..84c17d8 100644 --- a/tools/include/tools/be_byteshift.h +++ b/tools/include/tools/be_byteshift.h @@ -1,68 +1,68 @@ #ifndef _TOOLS_BE_BYTESHIFT_H #define _TOOLS_BE_BYTESHIFT_H -#include +#include -static inline __u16 __get_unaligned_be16(const __u8 *p) +static inline uint16_t __get_unaligned_be16(const uint8_t *p) { return p[0] << 8 | p[1]; } -static inline __u32 __get_unaligned_be32(const __u8 *p) +static inline uint32_t __get_unaligned_be32(const uint8_t *p) { return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; } -static inline __u64 __get_unaligned_be64(const __u8 *p) +static inline uint64_t __get_unaligned_be64(const uint8_t *p) { - return (__u64)__get_unaligned_be32(p) << 32 | + return (uint64_t)__get_unaligned_be32(p) << 32 | __get_unaligned_be32(p + 4); } -static inline void __put_unaligned_be16(__u16 val, __u8 *p) +static inline void __put_unaligned_be16(uint16_t val, uint8_t *p) { *p++ = val >> 8; *p++ = val; } -static inline void __put_unaligned_be32(__u32 val, __u8 *p) +static inline void __put_unaligned_be32(uint32_t val, uint8_t *p) { __put_unaligned_be16(val >> 16, p); __put_unaligned_be16(val, p + 2); } -static inline void __put_unaligned_be64(__u64 val, __u8 *p) +static inline void __put_unaligned_be64(uint64_t val, uint8_t *p) { __put_unaligned_be32(val >> 32, p); __put_unaligned_be32(val, p + 4); } -static inline __u16 get_unaligned_be16(const void *p) +static inline uint16_t get_unaligned_be16(const void *p) { - return __get_unaligned_be16((const __u8 *)p); + return __get_unaligned_be16((const uint8_t *)p); } -static inline __u32 get_unaligned_be32(const void *p) +static inline uint32_t get_unaligned_be32(const void *p) { - return __get_unaligned_be32((const __u8 *)p); + return __get_unaligned_be32((const uint8_t *)p); } -static inline __u64 get_unaligned_be64(const void *p) +static inline uint64_t get_unaligned_be64(const void *p) { - return __get_unaligned_be64((const __u8 *)p); + return __get_unaligned_be64((const uint8_t *)p); } -static inline void put_unaligned_be16(__u16 val, void *p) +static inline void put_unaligned_be16(uint16_t val, void *p) { __put_unaligned_be16(val, p); } -static inline void put_unaligned_be32(__u32 val, void *p) +static inline void put_unaligned_be32(uint32_t val, void *p) { 
 	__put_unaligned_be32(val, p);
 }

-static inline void put_unaligned_be64(__u64 val, void *p)
+static inline void put_unaligned_be64(uint64_t val, void *p)
 {
 	__put_unaligned_be64(val, p);
 }
diff --git a/tools/include/tools/le_byteshift.h b/tools/include/tools/le_byteshift.h
index c99d45a..8fe9f24 100644
--- a/tools/include/tools/le_byteshift.h
+++ b/tools/include/tools/le_byteshift.h
@@ -1,68 +1,68 @@
 #ifndef _TOOLS_LE_BYTESHIFT_H
 #define _TOOLS_LE_BYTESHIFT_H

-#include <linux/types.h>
+#include <stdint.h>

-static inline __u16 __get_unaligned_le16(const __u8 *p)
+static inline uint16_t __get_unaligned_le16(const uint8_t *p)
 {
 	return p[0] | p[1] << 8;
 }

-static inline __u32 __get_unaligned_le32(const __u8 *p)
+static inline uint32_t __get_unaligned_le32(const uint8_t *p)
 {
 	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
 }

-static inline __u64 __get_unaligned_le64(const __u8 *p)
+static inline uint64_t __get_unaligned_le64(const uint8_t *p)
 {
-	return (__u64)__get_unaligned_le32(p + 4) << 32 |
+	return (uint64_t)__get_unaligned_le32(p + 4) << 32 |
 	       __get_unaligned_le32(p);
 }

-static inline void __put_unaligned_le16(__u16 val, __u8 *p)
+static inline void __put_unaligned_le16(uint16_t val, uint8_t *p)
 {
 	*p++ = val;
 	*p++ = val >> 8;
 }

-static inline void __put_unaligned_le32(__u32 val, __u8 *p)
+static inline void __put_unaligned_le32(uint32_t val, uint8_t *p)
 {
 	__put_unaligned_le16(val >> 16, p + 2);
 	__put_unaligned_le16(val, p);
 }

-static inline void __put_unaligned_le64(__u64 val, __u8 *p)
+static inline void __put_unaligned_le64(uint64_t val, uint8_t *p)
 {
 	__put_unaligned_le32(val >> 32, p + 4);
 	__put_unaligned_le32(val, p);
 }

-static inline __u16 get_unaligned_le16(const void *p)
+static inline uint16_t get_unaligned_le16(const void *p)
 {
-	return __get_unaligned_le16((const __u8 *)p);
+	return __get_unaligned_le16((const uint8_t *)p);
 }

-static inline __u32 get_unaligned_le32(const void *p)
+static inline uint32_t get_unaligned_le32(const void *p)
 {
-	return __get_unaligned_le32((const __u8 *)p);
+	return __get_unaligned_le32((const uint8_t *)p);
 }

-static inline __u64 get_unaligned_le64(const void *p)
+static inline uint64_t get_unaligned_le64(const void *p)
 {
-	return __get_unaligned_le64((const __u8 *)p);
+	return __get_unaligned_le64((const uint8_t *)p);
 }

-static inline void put_unaligned_le16(__u16 val, void *p)
+static inline void put_unaligned_le16(uint16_t val, void *p)
 {
 	__put_unaligned_le16(val, p);
 }

-static inline void put_unaligned_le32(__u32 val, void *p)
+static inline void put_unaligned_le32(uint32_t val, void *p)
 {
 	__put_unaligned_le32(val, p);
 }

-static inline void put_unaligned_le64(__u64 val, void *p)
+static inline void put_unaligned_le64(uint64_t val, void *p)
 {
 	__put_unaligned_le64(val, p);
 }
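
For readers who want to sanity-check the converted helpers, here is a minimal standalone sketch (not part of the patch): it repeats two of the big-endian accessors with the same bodies as in the patched tools/include/tools/be_byteshift.h, so it builds with nothing but libc, and round-trips a value through a deliberately misaligned buffer. The test value and the main() harness are illustrative assumptions only.

#include <stdint.h>
#include <stdio.h>

/* Same formulas as the patched be_byteshift.h, repeated here so the
 * example compiles without the tools/ include path. */
static inline uint32_t __get_unaligned_be32(const uint8_t *p)
{
	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

static inline void __put_unaligned_be16(uint16_t val, uint8_t *p)
{
	*p++ = val >> 8;
	*p++ = val;
}

static inline void __put_unaligned_be32(uint32_t val, uint8_t *p)
{
	__put_unaligned_be16(val >> 16, p);
	__put_unaligned_be16(val, p + 2);
}

int main(void)
{
	/* Store at buf + 1 so the 32-bit access is unaligned on purpose. */
	uint8_t buf[8] = { 0 };
	uint32_t v = 0x11223344u;	/* illustrative test value */

	__put_unaligned_be32(v, buf + 1);

	/* Big-endian layout: most significant byte comes first. */
	printf("bytes: %02x %02x %02x %02x\n",
	       buf[1], buf[2], buf[3], buf[4]);
	printf("round-trip: 0x%08x\n", __get_unaligned_be32(buf + 1));

	return 0;
}

Because the helpers assemble values byte by byte, they work on any host regardless of endianness or alignment constraints, which is why the conversion above only needed to swap the kernel-only __u* typedefs for their stdint.h equivalents.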