diff -up linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h.morefixes linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h
--- linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h	2016-11-05 08:30:46.276033474 -0400
@@ -354,6 +354,7 @@ extern const char * const x86_power_flag
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
 #define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_mp		boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
 #define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
 #define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
diff -up linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h.morefixes linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h
--- linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h	2016-11-05 09:39:38.425043476 -0400
@@ -110,7 +110,7 @@ static inline bool setup_remapped_irq(in
 	return false;
 }
 
-int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
+static inline int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 {
 	return -ENOSYS;
 }
diff -up linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c.morefixes linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c
--- linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c	2016-11-05 09:49:59.371055125 -0400
@@ -24,9 +24,12 @@
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
+DEFINE_PER_CPU_SHARED_ALIGNED(rh_irq_cpustat_t, rh_irq_stat);
+
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
 EXPORT_PER_CPU_SYMBOL(irq_regs);
 
+
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 
 int sysctl_panic_on_stackoverflow __read_mostly;
diff -up linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h.morefixes linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h
--- linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h	2016-11-05 10:44:41.492049162 -0400
@@ -115,10 +115,17 @@ static inline void pps_get_ts(struct pps
 {
 	struct system_time_snapshot snap;
 	ktime_get_snapshot(&snap);
+#if defined CONFIG_X86_64
 	ts->ts_real = ktime_to_timespec64(snap.real);
 #ifdef CONFIG_NTP_PPS
 	ts->ts_raw = ktime_to_timespec64(snap.raw);
 #endif
+#else
+	ts->ts_real = ktime_to_timespec(snap.real);
+#ifdef CONFIG_NTP_PPS
+	ts->ts_raw = ktime_to_timespec(snap.raw);
+#endif
+#endif
 }
 
 /* Subtract known time delay from PPS event time(s) */
diff -up linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c.morefixes linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c
--- linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c	2016-11-05 10:58:56.726065206 -0400
@@ -328,6 +328,7 @@ u64 ktime_divns(const ktime_t kt, s64 di
 
 	return dclc;
 }
+EXPORT_SYMBOL_GPL(ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
diff -up linux-3.10.0-514.sdl7.i686/mm/swap.c.morefixes linux-3.10.0-514.sdl7.i686/mm/swap.c
--- linux-3.10.0-514.sdl7.i686/mm/swap.c.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/mm/swap.c	2016-11-05 08:55:41.521061525 -0400
@@ -972,9 +972,6 @@ void release_pages(struct page **pages,
 		if (!put_page_testzero(page))
			continue;
 
-		VM_BUG_ON_PAGE(check_mmu_gather &&
-			       trans_huge_mmu_gather_count(page), page);
-
 		if (PageLRU(page)) {
 			if (!was_thp)
 				zone = zone_lru_lock(zone, page, &lock_batch,