Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/arm_pmuv3.h          |  8
-rw-r--r--  arch/arm/include/asm/div64.h              | 13
-rw-r--r--  arch/arm/include/asm/memory.h             |  6
-rw-r--r--  arch/arm/include/asm/page.h               |  5
-rw-r--r--  arch/arm/include/asm/perf_event.h         |  7
-rw-r--r--  arch/arm/include/asm/vdso/gettimeofday.h  |  4
-rw-r--r--  arch/arm/include/asm/vdso/vsyscall.h      |  4
7 files changed, 20 insertions(+), 27 deletions(-)
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
index f63ba8986b24..2ec0e5e83fc9 100644
--- a/arch/arm/include/asm/arm_pmuv3.h
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -212,6 +212,8 @@ static inline void write_pmuserenr(u32 val)
write_sysreg(val, PMUSERENR);
}
+static inline void write_pmuacr(u64 val) {}
+
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
@@ -231,6 +233,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
#define ARMV8_PMU_DFR_VER_V3P1 0x4
#define ARMV8_PMU_DFR_VER_V3P4 0x5
#define ARMV8_PMU_DFR_VER_V3P5 0x6
+#define ARMV8_PMU_DFR_VER_V3P9 0x9
#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
static inline bool pmuv3_implemented(int pmuver)
@@ -249,6 +252,11 @@ static inline bool is_pmuv3p5(int pmuver)
return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
}
+static inline bool is_pmuv3p9(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P9;
+}
+
static inline u64 read_pmceid0(void)
{
u64 val = read_sysreg(PMCEID0);
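
Below is a minimal, hypothetical caller sketch (not part of this diff) showing how shared PMUv3 driver code could use the new is_pmuv3p9() helper together with the write_pmuacr() stub added above. The function name example_enable_user_counters() and its mask parameter are illustrative only, and the sketch assumes the kernel's u64 type plus the declarations from this header.

/*
 * Hypothetical usage sketch: gate a PMUv3.9-only register write on the
 * detected PMU version.  On 32-bit ARM the stub above turns the write
 * into a no-op, so the same caller compiles on both arm and arm64.
 */
static inline void example_enable_user_counters(int pmuver, u64 mask)
{
	if (is_pmuv3p9(pmuver))
		write_pmuacr(mask);	/* PMUv3.9 access-control value */
}
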
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index 4b69cf850451..d3ef8e416b27 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -52,10 +52,17 @@ static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
#else
-static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
+#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+static __always_inline
+#else
+static inline
+#endif
+uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
{
unsigned long long res;
register unsigned int tmp asm("ip") = 0;
+ bool no_ovf = __builtin_constant_p(m) &&
+ ((m >> 32) + (m & 0xffffffff) < 0x100000000);
if (!bias) {
asm ( "umull %Q0, %R0, %Q1, %Q2\n\t"
@@ -63,7 +70,7 @@ static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
: "=&r" (res)
: "r" (m), "r" (n)
: "cc");
- } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ } else if (no_ovf) {
res = m;
asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t"
"mov %Q0, #0"
@@ -80,7 +87,7 @@ static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
: "cc");
}
- if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ if (no_ovf) {
asm ( "umlal %R0, %Q0, %R1, %Q2\n\t"
"umlal %R0, %Q0, %Q1, %R2\n\t"
"mov %R0, #0\n\t"
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index ef2aa79ece5a..7c2fa7dcec6d 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -148,12 +148,6 @@ extern unsigned long vectors_base;
#endif
/*
- * Convert a page to/from a physical address
- */
-#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
-#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
-
-/*
* PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
* memory. This is used for XIP and NoMMU kernels, and on platforms that don't
* have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 62af9f7f9e96..ef11b721230e 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -7,10 +7,7 @@
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT CONFIG_PAGE_SHIFT
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+#include <vdso/page.h>
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index bdbc1e590891..c08f16f2e243 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -8,13 +8,6 @@
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-#ifdef CONFIG_PERF_EVENTS
-struct pt_regs;
-extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-extern unsigned long perf_misc_flags(struct pt_regs *regs);
-#define perf_misc_flags(regs) perf_misc_flags(regs)
-#endif
-
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ARM_pc = (__ip); \
frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
index 2134cbd5469f..592d3d015ca7 100644
--- a/arch/arm/include/asm/vdso/gettimeofday.h
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -15,8 +15,6 @@
#define VDSO_HAS_CLOCK_GETRES 1
-extern struct vdso_data *__get_datapage(void);
-
static __always_inline int gettimeofday_fallback(
struct __kernel_old_timeval *_tv,
struct timezone *_tz)
@@ -139,7 +137,7 @@ static __always_inline u64 __arch_get_hw_counter(int clock_mode,
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
- return __get_datapage();
+ return _vdso_data;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
index 47e41ae8ccd0..705414710dcd 100644
--- a/arch/arm/include/asm/vdso/vsyscall.h
+++ b/arch/arm/include/asm/vdso/vsyscall.h
@@ -4,16 +4,12 @@
#ifndef __ASSEMBLY__
-#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <asm/cacheflush.h>
extern struct vdso_data *vdso_data;
extern bool cntvct_ok;
-/*
- * Update the vDSO data page to keep in sync with kernel timekeeping.
- */
static __always_inline
struct vdso_data *__arm_get_k_vdso_data(void)
{