@@ -185,16 +185,29 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
+static inline int get_possible_num_counters(void)
+{
+	int i, num_counters = x86_pmu.num_counters;
+
+	if (!is_hybrid())
+		return num_counters;
+
+	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++)
+		num_counters = max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters);
+
+	return num_counters;
+}
+
 static bool reserve_pmc_hardware(void)
 {
-	int i;
+	int i, num_counters = get_possible_num_counters();
 
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
 			goto perfctr_fail;
 	}
 
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
 			goto eventsel_fail;
 	}
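On a hybrid part, big and small cores can expose different numbers of general-purpose counters, so the reservation path above has to cover the largest set found on any core type. The x86_pmu.hybrid_pmu[] array and num_hybrid_pmus field that get_possible_num_counters() walks are introduced elsewhere in this series; the following is only a minimal sketch of the assumed layout, with made-up counter counts, to show the max-over-all-PMUs idea:

	/* Illustrative sketch only; field names mirror the patch, values are hypothetical. */
	struct hybrid_pmu_sketch {
		int num_counters;		/* general-purpose counters on this core type */
		int num_counters_fixed;		/* fixed counters on this core type */
	};

	static struct hybrid_pmu_sketch sketch_pmus[] = {
		{ .num_counters = 8, .num_counters_fixed = 4 },	/* e.g. big cores   */
		{ .num_counters = 6, .num_counters_fixed = 3 },	/* e.g. small cores */
	};

	/* Same idea as get_possible_num_counters() above: take the maximum. */
	static int sketch_possible_num_counters(void)
	{
		int i, max = 0;

		for (i = 0; i < 2; i++)
			if (sketch_pmus[i].num_counters > max)
				max = sketch_pmus[i].num_counters;
		return max;	/* 8 here: reserve enough counter/eventsel MSRs for the big cores */
	}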
@@ -205,7 +218,7 @@ static bool reserve_pmc_hardware(void)
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu_config_addr(i));
 
-	i = x86_pmu.num_counters;
+	i = num_counters;
 
 perfctr_fail:
 	for (i--; i >= 0; i--)
@@ -216,9 +229,9 @@ static bool reserve_pmc_hardware(void)
 
 static void release_pmc_hardware(void)
 {
-	int i;
+	int i, num_counters = get_possible_num_counters();
 
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		release_perfctr_nmi(x86_pmu_event_addr(i));
 		release_evntsel_nmi(x86_pmu_config_addr(i));
 	}
@@ -946,6 +959,7 @@ EXPORT_SYMBOL_GPL(perf_assign_events);
 
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 {
+	int num_counters = hybrid(cpuc->pmu, num_counters);
 	struct event_constraint *c;
 	struct perf_event *e;
 	int n0, i, wmin, wmax, unsched = 0;
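hybrid(cpuc->pmu, num_counters) resolves the field against the PMU the events are being scheduled on: on a hybrid system it reads the per-core-type value, otherwise it falls back to the global x86_pmu field. The real accessor is a macro defined elsewhere in this series (in arch/x86/events/perf_event.h); the snippet below is only a rough sketch of the lookup it performs, assuming the series' is_hybrid() and hybrid_pmu() helpers, not the exact in-tree definition:

	/*
	 * Rough sketch of what hybrid(pmu, num_counters) evaluates to; the
	 * in-tree macro is a statement expression that yields an lvalue.
	 */
	static int sketch_hybrid_num_counters(struct pmu *pmu)
	{
		if (is_hybrid() && pmu)
			return hybrid_pmu(pmu)->num_counters;	/* per-core-type value */
		return x86_pmu.num_counters;			/* common/global value */
	}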
@@ -1021,7 +1035,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 
 	/* slow path */
 	if (i != n) {
-		int gpmax = x86_pmu.num_counters;
+		int gpmax = num_counters;
 
 		/*
 		 * Do not allow scheduling of more than half the available
@@ -1042,7 +1056,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		 * the extra Merge events needed by large increment events.
 		 */
 		if (x86_pmu.flags & PMU_FL_PAIR) {
-			gpmax = x86_pmu.num_counters - cpuc->n_pair;
+			gpmax = num_counters - cpuc->n_pair;
 			WARN_ON(gpmax <= 0);
 		}
10481062
@@ -1129,10 +1143,12 @@ static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
  */
 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
 {
+	int num_counters = hybrid(cpuc->pmu, num_counters);
+	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
 	struct perf_event *event;
 	int n, max_count;
 
-	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
+	max_count = num_counters + num_counters_fixed;
 
 	/* current number of events already accepted */
 	n = cpuc->n_events;
@@ -1499,18 +1515,18 @@ void perf_event_print_debug(void)
 {
 	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
 	u64 pebs, debugctl;
-	struct cpu_hw_events *cpuc;
+	int cpu = smp_processor_id();
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	int num_counters = hybrid(cpuc->pmu, num_counters);
+	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
 	unsigned long flags;
-	int cpu, idx;
+	int idx;
 
-	if (!x86_pmu.num_counters)
+	if (!num_counters)
 		return;
 
 	local_irq_save(flags);
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_events, cpu);
-
 	if (x86_pmu.version >= 2) {
 		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
@@ -1533,7 +1549,7 @@ void perf_event_print_debug(void)
 	}
 	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for (idx = 0; idx < num_counters; idx++) {
 		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
 		rdmsrl(x86_pmu_event_addr(idx), pmc_count);
15391555
@@ -1546,7 +1562,7 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
 			cpu, idx, prev_left);
 	}
-	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+	for (idx = 0; idx < num_counters_fixed; idx++) {
 		if (fixed_counter_disabled(idx, cpuc->pmu))
 			continue;
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
@@ -2781,6 +2797,11 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 {
 	cap->version = x86_pmu.version;
+	/*
+	 * KVM doesn't support the hybrid PMU yet.
+	 * Return the common value in the global x86_pmu,
+	 * which is available for all cores.
+	 */
 	cap->num_counters_gp = x86_pmu.num_counters;
 	cap->num_counters_fixed = x86_pmu.num_counters_fixed;
 	cap->bit_width_gp = x86_pmu.cntval_bits;
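Callers such as KVM query the PMU through perf_get_x86_pmu_capability(); since the hybrid per-PMU values are not exposed here, they always see the common numbers from the global x86_pmu. A hypothetical in-kernel caller, for illustration only:

	/* Hypothetical caller sketch; not part of this patch. */
	static void sketch_report_pmu_caps(void)
	{
		struct x86_pmu_capability cap;

		perf_get_x86_pmu_capability(&cap);
		pr_info("PMU v%d: %d GP counters (%d bits), %d fixed counters\n",
			cap.version, cap.num_counters_gp, cap.bit_width_gp,
			cap.num_counters_fixed);
	}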