PAPI  5.3.2.0
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
perf_event_uncore.c
Go to the documentation of this file.
1 /*
2 * File: perf_event_uncore.c
3 *
4 * Author: Vince Weaver
5 * vincent.weaver@maine.edu
6 */
7 
8 #include <fcntl.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <signal.h>
12 #include <syscall.h>
13 #include <sys/utsname.h>
14 #include <sys/mman.h>
15 #include <sys/ioctl.h>
16 
17 /* PAPI-specific includes */
18 #include "papi.h"
19 #include "papi_memory.h"
20 #include "papi_internal.h"
21 #include "papi_vector.h"
22 #include "extras.h"
23 
24 /* libpfm4 includes */
25 #include "papi_libpfm4_events.h"
26 #include "peu_libpfm4_events.h"
27 #include "perfmon/pfmlib.h"
28 #include PEINCLUDE
29 
30 /* Linux-specific includes */
31 #include "mb.h"
32 #include "linux-memory.h"
33 #include "linux-timer.h"
34 #include "linux-common.h"
35 #include "linux-context.h"
36 
38 
39 /* Forward declaration */
41 
/* Globals */
/* Index of this component in PAPI's component table; */
/* recorded by the component-init function and copied into each */
/* thread context / control state so helpers know who they belong to. */
static int our_cidx;

/* Defines for ctx->state (bitmask tracking the eventset lifecycle) */
#define PERF_EVENTS_OPENED 0x01   /* fds have been opened for all events */
#define PERF_EVENTS_RUNNING 0x02  /* counters are currently enabled */
50 
/* Build the perf_event read_format bitmask for an event. */
/* The layout returned by read() on a perf_event fd depends on these */
/* flags, so this helper centralizes the policy: multiplexed events */
/* need the time-enabled/running fields for scaling, and group reads */
/* are only requested when inherit is off (the two are incompatible). */
static unsigned int
get_read_format( unsigned int multiplex,
 unsigned int inherit,
 int format_group )
{
 unsigned int format = 0;

 /* Scaling information is required when multiplexing */
 if ( multiplex ) {
 format |= PERF_FORMAT_TOTAL_TIME_ENABLED |
 PERF_FORMAT_TOTAL_TIME_RUNNING;
 }

 /* Group reads cannot be combined with inherit */
 if ( format_group && !inherit ) {
 format |= PERF_FORMAT_GROUP;
 }

 SUBDBG("multiplex: %d, inherit: %d, group_leader: %d, format: %#x\n",
 multiplex, inherit, format_group, format);

 return format;
}
79 
80 /********************************************************************/
81 /* Low-level perf_event calls */
82 /********************************************************************/
83 
/* In case headers aren't new enough to have __NR_perf_event_open */
/* (syscall numbers are per-architecture; the ARM value includes the */
/* 0x900000 OABI syscall base offset). */
#ifndef __NR_perf_event_open

#ifdef __powerpc__
#define __NR_perf_event_open 319
#elif defined(__x86_64__)
#define __NR_perf_event_open 298
#elif defined(__i386__)
#define __NR_perf_event_open 336
#elif defined(__arm__)
/* fixed: the value had been garbled onto the #elif line, leaving */
/* the macro defined as empty */
#define __NR_perf_event_open (366+0x900000)
#endif

#endif
98 
/* Thin wrapper around the perf_event_open() syscall. */
/* Dumps every field of the attr structure via SUBDBG when debug */
/* output is enabled, then issues the raw syscall. */
/* Returns the new event fd on success, or -1 with errno set. */
/* NOTE(review): `ret` is an int while the function returns long; the */
/* syscall result is truncated, which is harmless for an fd / -1. */
static long
sys_perf_event_open( struct perf_event_attr *hw_event, pid_t pid, int cpu,
 int group_fd, unsigned long flags )
{
 int ret;

// If the comments on the following define symbol are removed, then applications that do not pass
// cpu numbers can be used with uncore events. They will always get counts from the package which
// contains cpu 0 but at least they will work. For instance this allows you to do uncore event testing
// with the papi_command_line utility. But normally the cpu number to identify which package should
// be counted should be provided by the papi application so this should be commented out by default.
// #define UNCORE_HELP 1
#ifdef UNCORE_HELP
 // A cpu number of -1 normally indicates that the caller does not want to limit counting to a specified cpu.
 // But if a negative cpu number is passed to the kernel when counting uncore events, it will return an
 // invalid argument error so we force the cpu number to zero in this case. In uncore the cpu number is
 // used by the kernel to specify which package should be counted so this code will set the default for
 // counting uncore events to the package that contains cpu 0 (when not specified by the calling
 // application). Since uncore events are not associated with any cpu, it really does not matter
 // which cpu number is used as a default.
 if (cpu < 0)
 cpu = 0;
#endif

 /* Trace every attr field so failed opens can be diagnosed */
 SUBDBG("sys_perf_event_open(hw_event: %p, pid: %d, cpu: %d, group_fd: %d, flags: %lx\n",hw_event,pid,cpu,group_fd,flags);
 SUBDBG(" type: %d\n",hw_event->type);
 SUBDBG(" size: %d\n",hw_event->size);
 SUBDBG(" config: %#"PRIx64" (%"PRIu64")\n",hw_event->config,
 hw_event->config);
 SUBDBG(" sample_period: %"PRIu64"\n",hw_event->sample_period);
 SUBDBG(" sample_type: %"PRIu64"\n",hw_event->sample_type);
 SUBDBG(" read_format: %"PRIu64"\n",hw_event->read_format);
 SUBDBG(" disabled: %d\n",hw_event->disabled);
 SUBDBG(" inherit: %d\n",hw_event->inherit);
 SUBDBG(" pinned: %d\n",hw_event->pinned);
 SUBDBG(" exclusive: %d\n",hw_event->exclusive);
 SUBDBG(" exclude_user: %d\n",hw_event->exclude_user);
 SUBDBG(" exclude_kernel: %d\n",hw_event->exclude_kernel);
 SUBDBG(" exclude_hv: %d\n",hw_event->exclude_hv);
 SUBDBG(" exclude_idle: %d\n",hw_event->exclude_idle);
 SUBDBG(" mmap: %d\n",hw_event->mmap);
 SUBDBG(" comm: %d\n",hw_event->comm);
 SUBDBG(" freq: %d\n",hw_event->freq);
 SUBDBG(" inherit_stat: %d\n",hw_event->inherit_stat);
 SUBDBG(" enable_on_exec: %d\n",hw_event->enable_on_exec);
 SUBDBG(" task: %d\n",hw_event->task);
 SUBDBG(" watermark: %d\n",hw_event->watermark);
 SUBDBG(" precise_ip: %d\n",hw_event->precise_ip);
 SUBDBG(" mmap_data: %d\n",hw_event->mmap_data);
 SUBDBG(" sample_id_all: %d\n",hw_event->sample_id_all);
 SUBDBG(" exclude_host: %d\n",hw_event->exclude_host);
 SUBDBG(" exclude_guest: %d\n",hw_event->exclude_guest);
 SUBDBG(" exclude_callchain_kernel: %d\n",hw_event->exclude_callchain_kernel);
 SUBDBG(" exclude_callchain_user: %d\n",hw_event->exclude_callchain_user);
 SUBDBG(" wakeup_watermark: %d\n",hw_event->wakeup_watermark);
 SUBDBG(" bp_type: %d\n",hw_event->bp_type);
 SUBDBG(" config1: %#lx (%lu)\n",hw_event->config1,hw_event->config1);
 SUBDBG(" config2: %#lx (%lu)\n",hw_event->config2,hw_event->config2);
 SUBDBG(" branch_sample_type: %lu\n",hw_event->branch_sample_type);
 SUBDBG(" sample_regs_user: %lu\n",hw_event->sample_regs_user);
 SUBDBG(" sample_stack_user: %d\n",hw_event->sample_stack_user);

 ret =
 syscall( __NR_perf_event_open, hw_event, pid, cpu, group_fd, flags );
 SUBDBG("Returned %d %d %s\n",ret,
 ret<0?errno:0,
 ret<0?strerror(errno):" ");
 return ret;
}
168 
169 
170 static int map_perf_event_errors_to_papi(int perf_event_error) {
171 
172  int ret;
173 
174  /* These mappings are approximate.
175  EINVAL in particular can mean lots of different things */
176  switch(perf_event_error) {
177  case EPERM:
178  case EACCES:
179  ret = PAPI_EPERM;
180  break;
181  case ENODEV:
182  case EOPNOTSUPP:
183  ret = PAPI_ENOSUPP;
184  break;
185  case ENOENT:
186  ret = PAPI_ENOEVNT;
187  break;
188  case ENOSYS:
189  case EAGAIN:
190  case EBUSY:
191  case E2BIG:
192  ret = PAPI_ESYS;
193  break;
194  case ENOMEM:
195  ret = PAPI_ENOMEM;
196  break;
197  case EINVAL:
198  default:
199  ret = PAPI_EINVAL;
200  break;
201  }
202  return ret;
203 }
204 
205 /* Maximum size we ever expect to read from a perf_event fd */
206 /* (this is the number of 64-bit values) */
207 /* We use this to size the read buffers */
208 /* The three is for event count, time_enabled, time_running */
209 /* and the counter term is count value and count id for each */
210 /* possible counter value. */
211 #define READ_BUFFER_SIZE (3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS))
212 
213 /* Open all events in the control state */
214 static int
216 {
217 
218  int i, ret = PAPI_OK;
219  long pid;
220 
221  if (ctl->granularity==PAPI_GRN_SYS) {
222  pid = -1;
223  }
224  else {
225  pid = ctl->tid;
226  }
227 
228  for( i = 0; i < ctl->num_events; i++ ) {
229 
230  ctl->events[i].event_opened=0;
231 
232  /* set up the attr structure. We don't set up all fields here */
233  /* as some have already been set up previously. */
234 
235 /*
236  * The following code controls how the uncore component interfaces with the
237  * kernel for uncore events. The code inside the ifdef will use grouping of
238  * uncore events which can make the cost of reading the results more efficient.
239  * The problem with it is that the uncore component supports 20 different uncore
240  * PMU's. The kernel requires that all events in a group must be for the same PMU.
241  * This means that with grouping enabled papi applications can count events on only
242  * one of the 20 PMU's during a run.
243  *
244  * The code inside the else clause treats each event in the event set as
245  * independent. When running in this mode the kernel allows the papi multiple
246  * uncore PMU's at the same time.
247  *
248  * Example:
249  * An application wants to measure all the L3 cache write requests.
250  * The event to do this is part of a cbox pmu (there are 8 cbox pmu's).
251  * When built with the code in the ifdef, the application would have to be
252  * run 8 times and count write requests from one pmu at a time.
253  * When built with the code in the else, the write requests in all 8 cbox
254  * pmu's could be counted in the same run.
255  *
256  */
257 // #define GROUPIT 1 // remove the comment on this line to force event grouping
258 #ifdef GROUPIT
259  /* group leader (event 0) is special */
260  /* If we're multiplexed, everyone is a group leader */
261  if (( i == 0 ) || (ctl->multiplexed)) {
262  ctl->events[i].attr.pinned = !ctl->multiplexed;
263  ctl->events[i].attr.disabled = 1;
264  ctl->events[i].group_leader_fd=-1;
265  ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
266  ctl->inherit,
267  !ctl->multiplexed );
268  } else {
269  ctl->events[i].attr.pinned=0;
270  ctl->events[i].attr.disabled = 0;
271  ctl->events[i].group_leader_fd=ctl->events[0].event_fd,
272  ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
273  ctl->inherit,
274  0 );
275  }
276 #else
277  ctl->events[i].attr.pinned = !ctl->multiplexed;
278  ctl->events[i].attr.disabled = 1;
279  ctl->inherit = 1;
280  ctl->events[i].group_leader_fd=-1;
281  ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed, ctl->inherit, 0 );
282 #endif
283 
284 
285  /* try to open */
286  ctl->events[i].event_fd = sys_perf_event_open( &ctl->events[i].attr,
287  pid,
288  ctl->cpu,
289  ctl->events[i].group_leader_fd,
290  0 /* flags */
291  );
292 
293  /* Try to match Linux errors to PAPI errors */
294  if ( ctl->events[i].event_fd == -1 ) {
295  SUBDBG("sys_perf_event_open returned error on event #%d."
296  " Error: %s\n",
297  i, strerror( errno ) );
299 
300  goto open_pe_cleanup;
301  }
302 
303  SUBDBG ("sys_perf_event_open: tid: %ld, cpu_num: %d,"
304  " group_leader/fd: %d, event_fd: %d,"
305  " read_format: %#"PRIu64"\n",
306  pid, ctl->cpu, ctl->events[i].group_leader_fd,
307  ctl->events[i].event_fd, ctl->events[i].attr.read_format);
308 
309  ctl->events[i].event_opened=1;
310  }
311 
312  /* Now that we've successfully opened all of the events, do whatever */
313  /* "tune-up" is needed to attach the mmap'd buffers, signal handlers, */
314  /* and so on. */
315  for ( i = 0; i < ctl->num_events; i++ ) {
316 
317  /* No sampling if uncore */
318  ctl->events[i].mmap_buf = NULL;
319  }
320 
321  /* Set num_evts only if completely successful */
322  ctx->state |= PERF_EVENTS_OPENED;
323 
324  return PAPI_OK;
325 
326 open_pe_cleanup:
327  /* We encountered an error, close up the fds we successfully opened. */
328  /* We go backward in an attempt to close group leaders last, although */
329  /* That's probably not strictly necessary. */
330  while ( i > 0 ) {
331  i--;
332  if (ctl->events[i].event_fd>=0) {
333  close( ctl->events[i].event_fd );
334  ctl->events[i].event_opened=0;
335  }
336  }
337 
338  return ret;
339 }
340 
341 /* Close all of the opened events */
342 static int
344 {
345  int i;
346  int num_closed=0;
347  int events_not_opened=0;
348 
349  /* should this be a more serious error? */
350  if ( ctx->state & PERF_EVENTS_RUNNING ) {
351  SUBDBG("Closing without stopping first\n");
352  }
353 
354  /* Close child events first */
355  for( i=0; i<ctl->num_events; i++ ) {
356 
357  if (ctl->events[i].event_opened) {
358 
359  if (ctl->events[i].group_leader_fd!=-1) {
360  if ( ctl->events[i].mmap_buf ) {
361  if ( munmap ( ctl->events[i].mmap_buf,
362  ctl->events[i].nr_mmap_pages * getpagesize() ) ) {
363  PAPIERROR( "munmap of fd = %d returned error: %s",
364  ctl->events[i].event_fd, strerror( errno ) );
365  return PAPI_ESYS;
366  }
367  }
368 
369  if ( close( ctl->events[i].event_fd ) ) {
370  PAPIERROR( "close of fd = %d returned error: %s",
371  ctl->events[i].event_fd, strerror( errno ) );
372  return PAPI_ESYS;
373  } else {
374  num_closed++;
375  }
376  ctl->events[i].event_opened=0;
377  }
378  }
379  else {
380  events_not_opened++;
381  }
382  }
383 
384  /* Close the group leaders last */
385  for( i=0; i<ctl->num_events; i++ ) {
386 
387  if (ctl->events[i].event_opened) {
388 
389  if (ctl->events[i].group_leader_fd==-1) {
390  if ( ctl->events[i].mmap_buf ) {
391  if ( munmap ( ctl->events[i].mmap_buf,
392  ctl->events[i].nr_mmap_pages * getpagesize() ) ) {
393  PAPIERROR( "munmap of fd = %d returned error: %s",
394  ctl->events[i].event_fd, strerror( errno ) );
395  return PAPI_ESYS;
396  }
397  }
398 
399 
400  if ( close( ctl->events[i].event_fd ) ) {
401  PAPIERROR( "close of fd = %d returned error: %s",
402  ctl->events[i].event_fd, strerror( errno ) );
403  return PAPI_ESYS;
404  } else {
405  num_closed++;
406  }
407  ctl->events[i].event_opened=0;
408  }
409  }
410  }
411 
412 
413  if (ctl->num_events!=num_closed) {
414  if (ctl->num_events!=(num_closed+events_not_opened)) {
415  PAPIERROR("Didn't close all events: "
416  "Closed %d Not Opened: %d Expected %d\n",
417  num_closed,events_not_opened,ctl->num_events);
418  return PAPI_EBUG;
419  }
420  }
421 
422  ctl->num_events=0;
423 
424  ctx->state &= ~PERF_EVENTS_OPENED;
425 
426  return PAPI_OK;
427 }
428 
429 
430 
431 
432 /********************************************************************/
433 /* Component Interface */
434 /********************************************************************/
435 
436 
437 
438 /* Initialize a thread */
439 int
441 {
442 
443  pe_context_t *pe_ctx = ( pe_context_t *) hwd_ctx;
444 
445  /* clear the context structure and mark as initialized */
446  memset( pe_ctx, 0, sizeof ( pe_context_t ) );
447  pe_ctx->initialized=1;
448 
450  pe_ctx->cidx=our_cidx;
451 
452  return PAPI_OK;
453 }
454 
455 /* Initialize a new control state */
456 int
458 {
459  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
460 
461  /* clear the contents */
462  memset( pe_ctl, 0, sizeof ( pe_control_t ) );
463 
464  /* Set the default domain */
465  _pe_set_domain( ctl, _perf_event_uncore_vector.cmp_info.default_domain );
466 
467  /* Set the default granularity */
468  pe_ctl->granularity=_perf_event_uncore_vector.cmp_info.default_granularity;
469 
470  pe_ctl->cidx=our_cidx;
471 
472  /* Set cpu number in the control block to show events */
473  /* are not tied to specific cpu */
474  pe_ctl->cpu = -1;
475  return PAPI_OK;
476 }
477 
478 
479 
/* Initialize the perf_event uncore component */
/* Detects perf_event support via /proc, initializes libpfm4 (generic */
/* and uncore-specific), and verifies uncore events and permissions.  */
/* Returns PAPI_OK, or PAPI_ENOCMP with disabled_reason set.          */
/* NOTE(review): the signature line was lost in extraction; per the   */
/* framework and the body's use of `cidx` it should read              */
/* `_peu_init_component( int cidx )` -- restore from upstream.        */
int
{

 int retval;
 int paranoid_level;

 FILE *fff;

 our_cidx=cidx;

 /* The is the official way to detect if perf_event support exists */
 /* The file is called perf_counter_paranoid on 2.6.31 */
 /* currently we are lazy and do not support 2.6.31 kernels */

 fff=fopen("/proc/sys/kernel/perf_event_paranoid","r");
 if (fff==NULL) {
 strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
 "perf_event support not detected",PAPI_MAX_STR_LEN);
 return PAPI_ENOCMP;
 }
 retval=fscanf(fff,"%d",&paranoid_level);
 if (retval!=1) fprintf(stderr,"Error reading paranoid level\n");
 fclose(fff);


 /* Run the libpfm4-specific setup */

 retval = _papi_libpfm4_init(_papi_hwd[cidx]);
 if (retval) {
 strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
 "Error initializing libpfm4",PAPI_MAX_STR_LEN);
 return PAPI_ENOCMP;
 }


 /* Run the uncore specific libpfm4 setup */

 /* NOTE(review): the remaining arguments of this call were lost in */
 /* extraction (presumably the component index and the uncore event */
 /* table) -- restore from upstream before compiling.               */
 retval = _peu_libpfm4_init(_papi_hwd[cidx],
 if (retval) {
 strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
 "Error setting up libpfm4",PAPI_MAX_STR_LEN);
 return PAPI_ENOCMP;
 }

 /* Check if no uncore events found */

 if (_papi_hwd[cidx]->cmp_info.num_native_events==0) {
 strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
 "No uncore PMUs or events found",PAPI_MAX_STR_LEN);
 return PAPI_ENOCMP;
 }

 /* Check if we have enough permissions for uncore */

 /* 2 means no kernel measurements allowed */
 /* 1 means normal counter access */
 /* 0 means you can access CPU-specific data */
 /* -1 means no restrictions */

 if ((paranoid_level>0) && (getuid()!=0)) {
 /* NOTE(review): the size argument line of this strncpy was lost */
 /* in extraction; the sibling calls all pass PAPI_MAX_STR_LEN.   */
 strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
 "Insufficient permissions for uncore access. Set /proc/sys/kernel/perf_event_paranoid to 0 or run as root.",
 return PAPI_ENOCMP;
 }

 return PAPI_OK;

}
553 
/* Shutdown the perf_event component */
/* NOTE(review): the signature line and the two cleanup statements    */
/* (event-table deallocation and libpfm4 shutdown, per the comments   */
/* below) were lost in extraction.  The cross-reference index gives   */
/* the prototype as `int _peu_shutdown_component(void)`; restore the  */
/* body from upstream before compiling.                               */

 /* deallocate our event table */

 /* Shutdown libpfm4 */

 return PAPI_OK;
}
565 
/* This function clears the current contents of the control structure and
 updates it with whatever resources are allocated for all the native events
 in the native info structure array. */
/* NOTE(review): the first signature line was lost in extraction; from */
/* the visible continuation and the vector's .update_control_state     */
/* entry it should read                                                */
/* `_peu_update_control_state( hwd_control_state_t *ctl,               */
/*                             NativeInfo_t *native, ... )` -- verify. */

int
 int count, hwd_context_t *ctx )
{
 int i = 0, ret;
 pe_context_t *pe_ctx = ( pe_context_t *) ctx;
 pe_control_t *pe_ctl = ( pe_control_t *) ctl;

 /* close all of the existing fds and start over again */
 /* In theory we could have finer-grained control and know if */
 /* things were changed, but it's easier to tear things down and rebuild. */
 close_pe_events( pe_ctx, pe_ctl );

 /* Calling with count==0 should be OK, it's how things are deallocated */
 /* when an eventset is destroyed. */
 if ( count == 0 ) {
 SUBDBG( "Called with count == 0\n" );
 return PAPI_OK;
 }

 /* set up all the events */
 for( i = 0; i < count; i++ ) {
 if ( native ) {
 /* Have libpfm4 set the config values for the event */
 /* NOTE(review): the opening line of this libpfm4 setup call */
 /* (assigning `ret`) was lost in extraction -- restore from  */
 /* upstream before compiling.                                */
 native[i].ni_event,
 pe_ctx->event_table);
 SUBDBG( "pe_ctl->eventss[%d].config=%#"PRIx64"\n",i,
 pe_ctl->events[i].attr.config);
 if (ret!=PAPI_OK) return ret;

 } else {
 /* I'm not sure how we'd end up in this case */
 /* should it be an error? */
 }

 /* Copy the inherit flag into the attribute block that will be */
 /* passed to the kernel */
 pe_ctl->events[i].attr.inherit = pe_ctl->inherit;

 /* Set the position in the native structure */
 /* We just set up events linearly */
 if ( native ) {
 native[i].ni_position = i;
 }
 }

 pe_ctl->num_events = count;
 _pe_set_domain( ctl, pe_ctl->domain );

 /* actually open the events */
 /* (why is this a separate function?) */
 ret = open_pe_events( pe_ctx, pe_ctl );
 if ( ret != PAPI_OK ) {
 SUBDBG("open_pe_events failed\n");
 /* Restore values ? */
 return ret;
 }

 return PAPI_OK;
}
632 
633 /********************************************************************/
634 /********************************************************************/
635 /* Start with functions that are exported via the module interface */
636 /********************************************************************/
637 /********************************************************************/
638 
639 
640 /* set the domain. FIXME: perf_events allows per-event control of this. */
641 /* we do not handle that yet. */
642 int
644 {
645 
646  int i;
647  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
648 
649  SUBDBG("old control domain %d, new domain %d\n",
650  pe_ctl->domain,domain);
651 
652  pe_ctl->domain = domain;
653 
654  /* Force the domain on all events */
655  for( i = 0; i < pe_ctl->num_events; i++ ) {
656  pe_ctl->events[i].attr.exclude_user =
657  !( pe_ctl->domain & PAPI_DOM_USER );
658  pe_ctl->events[i].attr.exclude_kernel =
659  !( pe_ctl->domain & PAPI_DOM_KERNEL );
660  pe_ctl->events[i].attr.exclude_hv =
661  !( pe_ctl->domain & PAPI_DOM_SUPERVISOR );
662  }
663  return PAPI_OK;
664 }
665 
666 /* Shutdown a thread */
667 int
669 {
670  pe_context_t *pe_ctx = ( pe_context_t *) ctx;
671 
672  pe_ctx->initialized=0;
673 
674  return PAPI_OK;
675 }
676 
677 
678 /* reset the hardware counters */
679 /* Note: PAPI_reset() does not necessarily call this */
680 /* unless the events are actually running. */
681 int
683 {
684  int i, ret;
685  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
686 
687  ( void ) ctx; /*unused */
688 
689  /* We need to reset all of the events, not just the group leaders */
690  for( i = 0; i < pe_ctl->num_events; i++ ) {
691  ret = ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_RESET, NULL );
692  if ( ret == -1 ) {
693  PAPIERROR("ioctl(%d, PERF_EVENT_IOC_RESET, NULL) "
694  "returned error, Linux says: %s",
695  pe_ctl->events[i].event_fd, strerror( errno ) );
696  return PAPI_ESYS;
697  }
698  }
699 
700  return PAPI_OK;
701 }
702 
703 
704 /* write (set) the hardware counters */
705 /* Current we do not support this. */
706 int
708  long long *from )
709 {
710  ( void ) ctx; /*unused */
711  ( void ) ctl; /*unused */
712  ( void ) from; /*unused */
713  /*
714  * Counters cannot be written. Do we need to virtualize the
715  * counters so that they can be written, or perhaps modify code so that
716  * they can be written? FIXME ?
717  */
718 
719  return PAPI_ENOSUPP;
720 }
721 
722 /*
723  * perf_event provides a complicated read interface.
724  * the info returned by read() varies depending on whether
725  * you have PERF_FORMAT_GROUP, PERF_FORMAT_TOTAL_TIME_ENABLED,
726  * PERF_FORMAT_TOTAL_TIME_RUNNING, or PERF_FORMAT_ID set
727  *
728  * To simplify things we just always ask for everything. This might
729  * lead to overhead when reading more than we need, but it makes the
730  * read code a lot simpler than the original implementation we had here.
731  *
732  * For more info on the layout see include/linux/perf_event.h
733  *
734  */
735 
736 int
738  long long **events, int flags )
739 {
740  ( void ) flags; /*unused */
741  int i, ret = -1;
742  /* pe_context_t *pe_ctx = ( pe_context_t *) ctx; */
743  (void) ctx; /*unused*/
744  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
745  long long papi_pe_buffer[READ_BUFFER_SIZE];
746  long long tot_time_running, tot_time_enabled, scale;
747 
748  /* Handle case where we are multiplexing */
749  if (pe_ctl->multiplexed) {
750 
751  /* currently we handle multiplexing by having individual events */
752  /* so we read from each in turn. */
753 
754  for ( i = 0; i < pe_ctl->num_events; i++ ) {
755 
756  ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer,
757  sizeof ( papi_pe_buffer ) );
758  if ( ret == -1 ) {
759  PAPIERROR("read returned an error: ", strerror( errno ));
760  return PAPI_ESYS;
761  }
762 
763  /* We should read 3 64-bit values from the counter */
764  if (ret<(signed)(3*sizeof(long long))) {
765  PAPIERROR("Error! short read!\n");
766  return PAPI_ESYS;
767  }
768 
769  SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
770  pe_ctl->events[i].event_fd,
771  (long)pe_ctl->tid, pe_ctl->cpu, ret);
772  SUBDBG("read: %lld %lld %lld\n",papi_pe_buffer[0],
773  papi_pe_buffer[1],papi_pe_buffer[2]);
774 
775  tot_time_enabled = papi_pe_buffer[1];
776  tot_time_running = papi_pe_buffer[2];
777 
778  SUBDBG("count[%d] = (papi_pe_buffer[%d] %lld * "
779  "tot_time_enabled %lld) / tot_time_running %lld\n",
780  i, 0,papi_pe_buffer[0],
781  tot_time_enabled,tot_time_running);
782 
783  if (tot_time_running == tot_time_enabled) {
784  /* No scaling needed */
785  pe_ctl->counts[i] = papi_pe_buffer[0];
786  } else if (tot_time_running && tot_time_enabled) {
787  /* Scale factor of 100 to avoid overflows when computing */
788  /*enabled/running */
789 
790  scale = (tot_time_enabled * 100LL) / tot_time_running;
791  scale = scale * papi_pe_buffer[0];
792  scale = scale / 100LL;
793  pe_ctl->counts[i] = scale;
794  } else {
795  /* This should not happen, but Phil reports it sometime does. */
796  SUBDBG("perf_event kernel bug(?) count, enabled, "
797  "running: %lld, %lld, %lld\n",
798  papi_pe_buffer[0],tot_time_enabled,
799  tot_time_running);
800 
801  pe_ctl->counts[i] = papi_pe_buffer[0];
802  }
803  }
804  }
805 
806  /* Handle cases where we cannot use FORMAT GROUP */
807  else if (pe_ctl->inherit) {
808 
809  /* we must read each counter individually */
810  for ( i = 0; i < pe_ctl->num_events; i++ ) {
811 
812  ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer,
813  sizeof ( papi_pe_buffer ) );
814  if ( ret == -1 ) {
815  PAPIERROR("read returned an error: ", strerror( errno ));
816  return PAPI_ESYS;
817  }
818 
819  /* we should read one 64-bit value from each counter */
820  if (ret!=sizeof(long long)) {
821  PAPIERROR("Error! short read!\n");
822  PAPIERROR("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
823  pe_ctl->events[i].event_fd,
824  (long)pe_ctl->tid, pe_ctl->cpu, ret);
825  return PAPI_ESYS;
826  }
827 
828  SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
829  pe_ctl->events[i].event_fd, (long)pe_ctl->tid,
830  pe_ctl->cpu, ret);
831  SUBDBG("read: %lld\n",papi_pe_buffer[0]);
832 
833  pe_ctl->counts[i] = papi_pe_buffer[0];
834  }
835  }
836 
837 
838  /* Handle cases where we are using FORMAT_GROUP */
839  /* We assume only one group leader, in position 0 */
840 
841  else {
842  if (pe_ctl->events[0].group_leader_fd!=-1) {
843  PAPIERROR("Was expecting group leader!\n");
844  }
845 
846  ret = read( pe_ctl->events[0].event_fd, papi_pe_buffer,
847  sizeof ( papi_pe_buffer ) );
848 
849  if ( ret == -1 ) {
850  PAPIERROR("read returned an error: ", strerror( errno ));
851  return PAPI_ESYS;
852  }
853 
854  /* we read 1 64-bit value (number of events) then */
855  /* num_events more 64-bit values that hold the counts */
856  if (ret<(signed)((1+pe_ctl->num_events)*sizeof(long long))) {
857  PAPIERROR("Error! short read!\n");
858  return PAPI_ESYS;
859  }
860 
861  SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
862  pe_ctl->events[0].event_fd,
863  (long)pe_ctl->tid, pe_ctl->cpu, ret);
864  {
865  int j;
866  for(j=0;j<ret/8;j++) {
867  SUBDBG("read %d: %lld\n",j,papi_pe_buffer[j]);
868  }
869  }
870 
871  /* Make sure the kernel agrees with how many events we have */
872  if (papi_pe_buffer[0]!=pe_ctl->num_events) {
873  PAPIERROR("Error! Wrong number of events!\n");
874  return PAPI_ESYS;
875  }
876 
877  /* put the count values in their proper location */
878  for(i=0;i<papi_pe_buffer[0];i++) {
879  pe_ctl->counts[i] = papi_pe_buffer[1+i];
880  }
881  }
882 
883  /* point PAPI to the values we read */
884  *events = pe_ctl->counts;
885 
886  return PAPI_OK;
887 }
888 
889 /* Start counting events */
890 int
892 {
893  int ret;
894  int i;
895  int did_something = 0;
896  pe_context_t *pe_ctx = ( pe_context_t *) ctx;
897  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
898 
899  /* Reset the counters first. Is this necessary? */
900  ret = _pe_reset( pe_ctx, pe_ctl );
901  if ( ret ) {
902  return ret;
903  }
904 
905  /* Enable all of the group leaders */
906  /* All group leaders have a group_leader_fd of -1 */
907  for( i = 0; i < pe_ctl->num_events; i++ ) {
908  if (pe_ctl->events[i].group_leader_fd == -1) {
909  SUBDBG("ioctl(enable): fd: %d\n", pe_ctl->events[i].event_fd);
910  ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_ENABLE, NULL) ;
911 
912  /* ioctls always return -1 on failure */
913  if (ret == -1) {
914  PAPIERROR("ioctl(PERF_EVENT_IOC_ENABLE) failed.\n");
915  return PAPI_ESYS;
916  }
917 
918  did_something++;
919  }
920  }
921 
922  if (!did_something) {
923  PAPIERROR("Did not enable any counters.\n");
924  return PAPI_EBUG;
925  }
926 
927  pe_ctx->state |= PERF_EVENTS_RUNNING;
928 
929  return PAPI_OK;
930 
931 }
932 
933 /* Stop all of the counters */
934 int
936 {
937 
938  int ret;
939  int i;
940  pe_context_t *pe_ctx = ( pe_context_t *) ctx;
941  pe_control_t *pe_ctl = ( pe_control_t *) ctl;
942 
943  /* Just disable the group leaders */
944  for ( i = 0; i < pe_ctl->num_events; i++ ) {
945  if ( pe_ctl->events[i].group_leader_fd == -1 ) {
946  ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_DISABLE, NULL);
947  if ( ret == -1 ) {
948  PAPIERROR( "ioctl(%d, PERF_EVENT_IOC_DISABLE, NULL) "
949  "returned error, Linux says: %s",
950  pe_ctl->events[i].event_fd, strerror( errno ) );
951  return PAPI_EBUG;
952  }
953  }
954  }
955 
956  pe_ctx->state &= ~PERF_EVENTS_RUNNING;
957 
958  return PAPI_OK;
959 }
960 
961 /* Set various options on a control state */
962 int
963 _peu_ctl( hwd_context_t *ctx, int code, _papi_int_option_t *option )
964 {
965  int ret;
966  pe_context_t *pe_ctx = ( pe_context_t *) ctx;
967  pe_control_t *pe_ctl = NULL;
968 
969  switch ( code ) {
970  case PAPI_MULTIPLEX:
971  pe_ctl = ( pe_control_t * ) ( option->multiplex.ESI->ctl_state );
972 
973  pe_ctl->multiplexed = 1;
974  ret = _peu_update_control_state( pe_ctl, NULL,
975  pe_ctl->num_events, pe_ctx );
976  if (ret != PAPI_OK) {
977  pe_ctl->multiplexed = 0;
978  }
979  return ret;
980 
981  case PAPI_ATTACH:
982  pe_ctl = ( pe_control_t * ) ( option->attach.ESI->ctl_state );
983 
984  pe_ctl->tid = option->attach.tid;
985 
986  /* If events have been already been added, something may */
987  /* have been done to the kernel, so update */
988  ret =_peu_update_control_state( pe_ctl, NULL,
989  pe_ctl->num_events, pe_ctx);
990 
991  return ret;
992 
993  case PAPI_DETACH:
994  pe_ctl = ( pe_control_t *) ( option->attach.ESI->ctl_state );
995 
996  pe_ctl->tid = 0;
997  return PAPI_OK;
998 
999  case PAPI_CPU_ATTACH:
1000  pe_ctl = ( pe_control_t *) ( option->cpu.ESI->ctl_state );
1001 
1002  /* this tells the kernel not to count for a thread */
1003  /* should we warn if we try to set both? perf_event */
1004  /* will reject it. */
1005  pe_ctl->tid = -1;
1006 
1007  pe_ctl->cpu = option->cpu.cpu_num;
1008 
1009  return PAPI_OK;
1010 
1011  case PAPI_DOMAIN:
1012  pe_ctl = ( pe_control_t *) ( option->domain.ESI->ctl_state );
1013 
1014  /* looks like we are allowed, so set counting domain */
1015  return _pe_set_domain( pe_ctl, option->domain.domain );
1016 
1017  case PAPI_GRANUL:
1018  pe_ctl = (pe_control_t *) ( option->granularity.ESI->ctl_state );
1019 
1020  /* FIXME: we really don't support this yet */
1021 
1022  switch ( option->granularity.granularity ) {
1023  case PAPI_GRN_PROCG:
1024  case PAPI_GRN_SYS_CPU:
1025  case PAPI_GRN_PROC:
1026  return PAPI_ECMP;
1027 
1028  /* Currently we only support thread and CPU granularity */
1029  case PAPI_GRN_SYS:
1030  pe_ctl->granularity=PAPI_GRN_SYS;
1031  break;
1032 
1033  case PAPI_GRN_THR:
1034  pe_ctl->granularity=PAPI_GRN_THR;
1035  break;
1036 
1037 
1038  default:
1039  return PAPI_EINVAL;
1040  }
1041  return PAPI_OK;
1042 
1043  case PAPI_INHERIT:
1044  pe_ctl = (pe_control_t *) ( option->inherit.ESI->ctl_state );
1045 
1046  if (option->inherit.inherit) {
1047  /* children will inherit counters */
1048  pe_ctl->inherit = 1;
1049  } else {
1050  /* children won't inherit counters */
1051  pe_ctl->inherit = 0;
1052  }
1053  return PAPI_OK;
1054 
1055  case PAPI_DATA_ADDRESS:
1056  return PAPI_ENOSUPP;
1057 
1058  case PAPI_INSTR_ADDRESS:
1059  return PAPI_ENOSUPP;
1060 
1061  case PAPI_DEF_ITIMER:
1062  return PAPI_ENOSUPP;
1063 
1064  case PAPI_DEF_MPX_NS:
1065  return PAPI_ENOSUPP;
1066 
1067  case PAPI_DEF_ITIMER_NS:
1068  return PAPI_ENOSUPP;
1069 
1070  default:
1071  return PAPI_ENOSUPP;
1072  }
1073 }
1074 
1075 
/* Enumerate native events: advance *PapiEventCode according to the */
/* PAPI_ENUM_* modifier.  Fails if the component is disabled. */
int
_peu_ntv_enum_events( unsigned int *PapiEventCode, int modifier )
{

 if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;


 /* NOTE(review): the trailing argument of this call (presumably the */
 /* uncore native event table) was lost in extraction -- restore     */
 /* from upstream before compiling.                                  */
 return _peu_libpfm4_ntv_enum_events(PapiEventCode, modifier,
}
1086 
/* Translate an event name string into a native event code. */
int
_peu_ntv_name_to_code( char *name, unsigned int *event_code) {

 if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;

 /* NOTE(review): the trailing argument of this call (presumably the */
 /* uncore native event table) was lost in extraction -- restore     */
 /* from upstream before compiling.                                  */
 return _peu_libpfm4_ntv_name_to_code(name,event_code,
}
1095 
/* Translate a native event code back to its name (at most len bytes). */
int
_peu_ntv_code_to_name(unsigned int EventCode,
 char *ntv_name, int len) {

 if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;

 /* NOTE(review): the trailing argument of this call (presumably the */
 /* uncore native event table) was lost in extraction -- restore     */
 /* from upstream before compiling.                                  */
 return _peu_libpfm4_ntv_code_to_name(EventCode,
 ntv_name, len,
}
1106 
/* Fetch the description string for a native event code (at most len bytes). */
int
_peu_ntv_code_to_descr( unsigned int EventCode,
 char *ntv_descr, int len) {

 if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;

 /* NOTE(review): the trailing argument of this call (presumably the */
 /* uncore native event table) was lost in extraction -- restore     */
 /* from upstream before compiling.                                  */
 return _peu_libpfm4_ntv_code_to_descr(EventCode,ntv_descr,len,
}
1116 
/* Fill a PAPI_event_info_t for a native event code. */
int
_peu_ntv_code_to_info(unsigned int EventCode,
 PAPI_event_info_t *info) {

 if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;

 /* NOTE(review): the trailing argument of this call (presumably the */
 /* uncore native event table) was lost in extraction -- restore     */
 /* from upstream before compiling.                                  */
 return _peu_libpfm4_ntv_code_to_info(EventCode, info,
}
1126 
/* Our component vector */
/* This table is how the PAPI framework discovers and calls the */
/* uncore component: metadata in cmp_info, opaque structure sizes, */
/* and the function pointers implemented above. */

papi_vector_t _perf_event_uncore_vector = {
 .cmp_info = {
 /* component information (unspecified values initialized to 0) */
 .name = "perf_event_uncore",
 .short_name = "peu",
 .version = "5.0",
 .description = "Linux perf_event CPU uncore and northbridge",

 /* uncore counts are not per-privilege-level, so default to ALL */
 .default_domain = PAPI_DOM_ALL,
 .available_domains = PAPI_DOM_USER | PAPI_DOM_KERNEL | PAPI_DOM_SUPERVISOR,
 .default_granularity = PAPI_GRN_SYS,
 .available_granularities = PAPI_GRN_SYS,

 .num_mpx_cntrs = PERF_EVENT_MAX_MPX_COUNTERS,

 /* component specific cmp_info initializations */
 .fast_virtual_timer = 0,
 .attach = 1,
 .attach_must_ptrace = 1,
 .cpu = 1,
 .inherit = 1,
 .cntr_umasks = 1,

 },

 /* sizes of framework-opaque component-private structures */
 .size = {
 .context = sizeof ( pe_context_t ),
 .control_state = sizeof ( pe_control_t ),
 .reg_value = sizeof ( int ),
 .reg_alloc = sizeof ( int ),
 },

 /* function pointers in this component */
 .init_component = _peu_init_component,
 .shutdown_component = _peu_shutdown_component,
 .init_thread = _peu_init_thread,
 .init_control_state = _peu_init_control_state,
 .start = _peu_start,
 .stop = _peu_stop,
 .read = _peu_read,
 .shutdown_thread = _peu_shutdown_thread,
 .ctl = _peu_ctl,
 .update_control_state = _peu_update_control_state,
 .set_domain = _peu_set_domain,
 .reset = _peu_reset,
 .write = _peu_write,

 /* from counter name mapper */
 .ntv_enum_events = _peu_ntv_enum_events,
 .ntv_name_to_code = _peu_ntv_name_to_code,
 .ntv_code_to_name = _peu_ntv_code_to_name,
 .ntv_code_to_descr = _peu_ntv_code_to_descr,
 .ntv_code_to_info = _peu_ntv_code_to_info,
};
1184 
1185 
char name[PAPI_MAX_STR_LEN]
Definition: papi.h:625
i inherit inherit
int _peu_ctl(hwd_context_t *ctx, int code, _papi_int_option_t *option)
ssize_t read(int fd, void *buf, size_t count)
Definition: appio.c:225
memset(eventId, 0, size)
int _peu_ntv_name_to_code(char *name, unsigned int *event_code)
static int open_pe_events(pe_context_t *ctx, pe_control_t *ctl)
long long counts[PERF_EVENT_MAX_MPX_COUNTERS]
_papi_int_inherit_t inherit
int errno
int close(int fd)
Definition: appio.c:175
#define PAPI_ENOMEM
Definition: fpapi.h:107
#define PAPI_GRN_SYS_CPU
Definition: fpapi.h:72
int _peu_ntv_code_to_name(unsigned int EventCode, char *ntv_name, int len)
#define PAPI_CPU_ATTACH
Definition: papi.h:455
int _pe_reset(hwd_context_t *ctx, hwd_control_state_t *ctl)
Definition: perf_event.c:860
int _peu_shutdown_component(void)
#define PERF_EVENT_MAX_MPX_COUNTERS
Definition: perf_event_lib.h:5
EventSetInfo_t * ESI
unsigned int granularity
long long flags
Definition: iozone.c:12330
#define PAPI_DEF_ITIMER_NS
Definition: papi.h:453
EventSetInfo_t * ESI
int _papi_libpfm4_init(papi_vector_t *my_vector)
int _pe_set_domain(hwd_control_state_t *ctl, int domain)
Definition: perf_event.c:821
static int map_perf_event_errors_to_papi(int perf_event_error)
#define PAPI_INSTR_ADDRESS
Definition: papi.h:451
int _peu_set_domain(hwd_control_state_t *ctl, int domain)
#define PAPI_MAX_STR_LEN
Definition: fpapi.h:43
#define PAPI_DOM_ALL
Definition: fpapi.h:25
int _peu_reset(hwd_context_t *ctx, hwd_control_state_t *ctl)
cpu
Definition: iozone.c:3872
int _peu_write(hwd_context_t *ctx, hwd_control_state_t *ctl, long long *from)
int default_granularity
Definition: papi.h:641
#define PAPI_ENOEVNT
Definition: fpapi.h:112
#define PAPI_DATA_ADDRESS
Definition: papi.h:450
papi_vector_t _perf_event_uncore_vector
#define PAPI_EPERM
Definition: fpapi.h:120
EventSetInfo_t * ESI
#define PERF_EVENTS_OPENED
pe_event_info_t events[PERF_EVENT_MAX_MPX_COUNTERS]
static int close_pe_events(pe_context_t *ctx, pe_control_t *ctl)
return PAPI_OK
Definition: linux-nvml.c:458
int count
Definition: iozone.c:22422
#define PAPI_ENOCMP
Definition: fpapi.h:122
#define PAPI_GRN_SYS
Definition: fpapi.h:71
fclose(thread_wqfd)
#define PAPI_DOM_KERNEL
Definition: fpapi.h:22
#define PAPI_GRN_PROC
Definition: fpapi.h:69
int _peu_libpfm4_ntv_code_to_info(unsigned int EventCode, PAPI_event_info_t *info, struct native_event_table_t *event_table)
void
Definition: iozone.c:18627
int _peu_libpfm4_ntv_enum_events(unsigned int *PapiEventCode, int modifier, struct native_event_table_t *event_table)
return PAPI_EINVAL
Definition: linux-nvml.c:408
PAPI_component_info_t cmp_info
Definition: papi_vector.h:20
int _peu_libpfm4_init(papi_vector_t *my_vector, struct native_event_table_t *event_table, int pmu_type)
int _peu_ntv_enum_events(unsigned int *PapiEventCode, int modifier)
#define PAPI_INHERIT
Definition: papi.h:456
Return codes and api definitions.
uint32_t nr_mmap_pages
FILE * fff[MAX_EVENTS]
unsigned int domain
int multiplex(void)
Definition: multiplex.c:35
_papi_int_attach_t attach
long long ret
Definition: iozone.c:1346
unsigned long tid
_papi_int_cpu_t cpu
int i
Definition: fileop.c:140
#define PAPI_ENOSUPP
Definition: fpapi.h:123
int _papi_libpfm4_shutdown(void)
#define PAPI_GRN_PROCG
Definition: fpapi.h:70
struct native_event_table_t uncore_native_event_table
#define PAPI_DOM_SUPERVISOR
Definition: fpapi.h:24
static int pid
int _peu_libpfm4_ntv_name_to_code(char *name, unsigned int *event_code, struct native_event_table_t *event_table)
static int cidx
Definition: event_info.c:40
int _peu_libpfm4_ntv_code_to_descr(unsigned int EventCode, char *ntv_descr, int len, struct native_event_table_t *event_table)
#define PAPI_ECMP
Definition: fpapi.h:109
static int native
Definition: event_info.c:39
int _peu_libpfm4_setup_counters(struct perf_event_attr *attr, int event, struct native_event_table_t *event_table)
#define SUBDBG(format, args...)
Definition: papi_debug.h:63
_papi_int_granularity_t granularity
int _peu_shutdown_thread(hwd_context_t *ctx)
EventSetInfo_t * ESI
int _peu_start(hwd_context_t *ctx, hwd_control_state_t *ctl)
void PAPIERROR(char *format,...)
unsigned int multiplexed
#define PAPI_DOMAIN
Definition: fpapi.h:50
char events[MAX_EVENTS][BUFSIZ]
int _peu_init_control_state(hwd_control_state_t *ctl)
struct native_event_table_t * event_table
#define PAPI_ATTACH
Definition: fpapi.h:62
int _peu_init_component(int cidx)
EventSetInfo_t * ESI
#define PAPI_GRANUL
Definition: fpapi.h:52
_papi_int_multiplex_t multiplex
#define PERF_EVENTS_RUNNING
#define PAPI_DEF_MPX_NS
Definition: fpapi.h:53
#define PAPI_ESYS
Definition: fpapi.h:108
int _peu_ntv_code_to_info(unsigned int EventCode, PAPI_event_info_t *info)
again struct sockaddr sizeof(struct sockaddr_in))
#define PAPI_DETACH
Definition: fpapi.h:66
int _peu_init_thread(hwd_context_t *hwd_ctx)
int _peu_read(hwd_context_t *ctx, hwd_control_state_t *ctl, long long **events, int flags)
unsigned int cpu_num
#define PMU_TYPE_UNCORE
#define PAPI_EBUG
Definition: fpapi.h:111
#define PAPI_DEF_ITIMER
Definition: papi.h:452
char * name
Definition: iozone.c:23648
struct perf_event_attr attr
int
Definition: iozone.c:18528
int _peu_libpfm4_ntv_code_to_name(unsigned int EventCode, char *ntv_name, int len, struct native_event_table_t *event_table)
static unsigned int get_read_format(unsigned int multiplex, unsigned int inherit, int format_group)
int our_cidx
Definition: perf_event.c:60
int _peu_stop(hwd_context_t *ctx, hwd_control_state_t *ctl)
int _peu_libpfm4_shutdown(struct native_event_table_t *event_table)
unsigned int inherit
struct papi_vectors * _papi_hwd[]
_papi_int_domain_t domain
static long sys_perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
int _peu_ntv_code_to_descr(unsigned int EventCode, char *ntv_descr, int len)
#define PAPI_DOM_USER
Definition: fpapi.h:21
#define READ_BUFFER_SIZE
EventSetInfo_t * ESI
hwd_control_state_t * ctl_state
int _peu_update_control_state(hwd_control_state_t *ctl, NativeInfo_t *native, int count, hwd_context_t *ctx)
long j
Definition: iozone.c:19135
ssize_t retval
Definition: libasync.c:338
#define PAPI_GRN_THR
Definition: fpapi.h:67
#define PAPI_MULTIPLEX
Definition: fpapi.h:48