perf_event_uncore.c File Reference

Include dependency graph for perf_event_uncore.c:

Go to the source code of this file.

Defines

#define PERF_EVENTS_OPENED   0x01
#define PERF_EVENTS_RUNNING   0x02
#define READ_BUFFER_SIZE   (3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS))

Functions

int _peu_libpfm4_get_cidx ()
static int _peu_set_domain (hwd_control_state_t *ctl, int domain)
static int bug_check_scheduability (void)
static unsigned int get_read_format (unsigned int multiplex, unsigned int inherit, int format_group)
static long sys_perf_event_open (struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
static int map_perf_event_errors_to_papi (int perf_event_error)
static int check_scheduability (pe_context_t *ctx, pe_control_t *ctl)
static int open_pe_events (pe_context_t *ctx, pe_control_t *ctl)
static int close_pe_events (pe_context_t *ctx, pe_control_t *ctl)
int _peu_init_thread (hwd_context_t *hwd_ctx)
int _peu_init_control_state (hwd_control_state_t *ctl)
int _peu_init_component (int cidx)
int _peu_shutdown_component (void)
int _peu_update_control_state (hwd_control_state_t *ctl, NativeInfo_t *native, int count, hwd_context_t *ctx)
int _peu_shutdown_thread (hwd_context_t *ctx)
int _peu_reset (hwd_context_t *ctx, hwd_control_state_t *ctl)
int _peu_write (hwd_context_t *ctx, hwd_control_state_t *ctl, long long *from)
int _peu_read (hwd_context_t *ctx, hwd_control_state_t *ctl, long long **events, int flags)
int _peu_start (hwd_context_t *ctx, hwd_control_state_t *ctl)
int _peu_stop (hwd_context_t *ctx, hwd_control_state_t *ctl)
int _peu_ctl (hwd_context_t *ctx, int code, _papi_int_option_t *option)
int _peu_ntv_enum_events (unsigned int *PapiEventCode, int modifier)
int _peu_ntv_name_to_code (char *name, unsigned int *event_code)
int _peu_ntv_code_to_name (unsigned int EventCode, char *ntv_name, int len)
int _peu_ntv_code_to_descr (unsigned int EventCode, char *ntv_descr, int len)
int _peu_ntv_code_to_info (unsigned int EventCode, PAPI_event_info_t *info)

Variables

papi_vector_t _perf_event_uncore_vector
struct native_event_table_t uncore_native_event_table
static int our_cidx

Define Documentation

#define PERF_EVENTS_OPENED   0x01

Definition at line 59 of file perf_event_uncore.c.

#define PERF_EVENTS_RUNNING   0x02

Definition at line 60 of file perf_event_uncore.c.

#define READ_BUFFER_SIZE   (3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS))

Definition at line 243 of file perf_event_uncore.c.


Function Documentation

int _peu_ctl ( hwd_context_t ctx,
int  code,
_papi_int_option_t option 
)

Definition at line 1150 of file perf_event_uncore.c.

01151 {
01152    int ret;
01153    pe_context_t *pe_ctx = ( pe_context_t *) ctx;
01154    pe_control_t *pe_ctl = NULL;
01155 
01156    switch ( code ) {
01157       case PAPI_MULTIPLEX:
01158        pe_ctl = ( pe_control_t * ) ( option->multiplex.ESI->ctl_state );
01159 
01160        pe_ctl->multiplexed = 1;
01161        ret = _peu_update_control_state( pe_ctl, NULL,
01162                         pe_ctl->num_events, pe_ctx );
01163        if (ret != PAPI_OK) {
01164           pe_ctl->multiplexed = 0;
01165        }
01166        return ret;
01167 
01168       case PAPI_ATTACH:
01169        pe_ctl = ( pe_control_t * ) ( option->attach.ESI->ctl_state );
01170 
01171        pe_ctl->tid = option->attach.tid;
01172 
01174        /* If events have already been added, something may */
01174        /* have been done to the kernel, so update */
01175        ret =_peu_update_control_state( pe_ctl, NULL,
01176                         pe_ctl->num_events, pe_ctx);
01177 
01178        return ret;
01179 
01180       case PAPI_DETACH:
01181        pe_ctl = ( pe_control_t *) ( option->attach.ESI->ctl_state );
01182 
01183        pe_ctl->tid = 0;
01184        return PAPI_OK;
01185 
01186       case PAPI_CPU_ATTACH:
01187        pe_ctl = ( pe_control_t *) ( option->cpu.ESI->ctl_state );
01188 
01189        /* this tells the kernel not to count for a thread   */
01190        /* should we warn if we try to set both?  perf_event */
01191        /* will reject it.                                   */
01192        pe_ctl->tid = -1;
01193 
01194        pe_ctl->cpu = option->cpu.cpu_num;
01195 
01196        return PAPI_OK;
01197 
01198       case PAPI_DOMAIN:
01199        pe_ctl = ( pe_control_t *) ( option->domain.ESI->ctl_state );
01200 
01201        /* looks like we are allowed, so set event set level counting domains */
01202        pe_ctl->domain = option->domain.domain;
01203        return PAPI_OK;
01204 
01205       case PAPI_GRANUL:
01206        pe_ctl = (pe_control_t *) ( option->granularity.ESI->ctl_state );
01207 
01208        /* FIXME: we really don't support this yet */
01209 
01210            switch ( option->granularity.granularity  ) {
01211               case PAPI_GRN_PROCG:
01212               case PAPI_GRN_SYS_CPU:
01213               case PAPI_GRN_PROC:
01214            return PAPI_ECMP;
01215 
01216           /* Currently we only support thread and CPU granularity */
01217               case PAPI_GRN_SYS:
01218            pe_ctl->granularity=PAPI_GRN_SYS;
01219            break;
01220 
01221               case PAPI_GRN_THR:
01222            pe_ctl->granularity=PAPI_GRN_THR;
01223            break;
01224 
01225 
01226               default:
01227            return PAPI_EINVAL;
01228        }
01229            return PAPI_OK;
01230 
01231       case PAPI_INHERIT:
01232        pe_ctl = (pe_control_t *) ( option->inherit.ESI->ctl_state );
01233 
01234        if (option->inherit.inherit) {
01235           /* children will inherit counters */
01236           pe_ctl->inherit = 1;
01237        } else {
01238           /* children won't inherit counters */
01239           pe_ctl->inherit = 0;
01240        }
01241        return PAPI_OK;
01242 
01243       case PAPI_DATA_ADDRESS:
01244        return PAPI_ENOSUPP;
01245 
01246       case PAPI_INSTR_ADDRESS:
01247        return PAPI_ENOSUPP;
01248 
01249       case PAPI_DEF_ITIMER:
01250        return PAPI_ENOSUPP;
01251 
01252       case PAPI_DEF_MPX_NS:
01253        return PAPI_ENOSUPP;
01254 
01255       case PAPI_DEF_ITIMER_NS:
01256        return PAPI_ENOSUPP;
01257 
01258       default:
01259        return PAPI_ENOSUPP;
01260    }
01261 }

Here is the call graph for this function:

int _peu_init_component ( int  cidx  ) 

Definition at line 617 of file perf_event_uncore.c.

00618 {
00619 
00620    int retval;
00621    int paranoid_level;
00622 
00623    FILE *fff;
00624 
00625    our_cidx=cidx;
00626 
00628    /* This is the official way to detect if perf_event support exists */
00628    /* The file is called perf_counter_paranoid on 2.6.31             */
00629    /* currently we are lazy and do not support 2.6.31 kernels        */
00630 
00631    fff=fopen("/proc/sys/kernel/perf_event_paranoid","r");
00632    if (fff==NULL) {
00633      strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00634         "perf_event support not detected",PAPI_MAX_STR_LEN);
00635      return PAPI_ENOCMP;
00636    }
00637    retval=fscanf(fff,"%d",&paranoid_level);
00638    if (retval!=1) fprintf(stderr,"Error reading paranoid level\n");
00639    fclose(fff);
00640 
00641 
00642    /* Run the libpfm4-specific setup */
00643 
00644    retval = _papi_libpfm4_init(_papi_hwd[cidx]);
00645    if (retval) {
00646      strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00647          "Error initializing libpfm4",PAPI_MAX_STR_LEN);
00648      return PAPI_ENOCMP;
00649    }
00650 
00651 
00652    /* Run the uncore specific libpfm4 setup */
00653 
00654    retval = _peu_libpfm4_init(_papi_hwd[cidx], 
00655                    &uncore_native_event_table,
00656                                PMU_TYPE_UNCORE);
00657    if (retval) {
00658      strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00659          "Error setting up libpfm4",PAPI_MAX_STR_LEN);
00660      return PAPI_ENOCMP;
00661    }
00662 
00663    /* Check if no uncore events found */
00664 
00665    if (_papi_hwd[cidx]->cmp_info.num_native_events==0) {
00666      strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00667          "No uncore PMUs or events found",PAPI_MAX_STR_LEN);
00668      return PAPI_ENOCMP;
00669    }
00670 
00671    /* Check if we have enough permissions for uncore */
00672 
00673    /* 2 means no kernel measurements allowed   */
00674    /* 1 means normal counter access            */
00675    /* 0 means you can access CPU-specific data */
00676    /* -1 means no restrictions                 */
00677 
00678    if ((paranoid_level>0) && (getuid()!=0)) {
00679       strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00680         "Insufficient permissions for uncore access.  Set /proc/sys/kernel/perf_event_paranoid to 0 or run as root.",
00681         PAPI_MAX_STR_LEN);
00682      return PAPI_ENOCMP;
00683    }
00684 
00685    return PAPI_OK;
00686 
00687 }

Here is the call graph for this function:

int _peu_init_control_state ( hwd_control_state_t ctl  ) 

Definition at line 592 of file perf_event_uncore.c.

00593 {
00594   pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00595 
00596   /* clear the contents */
00597   memset( pe_ctl, 0, sizeof ( pe_control_t ) );
00598 
00599   /* Set the default domain */
00600   _peu_set_domain( ctl, _perf_event_uncore_vector.cmp_info.default_domain );
00601 
00602   /* Set the default granularity */
00603   pe_ctl->granularity=_perf_event_uncore_vector.cmp_info.default_granularity;
00604 
00605   pe_ctl->cidx=our_cidx;
00606 
00607   /* Set cpu number in the control block to show events */
00608   /* are not tied to specific cpu                       */
00609   pe_ctl->cpu = -1;
00610   return PAPI_OK;
00611 }

Here is the call graph for this function:

int _peu_init_thread ( hwd_context_t hwd_ctx  ) 

Definition at line 575 of file perf_event_uncore.c.

00576 {
00577 
00578   pe_context_t *pe_ctx = ( pe_context_t *) hwd_ctx;
00579 
00580   /* clear the context structure and mark as initialized */
00581   memset( pe_ctx, 0, sizeof ( pe_context_t ) );
00582   pe_ctx->initialized=1;
00583 
00584   pe_ctx->event_table=&uncore_native_event_table;
00585   pe_ctx->cidx=our_cidx;
00586 
00587   return PAPI_OK;
00588 }

int _peu_libpfm4_get_cidx ( void   ) 

Definition at line 54 of file perf_event_uncore.c.

00054                         {
00055     return our_cidx;
00056 }

Here is the caller graph for this function:

int _peu_ntv_code_to_descr ( unsigned int  EventCode,
char *  ntv_descr,
int  len 
)

Definition at line 1296 of file perf_event_uncore.c.

01297                                                       {
01298 
01299    if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01300 
01301    return _peu_libpfm4_ntv_code_to_descr(EventCode,ntv_descr,len,
01302                                           &uncore_native_event_table);
01303 }

Here is the call graph for this function:

int _peu_ntv_code_to_info ( unsigned int  EventCode,
PAPI_event_info_t info 
)

Definition at line 1306 of file perf_event_uncore.c.

01307                                                    {
01308 
01309   if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01310 
01311   return _peu_libpfm4_ntv_code_to_info(EventCode, info,
01312                                         &uncore_native_event_table);
01313 }

Here is the call graph for this function:

int _peu_ntv_code_to_name ( unsigned int  EventCode,
char *  ntv_name,
int  len 
)

Definition at line 1285 of file perf_event_uncore.c.

01286                                                    {
01287 
01288    if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01289 
01290    return _peu_libpfm4_ntv_code_to_name(EventCode,
01291                                          ntv_name, len, 
01292                      &uncore_native_event_table);
01293 }

Here is the call graph for this function:

int _peu_ntv_enum_events ( unsigned int *  PapiEventCode,
int  modifier 
)

Definition at line 1265 of file perf_event_uncore.c.

01266 {
01267 
01268   if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01269 
01270 
01271   return _peu_libpfm4_ntv_enum_events(PapiEventCode, modifier,
01272                                        &uncore_native_event_table);
01273 }

Here is the call graph for this function:

int _peu_ntv_name_to_code ( char *  name,
unsigned int *  event_code 
)

Definition at line 1276 of file perf_event_uncore.c.

01276                                                              {
01277 
01278   if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01279 
01280   return _peu_libpfm4_ntv_name_to_code(name,event_code,
01281                                         &uncore_native_event_table);
01282 }

Here is the call graph for this function:

int _peu_read ( hwd_context_t ctx,
hwd_control_state_t ctl,
long long **  events,
int  flags 
)

Definition at line 914 of file perf_event_uncore.c.

00916 {
00917     SUBDBG("ENTER: ctx: %p, ctl: %p, events: %p, flags: %#x\n", ctx, ctl, events, flags);
00918 
00919    ( void ) flags;           /*unused */
00920    int i, ret = -1;
00921    /* pe_context_t *pe_ctx = ( pe_context_t *) ctx; */ 
00922    (void) ctx; /*unused*/
00923    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00924    long long papi_pe_buffer[READ_BUFFER_SIZE];
00925    long long tot_time_running, tot_time_enabled, scale;
00926 
00927    /* Handle case where we are multiplexing */
00928    if (pe_ctl->multiplexed) {
00929 
00930       /* currently we handle multiplexing by having individual events */
00931       /* so we read from each in turn.                                */
00932 
00933       for ( i = 0; i < pe_ctl->num_events; i++ ) {
00934 
00935          ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer,
00936             sizeof ( papi_pe_buffer ) );
00937          if ( ret == -1 ) {
00938         PAPIERROR("read returned an error: ", strerror( errno ));
00939        SUBDBG("EXIT: PAPI_ESYS\n");
00940         return PAPI_ESYS;
00941      }
00942 
00943      /* We should read 3 64-bit values from the counter */
00944      if (ret<(signed)(3*sizeof(long long))) {
00945         PAPIERROR("Error!  short read!\n");
00946        SUBDBG("EXIT: PAPI_ESYS\n");
00947         return PAPI_ESYS;
00948      }
00949 
00950          SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
00951             pe_ctl->events[i].event_fd,
00952         (long)pe_ctl->tid, pe_ctl->events[i].cpu, ret);
00953          SUBDBG("read: %lld %lld %lld\n",papi_pe_buffer[0],
00954             papi_pe_buffer[1],papi_pe_buffer[2]);
00955 
00956          tot_time_enabled = papi_pe_buffer[1];
00957          tot_time_running = papi_pe_buffer[2];
00958 
00959          SUBDBG("count[%d] = (papi_pe_buffer[%d] %lld * "
00960         "tot_time_enabled %lld) / tot_time_running %lld\n",
00961         i, 0,papi_pe_buffer[0],
00962         tot_time_enabled,tot_time_running);
00963 
00964          if (tot_time_running == tot_time_enabled) {
00965         /* No scaling needed */
00966         pe_ctl->counts[i] = papi_pe_buffer[0];
00967          } else if (tot_time_running && tot_time_enabled) {
00968         /* Scale factor of 100 to avoid overflows when computing */
00969         /*enabled/running */
00970 
00971         scale = (tot_time_enabled * 100LL) / tot_time_running;
00972         scale = scale * papi_pe_buffer[0];
00973         scale = scale / 100LL;
00974         pe_ctl->counts[i] = scale;
00975      } else {
00977       /* This should not happen, but Phil reports it sometimes does. */
00977         SUBDBG("perf_event kernel bug(?) count, enabled, "
00978            "running: %lld, %lld, %lld\n",
00979            papi_pe_buffer[0],tot_time_enabled,
00980            tot_time_running);
00981 
00982         pe_ctl->counts[i] = papi_pe_buffer[0];
00983      }
00984       }
00985    }
00986 
00987    /* Handle cases where we cannot use FORMAT GROUP */
00988    else if (pe_ctl->inherit) {
00989 
00990       /* we must read each counter individually */
00991       for ( i = 0; i < pe_ctl->num_events; i++ ) {
00992 
00993          ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer, 
00994             sizeof ( papi_pe_buffer ) );
00995          if ( ret == -1 ) {
00996         PAPIERROR("read returned an error: ", strerror( errno ));
00997        SUBDBG("EXIT: PAPI_ESYS\n");
00998         return PAPI_ESYS;
00999      }
01000 
01001      /* we should read one 64-bit value from each counter */
01002      if (ret!=sizeof(long long)) {
01003         PAPIERROR("Error!  short read!\n");
01004         PAPIERROR("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
01005            pe_ctl->events[i].event_fd,
01006            (long)pe_ctl->tid, pe_ctl->events[i].cpu, ret);
01007        SUBDBG("EXIT: PAPI_ESYS\n");
01008         return PAPI_ESYS;
01009      }
01010 
01011          SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
01012             pe_ctl->events[i].event_fd, (long)pe_ctl->tid,
01013         pe_ctl->events[i].cpu, ret);
01014          SUBDBG("read: %lld\n",papi_pe_buffer[0]);
01015 
01016      pe_ctl->counts[i] = papi_pe_buffer[0];
01017       }
01018    }
01019 
01020 
01021    /* Handle cases where we are using FORMAT_GROUP   */
01022    /* We assume only one group leader, in position 0 */
01023 
01024    else {
01025       if (pe_ctl->events[0].group_leader_fd!=-1) {
01026      PAPIERROR("Was expecting group leader!\n");
01027       }
01028 
01029       ret = read( pe_ctl->events[0].event_fd, papi_pe_buffer,
01030           sizeof ( papi_pe_buffer ) );
01031 
01032       if ( ret == -1 ) {
01033      PAPIERROR("read returned an error: ", strerror( errno ));
01034        SUBDBG("EXIT: PAPI_ESYS\n");
01035      return PAPI_ESYS;
01036       }
01037 
01038       /* we read 1 64-bit value (number of events) then     */
01039       /* num_events more 64-bit values that hold the counts */
01040       if (ret<(signed)((1+pe_ctl->num_events)*sizeof(long long))) {
01041      PAPIERROR("Error! short read!\n");
01042        SUBDBG("EXIT: PAPI_ESYS\n");
01043      return PAPI_ESYS;
01044       }
01045 
01046       SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
01047          pe_ctl->events[0].event_fd,
01048          (long)pe_ctl->tid, pe_ctl->events[0].cpu, ret);
01049       {
01050      int j;
01051      for(j=0;j<ret/8;j++) {
01052             SUBDBG("read %d: %lld\n",j,papi_pe_buffer[j]);
01053      }
01054       }
01055 
01056       /* Make sure the kernel agrees with how many events we have */
01057       if (papi_pe_buffer[0]!=pe_ctl->num_events) {
01058      PAPIERROR("Error!  Wrong number of events!\n");
01059        SUBDBG("EXIT: PAPI_ESYS\n");
01060      return PAPI_ESYS;
01061       }
01062 
01063       /* put the count values in their proper location */
01064       for(i=0;i<pe_ctl->num_events;i++) {
01065          pe_ctl->counts[i] = papi_pe_buffer[1+i];
01066       }
01067    }
01068 
01069    /* point PAPI to the values we read */
01070    *events = pe_ctl->counts;
01071 
01072    SUBDBG("EXIT: PAPI_OK\n");
01073    return PAPI_OK;
01074 }

Here is the call graph for this function:

int _peu_reset ( hwd_context_t ctx,
hwd_control_state_t ctl 
)

Definition at line 859 of file perf_event_uncore.c.

00860 {
00861    int i, ret;
00862    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00863 
00864    ( void ) ctx;             /*unused */
00865 
00866    /* We need to reset all of the events, not just the group leaders */
00867    for( i = 0; i < pe_ctl->num_events; i++ ) {
00868       ret = ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_RESET, NULL );
00869       if ( ret == -1 ) {
00870      PAPIERROR("ioctl(%d, PERF_EVENT_IOC_RESET, NULL) "
00871            "returned error, Linux says: %s",
00872            pe_ctl->events[i].event_fd, strerror( errno ) );
00873      return PAPI_ESYS;
00874       }
00875    }
00876 
00877    return PAPI_OK;
00878 }

Here is the call graph for this function:

Here is the caller graph for this function:

static int _peu_set_domain ( hwd_control_state_t ctl,
int  domain 
) [static]

Definition at line 832 of file perf_event_uncore.c.

00833 {
00834    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00835 
00836    SUBDBG("old control domain %d, new domain %d\n",
00837       pe_ctl->domain,domain);
00838 
00839    pe_ctl->domain = domain;
00840    return PAPI_OK;
00841 }

Here is the caller graph for this function:

int _peu_shutdown_component ( void   ) 

Definition at line 690 of file perf_event_uncore.c.

00690                                     {
00691 
00692   /* deallocate our event table */
00693   _peu_libpfm4_shutdown(&_perf_event_uncore_vector, &uncore_native_event_table);
00694 
00695   /* Shutdown libpfm4 */
00696   _papi_libpfm4_shutdown();
00697 
00698   return PAPI_OK;
00699 }

Here is the call graph for this function:

int _peu_shutdown_thread ( hwd_context_t ctx  ) 

Definition at line 845 of file perf_event_uncore.c.

00846 {
00847     pe_context_t *pe_ctx = ( pe_context_t *) ctx;
00848 
00849     pe_ctx->initialized=0;
00850 
00851     return PAPI_OK;
00852 }

int _peu_start ( hwd_context_t ctx,
hwd_control_state_t ctl 
)

Definition at line 1078 of file perf_event_uncore.c.

01079 {
01080    int ret;
01081    int i;
01082    int did_something = 0;
01083    pe_context_t *pe_ctx = ( pe_context_t *) ctx;
01084    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
01085 
01086    /* Reset the counters first.  Is this necessary? */
01087    ret = _peu_reset( pe_ctx, pe_ctl );
01088    if ( ret ) {
01089       return ret;
01090    }
01091 
01092    /* Enable all of the group leaders                */
01093    /* All group leaders have a group_leader_fd of -1 */
01094    for( i = 0; i < pe_ctl->num_events; i++ ) {
01095       if (pe_ctl->events[i].group_leader_fd == -1) {
01096      SUBDBG("ioctl(enable): fd: %d\n", pe_ctl->events[i].event_fd);
01097      ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_ENABLE, NULL) ; 
01098 
01099      /* ioctls always return -1 on failure */
01100          if (ret == -1) {
01101             PAPIERROR("ioctl(PERF_EVENT_IOC_ENABLE) failed.\n");
01102             return PAPI_ESYS;
01103      }
01104 
01105      did_something++;
01106       } 
01107    }
01108 
01109    if (!did_something) {
01110       PAPIERROR("Did not enable any counters.\n");
01111       return PAPI_EBUG;
01112    }
01113 
01114    pe_ctx->state |= PERF_EVENTS_RUNNING;
01115 
01116    return PAPI_OK;
01117 
01118 }

Here is the call graph for this function:

int _peu_stop ( hwd_context_t ctx,
hwd_control_state_t ctl 
)

Definition at line 1122 of file perf_event_uncore.c.

01123 {
01124 
01125    int ret;
01126    int i;
01127    pe_context_t *pe_ctx = ( pe_context_t *) ctx;
01128    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
01129 
01130    /* Just disable the group leaders */
01131    for ( i = 0; i < pe_ctl->num_events; i++ ) {
01132       if ( pe_ctl->events[i].group_leader_fd == -1 ) {
01133      ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_DISABLE, NULL);
01134      if ( ret == -1 ) {
01135         PAPIERROR( "ioctl(%d, PERF_EVENT_IOC_DISABLE, NULL) "
01136                "returned error, Linux says: %s",
01137                pe_ctl->events[i].event_fd, strerror( errno ) );
01138         return PAPI_EBUG;
01139      }
01140       }
01141    }
01142 
01143    pe_ctx->state &= ~PERF_EVENTS_RUNNING;
01144 
01145    return PAPI_OK;
01146 }

Here is the call graph for this function:

int _peu_update_control_state ( hwd_control_state_t ctl,
NativeInfo_t native,
int  count,
hwd_context_t ctx 
)

Definition at line 706 of file perf_event_uncore.c.

00709 {
00710     int i;
00711     int j;
00712     int ret;
00713     int skipped_events=0;
00714     struct native_event_t *ntv_evt;
00715    pe_context_t *pe_ctx = ( pe_context_t *) ctx;
00716    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00717 
00718    /* close all of the existing fds and start over again */
00719    /* In theory we could have finer-grained control and know if             */
00720    /* things were changed, but it's easier to tear things down and rebuild. */
00721    close_pe_events( pe_ctx, pe_ctl );
00722 
00723    /* Calling with count==0 should be OK, it's how things are deallocated */
00724    /* when an eventset is destroyed.                                      */
00725    if ( count == 0 ) {
00726       SUBDBG( "Called with count == 0\n" );
00727       return PAPI_OK;
00728    }
00729 
00730    /* set up all the events */
00731    for( i = 0; i < count; i++ ) {
00732       if ( native ) {
00733             // get the native event pointer used for this papi event
00734             int ntv_idx = _papi_hwi_get_ntv_idx((unsigned)(native[i].ni_papi_code));
00735             if (ntv_idx < -1) {
00736                 SUBDBG("papi_event_code: %#x known by papi but not by the component\n", native[i].ni_papi_code);
00737                 continue;
00738             }
00739             // if native index is -1, then we have an event without a mask and need to find the right native index to use
00740             if (ntv_idx == -1) {
00741                 // find the native event index we want by matching for the right papi event code
00742                 for (j=0 ; j<pe_ctx->event_table->num_native_events ; j++) {
00743                     if (pe_ctx->event_table->native_events[j].papi_event_code == native[i].ni_papi_code) {
00744                         ntv_idx = j;
00745                     }
00746                 }
00747             }
00748 
00749             // if native index is still negative, we did not find event we wanted so just return error
00750             if (ntv_idx < 0) {
00751                 SUBDBG("papi_event_code: %#x not found in native event tables\n", native[i].ni_papi_code);
00752                 continue;
00753             }
00754 
00755             // this native index is positive so there was a mask with the event, the ntv_idx identifies which native event to use
00756             ntv_evt = (struct native_event_t *)(&(pe_ctx->event_table->native_events[ntv_idx]));
00757 
00758             SUBDBG("ntv_evt: %p\n", ntv_evt);
00759 
00760             SUBDBG("i: %d, pe_ctx->event_table->num_native_events: %d\n", i, pe_ctx->event_table->num_native_events);
00761 
00762             // Move this events hardware config values and other attributes to the perf_events attribute structure
00763             memcpy (&pe_ctl->events[i].attr, &ntv_evt->attr, sizeof(perf_event_attr_t));
00764 
00765             // may need to update the attribute structure with information from event set level domain settings (values set by PAPI_set_domain)
00766             // only done if the event mask which controls each counting domain was not provided
00767 
00768             // get pointer to allocated name, will be NULL when adding preset events to event set
00769             char *aName = ntv_evt->allocated_name;
00770             if ((aName == NULL)  ||  (strstr(aName, ":u=") == NULL)) {
00771                 SUBDBG("set exclude_user attribute from eventset level domain flags, encode: %d, eventset: %d\n", pe_ctl->events[i].attr.exclude_user, !(pe_ctl->domain & PAPI_DOM_USER));
00772                 pe_ctl->events[i].attr.exclude_user = !(pe_ctl->domain & PAPI_DOM_USER);
00773             }
00774             if ((aName == NULL)  ||  (strstr(aName, ":k=") == NULL)) {
00775                 SUBDBG("set exclude_kernel attribute from eventset level domain flags, encode: %d, eventset: %d\n", pe_ctl->events[i].attr.exclude_kernel, !(pe_ctl->domain & PAPI_DOM_KERNEL));
00776                 pe_ctl->events[i].attr.exclude_kernel = !(pe_ctl->domain & PAPI_DOM_KERNEL);
00777             }
00778 
00779             // set the cpu number provided with an event mask if there was one (will be -1 if mask not provided)
00780             pe_ctl->events[i].cpu = ntv_evt->cpu;
00781             // if cpu event mask not provided, then set the cpu to use to what may have been set on call to PAPI_set_opt (will still be -1 if not called)
00782             if (pe_ctl->events[i].cpu == -1) {
00783                 pe_ctl->events[i].cpu = pe_ctl->cpu;
00784             }
00785       } else {
00786           // This case happens when called from _pe_set_overflow and _pe_ctl
00787           // Those callers put things directly into the pe_ctl structure so it is already set for the open call
00788       }
00789 
00790       // Copy the inherit flag into the attribute block that will be passed to the kernel
00791       pe_ctl->events[i].attr.inherit = pe_ctl->inherit;
00792 
00793       /* Set the position in the native structure */
00794       /* We just set up events linearly           */
00795       if ( native ) {
00796           native[i].ni_position = i;
00797           SUBDBG( "&native[%d]: %p, ni_papi_code: %#x, ni_event: %#x, ni_position: %d, ni_owners: %d\n",
00798             i, &(native[i]), native[i].ni_papi_code, native[i].ni_event, native[i].ni_position, native[i].ni_owners);
00799       }
00800    }
00801 
00802     if (count <= skipped_events) {
00803         SUBDBG("EXIT: No events to count, they all contained invalid umasks\n");
00804         return PAPI_ENOEVNT;
00805     }
00806 
00807   pe_ctl->num_events = count - skipped_events;
00808 
00810    /* actually open the events */
00810    /* (why is this a separate function?) */
00811    ret = open_pe_events( pe_ctx, pe_ctl );
00812    if ( ret != PAPI_OK ) {
00813       SUBDBG("open_pe_events failed\n");
00814       /* Restore values ? */
00815       return ret;
00816    }
00817 
00818    SUBDBG( "EXIT: PAPI_OK\n" );
00819    return PAPI_OK;
00820 }

Here is the call graph for this function:

Here is the caller graph for this function:

int _peu_write ( hwd_context_t ctx,
hwd_control_state_t ctl,
long long *  from 
)

Definition at line 884 of file perf_event_uncore.c.

00886 {
00887    ( void ) ctx;             /*unused */
00888    ( void ) ctl;             /*unused */
00889    ( void ) from;            /*unused */
00890    /*
00891     * Counters cannot be written.  Do we need to virtualize the
00892     * counters so that they can be written, or perhaps modify code so that
00893     * they can be written? FIXME ?
00894     */
00895 
00896     return PAPI_ENOSUPP;
00897 }

static int bug_check_scheduability ( void   )  [static]

Definition at line 82 of file perf_event_uncore.c.

00082                               {
00083 
00084 #if defined(__powerpc__)
00085    /* PowerPC not affected by this bug */
00086 #elif defined(__mips__)
00087    /* MIPS as of kernel 3.1 does not properly detect schedulability */
00088    return 1;
00089 #else
00090    if (_papi_os_info.os_version < LINUX_VERSION(2,6,33))
00091     return 1;
00092 #endif
00093 
00094    return 0;
00095 }

Here is the caller graph for this function:

/* Verify that every event in the control state can actually be scheduled   */
/* on the hardware.  On buggy kernels (see bug_check_scheduability()) the   */
/* open can appear to succeed even though the event cannot be scheduled,    */
/* so we force scheduling with a brief enable/disable/read cycle and check  */
/* for an error condition, then reset the counters back to zero.            */
/* Returns PAPI_OK on success, PAPI_ESYS if an ioctl or read fails, or      */
/* PAPI_ECNFLCT if an event could not be scheduled (read returned 0 bytes). */
static int
check_scheduability( pe_context_t *ctx, pe_control_t *ctl )
{
   SUBDBG("ENTER: ctx: %p, ctl: %p\n", ctx, ctl);
   int retval = 0, cnt = -1;
   ( void ) ctx;         /*unused */
   long long papi_pe_buffer[READ_BUFFER_SIZE];
   int i;

   if (bug_check_scheduability()) {

	/* If the kernel isn't tracking scheduability right       */
	/* Then we need to start/stop/read to force the event     */
	/* to be scheduled and see if an error condition happens. */

	/* start all events */
	for( i = 0; i < ctl->num_events; i++) {
	    retval = ioctl( ctl->events[i].event_fd, PERF_EVENT_IOC_ENABLE, NULL );
	    if (retval == -1) {
		SUBDBG("EXIT: Enable failed event index: %d, num_events: %d, return PAPI_ESYS\n", i, ctl->num_events);
		return PAPI_ESYS;
	    }
	}

	/* stop all events */
	for( i = 0; i < ctl->num_events; i++) {
	    retval = ioctl(ctl->events[i].event_fd, PERF_EVENT_IOC_DISABLE, NULL );
	    if (retval == -1) {
		SUBDBG("EXIT: Disable failed: event index: %d, num_events: %d, return PAPI_ESYS\n", i, ctl->num_events);
		return PAPI_ESYS;
	    }
	}

	/* See if a read of each event returns results */
	for( i = 0; i < ctl->num_events; i++) {
	    cnt = read( ctl->events[i].event_fd, papi_pe_buffer, sizeof(papi_pe_buffer));
	    if ( cnt == -1 ) {
		SUBDBG( "EXIT: read failed: event index: %d, num_events: %d, return PAPI_ESYS.  Should never happen.\n", i, ctl->num_events);
		return PAPI_ESYS;
	    }

	    if ( cnt == 0 ) {
		/* We read 0 bytes if we could not schedule the event */
		/* The kernel should have detected this at open       */
		/* but various bugs (including NMI watchdog)          */
		/* result in this behavior                            */

		SUBDBG( "EXIT: read returned 0: event index: %d, num_events: %d, return PAPI_ECNFLCT.\n", i, ctl->num_events);
		return PAPI_ECNFLCT;
	    }
	}

	/* Reset all of the counters (opened so far) back to zero      */
	/* from the above brief enable/disable call pair.              */

	/* We have to reset all events because reset of group leader      */
	/* does not reset all.                                            */
	/* we assume that the events are being added one by one and that  */
	/* we do not need to reset higher events (doing so may reset ones */
	/* that have not been initialized yet.                            */

	/* Note... PERF_EVENT_IOC_RESET does not reset time running       */
	/* info if multiplexing, so we should avoid coming here if        */
	/* we are multiplexing the event.                                 */
	for( i = 0; i < ctl->num_events; i++) {
	    retval=ioctl( ctl->events[i].event_fd, PERF_EVENT_IOC_RESET, NULL );
	    if (retval == -1) {
		SUBDBG("EXIT: Reset failed: event index: %d, num_events: %d, return PAPI_ESYS\n", i, ctl->num_events);
		return PAPI_ESYS;
	    }
	}
   }
   SUBDBG("EXIT: return PAPI_OK\n");
   return PAPI_OK;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/* Close all opened events in the control state.                           */
/* Child (non-leader) events are closed first, then the group leaders;     */
/* an event's mmap buffer (if any) is unmapped before its fd is closed.    */
/* On success num_events is reset to 0 and PERF_EVENTS_OPENED is cleared   */
/* from the context state.                                                 */
/* Returns PAPI_OK on success, PAPI_ESYS if munmap/close fails, or         */
/* PAPI_EBUG if the closed/not-opened counts don't account for all events. */
static int
close_pe_events( pe_context_t *ctx, pe_control_t *ctl )
{
   int i;
   int num_closed=0;
   int events_not_opened=0;

   /* should this be a more serious error? */
   if ( ctx->state & PERF_EVENTS_RUNNING ) {
      SUBDBG("Closing without stopping first\n");
   }

   /* Close child events first */
   /* (group_leader_fd != -1 marks a non-leader event) */
   for( i=0; i<ctl->num_events; i++ ) {

      if (ctl->events[i].event_opened) {

         if (ctl->events[i].group_leader_fd!=-1) {
            if ( ctl->events[i].mmap_buf ) {
	       if ( munmap ( ctl->events[i].mmap_buf,
			     ctl->events[i].nr_mmap_pages * getpagesize() ) ) {
	          PAPIERROR( "munmap of fd = %d returned error: %s",
			     ctl->events[i].event_fd, strerror( errno ) );
	          return PAPI_ESYS;
	       }
	    }

            if ( close( ctl->events[i].event_fd ) ) {
	       PAPIERROR( "close of fd = %d returned error: %s",
			  ctl->events[i].event_fd, strerror( errno ) );
	       return PAPI_ESYS;
	    } else {
	       num_closed++;
	    }
	    ctl->events[i].event_opened=0;
	 }
      }
      else {
	events_not_opened++;
      }
   }

   /* Close the group leaders last */
   for( i=0; i<ctl->num_events; i++ ) {

      if (ctl->events[i].event_opened) {

         if (ctl->events[i].group_leader_fd==-1) {
            if ( ctl->events[i].mmap_buf ) {
	       if ( munmap ( ctl->events[i].mmap_buf,
			     ctl->events[i].nr_mmap_pages * getpagesize() ) ) {
	          PAPIERROR( "munmap of fd = %d returned error: %s",
			     ctl->events[i].event_fd, strerror( errno ) );
	          return PAPI_ESYS;
	       }
	    }


            if ( close( ctl->events[i].event_fd ) ) {
	       PAPIERROR( "close of fd = %d returned error: %s",
			  ctl->events[i].event_fd, strerror( errno ) );
	       return PAPI_ESYS;
	    } else {
	       num_closed++;
	    }
	    ctl->events[i].event_opened=0;
	 }
      }
   }


   /* Sanity check: every event must be either closed here or never opened */
   if (ctl->num_events!=num_closed) {
      if (ctl->num_events!=(num_closed+events_not_opened)) {
         PAPIERROR("Didn't close all events: "
		   "Closed %d Not Opened: %d Expected %d\n",
		   num_closed,events_not_opened,ctl->num_events);
         return PAPI_EBUG;
      }
   }

   ctl->num_events=0;

   ctx->state &= ~PERF_EVENTS_OPENED;

   return PAPI_OK;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/* Build the perf_event read_format flag word for an event.
 *
 * multiplex:    nonzero when the eventset is multiplexed; enabled/running
 *               time fields are then needed so counts can be scaled.
 * inherit:      nonzero when child tasks inherit the counters; group reads
 *               are not used in that case.
 * format_group: nonzero when this event is a group leader that should read
 *               the whole group at once.
 *
 * Returns the assembled PERF_FORMAT_* bit mask. */
static unsigned int
get_read_format( unsigned int multiplex,
		 unsigned int inherit,
		 int format_group )
{
	unsigned int bits = 0;

	/* Multiplexed counts must be scaled by time enabled/running. */
	if ( multiplex ) {
		bits |= PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/* Group reads are only valid when inherit is not in use. */
	if ( format_group && !inherit ) {
		bits |= PERF_FORMAT_GROUP;
	}

	SUBDBG("multiplex: %d, inherit: %d, group_leader: %d, format: %#x\n",
	   multiplex, inherit, format_group, bits);

	return bits;
}

Here is the caller graph for this function:

static int map_perf_event_errors_to_papi ( int  perf_event_error  )  [static]

Definition at line 202 of file perf_event_uncore.c.

00202                                                                {
00203 
00204    int ret;
00205 
00206    /* These mappings are approximate.
00207       EINVAL in particular can mean lots of different things */
00208    switch(perf_event_error) {
00209       case EPERM:
00210       case EACCES:
00211            ret = PAPI_EPERM;
00212        break;
00213       case ENODEV:
00214       case EOPNOTSUPP:
00215        ret = PAPI_ENOSUPP;
00216            break;
00217       case ENOENT:
00218        ret = PAPI_ENOEVNT;
00219            break;
00220       case ENOSYS:
00221       case EAGAIN:
00222       case EBUSY:
00223       case E2BIG:
00224        ret = PAPI_ESYS;
00225        break;
00226       case ENOMEM:
00227        ret = PAPI_ENOMEM;
00228        break;
00229       case EINVAL:
00230       default:
00231        ret = PAPI_EINVAL;
00232            break;
00233    }
00234    return ret;
00235 }

Here is the caller graph for this function:

/* Open every event in the control state via perf_event_open().            */
/* pid selection: system-wide granularity attaches to all tasks (pid -1), */
/* otherwise to the requested thread.  After opening, schedulability is   */
/* double-checked (unless multiplexed) because the kernel can report a    */
/* successful open for events that later fail to schedule.                */
/* On any failure all fds opened so far are closed before returning.      */
/* Returns PAPI_OK, or a PAPI error mapped from the kernel errno.         */
static int
open_pe_events( pe_context_t *ctx, pe_control_t *ctl )
{

   int i, ret = PAPI_OK;
   long pid;

   if (ctl->granularity==PAPI_GRN_SYS) {
      pid = -1;
   }
   else {
      pid = ctl->tid;
   }

   for( i = 0; i < ctl->num_events; i++ ) {

      ctl->events[i].event_opened=0;

      /* set up the attr structure.  We don't set up all fields here */
      /* as some have already been set up previously.                */

/*
 * The following code controls how the uncore component interfaces with the
 * kernel for uncore events.  The code inside the ifdef will use grouping of
 * uncore events which can make the cost of reading the results more efficient.
 * The problem with it is that the uncore component supports 20 different uncore
 * PMU's.  The kernel requires that all events in a group must be for the same PMU.
 * This means that with grouping enabled PAPI applications can count events on only
 * one of the 20 PMU's during a run.
 *
 * The code inside the else clause treats each event in the event set as
 * independent.  When running in this mode the kernel allows PAPI to count
 * events on multiple uncore PMU's at the same time.
 *
 * Example:
 *  An application wants to measure all the L3 cache write requests.
 *  The event to do this is part of a cbox pmu (there are 8 cbox pmu's).
 *  When built with the code in the ifdef, the application would have to be
 *    run 8 times and count write requests from one pmu at a time.
 *  When built with the code in the else, the write requests in all 8 cbox
 *    pmu's could be counted in the same run.
 *
 */
// #define GROUPIT 1       // remove the comment on this line to force event grouping
#ifdef GROUPIT
      /* group leader (event 0) is special                */
      /* If we're multiplexed, everyone is a group leader */
      if (( i == 0 ) || (ctl->multiplexed)) {
         ctl->events[i].attr.pinned = !ctl->multiplexed;
	 ctl->events[i].attr.disabled = 1;
	 ctl->events[i].group_leader_fd=-1;
         ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
							   ctl->inherit,
							   !ctl->multiplexed );
      } else {
	 ctl->events[i].attr.pinned=0;
	 ctl->events[i].attr.disabled = 0;
	 /* Bug fix: this assignment previously ended with a comma     */
	 /* (comma operator) instead of a semicolon and only worked by */
	 /* accident; it now ends the statement properly.              */
	 ctl->events[i].group_leader_fd=ctl->events[0].event_fd;
         ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
							   ctl->inherit,
							   0 );
      }
#else
      /* Independent events: each one is its own (pinned) group leader. */
      ctl->events[i].attr.pinned = !ctl->multiplexed;
      ctl->events[i].attr.disabled = 1;
      ctl->inherit = 1;
      ctl->events[i].group_leader_fd=-1;
      ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed, ctl->inherit, 0 );
#endif


      /* try to open */
      ctl->events[i].event_fd = sys_perf_event_open( &ctl->events[i].attr,
						     pid,
						     ctl->events[i].cpu,
						     ctl->events[i].group_leader_fd,
						     0 /* flags */
						     );

      /* Try to match Linux errors to PAPI errors */
      if ( ctl->events[i].event_fd == -1 ) {
	 SUBDBG("sys_perf_event_open returned error on event #%d."
		"  Error: %s\n",
		i, strerror( errno ) );
         ret=map_perf_event_errors_to_papi(errno);

	 goto open_peu_cleanup;
      }

      SUBDBG ("sys_perf_event_open: tid: %ld, cpu_num: %d,"
              " group_leader/fd: %d, event_fd: %d,"
              " read_format: %"PRIu64"\n",
	      pid, ctl->events[i].cpu, ctl->events[i].group_leader_fd,
	      ctl->events[i].event_fd, ctl->events[i].attr.read_format);

      ctl->events[i].event_opened=1;
   }


   /* in many situations the kernel will indicate we opened fine */
   /* yet things will fail later.  So we need to double check    */
   /* we actually can use the events we've set up.               */

   /* This is not necessary if we are multiplexing, and in fact */
   /* we cannot do this properly if multiplexed because         */
   /* PERF_EVENT_IOC_RESET does not reset the time running info */
   if (!ctl->multiplexed) {
	ret = check_scheduability( ctx, ctl);

	if ( ret != PAPI_OK ) {
	    /* the last event did open, so we need to bump the counter */
	    /* before doing the cleanup                                */
	    i++;
	    goto open_peu_cleanup;
	}
   }

   /* Now that we've successfully opened all of the events, do whatever  */
   /* "tune-up" is needed to attach the mmap'd buffers, signal handlers, */
   /* and so on.                                                         */
   for ( i = 0; i < ctl->num_events; i++ ) {

      /* No sampling if uncore */
      ctl->events[i].mmap_buf = NULL;
   }

   /* Set state only if completely successful */
   ctx->state |= PERF_EVENTS_OPENED;

   return PAPI_OK;

open_peu_cleanup:
   /* We encountered an error, close up the fds we successfully opened.  */
   /* We go backward in an attempt to close group leaders last, although */
   /* That's probably not strictly necessary.                            */
   while ( i > 0 ) {
      i--;
      if (ctl->events[i].event_fd>=0) {
	 close( ctl->events[i].event_fd );
	 ctl->events[i].event_opened=0;
      }
   }

   return ret;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/* Thin wrapper around the perf_event_open(2) syscall (there is no glibc */
/* stub for it).  Dumps every attr field via SUBDBG for debugging.       */
/* Returns the new event fd (>= 0) or -1 with errno set on failure.      */
static long
sys_perf_event_open( struct perf_event_attr *hw_event, pid_t pid, int cpu,
		     int group_fd, unsigned long flags )
{
   /* Bug fix: was "int ret", truncating the long syscall return value   */
   /* before returning it as long.                                       */
   long ret;

   SUBDBG("sys_perf_event_open(hw_event: %p, pid: %d, cpu: %d, group_fd: %d, flags: %lx\n",hw_event,pid,cpu,group_fd,flags);
   SUBDBG("   type: %d\n",hw_event->type);
   SUBDBG("   size: %d\n",hw_event->size);
   SUBDBG("   config: %#"PRIx64" (%"PRIu64")\n",hw_event->config,
	  hw_event->config);
   SUBDBG("   sample_period: %"PRIu64"\n",hw_event->sample_period);
   SUBDBG("   sample_type: %"PRIu64"\n",hw_event->sample_type);
   SUBDBG("   read_format: %"PRIu64"\n",hw_event->read_format);
   SUBDBG("   disabled: %d\n",hw_event->disabled);
   SUBDBG("   inherit: %d\n",hw_event->inherit);
   SUBDBG("   pinned: %d\n",hw_event->pinned);
   SUBDBG("   exclusive: %d\n",hw_event->exclusive);
   SUBDBG("   exclude_user: %d\n",hw_event->exclude_user);
   SUBDBG("   exclude_kernel: %d\n",hw_event->exclude_kernel);
   SUBDBG("   exclude_hv: %d\n",hw_event->exclude_hv);
   SUBDBG("   exclude_idle: %d\n",hw_event->exclude_idle);
   SUBDBG("   mmap: %d\n",hw_event->mmap);
   SUBDBG("   comm: %d\n",hw_event->comm);
   SUBDBG("   freq: %d\n",hw_event->freq);
   SUBDBG("   inherit_stat: %d\n",hw_event->inherit_stat);
   SUBDBG("   enable_on_exec: %d\n",hw_event->enable_on_exec);
   SUBDBG("   task: %d\n",hw_event->task);
   SUBDBG("   watermark: %d\n",hw_event->watermark);
   SUBDBG("   precise_ip: %d\n",hw_event->precise_ip);
   SUBDBG("   mmap_data: %d\n",hw_event->mmap_data);
   SUBDBG("   sample_id_all: %d\n",hw_event->sample_id_all);
   SUBDBG("   exclude_host: %d\n",hw_event->exclude_host);
   SUBDBG("   exclude_guest: %d\n",hw_event->exclude_guest);
   SUBDBG("   exclude_callchain_kernel: %d\n",hw_event->exclude_callchain_kernel);
   SUBDBG("   exclude_callchain_user: %d\n",hw_event->exclude_callchain_user);
   SUBDBG("   wakeup_watermark: %d\n",hw_event->wakeup_watermark);
   SUBDBG("   bp_type: %d\n",hw_event->bp_type);
   /* Bug fix: config1/config2/branch_sample_type/sample_regs_user are     */
   /* __u64; %lx/%lu is wrong on 32-bit targets, so use <inttypes.h>      */
   /* macros like the config field above.                                  */
   SUBDBG("   config1: %#"PRIx64" (%"PRIu64")\n",hw_event->config1,hw_event->config1);
   SUBDBG("   config2: %#"PRIx64" (%"PRIu64")\n",hw_event->config2,hw_event->config2);
   SUBDBG("   branch_sample_type: %"PRIu64"\n",hw_event->branch_sample_type);
   SUBDBG("   sample_regs_user: %"PRIu64"\n",hw_event->sample_regs_user);
   SUBDBG("   sample_stack_user: %d\n",hw_event->sample_stack_user);

    ret = syscall( __NR_perf_event_open, hw_event, pid, cpu, group_fd, flags );
    /* Bug fix: print ret with %ld now that it is a long. */
    SUBDBG("Returned %ld %d %s\n",ret,
	   ret<0?errno:0,
	   ret<0?strerror(errno):" ");
    return ret;
}

Here is the caller graph for this function:


Variable Documentation

Definition at line 48 of file perf_event_uncore.c.

int our_cidx [static]

Definition at line 52 of file perf_event_uncore.c.

Definition at line 51 of file perf_event_uncore.c.


Generated on 26 Jan 2016 for PAPI by  doxygen 1.6.1