perf_event_uncore.c File Reference

Include dependency graph for perf_event_uncore.c:

Go to the source code of this file.

Defines

#define PERF_EVENTS_OPENED   0x01
#define PERF_EVENTS_RUNNING   0x02
#define READ_BUFFER_SIZE   (3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS))

Functions

int _peu_libpfm4_get_cidx ()
static int _peu_set_domain (hwd_control_state_t *ctl, int domain)
static unsigned int get_read_format (unsigned int multiplex, unsigned int inherit, int format_group)
static long sys_perf_event_open (struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
static int map_perf_event_errors_to_papi (int perf_event_error)
static int check_scheduability (pe_context_t *ctx, pe_control_t *ctl)
static int open_pe_events (pe_context_t *ctx, pe_control_t *ctl)
static int close_pe_events (pe_context_t *ctx, pe_control_t *ctl)
static int _peu_init_thread (hwd_context_t *hwd_ctx)
static int _peu_init_control_state (hwd_control_state_t *ctl)
static int _peu_init_component (int cidx)
static int _peu_shutdown_component (void)
int _peu_update_control_state (hwd_control_state_t *ctl, NativeInfo_t *native, int count, hwd_context_t *ctx)
static int _peu_shutdown_thread (hwd_context_t *ctx)
static int _peu_reset (hwd_context_t *ctx, hwd_control_state_t *ctl)
static int _peu_write (hwd_context_t *ctx, hwd_control_state_t *ctl, long long *from)
static int _peu_read (hwd_context_t *ctx, hwd_control_state_t *ctl, long long **events, int flags)
static int _peu_start (hwd_context_t *ctx, hwd_control_state_t *ctl)
static int _peu_stop (hwd_context_t *ctx, hwd_control_state_t *ctl)
static int _peu_ctl (hwd_context_t *ctx, int code, _papi_int_option_t *option)
static int _peu_ntv_enum_events (unsigned int *PapiEventCode, int modifier)
static int _peu_ntv_name_to_code (char *name, unsigned int *event_code)
static int _peu_ntv_code_to_name (unsigned int EventCode, char *ntv_name, int len)
static int _peu_ntv_code_to_descr (unsigned int EventCode, char *ntv_descr, int len)
static int _peu_ntv_code_to_info (unsigned int EventCode, PAPI_event_info_t *info)

Variables

papi_vector_t _perf_event_uncore_vector
struct native_event_table_t uncore_native_event_table
static int our_cidx

Define Documentation

#define PERF_EVENTS_OPENED   0x01

Definition at line 59 of file perf_event_uncore.c.

#define PERF_EVENTS_RUNNING   0x02

Definition at line 60 of file perf_event_uncore.c.

#define READ_BUFFER_SIZE   (3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS))

Definition at line 215 of file perf_event_uncore.c.


Function Documentation

static int _peu_ctl ( hwd_context_t ctx,
int  code,
_papi_int_option_t option 
) [static]

Definition at line 1120 of file perf_event_uncore.c.

01121 {
01122    int ret;
01123    pe_context_t *pe_ctx = ( pe_context_t *) ctx;
01124    pe_control_t *pe_ctl = NULL;
01125 
01126    switch ( code ) {
01127       case PAPI_MULTIPLEX:
01128        pe_ctl = ( pe_control_t * ) ( option->multiplex.ESI->ctl_state );
01129 
01130        pe_ctl->multiplexed = 1;
01131        ret = _peu_update_control_state( pe_ctl, NULL,
01132                         pe_ctl->num_events, pe_ctx );
01133        if (ret != PAPI_OK) {
01134           pe_ctl->multiplexed = 0;
01135        }
01136        return ret;
01137 
01138       case PAPI_ATTACH:
01139        pe_ctl = ( pe_control_t * ) ( option->attach.ESI->ctl_state );
01140 
01141        pe_ctl->tid = option->attach.tid;
01142 
01143        /* If events have been already been added, something may */
01144        /* have been done to the kernel, so update */
01145        ret =_peu_update_control_state( pe_ctl, NULL,
01146                         pe_ctl->num_events, pe_ctx);
01147 
01148        return ret;
01149 
01150       case PAPI_DETACH:
01151        pe_ctl = ( pe_control_t *) ( option->attach.ESI->ctl_state );
01152 
01153        pe_ctl->tid = 0;
01154        return PAPI_OK;
01155 
01156       case PAPI_CPU_ATTACH:
01157        pe_ctl = ( pe_control_t *) ( option->cpu.ESI->ctl_state );
01158 
01159        /* this tells the kernel not to count for a thread   */
01160        /* should we warn if we try to set both?  perf_event */
01161        /* will reject it.                                   */
01162        pe_ctl->tid = -1;
01163 
01164        pe_ctl->cpu = option->cpu.cpu_num;
01165 
01166        return PAPI_OK;
01167 
01168       case PAPI_DOMAIN:
01169        pe_ctl = ( pe_control_t *) ( option->domain.ESI->ctl_state );
01170 
01171        /* looks like we are allowed, so set event set level counting domains */
01172        pe_ctl->domain = option->domain.domain;
01173        return PAPI_OK;
01174 
01175       case PAPI_GRANUL:
01176        pe_ctl = (pe_control_t *) ( option->granularity.ESI->ctl_state );
01177 
01178        /* FIXME: we really don't support this yet */
01179 
01180            switch ( option->granularity.granularity  ) {
01181               case PAPI_GRN_PROCG:
01182               case PAPI_GRN_SYS_CPU:
01183               case PAPI_GRN_PROC:
01184            return PAPI_ECMP;
01185 
01186           /* Currently we only support thread and CPU granularity */
01187               case PAPI_GRN_SYS:
01188            pe_ctl->granularity=PAPI_GRN_SYS;
01189            break;
01190 
01191               case PAPI_GRN_THR:
01192            pe_ctl->granularity=PAPI_GRN_THR;
01193            break;
01194 
01195 
01196               default:
01197            return PAPI_EINVAL;
01198        }
01199            return PAPI_OK;
01200 
01201       case PAPI_INHERIT:
01202        pe_ctl = (pe_control_t *) ( option->inherit.ESI->ctl_state );
01203 
01204        if (option->inherit.inherit) {
01205           /* children will inherit counters */
01206           pe_ctl->inherit = 1;
01207        } else {
01208           /* children won't inherit counters */
01209           pe_ctl->inherit = 0;
01210        }
01211        return PAPI_OK;
01212 
01213       case PAPI_DATA_ADDRESS:
01214        return PAPI_ENOSUPP;
01215 
01216       case PAPI_INSTR_ADDRESS:
01217        return PAPI_ENOSUPP;
01218 
01219       case PAPI_DEF_ITIMER:
01220        return PAPI_ENOSUPP;
01221 
01222       case PAPI_DEF_MPX_NS:
01223        return PAPI_ENOSUPP;
01224 
01225       case PAPI_DEF_ITIMER_NS:
01226        return PAPI_ENOSUPP;
01227 
01228       default:
01229        return PAPI_ENOSUPP;
01230    }
01231 }

Here is the call graph for this function:

static int _peu_init_component ( int  cidx  )  [static]

Definition at line 586 of file perf_event_uncore.c.

00587 {
00588 
00589    int retval;
00590    int paranoid_level;
00591 
00592    FILE *fff;
00593 
00594    our_cidx=cidx;
00595 
00596    /* The is the official way to detect if perf_event support exists */
00597    /* The file is called perf_counter_paranoid on 2.6.31             */
00598    /* currently we are lazy and do not support 2.6.31 kernels        */
00599 
00600    fff=fopen("/proc/sys/kernel/perf_event_paranoid","r");
00601    if (fff==NULL) {
00602      strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00603         "perf_event support not detected",PAPI_MAX_STR_LEN);
00604      return PAPI_ENOCMP;
00605    }
00606    retval=fscanf(fff,"%d",&paranoid_level);
00607    if (retval!=1) fprintf(stderr,"Error reading paranoid level\n");
00608    fclose(fff);
00609 
00610 
00611    /* Run the libpfm4-specific setup */
00612 
00613    retval = _papi_libpfm4_init(_papi_hwd[cidx]);
00614    if (retval) {
00615      strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00616          "Error initializing libpfm4",PAPI_MAX_STR_LEN);
00617      return PAPI_ENOCMP;
00618    }
00619 
00620 
00621    /* Run the uncore specific libpfm4 setup */
00622 
00623    retval = _peu_libpfm4_init(_papi_hwd[cidx], 
00624                    &uncore_native_event_table,
00625                                PMU_TYPE_UNCORE);
00626    if (retval) {
00627      strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00628          "Error setting up libpfm4",PAPI_MAX_STR_LEN);
00629      return PAPI_ENOCMP;
00630    }
00631 
00632    /* Check if no uncore events found */
00633 
00634    if (_papi_hwd[cidx]->cmp_info.num_native_events==0) {
00635      strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00636          "No uncore PMUs or events found",PAPI_MAX_STR_LEN);
00637      return PAPI_ENOCMP;
00638    }
00639 
00640    /* Check if we have enough permissions for uncore */
00641 
00642    /* 2 means no kernel measurements allowed   */
00643    /* 1 means normal counter access            */
00644    /* 0 means you can access CPU-specific data */
00645    /* -1 means no restrictions                 */
00646 
00647    if ((paranoid_level>0) && (getuid()!=0)) {
00648       strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
00649         "Insufficient permissions for uncore access.  Set /proc/sys/kernel/perf_event_paranoid to 0 or run as root.",
00650         PAPI_MAX_STR_LEN);
00651      return PAPI_ENOCMP;
00652    }
00653 
00654    return PAPI_OK;
00655 
00656 }

Here is the call graph for this function:

static int _peu_init_control_state ( hwd_control_state_t ctl  )  [static]

Definition at line 561 of file perf_event_uncore.c.

00562 {
00563   pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00564 
00565   /* clear the contents */
00566   memset( pe_ctl, 0, sizeof ( pe_control_t ) );
00567 
00568   /* Set the default domain */
00569   _peu_set_domain( ctl, _perf_event_uncore_vector.cmp_info.default_domain );
00570 
00571   /* Set the default granularity */
00572   pe_ctl->granularity=_perf_event_uncore_vector.cmp_info.default_granularity;
00573 
00574   pe_ctl->cidx=our_cidx;
00575 
00576   /* Set cpu number in the control block to show events */
00577   /* are not tied to specific cpu                       */
00578   pe_ctl->cpu = -1;
00579   return PAPI_OK;
00580 }

Here is the call graph for this function:

static int _peu_init_thread ( hwd_context_t hwd_ctx  )  [static]

Definition at line 544 of file perf_event_uncore.c.

00545 {
00546 
00547   pe_context_t *pe_ctx = ( pe_context_t *) hwd_ctx;
00548 
00549   /* clear the context structure and mark as initialized */
00550   memset( pe_ctx, 0, sizeof ( pe_context_t ) );
00551   pe_ctx->initialized=1;
00552 
00553   pe_ctx->event_table=&uncore_native_event_table;
00554   pe_ctx->cidx=our_cidx;
00555 
00556   return PAPI_OK;
00557 }

int _peu_libpfm4_get_cidx ( void   ) 

Definition at line 54 of file perf_event_uncore.c.

00054                         {
00055     return our_cidx;
00056 }

Here is the caller graph for this function:

static int _peu_ntv_code_to_descr ( unsigned int  EventCode,
char *  ntv_descr,
int  len 
) [static]

Definition at line 1266 of file perf_event_uncore.c.

01267                                                       {
01268 
01269    if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01270 
01271    return _peu_libpfm4_ntv_code_to_descr(EventCode,ntv_descr,len,
01272                                           &uncore_native_event_table);
01273 }

Here is the call graph for this function:

static int _peu_ntv_code_to_info ( unsigned int  EventCode,
PAPI_event_info_t info 
) [static]

Definition at line 1276 of file perf_event_uncore.c.

01277                                                    {
01278 
01279   if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01280 
01281   return _peu_libpfm4_ntv_code_to_info(EventCode, info,
01282                                         &uncore_native_event_table);
01283 }

Here is the call graph for this function:

static int _peu_ntv_code_to_name ( unsigned int  EventCode,
char *  ntv_name,
int  len 
) [static]

Definition at line 1255 of file perf_event_uncore.c.

01256                                                    {
01257 
01258    if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01259 
01260    return _peu_libpfm4_ntv_code_to_name(EventCode,
01261                                          ntv_name, len, 
01262                      &uncore_native_event_table);
01263 }

Here is the call graph for this function:

static int _peu_ntv_enum_events ( unsigned int *  PapiEventCode,
int  modifier 
) [static]

Definition at line 1235 of file perf_event_uncore.c.

01236 {
01237 
01238   if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01239 
01240 
01241   return _peu_libpfm4_ntv_enum_events(PapiEventCode, modifier,
01242                                        &uncore_native_event_table);
01243 }

Here is the call graph for this function:

static int _peu_ntv_name_to_code ( char *  name,
unsigned int *  event_code 
) [static]

Definition at line 1246 of file perf_event_uncore.c.

01246                                                              {
01247 
01248   if (_perf_event_uncore_vector.cmp_info.disabled) return PAPI_ENOEVNT;
01249 
01250   return _peu_libpfm4_ntv_name_to_code(name,event_code,
01251                                         &uncore_native_event_table);
01252 }

Here is the call graph for this function:

static int _peu_read ( hwd_context_t ctx,
hwd_control_state_t ctl,
long long **  events,
int  flags 
) [static]

Definition at line 884 of file perf_event_uncore.c.

00886 {
00887     SUBDBG("ENTER: ctx: %p, ctl: %p, events: %p, flags: %#x\n", ctx, ctl, events, flags);
00888 
00889    ( void ) flags;           /*unused */
00890    int i, ret = -1;
00891    /* pe_context_t *pe_ctx = ( pe_context_t *) ctx; */ 
00892    (void) ctx; /*unused*/
00893    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00894    long long papi_pe_buffer[READ_BUFFER_SIZE];
00895    long long tot_time_running, tot_time_enabled, scale;
00896 
00897    /* Handle case where we are multiplexing */
00898    if (pe_ctl->multiplexed) {
00899 
00900       /* currently we handle multiplexing by having individual events */
00901       /* so we read from each in turn.                                */
00902 
00903       for ( i = 0; i < pe_ctl->num_events; i++ ) {
00904 
00905          ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer,
00906             sizeof ( papi_pe_buffer ) );
00907          if ( ret == -1 ) {
00908         PAPIERROR("read returned an error: ", strerror( errno ));
00909        SUBDBG("EXIT: PAPI_ESYS\n");
00910         return PAPI_ESYS;
00911      }
00912 
00913      /* We should read 3 64-bit values from the counter */
00914      if (ret<(signed)(3*sizeof(long long))) {
00915         PAPIERROR("Error!  short read!\n");
00916        SUBDBG("EXIT: PAPI_ESYS\n");
00917         return PAPI_ESYS;
00918      }
00919 
00920          SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
00921             pe_ctl->events[i].event_fd,
00922         (long)pe_ctl->tid, pe_ctl->events[i].cpu, ret);
00923          SUBDBG("read: %lld %lld %lld\n",papi_pe_buffer[0],
00924             papi_pe_buffer[1],papi_pe_buffer[2]);
00925 
00926          tot_time_enabled = papi_pe_buffer[1];
00927          tot_time_running = papi_pe_buffer[2];
00928 
00929          SUBDBG("count[%d] = (papi_pe_buffer[%d] %lld * "
00930         "tot_time_enabled %lld) / tot_time_running %lld\n",
00931         i, 0,papi_pe_buffer[0],
00932         tot_time_enabled,tot_time_running);
00933 
00934          if (tot_time_running == tot_time_enabled) {
00935         /* No scaling needed */
00936         pe_ctl->counts[i] = papi_pe_buffer[0];
00937          } else if (tot_time_running && tot_time_enabled) {
00938         /* Scale factor of 100 to avoid overflows when computing */
00939         /*enabled/running */
00940 
00941         scale = (tot_time_enabled * 100LL) / tot_time_running;
00942         scale = scale * papi_pe_buffer[0];
00943         scale = scale / 100LL;
00944         pe_ctl->counts[i] = scale;
00945      } else {
00946        /* This should not happen, but Phil reports it sometime does. */
00947         SUBDBG("perf_event kernel bug(?) count, enabled, "
00948            "running: %lld, %lld, %lld\n",
00949            papi_pe_buffer[0],tot_time_enabled,
00950            tot_time_running);
00951 
00952         pe_ctl->counts[i] = papi_pe_buffer[0];
00953      }
00954       }
00955    }
00956 
00957    /* Handle cases where we cannot use FORMAT GROUP */
00958    else if (pe_ctl->inherit) {
00959 
00960       /* we must read each counter individually */
00961       for ( i = 0; i < pe_ctl->num_events; i++ ) {
00962 
00963          ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer, 
00964             sizeof ( papi_pe_buffer ) );
00965          if ( ret == -1 ) {
00966         PAPIERROR("read returned an error: ", strerror( errno ));
00967        SUBDBG("EXIT: PAPI_ESYS\n");
00968         return PAPI_ESYS;
00969      }
00970 
00971      /* we should read one 64-bit value from each counter */
00972      if (ret!=sizeof(long long)) {
00973         PAPIERROR("Error!  short read!\n");
00974         PAPIERROR("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
00975            pe_ctl->events[i].event_fd,
00976            (long)pe_ctl->tid, pe_ctl->events[i].cpu, ret);
00977        SUBDBG("EXIT: PAPI_ESYS\n");
00978         return PAPI_ESYS;
00979      }
00980 
00981          SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
00982             pe_ctl->events[i].event_fd, (long)pe_ctl->tid,
00983         pe_ctl->events[i].cpu, ret);
00984          SUBDBG("read: %lld\n",papi_pe_buffer[0]);
00985 
00986      pe_ctl->counts[i] = papi_pe_buffer[0];
00987       }
00988    }
00989 
00990 
00991    /* Handle cases where we are using FORMAT_GROUP   */
00992    /* We assume only one group leader, in position 0 */
00993 
00994    else {
00995       if (pe_ctl->events[0].group_leader_fd!=-1) {
00996      PAPIERROR("Was expecting group leader!\n");
00997       }
00998 
00999       ret = read( pe_ctl->events[0].event_fd, papi_pe_buffer,
01000           sizeof ( papi_pe_buffer ) );
01001 
01002       if ( ret == -1 ) {
01003      PAPIERROR("read returned an error: ", strerror( errno ));
01004        SUBDBG("EXIT: PAPI_ESYS\n");
01005      return PAPI_ESYS;
01006       }
01007 
01008       /* we read 1 64-bit value (number of events) then     */
01009       /* num_events more 64-bit values that hold the counts */
01010       if (ret<(signed)((1+pe_ctl->num_events)*sizeof(long long))) {
01011      PAPIERROR("Error! short read!\n");
01012        SUBDBG("EXIT: PAPI_ESYS\n");
01013      return PAPI_ESYS;
01014       }
01015 
01016       SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
01017          pe_ctl->events[0].event_fd,
01018          (long)pe_ctl->tid, pe_ctl->events[0].cpu, ret);
01019       {
01020      int j;
01021      for(j=0;j<ret/8;j++) {
01022             SUBDBG("read %d: %lld\n",j,papi_pe_buffer[j]);
01023      }
01024       }
01025 
01026       /* Make sure the kernel agrees with how many events we have */
01027       if (papi_pe_buffer[0]!=pe_ctl->num_events) {
01028      PAPIERROR("Error!  Wrong number of events!\n");
01029        SUBDBG("EXIT: PAPI_ESYS\n");
01030      return PAPI_ESYS;
01031       }
01032 
01033       /* put the count values in their proper location */
01034       for(i=0;i<pe_ctl->num_events;i++) {
01035          pe_ctl->counts[i] = papi_pe_buffer[1+i];
01036       }
01037    }
01038 
01039    /* point PAPI to the values we read */
01040    *events = pe_ctl->counts;
01041 
01042    SUBDBG("EXIT: PAPI_OK\n");
01043    return PAPI_OK;
01044 }

Here is the call graph for this function:

static int _peu_reset ( hwd_context_t ctx,
hwd_control_state_t ctl 
) [static]

Definition at line 829 of file perf_event_uncore.c.

00830 {
00831    int i, ret;
00832    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00833 
00834    ( void ) ctx;             /*unused */
00835 
00836    /* We need to reset all of the events, not just the group leaders */
00837    for( i = 0; i < pe_ctl->num_events; i++ ) {
00838       ret = ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_RESET, NULL );
00839       if ( ret == -1 ) {
00840      PAPIERROR("ioctl(%d, PERF_EVENT_IOC_RESET, NULL) "
00841            "returned error, Linux says: %s",
00842            pe_ctl->events[i].event_fd, strerror( errno ) );
00843      return PAPI_ESYS;
00844       }
00845    }
00846 
00847    return PAPI_OK;
00848 }

Here is the call graph for this function:

Here is the caller graph for this function:

static int _peu_set_domain ( hwd_control_state_t ctl,
int  domain 
) [static]

Definition at line 802 of file perf_event_uncore.c.

00803 {
00804    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00805 
00806    SUBDBG("old control domain %d, new domain %d\n",
00807       pe_ctl->domain,domain);
00808 
00809    pe_ctl->domain = domain;
00810    return PAPI_OK;
00811 }

Here is the caller graph for this function:

static int _peu_shutdown_component ( void   )  [static]

Definition at line 660 of file perf_event_uncore.c.

00660                                 {
00661 
00662   /* deallocate our event table */
00663   _peu_libpfm4_shutdown(&_perf_event_uncore_vector, &uncore_native_event_table);
00664 
00665   /* Shutdown libpfm4 */
00666   _papi_libpfm4_shutdown();
00667 
00668   return PAPI_OK;
00669 }

Here is the call graph for this function:

static int _peu_shutdown_thread ( hwd_context_t ctx  )  [static]

Definition at line 815 of file perf_event_uncore.c.

00816 {
00817     pe_context_t *pe_ctx = ( pe_context_t *) ctx;
00818 
00819     pe_ctx->initialized=0;
00820 
00821     return PAPI_OK;
00822 }

static int _peu_start ( hwd_context_t ctx,
hwd_control_state_t ctl 
) [static]

Definition at line 1048 of file perf_event_uncore.c.

01049 {
01050    int ret;
01051    int i;
01052    int did_something = 0;
01053    pe_context_t *pe_ctx = ( pe_context_t *) ctx;
01054    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
01055 
01056    /* Reset the counters first.  Is this necessary? */
01057    ret = _peu_reset( pe_ctx, pe_ctl );
01058    if ( ret ) {
01059       return ret;
01060    }
01061 
01062    /* Enable all of the group leaders                */
01063    /* All group leaders have a group_leader_fd of -1 */
01064    for( i = 0; i < pe_ctl->num_events; i++ ) {
01065       if (pe_ctl->events[i].group_leader_fd == -1) {
01066      SUBDBG("ioctl(enable): fd: %d\n", pe_ctl->events[i].event_fd);
01067      ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_ENABLE, NULL) ; 
01068 
01069      /* ioctls always return -1 on failure */
01070          if (ret == -1) {
01071             PAPIERROR("ioctl(PERF_EVENT_IOC_ENABLE) failed.\n");
01072             return PAPI_ESYS;
01073      }
01074 
01075      did_something++;
01076       } 
01077    }
01078 
01079    if (!did_something) {
01080       PAPIERROR("Did not enable any counters.\n");
01081       return PAPI_EBUG;
01082    }
01083 
01084    pe_ctx->state |= PERF_EVENTS_RUNNING;
01085 
01086    return PAPI_OK;
01087 
01088 }

Here is the call graph for this function:

static int _peu_stop ( hwd_context_t ctx,
hwd_control_state_t ctl 
) [static]

Definition at line 1092 of file perf_event_uncore.c.

01093 {
01094 
01095    int ret;
01096    int i;
01097    pe_context_t *pe_ctx = ( pe_context_t *) ctx;
01098    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
01099 
01100    /* Just disable the group leaders */
01101    for ( i = 0; i < pe_ctl->num_events; i++ ) {
01102       if ( pe_ctl->events[i].group_leader_fd == -1 ) {
01103      ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_DISABLE, NULL);
01104      if ( ret == -1 ) {
01105         PAPIERROR( "ioctl(%d, PERF_EVENT_IOC_DISABLE, NULL) "
01106                "returned error, Linux says: %s",
01107                pe_ctl->events[i].event_fd, strerror( errno ) );
01108         return PAPI_EBUG;
01109      }
01110       }
01111    }
01112 
01113    pe_ctx->state &= ~PERF_EVENTS_RUNNING;
01114 
01115    return PAPI_OK;
01116 }

Here is the call graph for this function:

int _peu_update_control_state ( hwd_control_state_t ctl,
NativeInfo_t native,
int  count,
hwd_context_t ctx 
)

Definition at line 676 of file perf_event_uncore.c.

00679 {
00680     int i;
00681     int j;
00682     int ret;
00683     int skipped_events=0;
00684     struct native_event_t *ntv_evt;
00685    pe_context_t *pe_ctx = ( pe_context_t *) ctx;
00686    pe_control_t *pe_ctl = ( pe_control_t *) ctl;
00687 
00688    /* close all of the existing fds and start over again */
00689    /* In theory we could have finer-grained control and know if             */
00690    /* things were changed, but it's easier to tear things down and rebuild. */
00691    close_pe_events( pe_ctx, pe_ctl );
00692 
00693    /* Calling with count==0 should be OK, it's how things are deallocated */
00694    /* when an eventset is destroyed.                                      */
00695    if ( count == 0 ) {
00696       SUBDBG( "Called with count == 0\n" );
00697       return PAPI_OK;
00698    }
00699 
00700    /* set up all the events */
00701    for( i = 0; i < count; i++ ) {
00702       if ( native ) {
00703             // get the native event pointer used for this papi event
00704             int ntv_idx = _papi_hwi_get_ntv_idx((unsigned)(native[i].ni_papi_code));
00705             if (ntv_idx < -1) {
00706                 SUBDBG("papi_event_code: %#x known by papi but not by the component\n", native[i].ni_papi_code);
00707                 continue;
00708             }
00709             // if native index is -1, then we have an event without a mask and need to find the right native index to use
00710             if (ntv_idx == -1) {
00711                 // find the native event index we want by matching for the right papi event code
00712                 for (j=0 ; j<pe_ctx->event_table->num_native_events ; j++) {
00713                     if (pe_ctx->event_table->native_events[j].papi_event_code == native[i].ni_papi_code) {
00714                         ntv_idx = j;
00715                     }
00716                 }
00717             }
00718 
00719             // if native index is still negative, we did not find event we wanted so just return error
00720             if (ntv_idx < 0) {
00721                 SUBDBG("papi_event_code: %#x not found in native event tables\n", native[i].ni_papi_code);
00722                 continue;
00723             }
00724 
00725             // this native index is positive so there was a mask with the event, the ntv_idx identifies which native event to use
00726             ntv_evt = (struct native_event_t *)(&(pe_ctx->event_table->native_events[ntv_idx]));
00727 
00728             SUBDBG("ntv_evt: %p\n", ntv_evt);
00729 
00730             SUBDBG("i: %d, pe_ctx->event_table->num_native_events: %d\n", i, pe_ctx->event_table->num_native_events);
00731 
00732             // Move this events hardware config values and other attributes to the perf_events attribute structure
00733             memcpy (&pe_ctl->events[i].attr, &ntv_evt->attr, sizeof(perf_event_attr_t));
00734 
00735             // may need to update the attribute structure with information from event set level domain settings (values set by PAPI_set_domain)
00736             // only done if the event mask which controls each counting domain was not provided
00737 
00738             // get pointer to allocated name, will be NULL when adding preset events to event set
00739             char *aName = ntv_evt->allocated_name;
00740             if ((aName == NULL)  ||  (strstr(aName, ":u=") == NULL)) {
00741                 SUBDBG("set exclude_user attribute from eventset level domain flags, encode: %d, eventset: %d\n", pe_ctl->events[i].attr.exclude_user, !(pe_ctl->domain & PAPI_DOM_USER));
00742                 pe_ctl->events[i].attr.exclude_user = !(pe_ctl->domain & PAPI_DOM_USER);
00743             }
00744             if ((aName == NULL)  ||  (strstr(aName, ":k=") == NULL)) {
00745                 SUBDBG("set exclude_kernel attribute from eventset level domain flags, encode: %d, eventset: %d\n", pe_ctl->events[i].attr.exclude_kernel, !(pe_ctl->domain & PAPI_DOM_KERNEL));
00746                 pe_ctl->events[i].attr.exclude_kernel = !(pe_ctl->domain & PAPI_DOM_KERNEL);
00747             }
00748 
00749             // set the cpu number provided with an event mask if there was one (will be -1 if mask not provided)
00750             pe_ctl->events[i].cpu = ntv_evt->cpu;
00751             // if cpu event mask not provided, then set the cpu to use to what may have been set on call to PAPI_set_opt (will still be -1 if not called)
00752             if (pe_ctl->events[i].cpu == -1) {
00753                 pe_ctl->events[i].cpu = pe_ctl->cpu;
00754             }
00755       } else {
00756           // This case happens when called from _pe_set_overflow and _pe_ctl
00757           // Those callers put things directly into the pe_ctl structure so it is already set for the open call
00758       }
00759 
00760       // Copy the inherit flag into the attribute block that will be passed to the kernel
00761       pe_ctl->events[i].attr.inherit = pe_ctl->inherit;
00762 
00763       /* Set the position in the native structure */
00764       /* We just set up events linearly           */
00765       if ( native ) {
00766           native[i].ni_position = i;
00767           SUBDBG( "&native[%d]: %p, ni_papi_code: %#x, ni_event: %#x, ni_position: %d, ni_owners: %d\n",
00768             i, &(native[i]), native[i].ni_papi_code, native[i].ni_event, native[i].ni_position, native[i].ni_owners);
00769       }
00770    }
00771 
00772     if (count <= skipped_events) {
00773         SUBDBG("EXIT: No events to count, they all contained invalid umasks\n");
00774         return PAPI_ENOEVNT;
00775     }
00776 
00777   pe_ctl->num_events = count - skipped_events;
00778 
00779    /* actuall open the events */
00780    /* (why is this a separate function?) */
00781    ret = open_pe_events( pe_ctx, pe_ctl );
00782    if ( ret != PAPI_OK ) {
00783       SUBDBG("open_pe_events failed\n");
00784       /* Restore values ? */
00785       return ret;
00786    }
00787 
00788    SUBDBG( "EXIT: PAPI_OK\n" );
00789    return PAPI_OK;
00790 }

Here is the call graph for this function:

Here is the caller graph for this function:

static int _peu_write ( hwd_context_t ctx,
hwd_control_state_t ctl,
long long *  from 
) [static]

Definition at line 854 of file perf_event_uncore.c.

00856 {
00857    ( void ) ctx;             /*unused */
00858    ( void ) ctl;             /*unused */
00859    ( void ) from;            /*unused */
00860    /*
00861     * Counters cannot be written.  Do we need to virtualize the
00862     * counters so that they can be written, or perhaps modify code so that
00863     * they can be written? FIXME ?
00864     */
00865 
00866     return PAPI_ENOSUPP;
00867 }

/* Verify that the kernel can actually schedule the opened event set.
 * Some kernels report a successful open yet fail to schedule the events
 * (NMI watchdog conflicts and similar bugs), which shows up as a
 * zero-byte read.  We briefly enable, disable, and read every event to
 * detect that, then reset all counters back to zero.
 *
 * NOTE: PERF_EVENT_IOC_RESET does not reset the time-running info, so
 * this must not be called when multiplexing.
 *
 * @param ctx  thread context (unused)
 * @param ctl  control state holding the opened events
 * @return PAPI_OK, PAPI_ESYS on ioctl/read failure, or PAPI_ECNFLCT
 *         when an event could not be scheduled
 */
static int
check_scheduability( pe_context_t *ctx, pe_control_t *ctl )
{
	SUBDBG("ENTER: ctx: %p, ctl: %p\n", ctx, ctl);

	( void ) ctx;		/* unused */

	long long read_buffer[READ_BUFFER_SIZE];
	ssize_t bytes_read;
	int idx;

	/* Enable every event so the kernel must try to schedule them. */
	for( idx = 0; idx < ctl->num_events; idx++ ) {
		if ( ioctl( ctl->events[idx].event_fd,
			    PERF_EVENT_IOC_ENABLE, NULL ) == -1 ) {
			SUBDBG("EXIT: Enable failed event index: %d, num_events: %d, return PAPI_ESYS\n", idx, ctl->num_events);
			return PAPI_ESYS;
		}
	}

	/* Stop them again before reading. */
	for( idx = 0; idx < ctl->num_events; idx++ ) {
		if ( ioctl( ctl->events[idx].event_fd,
			    PERF_EVENT_IOC_DISABLE, NULL ) == -1 ) {
			SUBDBG("EXIT: Disable failed: event index: %d, num_events: %d, return PAPI_ESYS\n", idx, ctl->num_events);
			return PAPI_ESYS;
		}
	}

	/* A read of 0 bytes means the event could not be scheduled. */
	for( idx = 0; idx < ctl->num_events; idx++ ) {
		bytes_read = read( ctl->events[idx].event_fd,
				   read_buffer, sizeof(read_buffer) );
		if ( bytes_read == -1 ) {
			SUBDBG( "EXIT: read failed: event index: %d, num_events: %d, return PAPI_ESYS.  Should never happen.\n", idx, ctl->num_events);
			return PAPI_ESYS;
		}
		if ( bytes_read == 0 ) {
			/* The kernel should have caught this at open time, */
			/* but various bugs (including the NMI watchdog)    */
			/* result in this behavior.                         */
			SUBDBG( "EXIT: read returned 0: event index: %d, num_events: %d, return PAPI_ECNFLCT.\n", idx, ctl->num_events);
			return PAPI_ECNFLCT;
		}
	}

	/* Reset all counters (opened so far) back to zero after the     */
	/* brief enable/disable above.  Resetting the group leader does  */
	/* not reset the whole group, so every event is reset            */
	/* individually; events are added one at a time so we never need */
	/* to touch entries that have not been initialized yet.          */
	for( idx = 0; idx < ctl->num_events; idx++ ) {
		if ( ioctl( ctl->events[idx].event_fd,
			    PERF_EVENT_IOC_RESET, NULL ) == -1 ) {
			SUBDBG("EXIT: Reset failed: event index: %d, num_events: %d, return PAPI_ESYS\n", idx, ctl->num_events);
			return PAPI_ESYS;
		}
	}

	SUBDBG("EXIT: return PAPI_OK\n");
	return PAPI_OK;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/* Helper for close_pe_events(): munmap (if mapped) and close one
 * opened event, marking it closed.
 * @return PAPI_OK on success, PAPI_ESYS if munmap or close fails
 */
static int
close_one_pe_event( pe_control_t *ctl, int i )
{
   if ( ctl->events[i].mmap_buf ) {
      if ( munmap( ctl->events[i].mmap_buf,
                   ctl->events[i].nr_mmap_pages * getpagesize() ) ) {
         PAPIERROR( "munmap of fd = %d returned error: %s",
                    ctl->events[i].event_fd, strerror( errno ) );
         return PAPI_ESYS;
      }
   }

   if ( close( ctl->events[i].event_fd ) ) {
      PAPIERROR( "close of fd = %d returned error: %s",
                 ctl->events[i].event_fd, strerror( errno ) );
      return PAPI_ESYS;
   }

   ctl->events[i].event_opened=0;
   return PAPI_OK;
}

/* Close all events in the control state: child (non-leader) events
 * first, then the group leaders, so a leader is never closed while a
 * sibling still references it.  Verifies that every event was either
 * closed or never opened.
 *
 * @param ctx  thread context; PERF_EVENTS_OPENED is cleared on success
 * @param ctl  control state whose events are closed; num_events is
 *             reset to 0
 * @return PAPI_OK, PAPI_ESYS on munmap/close failure, PAPI_EBUG if the
 *         closed/not-opened tallies do not add up
 */
static int
close_pe_events( pe_context_t *ctx, pe_control_t *ctl )
{
   int i;
   int ret;
   int num_closed=0;
   int events_not_opened=0;

   /* should this be a more serious error? */
   if ( ctx->state & PERF_EVENTS_RUNNING ) {
      SUBDBG("Closing without stopping first\n");
   }

   /* Close child events first */
   for( i=0; i<ctl->num_events; i++ ) {
      if (!ctl->events[i].event_opened) {
         events_not_opened++;
         continue;
      }
      if (ctl->events[i].group_leader_fd!=-1) {
         ret = close_one_pe_event( ctl, i );
         if (ret != PAPI_OK) return ret;
         num_closed++;
      }
   }

   /* Close the group leaders last */
   for( i=0; i<ctl->num_events; i++ ) {
      if ( (ctl->events[i].event_opened) &&
           (ctl->events[i].group_leader_fd==-1) ) {
         ret = close_one_pe_event( ctl, i );
         if (ret != PAPI_OK) return ret;
         num_closed++;
      }
   }

   /* Sanity check: every event must be accounted for. */
   if (ctl->num_events!=num_closed) {
      if (ctl->num_events!=(num_closed+events_not_opened)) {
         PAPIERROR("Didn't close all events: "
                   "Closed %d Not Opened: %d Expected %d\n",
                   num_closed,events_not_opened,ctl->num_events);
         return PAPI_EBUG;
      }
   }

   ctl->num_events=0;

   ctx->state &= ~PERF_EVENTS_OPENED;

   return PAPI_OK;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/* Build the perf_event_attr read_format bitmask for an event.
 *
 * @param multiplex     non-zero when the event set is multiplexed;
 *                      adds the time-enabled/time-running fields needed
 *                      to scale the counts
 * @param inherit       non-zero when child tasks inherit the event;
 *                      group reads are not possible with inherit, so it
 *                      suppresses PERF_FORMAT_GROUP
 * @param format_group  non-zero when this event is a group leader that
 *                      should read its whole group at once
 * @return the PERF_FORMAT_* bitmask to place in attr.read_format
 */
static unsigned int
get_read_format( unsigned int multiplex,
                 unsigned int inherit,
                 int format_group )
{
	unsigned int format = 0;

	/* Multiplexing needs the timing fields so counts can be scaled. */
	if ( multiplex ) {
		format |= PERF_FORMAT_TOTAL_TIME_ENABLED |
		          PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/* Group reads only work when inherit is off. */
	if ( format_group && !inherit ) {
		format |= PERF_FORMAT_GROUP;
	}

	SUBDBG("multiplex: %d, inherit: %d, group_leader: %d, format: %#x\n",
	       multiplex, inherit, format_group, format);

	return format;
}

Here is the caller graph for this function:

static int map_perf_event_errors_to_papi ( int  perf_event_error  )  [static]

Definition at line 174 of file perf_event_uncore.c.

00174                                                                {
00175 
00176    int ret;
00177 
00178    /* These mappings are approximate.
00179       EINVAL in particular can mean lots of different things */
00180    switch(perf_event_error) {
00181       case EPERM:
00182       case EACCES:
00183            ret = PAPI_EPERM;
00184        break;
00185       case ENODEV:
00186       case EOPNOTSUPP:
00187        ret = PAPI_ENOSUPP;
00188            break;
00189       case ENOENT:
00190        ret = PAPI_ENOEVNT;
00191            break;
00192       case ENOSYS:
00193       case EAGAIN:
00194       case EBUSY:
00195       case E2BIG:
00196        ret = PAPI_ESYS;
00197        break;
00198       case ENOMEM:
00199        ret = PAPI_ENOMEM;
00200        break;
00201       case EINVAL:
00202       default:
00203        ret = PAPI_EINVAL;
00204            break;
00205    }
00206    return ret;
00207 }

Here is the caller graph for this function:

/* Open every event in the control state via perf_event_open(), verify
 * the set is actually schedulable, and mark the context OPENED.  On any
 * failure, all fds opened so far are closed again.
 *
 * @param ctx  thread context; PERF_EVENTS_OPENED is set on success
 * @param ctl  control state with attr blocks already mostly filled in
 * @return PAPI_OK, or a PAPI error mapped from errno / returned by
 *         check_scheduability()
 */
static int
open_pe_events( pe_context_t *ctx, pe_control_t *ctl )
{
   int i, ret = PAPI_OK;
   long pid;

   /* System-wide granularity attaches to a CPU rather than a task, */
   /* which perf_event_open() requests with pid == -1.              */
   if (ctl->granularity==PAPI_GRN_SYS) {
      pid = -1;
   }
   else {
      pid = ctl->tid;
   }

   for( i = 0; i < ctl->num_events; i++ ) {

      ctl->events[i].event_opened=0;

      /* set up the attr structure.  We don't set up all fields here */
      /* as some have already been set up previously.                */

/*
 * The following code controls how the uncore component interfaces with the
 * kernel for uncore events.  The code inside the ifdef will use grouping of
 * uncore events which can make the cost of reading the results more efficient.
 * The problem with it is that the uncore component supports 20 different uncore
 * PMU's.  The kernel requires that all events in a group must be for the same PMU.
 * This means that with grouping enabled papi applications can count events on only
 * one of the 20 PMU's during a run.
 *
 * The code inside the else clause treats each event in the event set as
 * independent.  When running in this mode the kernel allows papi to count
 * events from multiple uncore PMU's at the same time.
 *
 * Example:
 *  An application wants to measure all the L3 cache write requests.
 *  The event to do this is part of a cbox pmu (there are 8 cbox pmu's).
 *  When built with the code in the ifdef, the application would have to be
 *    run 8 times and count write requests from one pmu at a time.
 *  When built with the code in the else, the write requests in all 8 cbox
 *    pmu's could be counted in the same run.
 *
 */
// #define GROUPIT 1       // remove the comment on this line to force event grouping
#ifdef GROUPIT
      /* group leader (event 0) is special                */
      /* If we're multiplexed, everyone is a group leader */
      if (( i == 0 ) || (ctl->multiplexed)) {
         ctl->events[i].attr.pinned = !ctl->multiplexed;
         ctl->events[i].attr.disabled = 1;
         ctl->events[i].group_leader_fd=-1;
         ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
                                                           ctl->inherit,
                                                           !ctl->multiplexed );
      } else {
         ctl->events[i].attr.pinned=0;
         ctl->events[i].attr.disabled = 0;
         ctl->events[i].group_leader_fd=ctl->events[0].event_fd;
         ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
                                                           ctl->inherit,
                                                           0 );
      }
#else
      /* Ungrouped: every event is its own "leader" so that events */
      /* from different uncore PMUs can be counted in one run.     */
      ctl->events[i].attr.pinned = !ctl->multiplexed;
      ctl->events[i].attr.disabled = 1;
      ctl->inherit = 1;
      ctl->events[i].group_leader_fd=-1;
      ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed, ctl->inherit, 0 );
#endif

      /* try to open */
      ctl->events[i].event_fd = sys_perf_event_open( &ctl->events[i].attr,
                                                     pid,
                                                     ctl->events[i].cpu,
                                                     ctl->events[i].group_leader_fd,
                                                     0 /* flags */
                                                     );

      /* Try to match Linux errors to PAPI errors */
      if ( ctl->events[i].event_fd == -1 ) {
         SUBDBG("sys_perf_event_open returned error on event #%d."
                "  Error: %s\n",
                i, strerror( errno ) );
         ret=map_perf_event_errors_to_papi(errno);

         goto open_peu_cleanup;
      }

      SUBDBG ("sys_perf_event_open: tid: %ld, cpu_num: %d,"
              " group_leader/fd: %d, event_fd: %d,"
              " read_format: %"PRIu64"\n",
              pid, ctl->events[i].cpu, ctl->events[i].group_leader_fd,
              ctl->events[i].event_fd, ctl->events[i].attr.read_format);

      ctl->events[i].event_opened=1;
   }

   /* in many situations the kernel will indicate we opened fine */
   /* yet things will fail later.  So we need to double check    */
   /* we actually can use the events we've set up.               */

   /* This is not necessary if we are multiplexing, and in fact */
   /* we cannot do this properly if multiplexed because         */
   /* PERF_EVENT_IOC_RESET does not reset the time running info */
   if (!ctl->multiplexed) {
      ret = check_scheduability( ctx, ctl);

      if ( ret != PAPI_OK ) {
         /* BUGFIX: at this point the open loop has completed, so i     */
         /* already equals ctl->num_events (all events opened).  The    */
         /* old code incremented i again here, which made the cleanup   */
         /* loop start at events[num_events] -- one past the opened     */
         /* entries -- and could close a stale or unrelated fd.         */
         goto open_peu_cleanup;
      }
   }

   /* Now that we've successfully opened all of the events, do whatever  */
   /* "tune-up" is needed to attach the mmap'd buffers, signal handlers, */
   /* and so on.                                                         */
   for ( i = 0; i < ctl->num_events; i++ ) {

      /* No sampling if uncore */
      ctl->events[i].mmap_buf = NULL;
   }

   /* Mark the context opened only if completely successful */
   ctx->state |= PERF_EVENTS_OPENED;

   return PAPI_OK;

open_peu_cleanup:
   /* We encountered an error, close up the fds we successfully opened.  */
   /* We go backward in an attempt to close group leaders last, although */
   /* That's probably not strictly necessary.                            */
   while ( i > 0 ) {
      i--;
      if (ctl->events[i].event_fd>=0) {
         close( ctl->events[i].event_fd );
         ctl->events[i].event_opened=0;
      }
   }

   return ret;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/* Thin wrapper around the perf_event_open() syscall that dumps the
 * attr structure to the debug log before the call and logs the result
 * (with errno on failure) afterwards.
 *
 * @param hw_event  fully-populated perf_event_attr describing the event
 * @param pid       task to attach to, or -1 for CPU-wide
 * @param cpu       CPU to attach to, or -1 for any
 * @param group_fd  fd of the group leader, or -1 for a new group
 * @param flags     PERF_FLAG_* bits
 * @return the new event fd (>= 0), or -1 with errno set
 */
static long
sys_perf_event_open( struct perf_event_attr *hw_event, pid_t pid,
                     int cpu, int group_fd, unsigned long flags )
{
   /* BUGFIX: syscall() returns long and this function is declared to  */
   /* return long, so keep the result in a long instead of truncating  */
   /* it through an int.                                               */
   long ret;

   SUBDBG("sys_perf_event_open(hw_event: %p, pid: %d, cpu: %d, group_fd: %d, flags: %lx\n",hw_event,pid,cpu,group_fd,flags);
   SUBDBG("   type: %d\n",hw_event->type);
   SUBDBG("   size: %d\n",hw_event->size);
   SUBDBG("   config: %#"PRIx64" (%"PRIu64")\n",hw_event->config,
      hw_event->config);
   SUBDBG("   sample_period: %"PRIu64"\n",hw_event->sample_period);
   SUBDBG("   sample_type: %"PRIu64"\n",hw_event->sample_type);
   SUBDBG("   read_format: %"PRIu64"\n",hw_event->read_format);
   SUBDBG("   disabled: %d\n",hw_event->disabled);
   SUBDBG("   inherit: %d\n",hw_event->inherit);
   SUBDBG("   pinned: %d\n",hw_event->pinned);
   SUBDBG("   exclusive: %d\n",hw_event->exclusive);
   SUBDBG("   exclude_user: %d\n",hw_event->exclude_user);
   SUBDBG("   exclude_kernel: %d\n",hw_event->exclude_kernel);
   SUBDBG("   exclude_hv: %d\n",hw_event->exclude_hv);
   SUBDBG("   exclude_idle: %d\n",hw_event->exclude_idle);
   SUBDBG("   mmap: %d\n",hw_event->mmap);
   SUBDBG("   comm: %d\n",hw_event->comm);
   SUBDBG("   freq: %d\n",hw_event->freq);
   SUBDBG("   inherit_stat: %d\n",hw_event->inherit_stat);
   SUBDBG("   enable_on_exec: %d\n",hw_event->enable_on_exec);
   SUBDBG("   task: %d\n",hw_event->task);
   SUBDBG("   watermark: %d\n",hw_event->watermark);
   SUBDBG("   precise_ip: %d\n",hw_event->precise_ip);
   SUBDBG("   mmap_data: %d\n",hw_event->mmap_data);
   SUBDBG("   sample_id_all: %d\n",hw_event->sample_id_all);
   SUBDBG("   exclude_host: %d\n",hw_event->exclude_host);
   SUBDBG("   exclude_guest: %d\n",hw_event->exclude_guest);
   SUBDBG("   exclude_callchain_kernel: %d\n",hw_event->exclude_callchain_kernel);
   SUBDBG("   exclude_callchain_user: %d\n",hw_event->exclude_callchain_user);
   SUBDBG("   wakeup_watermark: %d\n",hw_event->wakeup_watermark);
   SUBDBG("   bp_type: %d\n",hw_event->bp_type);
   SUBDBG("   config1: %#lx (%lu)\n",hw_event->config1,hw_event->config1);
   SUBDBG("   config2: %#lx (%lu)\n",hw_event->config2,hw_event->config2);
   SUBDBG("   branch_sample_type: %lu\n",hw_event->branch_sample_type);
   SUBDBG("   sample_regs_user: %lu\n",hw_event->sample_regs_user);
   SUBDBG("   sample_stack_user: %d\n",hw_event->sample_stack_user);

    ret = syscall( __NR_perf_event_open, hw_event, pid, cpu, group_fd, flags );
    /* %ld matches the long result (the old code logged it as %d) */
    SUBDBG("Returned %ld %d %s\n",ret,
           ret<0?errno:0,
           ret<0?strerror(errno):" ");
    return ret;
}

Here is the caller graph for this function:


Variable Documentation

Definition at line 48 of file perf_event_uncore.c.

int our_cidx [static]

Definition at line 52 of file perf_event_uncore.c.

Definition at line 51 of file perf_event_uncore.c.


Generated on 17 Nov 2016 for PAPI by  doxygen 1.6.1