Loop Id: 96 | Module: exec | Source: timestep.c:107-116 | Coverage: 0.02% |
---|
0x412b00 MOV %RCX,%RBX |
0x412b03 NOPW %CS:(%RAX,%RAX,1) |
0x412b10 ADD $0x40,%ESI |
0x412b13 CMP %R12,%RDI |
0x412b16 LEA 0x1(%RDI),%RDI |
0x412b1a JE 412a3b |
0x412b20 LEA (%RDI,%RAX,1),%RDX |
0x412b24 MOV (%RBX,%RDX,4),%R14D |
0x412b28 TEST %R14D,%R14D |
0x412b2b JLE 412b10 |
0x412b2d LEA (%RAX,%RDI,1),%R10D |
0x412b31 SAL $0x6,%R10D |
0x412b35 MOV -0x58(%RBP),%R8 |
0x412b39 MOV 0x20(%R8),%RDX |
0x412b3d MOV 0x28(%R8),%R8 |
0x412b41 MOV 0x10(%RDX),%R11 |
0x412b45 MOV 0x20(%RDX),%R9 |
0x412b49 MOV %R14D,%R13D |
0x412b4c AND $-0x8,%R13D |
0x412b50 JE 412c80 |
0x412b56 MOV %RBX,%RCX |
0x412b59 MOV %ESI,%R15D |
0x412b5c SAL $0x2,%R15 |
0x412b60 LEA -0x1(%R13),%EBX |
0x412b64 ADD %R11,%R15 |
0x412b67 VXORPD %XMM10,%XMM10,%XMM10 |
0x412b6c VMOVD %R10D,%XMM11 |
0x412b71 XOR %EDX,%EDX |
0x412b73 NOPW %CS:(%RAX,%RAX,1) |
(97) 0x412b80 VMOVQ %RDX,%XMM12 |
(97) 0x412b85 VPADDQ %XMM12,%XMM11,%XMM12 |
(97) 0x412b8a VPSLLQ $0x3,%XMM12,%XMM13 |
(97) 0x412b90 VPSLLQ $0x4,%XMM12,%XMM12 |
(97) 0x412b96 VPADDQ %XMM13,%XMM12,%XMM12 |
(97) 0x412b9b VPBROADCASTQ %XMM12,%ZMM12 |
(97) 0x412ba1 VPADDQ %ZMM4,%ZMM12,%ZMM12 |
(97) 0x412ba7 VPXOR %XMM13,%XMM13,%XMM13 |
(97) 0x412bac KXNORW %K0,%K0,%K1 |
(97) 0x412bb0 VGATHERQPD (%R9,%ZMM12,1),%ZMM13{%K1} |
(97) 0x412bb7 VXORPD %XMM14,%XMM14,%XMM14 |
(97) 0x412bbc KXNORW %K0,%K0,%K1 |
(97) 0x412bc0 VGATHERQPD 0x8(%R9,%ZMM12,1),%ZMM14{%K1} |
(97) 0x412bc8 VXORPD %XMM15,%XMM15,%XMM15 |
(97) 0x412bcd KXNORW %K0,%K0,%K1 |
(97) 0x412bd1 VGATHERQPD 0x10(%R9,%ZMM12,1),%ZMM15{%K1} |
(97) 0x412bd9 VPMOVSXDQ (%R15,%RDX,4),%ZMM12 |
(97) 0x412be0 VMULPD %ZMM13,%ZMM13,%ZMM13 |
(97) 0x412be6 VFMADD213PD %ZMM13,%ZMM14,%ZMM14 |
(97) 0x412bec VFMADD213PD %ZMM14,%ZMM15,%ZMM15 |
(97) 0x412bf2 VMULPD %ZMM3,%ZMM15,%ZMM13 |
(97) 0x412bf8 VPSLLQ $0x4,%ZMM12,%ZMM12 |
(97) 0x412bff VXORPD %XMM14,%XMM14,%XMM14 |
(97) 0x412c04 KXNORW %K0,%K0,%K1 |
(97) 0x412c08 VGATHERQPD 0x8(%R8,%ZMM12,1),%ZMM14{%K1} |
(97) 0x412c10 VDIVPD %ZMM14,%ZMM13,%ZMM12 |
(97) 0x412c16 VADDPD %ZMM10,%ZMM12,%ZMM10 |
(97) 0x412c1c ADD $0x8,%RDX |
(97) 0x412c20 CMP %EBX,%EDX |
(97) 0x412c22 JLE 412b80 |
0x412c28 VEXTRACTF64X4 $0x1,%ZMM10,%YMM11 |
0x412c2f VADDPD %ZMM11,%ZMM10,%ZMM10 |
0x412c35 VMOVAPD %XMM10,%XMM11 |
0x412c3a VEXTRACTF128 $0x1,%YMM10,%XMM10 |
0x412c40 VADDPD %XMM10,%XMM11,%XMM10 |
0x412c45 VSHUFPD $0x1,%XMM10,%XMM10,%XMM11 |
0x412c4b VADDSD %XMM11,%XMM10,%XMM10 |
0x412c50 VADDSD %XMM0,%XMM10,%XMM0 |
0x412c54 CMP %R13D,%R14D |
0x412c57 JE 412b00 |
0x412c5d VPBROADCASTD %R14D,%YMM10 |
0x412c63 MOV %RCX,%RBX |
0x412c66 JMP 412c89 |
0x412c80 VPBROADCASTD %R14D,%YMM10 |
0x412c86 XOR %R13D,%R13D |
0x412c89 VPBROADCASTD %R13D,%YMM11 |
0x412c8f VPSUBD %YMM11,%YMM10,%YMM10 |
0x412c94 VPCMPNLEUD %YMM1,%YMM10,%K1 |
0x412c9b KORTESTB %K1,%K1 |
0x412c9f JE 412d80 |
0x412ca5 VPCMPNLEUD %YMM2,%YMM10,%K2 |
0x412cac VPBROADCASTQ %R10,%ZMM10 |
0x412cb2 VPADDD %YMM2,%YMM11,%YMM11 |
0x412cb6 VPMOVSXDQ %YMM11,%ZMM11 |
0x412cbc VPADDQ %ZMM11,%ZMM10,%ZMM10 |
0x412cc2 VPSLLQ $0x3,%ZMM10,%ZMM11 |
0x412cc9 VPSLLQ $0x4,%ZMM10,%ZMM10 |
0x412cd0 VPADDQ %ZMM11,%ZMM10,%ZMM10 |
0x412cd6 VPXOR %XMM11,%XMM11,%XMM11 |
0x412cdb KMOVQ %K2,%K3 |
0x412ce0 VGATHERQPD (%R9,%ZMM10,1),%ZMM11{%K3} |
0x412ce7 MOVSXD %R13D,%RDX |
0x412cea ADD %R10,%RDX |
0x412ced VMOVDQU32 (%R11,%RDX,4),%YMM12{%K2}{z} |
0x412cf4 VMOVDQA32 %YMM12,%YMM9{%K2} |
0x412cfa VMOVAPD %ZMM11,%ZMM8{%K2} |
0x412d00 VXORPD %XMM11,%XMM11,%XMM11 |
0x412d05 KMOVQ %K2,%K3 |
0x412d0a VGATHERQPD 0x8(%R9,%ZMM10,1),%ZMM11{%K3} |
0x412d12 VPXOR %XMM12,%XMM12,%XMM12 |
0x412d17 KMOVQ %K2,%K3 |
0x412d1c VGATHERQPD 0x10(%R9,%ZMM10,1),%ZMM12{%K3} |
0x412d24 VMULPD %ZMM8,%ZMM8,%ZMM10 |
0x412d2a VMOVAPD %ZMM11,%ZMM7{%K2} |
0x412d30 VFMADD231PD %ZMM7,%ZMM7,%ZMM10 |
0x412d36 VMOVAPD %ZMM12,%ZMM6{%K2} |
0x412d3c VPMOVSXDQ %YMM9,%ZMM11 |
0x412d42 VPSLLQ $0x4,%ZMM11,%ZMM11 |
0x412d49 VXORPD %XMM12,%XMM12,%XMM12 |
0x412d4e KMOVQ %K2,%K3 |
0x412d53 VGATHERQPD 0x8(%R8,%ZMM11,1),%ZMM12{%K3} |
0x412d5b VFMADD231PD %ZMM6,%ZMM6,%ZMM10 |
0x412d61 VMULPD %ZMM3,%ZMM10,%ZMM10 |
0x412d67 VMOVAPD %ZMM12,%ZMM5{%K2} |
0x412d6d VDIVPD %ZMM5,%ZMM10,%ZMM10 |
0x412d73 JMP 412d85 |
0x412d80 VPXOR %XMM10,%XMM10,%XMM10 |
0x412d85 VMOVAPD %ZMM10,%ZMM10{%K1}{z} |
0x412d8b VEXTRACTF64X4 $0x1,%ZMM10,%YMM11 |
0x412d92 VADDPD %ZMM11,%ZMM10,%ZMM10 |
0x412d98 VMOVAPD %XMM10,%XMM11 |
0x412d9d VEXTRACTF128 $0x1,%YMM10,%XMM10 |
0x412da3 VADDPD %XMM10,%XMM11,%XMM10 |
0x412da8 VSHUFPD $0x1,%XMM10,%XMM10,%XMM11 |
0x412dae VADDSD %XMM11,%XMM10,%XMM10 |
0x412db3 VADDSD %XMM0,%XMM10,%XMM0 |
0x412db7 JMP 412b10 |
/scratch_na/users/xoserete/qaas_runs/171-172-4338/intel/CoMD/build/CoMD/CoMD/src-openmp/timestep.c: 107 - 116 |
-------------------------------------------------------------------------------- |
107: #pragma omp parallel for reduction(+:kenergy) |
108: for (int iBox=0; iBox<s->boxes->nLocalBoxes; iBox++) |
109: { |
110: for (int iOff=MAXATOMS*iBox,ii=0; ii<s->boxes->nAtoms[iBox]; ii++,iOff++) |
111: { |
112: int iSpecies = s->atoms->iSpecies[iOff]; |
113: real_t invMass = 0.5/s->species[iSpecies].mass; |
114: kenergy += ( s->atoms->p[iOff][0] * s->atoms->p[iOff][0] + |
115: s->atoms->p[iOff][1] * s->atoms->p[iOff][1] + |
116: s->atoms->p[iOff][2] * s->atoms->p[iOff][2] )*invMass; |
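The inner loop above divides by the per-atom species mass on every iteration; in the generated code this becomes the VDIVPD (22-24 cycle latency, 16-cycle reciprocal throughput) shown in the instruction tables below, and ports P0/P5 are the reported bottlenecks. A minimal source-level sketch of hoisting that divide is shown here; it assumes only the SimFlat fields visible in the snippet, while the species-count bound nSpeciesMax and the invMass[] table are hypothetical names introduced for illustration:

```c
/* Hedged sketch only: hoist the per-atom divide out of the reduction.
 * s->species[].mass, s->atoms->iSpecies/p, s->boxes->nAtoms and MAXATOMS
 * are taken from the snippet above; nSpeciesMax and invMass[] are
 * hypothetical names used for this illustration. */
real_t invMass[nSpeciesMax];
for (int iSpec = 0; iSpec < nSpeciesMax; iSpec++)
   invMass[iSpec] = 0.5 / s->species[iSpec].mass;   /* one divide per species */

#pragma omp parallel for reduction(+:kenergy)
for (int iBox = 0; iBox < s->boxes->nLocalBoxes; iBox++)
{
   for (int iOff = MAXATOMS*iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ii++, iOff++)
   {
      int iSpecies = s->atoms->iSpecies[iOff];
      kenergy += ( s->atoms->p[iOff][0] * s->atoms->p[iOff][0] +
                   s->atoms->p[iOff][1] * s->atoms->p[iOff][1] +
                   s->atoms->p[iOff][2] * s->atoms->p[iOff][2] ) * invMass[iSpecies];
   }
}
```

This replaces the per-8-atom VDIVPD with a multiply by a gathered reciprocal; whether the compiler still emits a gather for invMass[iSpecies], and what the resulting CQA estimate is, would need to be re-measured.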
Path / |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.11 |
CQA speedup if FP arith vectorized | 1.00 |
CQA speedup if fully vectorized | 1.17 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 1.12 |
Bottlenecks | P0, P5 |
Function | kineticEnergy.extracted |
Source | timestep.c:107-116 |
Source loop unroll info | NA |
Source loop unroll confidence level | NA |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 21.00 |
CQA cycles if no scalar integer | 19.00 |
CQA cycles if FP arith vectorized | 21.00 |
CQA cycles if fully vectorized | 18.00 |
Front-end cycles | 18.67 |
DIV/SQRT cycles | 16.00 |
P0 cycles | 21.00 |
P1 cycles | 10.00 |
P2 cycles | 13.00 |
P3 cycles | 13.00 |
P4 cycles | 0.00 |
P5 cycles | 21.00 |
P6 cycles | 8.60 |
P7 cycles | 0.00 |
P8 cycles | 0.00 |
P9 cycles | 0.00 |
P10 cycles | 8.40 |
P11 cycles | 13.00 |
Inter-iter dependencies cycles | NA |
FE+BE cycles (UFS) | 23.32 - 49.36 |
Stall cycles (UFS) | 5.17 - 31.18 |
Nb insns | 96.00 |
Nb uops | 112.00 |
Nb loads | 11.00 |
Nb stores | 0.00 |
Nb stack references | 1.00 |
FLOP/cycle | 3.81 |
Nb FLOP add-sub | 24.00 |
Nb FLOP mul | 16.00 |
Nb FLOP fma | 16.00 |
Nb FLOP div | 8.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 15.81 |
Bytes prefetched | 0.00 |
Bytes loaded | 332.00 |
Bytes stored | 0.00 |
Stride 0 | NA |
Stride 1 | NA |
Stride n | NA |
Stride unknown | NA |
Stride indirect | NA |
Vectorization ratio all | 71.43 |
Vectorization ratio load | 83.33 |
Vectorization ratio store | NA |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 57.14 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | 100.00 |
Vectorization ratio other | 73.81 |
Vector-efficiency ratio all | 49.50 |
Vector-efficiency ratio load | 76.04 |
Vector-efficiency ratio store | NA |
Vector-efficiency ratio mul | 100.00 |
Vector-efficiency ratio add_sub | 44.20 |
Vector-efficiency ratio fma | 100.00 |
Vector-efficiency ratio div_sqrt | 100.00 |
Vector-efficiency ratio other | 46.28 |
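The reported 3.81 FLOP/cycle is consistent with the FLOP counts above, assuming each FMA is counted as two FLOPs: (24 add-sub + 16 mul + 2 x 16 fma + 8 div) / 21.00 CQA cycles = 80 / 21 ≈ 3.81 FLOP/cycle.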
Path / |
Function | kineticEnergy.extracted |
Source file and lines | timestep.c:107-116 |
Module | exec |
nb instructions | 96 |
nb uops | 112 |
loop length | 497 |
used x86 registers | 15 |
used mmx registers | 0 |
used xmm registers | 4 |
used ymm registers | 6 |
used zmm registers | 8 |
nb stack references | 1 |
ADD-SUB / MUL ratio | 4.00 |
micro-operation queue | 18.67 cycles |
front end | 18.67 cycles |
| P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 |
---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 21.00 | 10.00 | 13.00 | 13.00 | 0.00 | 21.00 | 8.60 | 0.00 | 0.00 | 0.00 | 8.40 | 13.00 |
cycles | 21.00 | 10.00 | 13.00 | 13.00 | 0.00 | 21.00 | 8.60 | 0.00 | 0.00 | 0.00 | 8.40 | 13.00 |
Cycles executing div or sqrt instructions | 16.00 |
FE+BE cycles | 23.32-49.36 |
Stall cycles | 5.17-31.18 |
ROB full (events) | 5.89-33.90 |
Front-end | 18.67 |
Dispatch | 21.00 |
DIV/SQRT | 16.00 |
Overall L1 | 21.00 |
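Assuming the overall L1 estimate is the maximum of the listed components, the figures above are self-consistent: max(18.67 front-end, 21.00 dispatch, 16.00 DIV/SQRT) = 21.00 cycles, i.e. the loop is bound by dispatch pressure on ports P0 and P5 rather than by the front-end or the divider alone.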
Vectorization ratios - INT (%) |
all | 53% |
load | 50% |
store | NA (no store vectorizable/vectorized instructions) |
mul | NA (no mul vectorizable/vectorized instructions) |
add-sub | 66% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 50% |
Vectorization ratios - FP (%) |
all | 87% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 50% |
fma | 100% |
div/sqrt | 100% |
other | 100% |
Vectorization ratios - INT+FP (%) |
all | 71% |
load | 83% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 57% |
fma | 100% |
div/sqrt | 100% |
other | 73% |
Vector efficiency ratios - INT (%) |
all | 36% |
load | 28% |
store | NA (no store vectorizable/vectorized instructions) |
mul | NA (no mul vectorizable/vectorized instructions) |
add-sub | 53% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 32% |
Vector efficiency ratios - FP (%) |
all | 61% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 37% |
fma | 100% |
div/sqrt | 100% |
other | 61% |
Vector efficiency ratios - INT+FP (%) |
all | 49% |
load | 76% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 44% |
fma | 100% |
div/sqrt | 100% |
other | 46% |
Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
MOV %RCX,%RBX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
NOPW %CS:(%RAX,%RAX,1) | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
ADD $0x40,%ESI | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
CMP %R12,%RDI | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
LEA 0x1(%RDI),%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
JE 412a3b <kineticEnergy.extracted+0x7b> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
LEA (%RDI,%RAX,1),%RDX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
MOV (%RBX,%RDX,4),%R14D | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
TEST %R14D,%R14D | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 2 | 0.20 |
JLE 412b10 <kineticEnergy.extracted+0x150> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
LEA (%RAX,%RDI,1),%R10D | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1-2 | 0.20 |
SAL $0x6,%R10D | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0-2 | 0.50 |
MOV -0x58(%RBP),%R8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
MOV 0x20(%R8),%RDX | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
MOV 0x28(%R8),%R8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
MOV 0x10(%RDX),%R11 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
MOV 0x20(%RDX),%R9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.33 |
MOV %R14D,%R13D | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
AND $-0x8,%R13D | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1-2 | 0.20 |
JE 412c80 <kineticEnergy.extracted+0x2c0> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
MOV %RBX,%RCX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
MOV %ESI,%R15D | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
SAL $0x2,%R15 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0-2 | 0.50 |
LEA -0x1(%R13),%EBX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1-2 | 0.20 |
ADD %R11,%R15 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
VXORPD %XMM10,%XMM10,%XMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VMOVD %R10D,%XMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
XOR %EDX,%EDX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
NOPW %CS:(%RAX,%RAX,1) | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VEXTRACTF64X4 $0x1,%ZMM10,%YMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VADDPD %ZMM11,%ZMM10,%ZMM10 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMOVAPD %XMM10,%XMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VEXTRACTF128 $0x1,%YMM10,%XMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VADDPD %XMM10,%XMM11,%XMM10 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VSHUFPD $0x1,%XMM10,%XMM10,%XMM11 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VADDSD %XMM11,%XMM10,%XMM10 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VADDSD %XMM0,%XMM10,%XMM0 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
CMP %R13D,%R14D | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
JE 412b00 <kineticEnergy.extracted+0x140> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
VPBROADCASTD %R14D,%YMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
MOV %RCX,%RBX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
JMP 412c89 <kineticEnergy.extracted+0x2c9> | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5.84 |
VPBROADCASTD %R14D,%YMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
XOR %R13D,%R13D | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPBROADCASTD %R13D,%YMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSUBD %YMM11,%YMM10,%YMM10 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
VPCMPNLEUD %YMM1,%YMM10,%K1 | |||||||||||||||
KORTESTB %K1,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
JE 412d80 <kineticEnergy.extracted+0x3c0> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
VPCMPNLEUD %YMM2,%YMM10,%K2 | |||||||||||||||
VPBROADCASTQ %R10,%ZMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPADDD %YMM2,%YMM11,%YMM11 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
VPMOVSXDQ %YMM11,%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPADDQ %ZMM11,%ZMM10,%ZMM10 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VPSLLQ $0x3,%ZMM10,%ZMM11 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2-4 | 1 |
VPSLLQ $0x4,%ZMM10,%ZMM10 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2-4 | 1 |
VPADDQ %ZMM11,%ZMM10,%ZMM10 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VPXOR %XMM11,%XMM11,%XMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
KMOVQ %K2,%K3 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
VGATHERQPD (%R9,%ZMM10,1),%ZMM11{%K3} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
MOVSXD %R13D,%RDX | 1 | 0 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0.33 | 0 | 1 | 0.33 |
ADD %R10,%RDX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
VMOVDQU32 (%R11,%RDX,4),%YMM12{%K2}{z} | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.33 |
VMOVDQA32 %YMM12,%YMM9{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %ZMM11,%ZMM8{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VXORPD %XMM11,%XMM11,%XMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
KMOVQ %K2,%K3 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
VGATHERQPD 0x8(%R9,%ZMM10,1),%ZMM11{%K3} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VPXOR %XMM12,%XMM12,%XMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
KMOVQ %K2,%K3 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
VGATHERQPD 0x10(%R9,%ZMM10,1),%ZMM12{%K3} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VMULPD %ZMM8,%ZMM8,%ZMM10 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVAPD %ZMM11,%ZMM7{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD231PD %ZMM7,%ZMM7,%ZMM10 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVAPD %ZMM12,%ZMM6{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VPMOVSXDQ %YMM9,%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSLLQ $0x4,%ZMM11,%ZMM11 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2-4 | 1 |
VXORPD %XMM12,%XMM12,%XMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
KMOVQ %K2,%K3 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
VGATHERQPD 0x8(%R8,%ZMM11,1),%ZMM12{%K3} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VFMADD231PD %ZMM6,%ZMM6,%ZMM10 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD %ZMM3,%ZMM10,%ZMM10 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVAPD %ZMM12,%ZMM5{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VDIVPD %ZMM5,%ZMM10,%ZMM10 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
JMP 412d85 <kineticEnergy.extracted+0x3c5> | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5.84 |
VPXOR %XMM10,%XMM10,%XMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VMOVAPD %ZMM10,%ZMM10{%K1}{z} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VEXTRACTF64X4 $0x1,%ZMM10,%YMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VADDPD %ZMM11,%ZMM10,%ZMM10 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMOVAPD %XMM10,%XMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VEXTRACTF128 $0x1,%YMM10,%XMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VADDPD %XMM10,%XMM11,%XMM10 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VSHUFPD $0x1,%XMM10,%XMM10,%XMM11 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VADDSD %XMM11,%XMM10,%XMM10 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VADDSD %XMM0,%XMM10,%XMM0 | 1 | 0 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
JMP 412b10 <kineticEnergy.extracted+0x150> | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.08 |
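Each 8-atom step issues three VGATHERQPD loads for the momentum components plus a fourth for the species mass, because the momenta are stored interleaved as p[iOff][0..2]. A hedged sketch of a structure-of-arrays layout that would allow contiguous vector loads instead is shown below; the MomentumSoA type and the mom/px/py/pz names are hypothetical and are not existing CoMD fields:

```c
/* Hedged sketch only: structure-of-arrays momenta. real_t, MAXATOMS and
 * the s->... fields come from the source snippet above; MomentumSoA,
 * mom, px, py and pz are hypothetical names for this illustration. */
typedef struct
{
   real_t* px;   /* x-momentum, indexed by iOff */
   real_t* py;   /* y-momentum */
   real_t* pz;   /* z-momentum */
} MomentumSoA;

/* Inner reduction with unit-stride component accesses. */
for (int iOff = MAXATOMS*iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ii++, iOff++)
{
   int iSpecies = s->atoms->iSpecies[iOff];
   real_t invMass = 0.5 / s->species[iSpecies].mass;
   kenergy += ( mom->px[iOff]*mom->px[iOff] +
                mom->py[iOff]*mom->py[iOff] +
                mom->pz[iOff]*mom->pz[iOff] ) * invMass;
}
```

With contiguous px/py/pz arrays the three momentum gathers (0-29 cycle latency, 2.67-cycle reciprocal throughput each in the table above) could become plain unit-stride vector loads.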