Loop Id: 323 | Module: exec | Source: calc_dt_kernel.f90:94-129 [...] | Coverage: 3.34% |
---|
0x43f2b0 VMOVUPD (%R11,%RCX,8),%ZMM6 [12] |
0x43f2b7 VMINPD %ZMM29,%ZMM6,%ZMM6 |
0x43f2bd VMOVUPD (%RDX,%RCX,8),%ZMM7 [2] |
0x43f2c4 VADDPD %ZMM7,%ZMM7,%ZMM7 |
0x43f2ca VDIVPD (%RDI,%RCX,8),%ZMM7,%ZMM7 [6] |
0x43f2d1 VMULPD %ZMM6,%ZMM30,%ZMM6 |
0x43f2d7 VMOVUPD (%RAX,%RCX,8),%ZMM8 [1] |
0x43f2de VFMADD231PD %ZMM8,%ZMM8,%ZMM7 |
0x43f2e4 VSQRTPD %ZMM7,%ZMM7 |
0x43f2ea VMAXPD %ZMM2,%ZMM7,%ZMM7 |
0x43f2f0 VDIVPD %ZMM7,%ZMM6,%ZMM6 |
0x43f2f6 VMOVUPD -0x8(%RBX,%RCX,8),%ZMM7 [9] |
0x43f301 VMOVUPD (%RBX,%RCX,8),%ZMM8 [9] |
0x43f308 VADDPD -0x8(%R10,%RCX,8),%ZMM7,%ZMM7 [11] |
0x43f313 VMULPD -0x8(%R14,%RCX,8),%ZMM7,%ZMM7 [8] |
0x43f31e VADDPD (%R10,%RCX,8),%ZMM8,%ZMM8 [11] |
0x43f325 VMULPD (%R14,%RCX,8),%ZMM8,%ZMM8 [8] |
0x43f32c VMOVUPD (%R9,%RCX,8),%ZMM9 [7] |
0x43f333 VADDPD %ZMM9,%ZMM9,%ZMM10 |
0x43f339 VMULPD %ZMM31,%ZMM10,%ZMM11 |
0x43f33f VANDPD %ZMM3,%ZMM7,%ZMM12 |
0x43f345 VANDPD %ZMM3,%ZMM8,%ZMM13 |
0x43f34b VMULPD %ZMM2,%ZMM9,%ZMM9 |
0x43f351 VCMPPD $0x2,%ZMM13,%ZMM9,%K1 |
0x43f358 VBLENDMPD %ZMM13,%ZMM9,%ZMM13{%K1} |
0x43f35e VCMPPD $0x2,%ZMM12,%ZMM13,%K1 |
0x43f365 VMOVAPD %ZMM12,%ZMM13{%K1} |
0x43f36b VDIVPD %ZMM13,%ZMM11,%ZMM11 |
0x43f371 VMOVUPD (%R15,%RCX,8),%ZMM12 [3] |
0x43f378 VADDPD -0x8(%R15,%RCX,8),%ZMM12,%ZMM12 [3] |
0x43f383 VMULPD (%R13,%RCX,8),%ZMM12,%ZMM12 [4] |
0x43f38b VMOVUPD (%R12,%RCX,8),%ZMM13 [5] |
0x43f392 VADDPD -0x8(%R12,%RCX,8),%ZMM13,%ZMM13 [5] |
0x43f39d VMULPD (%R8,%RCX,8),%ZMM13,%ZMM13 [10] |
0x43f3a4 VADDPD %ZMM12,%ZMM7,%ZMM7 |
0x43f3aa VSUBPD %ZMM7,%ZMM8,%ZMM7 |
0x43f3b0 VADDPD %ZMM13,%ZMM7,%ZMM7 |
0x43f3b6 VMULPD %ZMM1,%ZMM10,%ZMM8 |
0x43f3bc VANDPD %ZMM3,%ZMM12,%ZMM12 |
0x43f3c2 VANDPD %ZMM3,%ZMM13,%ZMM13 |
0x43f3c8 VCMPPD $0x2,%ZMM13,%ZMM9,%K1 |
0x43f3cf VMOVAPD %ZMM13,%ZMM9{%K1} |
0x43f3d5 VCMPPD $0x2,%ZMM12,%ZMM9,%K1 |
0x43f3dc VMOVAPD %ZMM12,%ZMM9{%K1} |
0x43f3e2 VDIVPD %ZMM9,%ZMM8,%ZMM8 |
0x43f3e8 VDIVPD %ZMM10,%ZMM7,%ZMM7 |
0x43f3ee VMOVAPD %ZMM28,%ZMM9 |
0x43f3f4 VCMPPD $0x1,%ZMM4,%ZMM7,%K1 |
0x43f3fb VMOVAPD %ZMM5,%ZMM28 |
0x43f401 VDIVPD %ZMM7,%ZMM0,%ZMM28{%K1} |
0x43f407 VCMPPD $0x2,%ZMM28,%ZMM8,%K1 |
0x43f40e VMOVAPD %ZMM8,%ZMM28{%K1} |
0x43f414 VCMPPD $0x2,%ZMM28,%ZMM11,%K1 |
0x43f41b VMOVAPD %ZMM11,%ZMM28{%K1} |
0x43f421 VCMPPD $0x2,%ZMM28,%ZMM6,%K1 |
0x43f428 VMOVAPD %ZMM6,%ZMM28{%K1} |
0x43f42e VCMPPD $0x2,%ZMM28,%ZMM9,%K1 |
0x43f435 VMOVAPD %ZMM9,%ZMM28{%K1} |
0x43f43b ADD $0x8,%RCX |
0x43f43f CMP %RSI,%RCX |
0x43f442 JB 43f2b0 |
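As a quick reading aid for the listing above: each iteration advances the index register by 8 (ADD $0x8,%RCX), i.e. 8 double-precision elements per 512-bit ZMM vector, and issues 17 vector loads of 64 bytes each, so 17 x 64 B = 1088 B per iteration, matching the "Bytes loaded" figure reported below. The closing VCMPPD/VMOVAPD chain keeps the running minimum of the timestep candidates in ZMM28, which corresponds to the MIN reduction into dt_min_val at source line 129.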
/home/eoseret/qaas_runs_CPU_9468/171-152-3172/intel/CloverLeafFC/build/CloverLeafFC/CloverLeaf_ref/kernels/calc_dt_kernel.f90: 94 - 129 |
-------------------------------------------------------------------------------- |
94: DO j=x_min,x_max |
95: |
96: dsx=celldx(j) |
97: dsy=celldy(k) |
98: |
99: cc=soundspeed(j,k)*soundspeed(j,k) |
100: cc=cc+2.0_8*viscosity_a(j,k)/density0(j,k) |
101: cc=MAX(SQRT(cc),g_small) |
102: |
103: dtct=dtc_safe*MIN(dsx,dsy)/cc |
104: |
105: div=0.0 |
106: |
107: dv1=(xvel0(j ,k)+xvel0(j ,k+1))*xarea(j ,k) |
108: dv2=(xvel0(j+1,k)+xvel0(j+1,k+1))*xarea(j+1,k) |
109: |
110: div=div+dv2-dv1 |
111: |
112: dtut=dtu_safe*2.0_8*volume(j,k)/MAX(ABS(dv1),ABS(dv2),g_small*volume(j,k)) |
113: |
114: dv1=(yvel0(j,k )+yvel0(j+1,k ))*yarea(j,k ) |
115: dv2=(yvel0(j,k+1)+yvel0(j+1,k+1))*yarea(j,k+1) |
116: |
117: div=div+dv2-dv1 |
118: |
119: dtvt=dtv_safe*2.0_8*volume(j,k)/MAX(ABS(dv1),ABS(dv2),g_small*volume(j,k)) |
120: |
121: div=div/(2.0_8*volume(j,k)) |
122: |
123: IF(div.LT.-g_small)THEN |
[...] |
129: dt_min_val=MIN(dt_min_val,dtct,dtut,dtvt,dtdivt) |
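Below is a minimal, self-contained Fortran sketch of the per-cell arithmetic shown above. The parameter values (dtc_safe, dtu_safe, dtv_safe, g_small) and the sample inputs are assumed purely for illustration, and the dtdivt branch elided at lines 124-128 is not reproduced. Comments mark the five explicit divisions and the single SQRT; a sixth, masked division appears in the assembly (VDIVPD %ZMM7,%ZMM0,%ZMM28{%K1}) for the div < -g_small case, which is what drives the DIV/SQRT pressure reported below.

program dt_candidates_sketch
  implicit none
  ! Assumed safety factors and threshold -- illustrative values only
  real(8), parameter :: dtc_safe = 0.7_8, dtu_safe = 0.5_8
  real(8), parameter :: dtv_safe = 0.5_8, g_small  = 1.0e-16_8
  real(8) :: dsx, dsy, soundspeed, viscosity_a, density0, volume
  real(8) :: dv1x, dv2x, dv1y, dv2y
  real(8) :: cc, dtct, dtut, dtvt, div

  ! Illustrative per-cell inputs (stand-ins for celldx/celldy and the
  ! xvel0*xarea / yvel0*yarea fluxes of the real kernel)
  dsx = 1.0_8;  dsy = 1.0_8;  soundspeed = 0.5_8
  viscosity_a = 0.1_8;  density0 = 1.0_8;  volume = 1.0_8
  dv1x = 0.2_8;  dv2x = 0.3_8;  dv1y = -0.1_8;  dv2y = 0.1_8

  cc   = soundspeed*soundspeed
  cc   = cc + 2.0_8*viscosity_a/density0                    ! division 1
  cc   = MAX(SQRT(cc), g_small)                             ! the single SQRT
  dtct = dtc_safe*MIN(dsx, dsy)/cc                          ! division 2

  dtut = dtu_safe*2.0_8*volume &
         / MAX(ABS(dv1x), ABS(dv2x), g_small*volume)        ! division 3
  dtvt = dtv_safe*2.0_8*volume &
         / MAX(ABS(dv1y), ABS(dv2y), g_small*volume)        ! division 4

  div  = ((dv2x - dv1x) + (dv2y - dv1y))/(2.0_8*volume)     ! division 5
  ! The elided branch (lines 124-128) adds one more, conditional division
  ! for dtdivt when div < -g_small; it shows up masked in the assembly.

  print *, 'dtct =', dtct, ' dtut =', dtut, ' dtvt =', dtvt, ' div =', div
end program dt_candidates_sketch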
Path / |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.00 |
CQA speedup if FP arith vectorized | 1.00 |
CQA speedup if fully vectorized | 1.00 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 4.15 |
Bottlenecks | P0 |
Function | calc_dt_kernel_.DIR.OMP.PARALLEL.2 |
Source | calc_dt_kernel.f90:94-123,calc_dt_kernel.f90:129-129 |
Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
Source loop unroll confidence level | max |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 114.00 |
CQA cycles if no scalar integer | 114.00 |
CQA cycles if FP arith vectorized | 114.00 |
CQA cycles if fully vectorized | 114.00 |
Front-end cycles | 13.83 |
DIV/SQRT cycles | 27.50 |
P0 cycles | 9.00 |
P1 cycles | 5.67 |
P2 cycles | 5.67 |
P3 cycles | 0.00 |
P4 cycles | 27.50 |
P5 cycles | 1.00 |
P6 cycles | 0.00 |
P7 cycles | 0.00 |
P8 cycles | 0.00 |
P9 cycles | 0.00 |
P10 cycles | 5.67 |
P11 cycles | 114.00 |
Inter-iter dependencies cycles | 1 - 2 |
FE+BE cycles (UFS) | 114.25 - 114.78 |
Stall cycles (UFS) | 99.57 - 100.10 |
Nb insns | 61.00 |
Nb uops | 75.00 |
Nb loads | 17.00 |
Nb stores | 0.00 |
Nb stack references | 0.00 |
FLOP/cycle | 1.82 |
Nb FLOP add-sub | 72.00 |
Nb FLOP mul | 64.00 |
Nb FLOP fma | 8.00 |
Nb FLOP div | 48.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 8.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 9.54 |
Bytes prefetched | 0.00 |
Bytes loaded | 1088.00 |
Bytes stored | 0.00 |
Stride 0 | 0.00 |
Stride 1 | 7.00 |
Stride n | 5.00 |
Stride unknown | 0.00 |
Stride indirect | 0.00 |
Vectorization ratio all | 100.00 |
Vectorization ratio load | 100.00 |
Vectorization ratio store | NA |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 100.00 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | 100.00 |
Vectorization ratio other | 100.00 |
Vector-efficiency ratio all | 100.00 |
Vector-efficiency ratio load | 100.00 |
Vector-efficiency ratio store | NA |
Vector-efficiency ratio mul | 100.00 |
Vector-efficiency ratio add_sub | 100.00 |
Vector-efficiency ratio fma | 100.00 |
Vector-efficiency ratio div_sqrt | 100.00 |
Vector-efficiency ratio other | 100.00 |
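As a cross-check, the derived rates follow directly from the counts above (counting each FMA as two FLOPs):
FLOP per iteration = 72 + 64 + 2*8 + 48 + 8 = 208, and 208 / 114 cycles = 1.82 FLOP/cycle
Bytes/cycle = 1088 bytes loaded / 114 cycles = 9.54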
Path / |
Function | calc_dt_kernel_.DIR.OMP.PARALLEL.2 |
Source file and lines | calc_dt_kernel.f90:94-129 |
Module | exec |
nb instructions | 61 |
nb uops | 75 |
loop length | 408 |
used x86 registers | 14 |
used mmx registers | 0 |
used xmm registers | 0 |
used ymm registers | 0 |
used zmm registers | 18 |
nb stack references | 0 |
ADD-SUB / MUL ratio | 1.13 |
micro-operation queue | 13.83 cycles |
front end | 13.83 cycles |
Port | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 |
---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 27.50 | 0.00 | 5.67 | 5.67 | 0.00 | 27.50 | 1.00 | 0.00 | 0.00 | 0.00 | 0.00 | 5.67 |
cycles | 27.50 | 9.00 | 5.67 | 5.67 | 0.00 | 27.50 | 1.00 | 0.00 | 0.00 | 0.00 | 0.00 | 5.67 |
Cycles executing div or sqrt instructions | 114.00 |
Longest recurrence chain latency (RecMII) | 1.00-2.00 |
FE+BE cycles | 114.25-114.78 |
Stall cycles | 99.57-100.10 |
ROB full (events) | 10.00-1.98 |
RS full (events) | 0.04-0.02 |
PRF_FLOAT full (events) | 94.36-102.90 |
Front-end | 13.83 |
Dispatch | 27.50 |
DIV/SQRT | 114.00 |
Data deps. | 1.00-2.00 |
Overall L1 | 114.00 |
Vectorization ratios |
all | 100% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 100% |
Vector efficiency ratios |
all | 100% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 100% |
Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
VMOVUPD (%R11,%RCX,8),%ZMM6 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VMINPD %ZMM29,%ZMM6,%ZMM6 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD (%RDX,%RCX,8),%ZMM7 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VADDPD %ZMM7,%ZMM7,%ZMM7 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VDIVPD (%RDI,%RCX,8),%ZMM7,%ZMM7 | 4 | 2.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 22-24 | 16 |
VMULPD %ZMM6,%ZMM30,%ZMM6 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD (%RAX,%RCX,8),%ZMM8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VFMADD231PD %ZMM8,%ZMM8,%ZMM7 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VSQRTPD %ZMM7,%ZMM7 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 23-32 | 18 |
VMAXPD %ZMM2,%ZMM7,%ZMM7 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VDIVPD %ZMM7,%ZMM6,%ZMM6 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VMOVUPD -0x8(%RBX,%RCX,8),%ZMM7 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VMOVUPD (%RBX,%RCX,8),%ZMM8 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VADDPD -0x8(%R10,%RCX,8),%ZMM7,%ZMM7 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.65 |
VMULPD -0x8(%R14,%RCX,8),%ZMM7,%ZMM7 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VADDPD (%R10,%RCX,8),%ZMM8,%ZMM8 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.65 |
VMULPD (%R14,%RCX,8),%ZMM8,%ZMM8 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVUPD (%R9,%RCX,8),%ZMM9 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VADDPD %ZMM9,%ZMM9,%ZMM10 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMULPD %ZMM31,%ZMM10,%ZMM11 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VANDPD %ZMM3,%ZMM7,%ZMM12 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VANDPD %ZMM3,%ZMM8,%ZMM13 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VMULPD %ZMM2,%ZMM9,%ZMM9 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VCMPPD $0x2,%ZMM13,%ZMM9,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VBLENDMPD %ZMM13,%ZMM9,%ZMM13{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VCMPPD $0x2,%ZMM12,%ZMM13,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM12,%ZMM13{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VDIVPD %ZMM13,%ZMM11,%ZMM11 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VMOVUPD (%R15,%RCX,8),%ZMM12 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VADDPD -0x8(%R15,%RCX,8),%ZMM12,%ZMM12 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.65 |
VMULPD (%R13,%RCX,8),%ZMM12,%ZMM12 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VMOVUPD (%R12,%RCX,8),%ZMM13 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VADDPD -0x8(%R12,%RCX,8),%ZMM13,%ZMM13 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0.65 |
VMULPD (%R8,%RCX,8),%ZMM13,%ZMM13 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 4 | 0.50 |
VADDPD %ZMM12,%ZMM7,%ZMM7 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VSUBPD %ZMM7,%ZMM8,%ZMM7 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VADDPD %ZMM13,%ZMM7,%ZMM7 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMULPD %ZMM1,%ZMM10,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VANDPD %ZMM3,%ZMM12,%ZMM12 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VANDPD %ZMM3,%ZMM13,%ZMM13 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VCMPPD $0x2,%ZMM13,%ZMM9,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM13,%ZMM9{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VCMPPD $0x2,%ZMM12,%ZMM9,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM12,%ZMM9{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VDIVPD %ZMM9,%ZMM8,%ZMM8 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VDIVPD %ZMM10,%ZMM7,%ZMM7 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VMOVAPD %ZMM28,%ZMM9 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VCMPPD $0x1,%ZMM4,%ZMM7,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM5,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VDIVPD %ZMM7,%ZMM0,%ZMM28{%K1} | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VCMPPD $0x2,%ZMM28,%ZMM8,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM8,%ZMM28{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VCMPPD $0x2,%ZMM28,%ZMM11,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM11,%ZMM28{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VCMPPD $0x2,%ZMM28,%ZMM6,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM6,%ZMM28{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VCMPPD $0x2,%ZMM28,%ZMM9,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM9,%ZMM28{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
ADD $0x8,%RCX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
CMP %RSI,%RCX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
JB 43f2b0 <calc_dt_kernel_module_mp_calc_dt_kernel_.DIR.OMP.PARALLEL.2+0x4f0> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
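The 114-cycle bound matches the divider-pipe pressure exactly: the loop body contains 6 VDIVPD (reciprocal throughput 16 cycles each, per the table above) and 1 VSQRTPD (18 cycles), giving 6 x 16 + 18 = 114 cycles per iteration, the value reported as "Cycles executing div or sqrt instructions" and as the overall CQA cycle estimate. This is also consistent with the reported 4.15x "CQA speedup if next bottleneck killed": removing the DIV/SQRT limit would leave the 27.50-cycle dispatch bound, and 114 / 27.50 is approximately 4.15.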