Loop Id: 1310 | Module: bench | Source: t1fv_8.c:141-196 [...] | Coverage: 25.81% |
---|
0x1b6e20 VMOVUPD (%RDI),%YMM3 [1] |
0x1b6e24 MOV -0x18(%RCX),%R10 [2] |
0x1b6e28 VMOVUPD (%RDI,%R10,8),%YMM2 [1] |
0x1b6e2e VMOVUPD -0xc0(%RDX),%YMM5 [3] |
0x1b6e36 VMOVUPD -0xa0(%RDX),%YMM4 [3] |
0x1b6e3e VMOVUPD -0x80(%RDX),%YMM6 [3] |
0x1b6e43 VMOVUPD -0x60(%RDX),%YMM7 [3] |
0x1b6e48 VMOVDDUP %YMM7,%YMM8 |
0x1b6e4c VSHUFPD $0x5,%YMM2,%YMM2,%YMM9 |
0x1b6e51 VSHUFPD $0xf,%YMM7,%YMM7,%YMM7 |
0x1b6e56 VMULPD %YMM7,%YMM9,%YMM7 |
0x1b6e5a VFMSUBADD231PD %YMM8,%YMM2,%YMM7 |
0x1b6e5f VSUBPD %YMM7,%YMM3,%YMM2 |
0x1b6e63 VADDPD %YMM7,%YMM3,%YMM3 |
0x1b6e67 MOV -0x30(%RCX),%R11 [2] |
0x1b6e6b MOV -0x28(%RCX),%RBX [2] |
0x1b6e6f VMOVUPD (%RDI,%RBX,8),%YMM7 [1] |
0x1b6e74 VMOVDDUP %YMM4,%YMM8 |
0x1b6e78 VSHUFPD $0x5,%YMM7,%YMM7,%YMM9 |
0x1b6e7d VSHUFPD $0xf,%YMM4,%YMM4,%YMM4 |
0x1b6e82 VMULPD %YMM4,%YMM9,%YMM9 |
0x1b6e86 VFMSUBADD231PD %YMM8,%YMM7,%YMM9 |
0x1b6e8b MOV -0x8(%RCX),%RBX [2] |
0x1b6e8f VMOVUPD (%RDI,%RBX,8),%YMM4 [1] |
0x1b6e94 VMOVUPD -0x20(%RDX),%YMM7 [3] |
0x1b6e99 VMOVDDUP %YMM7,%YMM8 |
0x1b6e9d VSHUFPD $0x5,%YMM4,%YMM4,%YMM10 |
0x1b6ea2 VSHUFPD $0xf,%YMM7,%YMM7,%YMM7 |
0x1b6ea7 VMULPD %YMM7,%YMM10,%YMM7 |
0x1b6eab VFMSUBADD231PD %YMM8,%YMM4,%YMM7 |
0x1b6eb0 VSUBPD %YMM7,%YMM9,%YMM4 |
0x1b6eb4 VADDPD %YMM7,%YMM9,%YMM7 |
0x1b6eb8 VMOVUPD (%RDI,%R11,8),%YMM8 [1] |
0x1b6ebe VMOVDDUP %YMM5,%YMM9 |
0x1b6ec2 VSHUFPD $0x5,%YMM8,%YMM8,%YMM10 |
0x1b6ec8 VSHUFPD $0xf,%YMM5,%YMM5,%YMM5 |
0x1b6ecd VMULPD %YMM5,%YMM10,%YMM5 |
0x1b6ed1 MOV -0x10(%RCX),%R11 [2] |
0x1b6ed5 VMOVUPD (%RDI,%R11,8),%YMM10 [1] |
0x1b6edb VFMSUBADD231PD %YMM9,%YMM8,%YMM5 |
0x1b6ee0 VMOVUPD -0x40(%RDX),%YMM8 [3] |
0x1b6ee5 VMOVDDUP %YMM8,%YMM9 |
0x1b6eea VSHUFPD $0x5,%YMM10,%YMM10,%YMM11 |
0x1b6ef0 VSHUFPD $0xf,%YMM8,%YMM8,%YMM8 |
0x1b6ef6 VMULPD %YMM11,%YMM8,%YMM8 |
0x1b6efb VFMSUBADD231PD %YMM9,%YMM10,%YMM8 |
0x1b6f00 VSUBPD %YMM8,%YMM5,%YMM9 |
0x1b6f05 MOV (%RCX),%R11 [2] |
0x1b6f08 VMOVUPD (%RDI,%R11,8),%YMM10 [1] |
0x1b6f0e VADDPD %YMM5,%YMM8,%YMM5 |
0x1b6f12 VMOVUPD (%RDX),%YMM8 [3] |
0x1b6f16 VMOVDDUP %YMM8,%YMM11 |
0x1b6f1b VSHUFPD $0x5,%YMM10,%YMM10,%YMM12 |
0x1b6f21 VSHUFPD $0xf,%YMM8,%YMM8,%YMM8 |
0x1b6f27 VMULPD %YMM12,%YMM8,%YMM8 |
0x1b6f2c VFMSUBADD231PD %YMM11,%YMM10,%YMM8 |
0x1b6f31 MOV -0x20(%RCX),%R11 [2] |
0x1b6f35 VMOVUPD (%RDI,%R11,8),%YMM10 [1] |
0x1b6f3b VMOVDDUP %YMM6,%YMM11 |
0x1b6f3f VSHUFPD $0x5,%YMM10,%YMM10,%YMM12 |
0x1b6f45 VSHUFPD $0xf,%YMM6,%YMM6,%YMM6 |
0x1b6f4a VMULPD %YMM6,%YMM12,%YMM6 |
0x1b6f4e VFMSUBADD231PD %YMM11,%YMM10,%YMM6 |
0x1b6f53 VSUBPD %YMM6,%YMM8,%YMM10 |
0x1b6f57 VADDPD %YMM6,%YMM8,%YMM6 |
0x1b6f5b VADDPD %YMM7,%YMM3,%YMM8 |
0x1b6f5f VADDPD %YMM6,%YMM5,%YMM11 |
0x1b6f63 VSUBPD %YMM11,%YMM8,%YMM12 |
0x1b6f68 VMOVUPD %YMM12,(%RDI,%R10,8) [1] |
0x1b6f6e VADDPD %YMM11,%YMM8,%YMM8 |
0x1b6f73 VMOVUPD %YMM8,(%RDI) [1] |
0x1b6f77 VSUBPD %YMM7,%YMM3,%YMM3 |
0x1b6f7b VSUBPD %YMM5,%YMM6,%YMM5 |
0x1b6f7f VXORPD %YMM0,%YMM5,%YMM5 |
0x1b6f83 VSHUFPS $0x4e,%YMM5,%YMM5,%YMM5 |
0x1b6f88 MOV -0x8(%RCX),%R10 [2] |
0x1b6f8c VSUBPD %YMM5,%YMM3,%YMM6 |
0x1b6f90 VMOVUPD %YMM6,(%RDI,%R10,8) [1] |
0x1b6f96 MOV -0x28(%RCX),%R10 [2] |
0x1b6f9a VADDPD %YMM5,%YMM3,%YMM3 |
0x1b6f9e VMOVUPD %YMM3,(%RDI,%R10,8) [1] |
0x1b6fa4 VADDPD %YMM10,%YMM9,%YMM3 |
0x1b6fa9 VMULPD %YMM1,%YMM3,%YMM3 |
0x1b6fad VADDPD %YMM3,%YMM2,%YMM5 |
0x1b6fb1 VSUBPD %YMM3,%YMM2,%YMM2 |
0x1b6fb5 VSUBPD %YMM9,%YMM10,%YMM3 |
0x1b6fba VMULPD %YMM1,%YMM3,%YMM3 |
0x1b6fbe VSUBPD %YMM4,%YMM3,%YMM6 |
0x1b6fc2 VXORPD %YMM0,%YMM6,%YMM6 |
0x1b6fc6 VSHUFPS $0x4e,%YMM6,%YMM6,%YMM6 |
0x1b6fcb VADDPD %YMM3,%YMM4,%YMM3 |
0x1b6fcf VXORPD %YMM0,%YMM3,%YMM3 |
0x1b6fd3 VSHUFPS $0x4e,%YMM3,%YMM3,%YMM3 |
0x1b6fd8 MOV (%RCX),%R10 [2] |
0x1b6fdb VSUBPD %YMM6,%YMM5,%YMM4 |
0x1b6fdf VMOVUPD %YMM4,(%RDI,%R10,8) [1] |
0x1b6fe5 MOV -0x20(%RCX),%R10 [2] |
0x1b6fe9 VADDPD %YMM3,%YMM2,%YMM4 |
0x1b6fed VMOVUPD %YMM4,(%RDI,%R10,8) [1] |
0x1b6ff3 MOV -0x30(%RCX),%R10 [2] |
0x1b6ff7 VADDPD %YMM6,%YMM5,%YMM4 |
0x1b6ffb VMOVUPD %YMM4,(%RDI,%R10,8) [1] |
0x1b7001 MOV -0x10(%RCX),%R10 [2] |
0x1b7005 VSUBPD %YMM3,%YMM2,%YMM2 |
0x1b7009 VMOVUPD %YMM2,(%RDI,%R10,8) [1] |
0x1b700f ADD $0x2,%R8 |
0x1b7013 ADD %RAX,%RDI |
0x1b7016 ADD $0xe0,%RDX |
0x1b701d ADD %RSI,%RCX |
0x1b7020 CMP %R9,%R8 |
0x1b7023 JL 1b6e20 |
/home/fmusial/FFTW_Benchmarks/fftw-3.3.10-clang/dft/simd/avx2/../../../simd-support/simd-avx2.h: 85 - 315 |
-------------------------------------------------------------------------------- |
85: SUFF(_mm256_storeu_p)(x, v); |
[...] |
252: return VPERM1(x, DS(SHUFVALD(1, 0), SHUFVALS(1, 0, 3, 2))); |
[...] |
280: return VXOR(pmpm.v, x); |
[...] |
315: return SUFF(_mm256_fmsubadd_p)(sr, VDUPL(tx), VMUL(FLIP_RI(sr), VDUPH(tx))); |
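The VMOVDDUP / VSHUFPD $0x5 / VSHUFPD $0xf / VMULPD / VFMSUBADD231PD groups in the listing above are the inlined expansion of the fmsubadd line at simd-avx2.h:315. A minimal standalone sketch of that operation, assuming FFTW's interleaved [re, im] complex-double layout (two complex doubles per YMM register, one per 128-bit lane); the helper name zmulj_sketch is illustrative, not FFTW's:

#include <immintrin.h>

/* sr = a+bi, tx = c+di per 128-bit lane; computes sr * conj(tx).
 * Compile with -mavx2 -mfma. */
static __m256d zmulj_sketch(__m256d sr, __m256d tx)
{
    __m256d re = _mm256_movedup_pd(tx);          /* VDUPL: [c, c] per lane  */
    __m256d sw = _mm256_shuffle_pd(sr, sr, 0x5); /* FLIP_RI: [b, a]         */
    __m256d im = _mm256_shuffle_pd(tx, tx, 0xf); /* VDUPH: [d, d]           */
    __m256d t  = _mm256_mul_pd(sw, im);          /* [b*d, a*d]              */
    /* fmsubadd adds in even slots and subtracts in odd slots:
     * [a*c + b*d, b*c - a*d] = (a+bi)(c-di)                                */
    return _mm256_fmsubadd_pd(sr, re, t);
}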
/home/fmusial/FFTW_Benchmarks/fftw-3.3.10-clang/dft/simd/avx2/./../common/t1fv_8.c: 141 - 196 |
-------------------------------------------------------------------------------- |
141: for (m = mb, W = W + (mb * ((TWVL / VL) * 14)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 14), MAKE_VOLATILE_STRIDE(8, rs)) { |
142: V T4, Tq, Tm, Tr, T9, Tt, Te, Tu, T1, T3, T2; |
143: T1 = LD(&(x[0]), ms, &(x[0])); |
144: T2 = LD(&(x[WS(rs, 4)]), ms, &(x[0])); |
145: T3 = BYTWJ(&(W[TWVL * 6]), T2); |
146: T4 = VSUB(T1, T3); |
147: Tq = VADD(T1, T3); |
148: { |
149: V Tj, Tl, Ti, Tk; |
150: Ti = LD(&(x[WS(rs, 2)]), ms, &(x[0])); |
151: Tj = BYTWJ(&(W[TWVL * 2]), Ti); |
152: Tk = LD(&(x[WS(rs, 6)]), ms, &(x[0])); |
153: Tl = BYTWJ(&(W[TWVL * 10]), Tk); |
154: Tm = VSUB(Tj, Tl); |
155: Tr = VADD(Tj, Tl); |
156: } |
157: { |
158: V T6, T8, T5, T7; |
159: T5 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)])); |
160: T6 = BYTWJ(&(W[0]), T5); |
161: T7 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)])); |
162: T8 = BYTWJ(&(W[TWVL * 8]), T7); |
163: T9 = VSUB(T6, T8); |
164: Tt = VADD(T6, T8); |
165: } |
166: { |
167: V Tb, Td, Ta, Tc; |
168: Ta = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)])); |
169: Tb = BYTWJ(&(W[TWVL * 12]), Ta); |
170: Tc = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)])); |
171: Td = BYTWJ(&(W[TWVL * 4]), Tc); |
172: Te = VSUB(Tb, Td); |
173: Tu = VADD(Tb, Td); |
174: } |
175: { |
176: V Ts, Tv, Tw, Tx; |
177: Ts = VADD(Tq, Tr); |
178: Tv = VADD(Tt, Tu); |
179: ST(&(x[WS(rs, 4)]), VSUB(Ts, Tv), ms, &(x[0])); |
180: ST(&(x[0]), VADD(Ts, Tv), ms, &(x[0])); |
181: Tw = VSUB(Tq, Tr); |
182: Tx = VBYI(VSUB(Tu, Tt)); |
183: ST(&(x[WS(rs, 6)]), VSUB(Tw, Tx), ms, &(x[0])); |
184: ST(&(x[WS(rs, 2)]), VADD(Tw, Tx), ms, &(x[0])); |
185: { |
186: V Tg, To, Tn, Tp, Tf, Th; |
187: Tf = VMUL(LDK(KP707106781), VADD(T9, Te)); |
188: Tg = VADD(T4, Tf); |
189: To = VSUB(T4, Tf); |
190: Th = VMUL(LDK(KP707106781), VSUB(Te, T9)); |
191: Tn = VBYI(VSUB(Th, Tm)); |
192: Tp = VBYI(VADD(Tm, Th)); |
193: ST(&(x[WS(rs, 7)]), VSUB(Tg, Tn), ms, &(x[WS(rs, 1)])); |
194: ST(&(x[WS(rs, 3)]), VADD(To, Tp), ms, &(x[WS(rs, 1)])); |
195: ST(&(x[WS(rs, 1)]), VADD(Tg, Tn), ms, &(x[WS(rs, 1)])); |
196: ST(&(x[WS(rs, 5)]), VSUB(To, Tp), ms, &(x[WS(rs, 1)])); |
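The VXORPD %YMM0 / VSHUFPS $0x4e pairs in the listing are VBYI, i.e. multiplication by i: a sign flip on the imaginary parts (VCONJ, simd-avx2.h:280) followed by a re/im swap (FLIP_RI, simd-avx2.h:252). A scalar sketch of the codelet's vector macros, assuming VL = 1 (one complex element per vector) and that BYTWJ applies the conjugated twiddle, consistent with the fmsubadd expansion shown earlier; the _sketch names are illustrative, not FFTW's:

#include <complex.h>

typedef double complex V1;

static V1 bytwj_sketch(V1 w, V1 x) { return conj(w) * x; } /* BYTWJ */
static V1 vbyi_sketch(V1 x)        { return I * x;       } /* VBYI  */

/* Lines 187-189 above in scalar form: the odd-index outputs combine
 * through the radix-8 constant 1/sqrt(2) (KP707106781). */
static void odd_terms_sketch(V1 T4, V1 T9, V1 Te, V1 *Tg, V1 *To)
{
    const double KP707106781 = 0.7071067811865476;
    V1 Tf = KP707106781 * (T9 + Te); /* line 187 */
    *Tg = T4 + Tf;                   /* line 188 */
    *To = T4 - Tf;                   /* line 189 */
}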
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►100.00+ | apply#0x296210 | dftw-direct.c:51 | bench |
○ | doit | fftw-bench.c:274 | bench |
○ | speed | | bench |
○ | bench_main | bench-main.c:91 | bench |
○ | __libc_init_first | | libc.so.6 |
○ | __libc_start_main | | libc.so.6 |
○ | _start | | bench |
Path / |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.15 |
CQA speedup if FP arith vectorized | 1.34 |
CQA speedup if fully vectorized | 2.30 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 1.15 |
Bottlenecks | micro-operation queue |
Function | t1fv_8#0x1b6dd0 |
Source | simd-avx2.h:85-85,simd-avx2.h:252-252,simd-avx2.h:280-280,simd-avx2.h:315-315,t1fv_8.c:141-196 |
Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
Source loop unroll confidence level | max |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 27.50 |
CQA cycles if no scalar integer | 24.00 |
CQA cycles if FP arith vectorized | 20.50 |
CQA cycles if fully vectorized | 11.95 |
Front-end cycles | 27.50 |
P0 cycles | 22.67 |
P1 cycles | 22.33 |
P2 cycles | 14.00 |
P3 cycles | 14.00 |
P4 cycles | 8.00 |
P5 cycles | 24.00 |
P6 cycles | 5.00 |
P7 cycles | 8.00 |
DIV/SQRT cycles | 0.00 |
Inter-iter dependencies cycles | 1 |
FE+BE cycles (UFS) | NA |
Stall cycles (UFS) | NA |
Nb insns | 111.00 |
Nb uops | 110.00 |
Nb loads | 28.00 |
Nb stores | 8.00 |
Nb stack references | 0.00 |
FLOP/cycle | 7.13 |
Nb FLOP add-sub | 104.00 |
Nb FLOP mul | 36.00 |
Nb FLOP fma | 28.00 |
Nb FLOP div | 0.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 30.55 |
Bytes prefetched | 0.00 |
Bytes loaded | 584.00 |
Bytes stored | 256.00 |
Stride 0 | 0.00 |
Stride 1 | 1.00 |
Stride n | 0.00 |
Stride unknown | 2.00 |
Stride indirect | 0.00 |
Vectorization ratio all | 100.00 |
Vectorization ratio load | 100.00 |
Vectorization ratio store | 100.00 |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 100.00 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | NA |
Vectorization ratio other | 100.00 |
Vector-efficiency ratio all | 48.10 |
Vector-efficiency ratio load | 50.00 |
Vector-efficiency ratio store | 50.00 |
Vector-efficiency ratio mul | 50.00 |
Vector-efficiency ratio add_sub | 50.00 |
Vector-efficiency ratio fma | 50.00 |
Vector-efficiency ratio div_sqrt | NA |
Vector-efficiency ratio other | 43.52 |
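The throughput figures above are mutually consistent: counting each FMA as two FLOPs, FLOP/cycle = (104 + 36 + 2 × 28) / 27.50 = 196 / 27.50 ≈ 7.13, and Bytes/cycle = (584 + 256) / 27.50 = 840 / 27.50 ≈ 30.55.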
Path / |
Function | t1fv_8#0x1b6dd0 |
Source file and lines | t1fv_8.c:141-196 |
Module | bench |
nb instructions | 111 |
nb uops | 110 |
loop length | 521 |
used x86 registers | 10 |
used mmx registers | 0 |
used xmm registers | 0 |
used ymm registers | 13 |
used zmm registers | 0 |
nb stack references | 0 |
ADD-SUB / MUL ratio | 2.89 |
micro-operation queue | 27.50 cycles |
front end | 27.50 cycles |
 | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 |
---|---|---|---|---|---|---|---|---|
uops | 22.67 | 22.33 | 14.00 | 14.00 | 8.00 | 24.00 | 5.00 | 8.00 |
cycles | 22.67 | 22.33 | 14.00 | 14.00 | 8.00 | 24.00 | 5.00 | 8.00 |
Cycles executing div or sqrt instructions | NA |
Longest recurrence chain latency (RecMII) | 1.00 |
Cycles summary |
Front-end | 27.50 |
Dispatch | 24.00 |
Data deps. | 1.00 |
Overall L1 | 27.50 |
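Overall L1 is consistent with taking the maximum of the three components: max(27.50, 24.00, 1.00) = 27.50 cycles, i.e. the loop is front-end bound, matching the reported bottleneck (micro-operation queue). The 24.00-cycle dispatch bound comes from port P5, which serves all 24 shuffle uops in the body (7 VMOVDDUP, 14 VSHUFPD, 3 VSHUFPS).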
Vectorization ratios |
all | 100% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | NA (no div/sqrt vectorizable/vectorized instructions) |
other | 100% |
Vector-efficiency ratios |
all | 48% |
load | 50% |
store | 50% |
mul | 50% |
add-sub | 50% |
fma | 50% |
div/sqrt | NA (no div/sqrt vectorizable/vectorized instructions) |
other | 43% |
Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | Latency | Recip. throughput | Vectorization |
---|---|---|---|---|---|---|---|---|---|---|---|---|
VMOVUPD (%RDI),%YMM3 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
MOV -0x18(%RCX),%R10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VMOVUPD (%RDI,%R10,8),%YMM2 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVUPD -0xc0(%RDX),%YMM5 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVUPD -0xa0(%RDX),%YMM4 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVUPD -0x80(%RDX),%YMM6 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVUPD -0x60(%RDX),%YMM7 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVDDUP %YMM7,%YMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (25.0%) |
VSHUFPD $0x5,%YMM2,%YMM2,%YMM9 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VSHUFPD $0xf,%YMM7,%YMM7,%YMM7 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VMULPD %YMM7,%YMM9,%YMM7 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VFMSUBADD231PD %YMM8,%YMM2,%YMM7 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VSUBPD %YMM7,%YMM3,%YMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VADDPD %YMM7,%YMM3,%YMM3 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
MOV -0x30(%RCX),%R11 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
MOV -0x28(%RCX),%RBX | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VMOVUPD (%RDI,%RBX,8),%YMM7 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVDDUP %YMM4,%YMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (25.0%) |
VSHUFPD $0x5,%YMM7,%YMM7,%YMM9 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VSHUFPD $0xf,%YMM4,%YMM4,%YMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VMULPD %YMM4,%YMM9,%YMM9 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VFMSUBADD231PD %YMM8,%YMM7,%YMM9 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
MOV -0x8(%RCX),%RBX | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VMOVUPD (%RDI,%RBX,8),%YMM4 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVUPD -0x20(%RDX),%YMM7 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVDDUP %YMM7,%YMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (25.0%) |
VSHUFPD $0x5,%YMM4,%YMM4,%YMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VSHUFPD $0xf,%YMM7,%YMM7,%YMM7 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VMULPD %YMM7,%YMM10,%YMM7 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VFMSUBADD231PD %YMM8,%YMM4,%YMM7 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VSUBPD %YMM7,%YMM9,%YMM4 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VADDPD %YMM7,%YMM9,%YMM7 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD (%RDI,%R11,8),%YMM8 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVDDUP %YMM5,%YMM9 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (25.0%) |
VSHUFPD $0x5,%YMM8,%YMM8,%YMM10 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VSHUFPD $0xf,%YMM5,%YMM5,%YMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VMULPD %YMM5,%YMM10,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
MOV -0x10(%RCX),%R11 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VMOVUPD (%RDI,%R11,8),%YMM10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VFMSUBADD231PD %YMM9,%YMM8,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD -0x40(%RDX),%YMM8 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVDDUP %YMM8,%YMM9 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (25.0%) |
VSHUFPD $0x5,%YMM10,%YMM10,%YMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VSHUFPD $0xf,%YMM8,%YMM8,%YMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VMULPD %YMM11,%YMM8,%YMM8 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VFMSUBADD231PD %YMM9,%YMM10,%YMM8 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VSUBPD %YMM8,%YMM5,%YMM9 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
MOV (%RCX),%R11 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VMOVUPD (%RDI,%R11,8),%YMM10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VADDPD %YMM5,%YMM8,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD (%RDX),%YMM8 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVDDUP %YMM8,%YMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (25.0%) |
VSHUFPD $0x5,%YMM10,%YMM10,%YMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VSHUFPD $0xf,%YMM8,%YMM8,%YMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VMULPD %YMM12,%YMM8,%YMM8 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VFMSUBADD231PD %YMM11,%YMM10,%YMM8 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
MOV -0x20(%RCX),%R11 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VMOVUPD (%RDI,%R11,8),%YMM10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5-6 | 0.50 | vect (50.0%) |
VMOVDDUP %YMM6,%YMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (25.0%) |
VSHUFPD $0x5,%YMM10,%YMM10,%YMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VSHUFPD $0xf,%YMM6,%YMM6,%YMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VMULPD %YMM6,%YMM12,%YMM6 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VFMSUBADD231PD %YMM11,%YMM10,%YMM6 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VSUBPD %YMM6,%YMM8,%YMM10 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VADDPD %YMM6,%YMM8,%YMM6 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VADDPD %YMM7,%YMM3,%YMM8 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VADDPD %YMM6,%YMM5,%YMM11 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VSUBPD %YMM11,%YMM8,%YMM12 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD %YMM12,(%RDI,%R10,8) | 1 | 0 | 0 | 0.33 | 0.33 | 1 | 0 | 0 | 0.33 | 3 | 1 | vect (50.0%) |
VADDPD %YMM11,%YMM8,%YMM8 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD %YMM8,(%RDI) | 1 | 0 | 0 | 0.33 | 0.33 | 1 | 0 | 0 | 0.33 | 3 | 1 | vect (50.0%) |
VSUBPD %YMM7,%YMM3,%YMM3 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VSUBPD %YMM5,%YMM6,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VXORPD %YMM0,%YMM5,%YMM5 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 1 | 0.33 | vect (50.0%) |
VSHUFPS $0x4e,%YMM5,%YMM5,%YMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
MOV -0x8(%RCX),%R10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VSUBPD %YMM5,%YMM3,%YMM6 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD %YMM6,(%RDI,%R10,8) | 1 | 0 | 0 | 0.33 | 0.33 | 1 | 0 | 0 | 0.33 | 3 | 1 | vect (50.0%) |
MOV -0x28(%RCX),%R10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VADDPD %YMM5,%YMM3,%YMM3 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD %YMM3,(%RDI,%R10,8) | 1 | 0 | 0 | 0.33 | 0.33 | 1 | 0 | 0 | 0.33 | 3 | 1 | vect (50.0%) |
VADDPD %YMM10,%YMM9,%YMM3 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMULPD %YMM1,%YMM3,%YMM3 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VADDPD %YMM3,%YMM2,%YMM5 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VSUBPD %YMM3,%YMM2,%YMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VSUBPD %YMM9,%YMM10,%YMM3 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMULPD %YMM1,%YMM3,%YMM3 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VSUBPD %YMM4,%YMM3,%YMM6 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VXORPD %YMM0,%YMM6,%YMM6 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 1 | 0.33 | vect (50.0%) |
VSHUFPS $0x4e,%YMM6,%YMM6,%YMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
VADDPD %YMM3,%YMM4,%YMM3 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VXORPD %YMM0,%YMM3,%YMM3 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 1 | 0.33 | vect (50.0%) |
VSHUFPS $0x4e,%YMM3,%YMM3,%YMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | vect (50.0%) |
MOV (%RCX),%R10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VSUBPD %YMM6,%YMM5,%YMM4 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD %YMM4,(%RDI,%R10,8) | 1 | 0 | 0 | 0.33 | 0.33 | 1 | 0 | 0 | 0.33 | 3 | 1 | vect (50.0%) |
MOV -0x20(%RCX),%R10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VADDPD %YMM3,%YMM2,%YMM4 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD %YMM4,(%RDI,%R10,8) | 1 | 0 | 0 | 0.33 | 0.33 | 1 | 0 | 0 | 0.33 | 3 | 1 | vect (50.0%) |
MOV -0x30(%RCX),%R10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VADDPD %YMM6,%YMM5,%YMM4 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD %YMM4,(%RDI,%R10,8) | 1 | 0 | 0 | 0.33 | 0.33 | 1 | 0 | 0 | 0.33 | 3 | 1 | vect (50.0%) |
MOV -0x10(%RCX),%R10 | 1 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4-5 | 0.50 | N/A |
VSUBPD %YMM3,%YMM2,%YMM2 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 | vect (50.0%) |
VMOVUPD %YMM2,(%RDI,%R10,8) | 1 | 0 | 0 | 0.33 | 0.33 | 1 | 0 | 0 | 0.33 | 3 | 1 | vect (50.0%) |
ADD $0x2,%R8 | 1 | 0.25 | 0.25 | 0 | 0 | 0 | 0.25 | 0.25 | 0 | 1 | 0.25 | N/A |
ADD %RAX,%RDI | 1 | 0.25 | 0.25 | 0 | 0 | 0 | 0.25 | 0.25 | 0 | 1 | 0.25 | N/A |
ADD $0xe0,%RDX | 1 | 0.25 | 0.25 | 0 | 0 | 0 | 0.25 | 0.25 | 0 | 1 | 0.25 | N/A |
ADD %RSI,%RCX | 1 | 0.25 | 0.25 | 0 | 0 | 0 | 0.25 | 0.25 | 0 | 1 | 0.25 | N/A |
CMP %R9,%R8 | 1 | 0.25 | 0.25 | 0 | 0 | 0 | 0.25 | 0.25 | 0 | 1 | 0.25 | N/A |
JL 1b6e20 <t1fv_8+0x50> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50-1 | N/A |