;; Machine description for AArch64 SVE2.
;; Copyright (C) 2019-2020 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.

;; The file is organised into the following sections (search for the full
;; line):
;;
;; == Moves
;; ---- Non-temporal gather loads
;; ---- Non-temporal scatter stores
;;
;; == Uniform binary arithmetic
;; ---- [INT] Multiplication
;; ---- [INT] Scaled high-part multiplication
;; ---- [INT] General binary arithmetic that maps to unspecs
;; ---- [INT] Saturating binary arithmetic
;; ---- [INT] Saturating left shifts
;;
;; == Uniform ternary arithmetic
;; ---- [INT] General ternary arithmetic that maps to unspecs
;; ---- [INT] Multiply-and-accumulate operations
;; ---- [INT] Binary logic operations with rotation
;; ---- [INT] Ternary logic operations
;; ---- [INT] Shift-and-accumulate operations
;; ---- [INT] Shift-and-insert operations
;; ---- [INT] Sum of absolute differences
;;
;; == Extending arithmetic
;; ---- [INT] Wide binary arithmetic
;; ---- [INT] Long binary arithmetic
;; ---- [INT] Long left shifts
;; ---- [INT] Long binary arithmetic with accumulation
;; ---- [FP] Long multiplication with accumulation
;;
;; == Narrowing arithmetic
;; ---- [INT] Narrowing unary arithmetic
;; ---- [INT] Narrowing binary arithmetic
;; ---- [INT] Narrowing right shifts
;;
;; == Pairwise arithmetic
;; ---- [INT] Pairwise arithmetic
;; ---- [FP] Pairwise arithmetic
;; ---- [INT] Pairwise arithmetic with accumulation
;;
;; == Complex arithmetic
;; ---- [INT] Complex binary operations
;; ---- [INT] Complex ternary operations
;; ---- [INT] Complex dot product
;;
;; == Conversions
;; ---- [FP<-FP] Widening conversions
;; ---- [FP<-FP] Narrowing conversions
;;
;; == Other arithmetic
;; ---- [INT] Reciprocal approximation
;; ---- [INT<-FP] Base-2 logarithm
;; ---- [INT] Polynomial multiplication
;;
;; == Permutation
;; ---- [INT,FP] General permutes
;; ---- [INT] Optional bit-permute extensions
;;
;; == General
;; ---- Check for aliases between pointers
;; ---- Histogram processing
;; ---- String matching
;;
;; == Cryptographic extensions
;; ---- Optional AES extensions
;; ---- Optional SHA-3 extensions
;; ---- Optional SM4 extensions
;; =========================================================================
;; == Moves
;; =========================================================================
;; -------------------------------------------------------------------------
;; ---- Non-temporal gather loads
;; -------------------------------------------------------------------------
;; Includes gather forms of:
;; - LDNT1B
;; - LDNT1D
;; - LDNT1H
;; - LDNT1W
;; -------------------------------------------------------------------------
;; Non-extending loads.
(define_insn "@aarch64_gather_ldnt"
[(set (match_operand:SVE_FULL_SD 0 "register_operand" "=w, w")
(unspec:SVE_FULL_SD
[(match_operand: 1 "register_operand" "Upl, Upl")
(match_operand:DI 2 "aarch64_reg_or_zero" "Z, r")
(match_operand: 3 "register_operand" "w, w")
(mem:BLK (scratch))]
UNSPEC_LDNT1_GATHER))]
"TARGET_SVE2"
"@
ldnt1\t%0., %1/z, [%3.]
ldnt1\t%0., %1/z, [%3., %2]"
)
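
;; As a rough illustration (register numbers are arbitrary), for VNx4SI
;; the two alternatives above would emit:
;;
;;   LDNT1W Z0.S, P0/Z, [Z1.S]
;;   LDNT1W Z0.S, P0/Z, [Z1.S, X2]
;;
;; i.e. a non-temporal gather whose addresses come from a vector,
;; optionally offset by a scalar base register.
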
;; Extending loads.
(define_insn_and_rewrite "@aarch64_gather_ldnt_"
[(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, w")
(unspec:SVE_FULL_SDI
[(match_operand: 4 "general_operand" "UplDnm, UplDnm")
(ANY_EXTEND:SVE_FULL_SDI
(unspec:SVE_PARTIAL_I
[(match_operand: 1 "register_operand" "Upl, Upl")
(match_operand:DI 2 "aarch64_reg_or_zero" "Z, r")
(match_operand: 3 "register_operand" "w, w")
(mem:BLK (scratch))]
UNSPEC_LDNT1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE2
&& (~ & ) == 0"
"@
ldnt1\t%0., %1/z, [%3.]
ldnt1\t%0., %1/z, [%3., %2]"
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (mode);
}
)
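
;; For example, a gather load of bytes sign-extended to 32 bits uses
;; LDNT1SB, while the corresponding zero-extending load uses plain
;; LDNT1B (register numbers illustrative):
;;
;;   LDNT1SB Z0.S, P0/Z, [Z1.S, X2]
;;   LDNT1B  Z0.S, P0/Z, [Z1.S, X2]
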
;; -------------------------------------------------------------------------
;; ---- Non-temporal scatter stores
;; -------------------------------------------------------------------------
;; Includes scatter forms of:
;; - STNT1B
;; - STNT1D
;; - STNT1H
;; - STNT1W
;; -------------------------------------------------------------------------
;; Non-truncating stores.
(define_insn "@aarch64_scatter_stnt"
[(set (mem:BLK (scratch))
(unspec:BLK
[(match_operand: 0 "register_operand" "Upl, Upl")
(match_operand:DI 1 "aarch64_reg_or_zero" "Z, r")
(match_operand: 2 "register_operand" "w, w")
(match_operand:SVE_FULL_SD 3 "register_operand" "w, w")]
UNSPEC_STNT1_SCATTER))]
"TARGET_SVE"
"@
stnt1\t%3., %0, [%2.]
stnt1\t%3., %0, [%2., %1]"
)
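
;; For example, for VNx2DI the second alternative above would emit
;; something like (register numbers illustrative):
;;
;;   STNT1D Z3.D, P0, [Z2.D, X1]
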
;; Truncating stores.
(define_insn "@aarch64_scatter_stnt_"
[(set (mem:BLK (scratch))
(unspec:BLK
[(match_operand: 0 "register_operand" "Upl, Upl")
(match_operand:DI 1 "aarch64_reg_or_zero" "Z, r")
(match_operand: 2 "register_operand" "w, w")
(truncate:SVE_PARTIAL_I
(match_operand:SVE_FULL_SDI 3 "register_operand" "w, w"))]
UNSPEC_STNT1_SCATTER))]
"TARGET_SVE2
&& (~ & ) == 0"
"@
stnt1\t%3., %0, [%2.]
stnt1\t%3., %0, [%2., %1]"
)
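
;; For example, a truncating store of the low bytes of VNx4SI elements
;; would emit something like (register numbers illustrative):
;;
;;   STNT1B Z3.S, P0, [Z2.S, X1]
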
;; =========================================================================
;; == Uniform binary arithmetic
;; =========================================================================
;; -------------------------------------------------------------------------
;; ---- [INT] Multiplication
;; -------------------------------------------------------------------------
;; Includes the lane forms of:
;; - MUL
;; -------------------------------------------------------------------------
(define_insn "@aarch64_mul_lane_"
[(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w")
(mult:SVE_FULL_HSDI
(unspec:SVE_FULL_HSDI
[(match_operand:SVE_FULL_HSDI 2 "register_operand" "")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
(match_operand:SVE_FULL_HSDI 1 "register_operand" "w")))]
"TARGET_SVE2"
"mul\t%0., %1., %2.[%3]"
)
;; -------------------------------------------------------------------------
;; ---- [INT] Scaled high-part multiplication
;; -------------------------------------------------------------------------
;; The patterns in this section are synthetic: each expands to a
;; multi-instruction sequence rather than a single instruction.
;; -------------------------------------------------------------------------
;; Unpredicated integer multiply-high-with-(round-and-)scale.
(define_expand "mulhs3"
[(set (match_operand:SVE_FULL_BHSI 0 "register_operand")
(unspec:SVE_FULL_BHSI
[(match_dup 3)
(unspec:SVE_FULL_BHSI
[(match_operand:SVE_FULL_BHSI 1 "register_operand")
(match_operand:SVE_FULL_BHSI 2 "register_operand")]
MULHRS)]
UNSPEC_PRED_X))]
"TARGET_SVE2"
{
operands[3] = aarch64_ptrue_reg (mode);
rtx prod_b = gen_reg_rtx (mode);
rtx prod_t = gen_reg_rtx (mode);
emit_insn (gen_aarch64_sve_mullb (prod_b, operands[1],
operands[2]));
emit_insn (gen_aarch64_sve_mullt (prod_t, operands[1],
operands[2]));
rtx shift = GEN_INT (GET_MODE_UNIT_BITSIZE (mode) - 1);
emit_insn (gen_aarch64_sve_shrnb (operands[0], prod_b, shift));
emit_insn (gen_aarch64_sve_shrnt (operands[0], operands[0],
prod_t, shift));
DONE;
}
)
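
;; For example, smulhrs on VNx16QI expands to a sequence along the
;; following lines, where the shift amount is
;; GET_MODE_UNIT_BITSIZE - 1 = 7 (register numbers illustrative):
;;
;;   SMULLB Z4.H, Z1.B, Z2.B
;;   SMULLT Z5.H, Z1.B, Z2.B
;;   RSHRNB Z0.B, Z4.H, #7
;;   RSHRNT Z0.B, Z5.H, #7
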
;; -------------------------------------------------------------------------
;; ---- [INT] General binary arithmetic that maps to unspecs
;; -------------------------------------------------------------------------
;; Includes:
;; - SHADD
;; - SHSUB
;; - SHSUBR
;; - SQRSHL
;; - SQRSHLR
;; - SRHADD
;; - SRSHL
;; - SRSHLR
;; - SUQADD
;; - UHADD
;; - UHSUB
;; - UHSUBR
;; - UQRSHL
;; - UQRSHLR
;; - URHADD
;; - URSHL
;; - URSHLR
;; - USQADD
;; -------------------------------------------------------------------------
;; Integer average (floor).
(define_expand "avg3_floor"
[(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
[(match_dup 3)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 1 "register_operand")
(match_operand:SVE_FULL_I 2 "register_operand")]
HADD)]
UNSPEC_PRED_X))]
"TARGET_SVE2"
{
operands[3] = force_reg (mode, CONSTM1_RTX (mode));
}
)
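
;; Since the underlying instruction is predicated, the expansion simply
;; supplies an all-true predicate, so e.g. uavg<mode>3_floor on bytes
;; becomes something like (register numbers illustrative):
;;
;;   PTRUE P0.B
;;   UHADD Z0.B, P0/M, Z0.B, Z1.B
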
;; Integer average (rounding).
(define_expand "avg3_ceil"
[(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
[(match_dup 3)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 1 "register_operand")
(match_operand:SVE_FULL_I 2 "register_operand")]
RHADD)]
UNSPEC_PRED_X))]
"TARGET_SVE2"
{
operands[3] = force_reg (mode, CONSTM1_RTX (mode));
}
)
;; The immediate form of SQADD acts as an immediate form of SUQADD
;; over its full range. In contrast to the ss_plus pattern, we do
;; not need to treat byte immediates specially. E.g.:
;;
;; SQADD Z0.B, Z0.B, #128
;;
;; is equivalent to:
;;
;; MOV Z1.B, #128
;; SUQADD Z0.B, P0/M, Z0.B, Z1.B
;;
;; even though it's not equivalent to:
;;
;; MOV Z1.B, #-128
;; SQADD Z0.B, P0/M, Z0.B, Z1.B // Saturating subtraction of 128
(define_insn "@aarch64_sve_suqadd_const"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 1 "register_operand" "0, w")
(match_operand:SVE_FULL_I 2 "aarch64_sve_arith_immediate")]
UNSPEC_SUQADD))]
"TARGET_SVE2"
"@
sqadd\t%0., %0., #%D2
movprfx\t%0, %1\;sqadd\t%0., %0., #%D2"
[(set_attr "movprfx" "*,yes")]
)
;; General predicated binary arithmetic. All operations handled here
;; are commutative or have a reversed form.
(define_insn "@aarch64_pred_"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand" "Upl, Upl, Upl")
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand" "0, w, w")
(match_operand:SVE_FULL_I 3 "register_operand" "w, 0, w")]
SVE2_COND_INT_BINARY_REV)]
UNSPEC_PRED_X))]
"TARGET_SVE2"
"@
\t%0., %1/m, %0., %3.
\t%0., %1/m, %0., %2.
movprfx\t%0, %2\;\t%0., %1/m, %0., %3."
[(set_attr "movprfx" "*,*,yes")]
)
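
;; For example, when the destination is tied to operand 3 rather than
;; operand 2, the second alternative above uses the reversed form of
;; the instruction, such as SRSHLR instead of SRSHL (register numbers
;; illustrative):
;;
;;   SRSHL  Z0.B, P0/M, Z0.B, Z1.B // Z0 shifted by Z1
;;   SRSHLR Z0.B, P0/M, Z0.B, Z1.B // Z1 shifted by Z0
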
;; Predicated binary arithmetic with merging.
(define_expand "@cond_"
[(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand")
(unspec:SVE_FULL_I
[(match_dup 5)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand")
(match_operand:SVE_FULL_I 3 "register_operand")]
SVE2_COND_INT_BINARY)]
UNSPEC_PRED_X)
(match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE2"
{
operands[5] = CONSTM1_RTX (mode);
}
)
;; Predicated binary arithmetic, merging with the first input.
(define_insn_and_rewrite "*cond__2"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand" "Upl, Upl")
(unspec:SVE_FULL_I
[(match_operand 4)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
(match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
SVE2_COND_INT_BINARY)]
UNSPEC_PRED_X)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE2"
"@
\t%0., %1/m, %0., %3.
movprfx\t%0, %2\;\t%0., %1/m, %0., %3."
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (mode);
}
[(set_attr "movprfx" "*,yes")]
)
;; Predicated binary arithmetic, merging with the second input.
(define_insn_and_rewrite "*cond__3"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand" "Upl, Upl")
(unspec:SVE_FULL_I
[(match_operand 4)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
(match_operand:SVE_FULL_I 3 "register_operand" "0, w")]
SVE2_COND_INT_BINARY_REV)]
UNSPEC_PRED_X)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE2"
"@
\t%0., %1/m, %0., %2.
movprfx\t%0, %3\;\t%0., %1/m, %0., %2."
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (mode);
}
[(set_attr "movprfx" "*,yes")]
)
;; Predicated binary operations, merging with an independent value.
(define_insn_and_rewrite "*cond__any"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
(unspec:SVE_FULL_I
[(match_operand 5)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand" "0, w, w, w, w")
(match_operand:SVE_FULL_I 3 "register_operand" "w, 0, w, w, w")]
SVE2_COND_INT_BINARY_REV)]
UNSPEC_PRED_X)
(match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE2
&& !rtx_equal_p (operands[2], operands[4])
&& !rtx_equal_p (operands[3], operands[4])"
"@
movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %3.
movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %2.
movprfx\t%0., %1/z, %2.\;\t%0., %1/m, %0., %3.
movprfx\t%0., %1/m, %2.\;\t%0., %1/m, %0., %3.
#"
"&& 1"
{
if (reload_completed
&& register_operand (operands[4], mode)
&& !rtx_equal_p (operands[0], operands[4]))
{
emit_insn (gen_vcond_mask_ (operands[0], operands[2],
operands[4], operands[1]));
operands[4] = operands[2] = operands[0];
}
else if (!CONSTANT_P (operands[5]))
operands[5] = CONSTM1_RTX (mode);
else
FAIL;
}
[(set_attr "movprfx" "yes")]
)
;; Predicated binary operations with no reverse form, merging with zero.
;; At present we don't generate these patterns via a cond_* optab,
;; so there's no correctness requirement to handle merging with an
;; independent value.
(define_insn_and_rewrite "*cond__z"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand" "Upl, Upl")
(unspec:SVE_FULL_I
[(match_operand 5)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
(match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
SVE2_COND_INT_BINARY_NOREV)]
UNSPEC_PRED_X)
(match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE2"
"@
movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %3.
movprfx\t%0., %1/z, %2.\;\t%0., %1/m, %0., %3."
"&& !CONSTANT_P (operands[5])"
{
operands[5] = CONSTM1_RTX (mode);
}
[(set_attr "movprfx" "yes")]
)
;; -------------------------------------------------------------------------
;; ---- [INT] Saturating binary arithmetic
;; -------------------------------------------------------------------------
;; Includes:
;; - SQDMULH
;; - SQRDMULH
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 1 "register_operand" "w")
(match_operand:SVE_FULL_I 2 "register_operand" "w")]
SVE2_INT_BINARY))]
"TARGET_SVE2"
"\t%0., %1., %2."
)
(define_insn "@aarch64_sve__lane_"
[(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w")
(unspec:SVE_FULL_HSDI
[(match_operand:SVE_FULL_HSDI 1 "register_operand" "w")
(unspec:SVE_FULL_HSDI
[(match_operand:SVE_FULL_HSDI 2 "register_operand" "")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
SVE2_INT_BINARY_LANE))]
"TARGET_SVE2"
"\t%0., %1., %2.[%3]"
)
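
;; For example, for VNx8HI the lane pattern above would produce
;; something like the following, with the <sve_lane_con> constraint
;; restricting operand 2 to the registers that the indexed form can
;; encode (register and lane numbers illustrative):
;;
;;   SQDMULH Z0.H, Z1.H, Z2.H[3]
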
;; -------------------------------------------------------------------------
;; ---- [INT] Saturating left shifts
;; -------------------------------------------------------------------------
;; Includes:
;; - SQSHL
;; - SQSHLR
;; - UQSHL
;; - UQSHLR
;; -------------------------------------------------------------------------
;; Predicated left shifts.
(define_insn "@aarch64_pred_"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, w, ?&w, ?&w")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand" "0, 0, w, w, w")
(match_operand:SVE_FULL_I 3 "aarch64_sve_shift_operand" "D, w, 0, D, w")]
SVE2_COND_INT_SHIFT)]
UNSPEC_PRED_X))]
"TARGET_SVE2"
"@
\t%0., %1/m, %0., #%3
\t%0., %1/m, %0., %3.
r\t%0., %1/m, %0., %2.
movprfx\t%0, %2\;\t%0., %1/m, %0., #%3
movprfx\t%0, %2\;\t%0., %1/m, %0., %3."
[(set_attr "movprfx" "*,*,*,yes,yes")]
)
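
;; For example, the alternatives above cover sequences such as the
;; following (register numbers and shift amounts illustrative):
;;
;;   SQSHL   Z0.B, P0/M, Z0.B, #3   // immediate shift amount
;;   SQSHL   Z0.B, P0/M, Z0.B, Z1.B // variable shift amount
;;   SQSHLR  Z0.B, P0/M, Z0.B, Z1.B // destination tied to the shift amount
;;   MOVPRFX Z0, Z2
;;   SQSHL   Z0.B, P0/M, Z0.B, #3   // non-destructive via MOVPRFX
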
;; Predicated left shifts with merging.
(define_expand "@cond_"
[(set (match_operand:SVE_FULL_I 0 "register_operand")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand")
(unspec:SVE_FULL_I
[(match_dup 5)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand")
(match_operand:SVE_FULL_I 3 "aarch64_sve_shift_operand")]
SVE2_COND_INT_SHIFT)]
UNSPEC_PRED_X)
(match_operand:SVE_FULL_I 4 "register_operand")]
UNSPEC_SEL))]
"TARGET_SVE2"
{
operands[5] = CONSTM1_RTX (mode);
}
)
;; Predicated left shifts, merging with the first input.
(define_insn_and_rewrite "*cond__2"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w, ?&w")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand" "Upl, Upl, Upl, Upl")
(unspec:SVE_FULL_I
[(match_operand 4)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand" "0, 0, w, w")
(match_operand:SVE_FULL_I 3 "aarch64_sve_shift_operand" "D, w, D, w")]
SVE2_COND_INT_SHIFT)]
UNSPEC_PRED_X)
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE2"
"@
\t%0., %1/m, %0., #%3
\t%0., %1/m, %0., %3.
movprfx\t%0, %2\;\t%0., %1/m, %0., #%3
movprfx\t%0, %2\;\t%0., %1/m, %0., %3."
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (mode);
}
[(set_attr "movprfx" "*,*,yes,yes")]
)
;; Predicated left shifts, merging with the second input.
(define_insn_and_rewrite "*cond__3"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand" "Upl, Upl")
(unspec:SVE_FULL_I
[(match_operand 4)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand" "w, w")
(match_operand:SVE_FULL_I 3 "register_operand" "0, w")]
SVE2_COND_INT_SHIFT)]
UNSPEC_PRED_X)
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE2"
"@
r\t%0., %1/m, %0., %2.
movprfx\t%0, %3\;r\t%0., %1/m, %0., %2."
"&& !CONSTANT_P (operands[4])"
{
operands[4] = CONSTM1_RTX (mode);
}
[(set_attr "movprfx" "*,yes")]
)
;; Predicated left shifts, merging with an independent value.
(define_insn_and_rewrite "*cond__any"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, &w, &w, &w, ?&w, ?&w")
(unspec:SVE_FULL_I
[(match_operand: 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl, Upl, Upl, Upl")
(unspec:SVE_FULL_I
[(match_operand 5)
(unspec:SVE_FULL_I
[(match_operand:SVE_FULL_I 2 "register_operand" "0, 0, w, w, w, w, w, w, w")
(match_operand:SVE_FULL_I 3 "aarch64_sve_shift_operand" "D, w, 0, D, w, D, w, D, w")]
SVE2_COND_INT_SHIFT)]
UNSPEC_PRED_X)
(match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, Dz, 0, 0, w, w")]
UNSPEC_SEL))]
"TARGET_SVE2
&& !rtx_equal_p (operands[2], operands[4])
&& (CONSTANT_P (operands[4]) || !rtx_equal_p (operands[3], operands[4]))"
"@
movprfx\t%0., %1/z, %0.