core/slice/cmp.rs

//! Comparison traits for `[T]`.

use super::{from_raw_parts, memchr};
use crate::ascii;
use crate::cmp::{self, BytewiseEq, Ordering};
use crate::intrinsics::compare_bytes;
use crate::num::NonZero;
use crate::ops::ControlFlow;

#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<T, U> const PartialEq<[U]> for [T]
where
    T: [const] PartialEq<U>,
{
    #[inline]
    fn eq(&self, other: &[U]) -> bool {
        SlicePartialEq::equal(self, other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<T: [const] Eq> const Eq for [T] {}

/// Implements comparison of slices [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Ord for [T] {
    fn cmp(&self, other: &[T]) -> Ordering {
        SliceOrd::compare(self, other)
    }
}
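
// A quick illustration of the lexicographic rule above (sketch only, from the
// caller's point of view): elements are compared left to right, and lengths
// only break the tie when one slice is a prefix of the other.
//
//     let a: &[i32] = &[1, 2, 3];
//     let b: &[i32] = &[1, 2, 4];
//     let c: &[i32] = &[1, 2];
//     assert!(a < b); // decided by 3 < 4
//     assert!(c < a); // equal prefix, `c` is shorter
//     assert_eq!(a.cmp(a), core::cmp::Ordering::Equal);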

#[inline]
const fn as_underlying(x: ControlFlow<bool>) -> u8 {
    // SAFETY: This will only compile if `bool` and `ControlFlow<bool>` have the same
    // size (which isn't guaranteed but this is libcore). Because they have the same
    // size, it's a niched implementation, which in one byte means there can't be
    // any uninitialized memory. The callers then only check for `0` or `1` from this,
    // which must necessarily match the `Break` variant, and we're fine no matter
    // what ends up getting picked as the value representing `Continue(())`.
    unsafe { crate::mem::transmute(x) }
}
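
// Illustration of the mapping the transmute above relies on (assuming the
// niche layout described in the SAFETY comment, where `Break(b)` keeps the
// representation of `b`):
//
//     as_underlying(ControlFlow::Break(false)) == 0
//     as_underlying(ControlFlow::Break(true))  == 1
//     // `Continue(())` gets whichever niche value is left over (commonly 2);
//     // the callers below never compare against it directly.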

/// Implements comparison of slices [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd> PartialOrd for [T] {
    #[inline]
    fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
        SlicePartialOrd::partial_compare(self, other)
    }
    #[inline]
    fn lt(&self, other: &Self) -> bool {
        // This is certainly not the obvious way to implement these methods.
        // Unfortunately, using anything that looks at the discriminant means that
        // LLVM sees a check for `2` (aka `ControlFlow<bool>::Continue(())`) and
        // gets very distracted by that, ending up generating extraneous code.
        // This should be changed to something simpler once either LLVM is smarter,
        // see <https://github.com/llvm/llvm-project/issues/132678>, or we generate
        // niche discriminant checks in a way that doesn't trigger it.

        as_underlying(self.__chaining_lt(other)) == 1
    }
    #[inline]
    fn le(&self, other: &Self) -> bool {
        as_underlying(self.__chaining_le(other)) != 0
    }
    #[inline]
    fn gt(&self, other: &Self) -> bool {
        as_underlying(self.__chaining_gt(other)) == 1
    }
    #[inline]
    fn ge(&self, other: &Self) -> bool {
        as_underlying(self.__chaining_ge(other)) != 0
    }
    #[inline]
    fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
        SliceChain::chaining_lt(self, other)
    }
    #[inline]
    fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
        SliceChain::chaining_le(self, other)
    }
    #[inline]
    fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
        SliceChain::chaining_gt(self, other)
    }
    #[inline]
    fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
        SliceChain::chaining_ge(self, other)
    }
}

#[doc(hidden)]
// intermediate trait for specialization of slice's PartialEq
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
const trait SlicePartialEq<B> {
    fn equal(&self, other: &[B]) -> bool;
}

// Generic slice equality
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<A, B> const SlicePartialEq<B> for [A]
where
    A: [const] PartialEq<B>,
{
    // It's not worth trying to inline the loops underneath here *in MIR*,
    // and preventing it encourages more useful inlining upstream,
    // such as in `<str as PartialEq>::eq`.
    // The codegen backend can still inline it later if needed.
    #[rustc_no_mir_inline]
    default fn equal(&self, other: &[B]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // Implemented as explicit indexing rather
        // than zipped iterators for performance reasons.
        // See PR https://github.com/rust-lang/rust/pull/116846
        // FIXME(const_hack): make this a `for idx in 0..self.len()` loop.
        let mut idx = 0;
        while idx < self.len() {
            // bound checks are optimized away
            if self[idx] != other[idx] {
                return false;
            }
            idx += 1;
        }

        true
    }
}
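
// For reference, the shape this loop deliberately avoids (a sketch; see the
// PR linked above for the measurements that motivated the explicit indexing):
//
//     self.iter().zip(other).all(|(a, b)| a == b)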

// When each element can be compared byte-wise, we can compare all the bytes
// from the whole size in one call to the intrinsic.
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<A, B> const SlicePartialEq<B> for [A]
where
    A: [const] BytewiseEq<B>,
{
    // This is usually a pretty good backend inlining candidate because the
    // intrinsic tends to just be `memcmp`. However, as of 2025-12 letting
    // MIR inline this makes reuse worse because it means that, for example,
    // `String::eq` doesn't inline, whereas by keeping this from inlining, all
    // the wrappers up to the call to this disappear. If the heuristics have
    // changed and this is no longer fruitful, though, please do remove it.
    // In the meantime, it's fine to not inline it in MIR because the backend
    // will still inline it if it thinks it's important to do so.
    #[rustc_no_mir_inline]
    #[inline]
    fn equal(&self, other: &[B]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
        // The two slices have been checked to have the same size above.
        unsafe {
            let size = size_of_val(self);
            compare_bytes(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
        }
    }
}
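
// A sketch of what the specialization above buys (assuming the element type
// implements `BytewiseEq`, as the primitive integer types do in this library):
//
//     let a: &[u32] = &[1, 2, 3];
//     let b: &[u32] = &[1, 2, 3];
//     // After the length check, this is a single `compare_bytes` call over
//     // `size_of_val(a)` bytes rather than a per-element loop.
//     assert!(a == b);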

#[doc(hidden)]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
// intermediate trait for specialization of slice's PartialOrd
const trait SlicePartialOrd: Sized {
    fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
}

#[doc(hidden)]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
// intermediate trait for specialization of slice's PartialOrd chaining methods
const trait SliceChain: Sized {
    fn chaining_lt(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
    fn chaining_le(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
    fn chaining_gt(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
    fn chaining_ge(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
}

type AlwaysBreak<B> = ControlFlow<B, crate::convert::Infallible>;

impl<A: PartialOrd> SlicePartialOrd for A {
    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        let elem_chain = |a, b| match PartialOrd::partial_cmp(a, b) {
            Some(Ordering::Equal) => ControlFlow::Continue(()),
            non_eq => ControlFlow::Break(non_eq),
        };
        let len_chain = |a: &_, b: &_| ControlFlow::Break(usize::partial_cmp(a, b));
        let AlwaysBreak::Break(b) = chaining_impl(left, right, elem_chain, len_chain);
        b
    }
}

impl<A: PartialOrd> SliceChain for A {
    default fn chaining_lt(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        chaining_impl(left, right, PartialOrd::__chaining_lt, usize::__chaining_lt)
    }
    default fn chaining_le(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        chaining_impl(left, right, PartialOrd::__chaining_le, usize::__chaining_le)
    }
    default fn chaining_gt(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        chaining_impl(left, right, PartialOrd::__chaining_gt, usize::__chaining_gt)
    }
    default fn chaining_ge(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        chaining_impl(left, right, PartialOrd::__chaining_ge, usize::__chaining_ge)
    }
}

#[inline]
fn chaining_impl<'l, 'r, A: PartialOrd, B, C>(
    left: &'l [A],
    right: &'r [A],
    elem_chain: impl Fn(&'l A, &'r A) -> ControlFlow<B>,
    len_chain: impl for<'a> FnOnce(&'a usize, &'a usize) -> ControlFlow<B, C>,
) -> ControlFlow<B, C> {
    let l = cmp::min(left.len(), right.len());

    // Slice to the loop iteration range to enable bound check
    // elimination in the compiler
    let lhs = &left[..l];
    let rhs = &right[..l];

    for i in 0..l {
        elem_chain(&lhs[i], &rhs[i])?;
    }

    len_chain(&left.len(), &right.len())
}
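
// A worked example of `chaining_impl` (sketch, using the closures from
// `partial_compare` above):
//
//     chaining_impl(&[1, 2, 5], &[1, 3], elem_chain, len_chain)
//     // l = 2; at index 0 the elements are equal, so `elem_chain` returns
//     // Continue(()); at index 1 it breaks with Some(Ordering::Less) and `?`
//     // returns early, so `len_chain` (the length tie-breaker) is never
//     // consulted. Lengths only decide the result for a shared prefix.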

// This is the impl that we would like to have. Unfortunately it's not sound.
// See `partial_ord_slice.rs`.
/*
impl<A> SlicePartialOrd for A
where
    A: Ord,
{
    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        Some(SliceOrd::compare(left, right))
    }
}
*/

#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<A: [const] AlwaysApplicableOrd> const SlicePartialOrd for A {
    fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        Some(SliceOrd::compare(left, right))
    }
}

#[rustc_specialization_trait]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
const trait AlwaysApplicableOrd: [const] SliceOrd + [const] Ord {}

macro_rules! always_applicable_ord {
    ($([$($p:tt)*] $t:ty,)*) => {
        $(impl<$($p)*> AlwaysApplicableOrd for $t {})*
    }
}

always_applicable_ord! {
    [] u8, [] u16, [] u32, [] u64, [] u128, [] usize,
    [] i8, [] i16, [] i32, [] i64, [] i128, [] isize,
    [] bool, [] char,
    [T: ?Sized] *const T, [T: ?Sized] *mut T,
    [T: AlwaysApplicableOrd] &T,
    [T: AlwaysApplicableOrd] &mut T,
    [T: AlwaysApplicableOrd] Option<T>,
}

#[doc(hidden)]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
// intermediate trait for specialization of slice's Ord
const trait SliceOrd: Sized {
    fn compare(left: &[Self], right: &[Self]) -> Ordering;
}

impl<A: Ord> SliceOrd for A {
    default fn compare(left: &[Self], right: &[Self]) -> Ordering {
        let elem_chain = |a, b| match Ord::cmp(a, b) {
            Ordering::Equal => ControlFlow::Continue(()),
            non_eq => ControlFlow::Break(non_eq),
        };
        let len_chain = |a: &_, b: &_| ControlFlow::Break(usize::cmp(a, b));
        let AlwaysBreak::Break(b) = chaining_impl(left, right, elem_chain, len_chain);
        b
    }
}

/// Marks that a type should be treated as an unsigned byte for comparisons.
///
/// # Safety
/// * The type must be readable as a `u8`, meaning it has to have the same
///   layout as `u8` and always be initialized.
/// * For every `x` and `y` of this type, `Ord::cmp(x, y)` must return the same
///   value as `Ord::cmp(transmute::<_, u8>(x), transmute::<_, u8>(y))`.
#[rustc_specialization_trait]
const unsafe trait UnsignedBytewiseOrd: [const] Ord {}

#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
unsafe impl const UnsignedBytewiseOrd for bool {}
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
unsafe impl const UnsignedBytewiseOrd for u8 {}
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
unsafe impl const UnsignedBytewiseOrd for NonZero<u8> {}
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
unsafe impl const UnsignedBytewiseOrd for Option<NonZero<u8>> {}
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
unsafe impl const UnsignedBytewiseOrd for ascii::Char {}
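
// For illustration, this is why the signed byte type does not qualify: its
// ordering disagrees with the ordering of its underlying byte.
//
//     (-1i8) < 1i8       // true when compared as `i8`
//     0xFFu8 < 0x01u8    // false when compared as raw bytes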

// `compare_bytes` compares a sequence of unsigned bytes lexicographically, so
// use it if the requirements for `UnsignedBytewiseOrd` are fulfilled.
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<A: [const] Ord + [const] UnsignedBytewiseOrd> const SliceOrd for A {
    #[inline]
    fn compare(left: &[Self], right: &[Self]) -> Ordering {
        // Since the length of a slice is always less than or equal to
        // isize::MAX, this never underflows.
        let diff = left.len() as isize - right.len() as isize;
        // This comparison gets optimized away (on x86_64 and ARM) because the
        // subtraction updates flags.
        let len = if left.len() < right.len() { left.len() } else { right.len() };
        let left = left.as_ptr().cast();
        let right = right.as_ptr().cast();
        // SAFETY: `left` and `right` are references and are thus guaranteed to
        // be valid. `UnsignedBytewiseOrd` is only implemented for types that
        // are valid u8s and can be compared the same way. We use the minimum
        // of both lengths which guarantees that both regions are valid for
        // reads in that interval.
        let mut order = unsafe { compare_bytes(left, right, len) as isize };
        if order == 0 {
            order = diff;
        }
        order.cmp(&0)
    }
}
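
// A worked example of the fallback-to-length trick above (sketch): comparing
// `[1u8, 2]` against `[1u8, 2, 3]`, `diff = 2 - 3 = -1` and `len = 2`;
// `compare_bytes` over the first 2 bytes returns 0 (equal prefix), so `order`
// falls back to `diff` and `order.cmp(&0)` yields `Ordering::Less`.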

// Don't generate our own chaining loops for `memcmp`-able things either.

#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<A: [const] PartialOrd + [const] UnsignedBytewiseOrd> const SliceChain for A {
    #[inline]
    fn chaining_lt(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        match SliceOrd::compare(left, right) {
            Ordering::Equal => ControlFlow::Continue(()),
            ne => ControlFlow::Break(ne.is_lt()),
        }
    }
    #[inline]
    fn chaining_le(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        match SliceOrd::compare(left, right) {
            Ordering::Equal => ControlFlow::Continue(()),
            ne => ControlFlow::Break(ne.is_le()),
        }
    }
    #[inline]
    fn chaining_gt(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        match SliceOrd::compare(left, right) {
            Ordering::Equal => ControlFlow::Continue(()),
            ne => ControlFlow::Break(ne.is_gt()),
        }
    }
    #[inline]
    fn chaining_ge(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        match SliceOrd::compare(left, right) {
            Ordering::Equal => ControlFlow::Continue(()),
            ne => ControlFlow::Break(ne.is_ge()),
        }
    }
}

pub(super) trait SliceContains: Sized {
    fn slice_contains(&self, x: &[Self]) -> bool;
}

impl<T> SliceContains for T
where
    T: PartialEq,
{
    default fn slice_contains(&self, x: &[Self]) -> bool {
        x.iter().any(|y| *y == *self)
    }
}

impl SliceContains for u8 {
    #[inline]
    fn slice_contains(&self, x: &[Self]) -> bool {
        memchr::memchr(*self, x).is_some()
    }
}

impl SliceContains for i8 {
    #[inline]
    fn slice_contains(&self, x: &[Self]) -> bool {
        let byte = *self as u8;
        // SAFETY: `i8` and `u8` have the same memory layout, thus casting `x.as_ptr()`
        // as `*const u8` is safe. The `x.as_ptr()` comes from a reference and is thus guaranteed
        // to be valid for reads for the length of the slice `x.len()`, which cannot be larger
        // than `isize::MAX`. The returned slice is never mutated.
        let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
        memchr::memchr(byte, bytes).is_some()
    }
}

macro_rules! impl_slice_contains {
    ($($t:ty),*) => {
        $(
            impl SliceContains for $t {
                #[inline]
                fn slice_contains(&self, arr: &[$t]) -> bool {
                    // Make our LANE_COUNT 4x the normal lane count (aiming for 128 bit vectors).
                    // The compiler will nicely unroll it.
                    const LANE_COUNT: usize = 4 * (128 / (size_of::<$t>() * 8));
                    // SIMD
                    let mut chunks = arr.chunks_exact(LANE_COUNT);
                    for chunk in &mut chunks {
                        if chunk.iter().fold(false, |acc, x| acc | (*x == *self)) {
                            return true;
                        }
                    }
                    // Scalar remainder
                    return chunks.remainder().iter().any(|x| *x == *self);
                }
            }
        )*
    };
}

impl_slice_contains!(u16, u32, u64, i16, i32, i64, f32, f64, usize, isize, char);
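
// For a concrete instance of the macro above (sketch): instantiated at `u32`,
// the constant works out to
//
//     4 * (128 / (size_of::<u32>() * 8)) == 4 * (128 / 32) == 16
//
// so each chunk covers 16 elements, and the `|`-fold keeps the chunk body
// branch-free, which is what lets the backend vectorize it; any trailing
// `arr.len() % 16` elements are handled by the scalar remainder pass.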