rand_core/impls.rs

// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Helper functions for implementing `RngCore` functions.
//!
//! For cross-platform reproducibility, these functions all use Little Endian:
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
//!
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
//! to/from byte sequences, and since its purpose is reproducibility,
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
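//!
//! As an illustrative sketch (a hypothetical generator, not an item of this
//! module), a source whose native output is `u64` would implement `next_u32`
//! under this convention simply as:
//!
//! ```ignore
//! fn next_u32(&mut self) -> u32 {
//!     // Little-endian: take the least-significant 32 bits.
//!     self.next_u64() as u32
//! }
//! ```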

use crate::RngCore;
use core::cmp::min;

/// Implement `next_u64` via `next_u32`, little-endian order.
pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    // Use LE; we explicitly generate one value before the next.
    let x = u64::from(rng.next_u32());
    let y = u64::from(rng.next_u32());
    (y << 32) | x
}

/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
///
/// The fastest way to fill a slice is usually to work as long as possible with
/// integers. That is why this method mostly uses `next_u64`, and only uses
/// `next_u32` once, when there are four or fewer bytes remaining at the end of
/// the slice.
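///
/// # Example
///
/// A minimal sketch (hypothetical generator, not from this crate) of an
/// `RngCore` implementation that delegates `fill_bytes` to this helper:
///
/// ```ignore
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
///     impls::fill_bytes_via_next(self, dest)
/// }
/// ```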
pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
    let mut left = dest;
    while left.len() >= 8 {
        let (l, r) = { left }.split_at_mut(8);
        left = r;
        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
        l.copy_from_slice(&chunk);
    }
    let n = left.len();
    if n > 4 {
        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
        left.copy_from_slice(&chunk[..n]);
    } else if n > 0 {
        let chunk: [u8; 4] = rng.next_u32().to_le_bytes();
        left.copy_from_slice(&chunk[..n]);
    }
}

macro_rules! fill_via_chunks {
    ($src:expr, $dst:expr, $ty:ty) => {{
        const SIZE: usize = core::mem::size_of::<$ty>();
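        // `chunk_size_u8` is the number of bytes to copy, capped by both the
        // source and destination lengths; `chunk_size` is the number of source
        // words consumed, i.e. `chunk_size_u8 / SIZE` rounded up.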
        let chunk_size_u8 = min($src.len() * SIZE, $dst.len());
        let chunk_size = (chunk_size_u8 + SIZE - 1) / SIZE;

        // The following can be replaced with safe code, but unfortunately it's
        // ca. 8% slower.
        if cfg!(target_endian = "little") {
            unsafe {
                core::ptr::copy_nonoverlapping(
                    $src.as_ptr() as *const u8,
                    $dst.as_mut_ptr(),
                    chunk_size_u8);
            }
        } else {
            for (&n, chunk) in $src.iter().zip($dst.chunks_mut(SIZE)) {
                let tmp = n.to_le();
                let src_ptr = &tmp as *const $ty as *const u8;
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        src_ptr,
                        chunk.as_mut_ptr(),
                        chunk.len());
                }
            }
        }

        (chunk_size, chunk_size_u8)
    }};
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a
/// block-based RNG.
///
/// The return values are `(consumed_u32, filled_u8)`.
///
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u32` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 4` rounded up.
///
/// # Example
/// (from `IsaacRng`)
///
/// ```ignore
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
///     let mut read_len = 0;
///     while read_len < dest.len() {
///         if self.index >= self.rsl.len() {
///             self.isaac();
///         }
///
///         let (consumed_u32, filled_u8) =
///             impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
///                                        &mut dest[read_len..]);
///
///         self.index += consumed_u32;
///         read_len += filled_u8;
///     }
/// }
/// ```
pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks!(src, dest, u32)
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a
/// block-based RNG.
///
/// The return values are `(consumed_u64, filled_u8)`.
///
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u64` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 8` rounded up.
///
/// See `fill_via_u32_chunks` for an example.
pub fn fill_via_u64_chunks(src: &[u64], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks!(src, dest, u64)
}

/// Implement `next_u32` via `fill_bytes`, little-endian order.
pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
    let mut buf = [0; 4];
    rng.fill_bytes(&mut buf);
    u32::from_le_bytes(buf)
}

/// Implement `next_u64` via `fill_bytes`, little-endian order.
pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    let mut buf = [0; 8];
    rng.fill_bytes(&mut buf);
    u64::from_le_bytes(buf)
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_fill_via_u32_chunks() {
        let src = [1, 2, 3];
        let mut dst = [0u8; 11];
        assert_eq!(fill_via_u32_chunks(&src, &mut dst), (3, 11));
        assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]);

        let mut dst = [0u8; 13];
        assert_eq!(fill_via_u32_chunks(&src, &mut dst), (3, 12));
        assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]);

        let mut dst = [0u8; 5];
        assert_eq!(fill_via_u32_chunks(&src, &mut dst), (2, 5));
        assert_eq!(dst, [1, 0, 0, 0, 2]);
    }

    #[test]
    fn test_fill_via_u64_chunks() {
        let src = [1, 2];
        let mut dst = [0u8; 11];
        assert_eq!(fill_via_u64_chunks(&src, &mut dst), (2, 11));
        assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]);

        let mut dst = [0u8; 17];
        assert_eq!(fill_via_u64_chunks(&src, &mut dst), (2, 16));
        assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]);

        let mut dst = [0u8; 5];
        assert_eq!(fill_via_u64_chunks(&src, &mut dst), (1, 5));
        assert_eq!(dst, [1, 0, 0, 0, 0]);
    }
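
    // The helpers above are generic over `R: RngCore + ?Sized`, so they can be
    // exercised with a trivial mock generator. `StepRng` below is an
    // illustrative, test-only counter (not part of the crate's API): each
    // `next_u64` call returns 1, 2, 3, ... in sequence. It is assumed here
    // that `RngCore` also requires `try_fill_bytes` (as in this crate
    // version); the mock simply defers it to the infallible path.
    struct StepRng(u64);

    impl crate::RngCore for StepRng {
        fn next_u32(&mut self) -> u32 {
            // Little-endian convention: the low half of the next `u64`.
            self.next_u64() as u32
        }

        fn next_u64(&mut self) -> u64 {
            self.0 = self.0.wrapping_add(1);
            self.0
        }

        fn fill_bytes(&mut self, dest: &mut [u8]) {
            fill_bytes_via_next(self, dest);
        }

        fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), crate::Error> {
            self.fill_bytes(dest);
            Ok(())
        }
    }

    #[test]
    fn test_next_u64_via_u32() {
        // The first two `next_u32` outputs are 1 and 2; the little-endian
        // composition places the first output in the least-significant half.
        let mut rng = StepRng(0);
        assert_eq!(next_u64_via_u32(&mut rng), (2u64 << 32) | 1);
    }

    #[test]
    fn test_fill_bytes_via_next() {
        // 11 bytes: one full `next_u64` chunk (value 1), then `next_u32`
        // (value 2) truncated to the remaining 3 bytes.
        let mut rng = StepRng(0);
        let mut dest = [0u8; 11];
        fill_bytes_via_next(&mut rng, &mut dest);
        assert_eq!(dest, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]);
    }

    #[test]
    fn test_next_via_fill() {
        // `StepRng::fill_bytes` delegates to `fill_bytes_via_next`, so both
        // round-trips recover the first generated value in little-endian order.
        assert_eq!(next_u32_via_fill(&mut StepRng(0)), 1);
        assert_eq!(next_u64_via_fill(&mut StepRng(0)), 1);
    }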
}