// tor-netdir/src/lib.rs
1#![cfg_attr(docsrs, feature(doc_cfg))]
2#![doc = include_str!("../README.md")]
3// @@ begin lint list maintained by maint/add_warning @@
4#![allow(renamed_and_removed_lints)] // @@REMOVE_WHEN(ci_arti_stable)
5#![allow(unknown_lints)] // @@REMOVE_WHEN(ci_arti_nightly)
6#![warn(missing_docs)]
7#![warn(noop_method_call)]
8#![warn(unreachable_pub)]
9#![warn(clippy::all)]
10#![deny(clippy::await_holding_lock)]
11#![deny(clippy::cargo_common_metadata)]
12#![deny(clippy::cast_lossless)]
13#![deny(clippy::checked_conversions)]
14#![warn(clippy::cognitive_complexity)]
15#![deny(clippy::debug_assert_with_mut_call)]
16#![deny(clippy::exhaustive_enums)]
17#![deny(clippy::exhaustive_structs)]
18#![deny(clippy::expl_impl_clone_on_copy)]
19#![deny(clippy::fallible_impl_from)]
20#![deny(clippy::implicit_clone)]
21#![deny(clippy::large_stack_arrays)]
22#![warn(clippy::manual_ok_or)]
23#![deny(clippy::missing_docs_in_private_items)]
24#![warn(clippy::needless_borrow)]
25#![warn(clippy::needless_pass_by_value)]
26#![warn(clippy::option_option)]
27#![deny(clippy::print_stderr)]
28#![deny(clippy::print_stdout)]
29#![warn(clippy::rc_buffer)]
30#![deny(clippy::ref_option_ref)]
31#![warn(clippy::semicolon_if_nothing_returned)]
32#![warn(clippy::trait_duplication_in_bounds)]
33#![deny(clippy::unchecked_time_subtraction)]
34#![deny(clippy::unnecessary_wraps)]
35#![warn(clippy::unseparated_literal_suffix)]
36#![deny(clippy::unwrap_used)]
37#![deny(clippy::mod_module_files)]
38#![allow(clippy::let_unit_value)] // This can reasonably be done for explicitness
39#![allow(clippy::uninlined_format_args)]
40#![allow(clippy::significant_drop_in_scrutinee)] // arti/-/merge_requests/588/#note_2812945
41#![allow(clippy::result_large_err)] // temporary workaround for arti#587
42#![allow(clippy::needless_raw_string_hashes)] // complained-about code is fine, often best
43#![allow(clippy::needless_lifetimes)] // See arti#1765
44#![allow(mismatched_lifetime_syntaxes)] // temporary workaround for arti#2060
45#![allow(clippy::collapsible_if)] // See arti#2342
46#![deny(clippy::unused_async)]
47//! <!-- @@ end lint list maintained by maint/add_warning @@ -->
48
49pub mod details;
50mod err;
51#[cfg(feature = "hs-common")]
52mod hsdir_params;
53#[cfg(feature = "hs-common")]
54mod hsdir_ring;
55pub mod params;
56mod weight;
57
58#[cfg(any(test, feature = "testing"))]
59pub mod testnet;
60#[cfg(feature = "testing")]
61pub mod testprovider;
62
63use async_trait::async_trait;
64#[cfg(feature = "hs-service")]
65use itertools::chain;
66use tor_error::warn_report;
67#[cfg(feature = "hs-common")]
68use tor_linkspec::OwnedCircTarget;
69use tor_linkspec::{
70    ChanTarget, DirectChanMethodsHelper, HasAddrs, HasRelayIds, RelayIdRef, RelayIdType,
71};
72use tor_llcrypto as ll;
73use tor_llcrypto::pk::{ed25519::Ed25519Identity, rsa::RsaIdentity};
74use tor_netdoc::doc::microdesc::{MdDigest, Microdesc};
75use tor_netdoc::doc::netstatus::{self, MdConsensus, MdRouterStatus};
76#[cfg(feature = "hs-common")]
77use {hsdir_ring::HsDirRing, std::iter};
78
79use derive_more::{From, Into};
80use futures::{StreamExt, stream::BoxStream};
81use num_enum::{IntoPrimitive, TryFromPrimitive};
82use rand::seq::{IndexedRandom as _, SliceRandom as _, WeightError};
83use serde::Deserialize;
84use std::collections::HashMap;
85use std::net::IpAddr;
86use std::ops::Deref;
87use std::sync::Arc;
88use std::time::SystemTime;
89use strum::{EnumCount, EnumIter};
90use tracing::warn;
91use typed_index_collections::{TiSlice, TiVec};
92
93#[cfg(feature = "hs-common")]
94use {
95    itertools::Itertools,
96    std::collections::HashSet,
97    std::result::Result as StdResult,
98    tor_error::{Bug, internal},
99    tor_hscrypto::{pk::HsBlindId, time::TimePeriod},
100    tor_linkspec::{OwnedChanTargetBuilder, verbatim::VerbatimLinkSpecCircTarget},
101    tor_llcrypto::pk::curve25519,
102};
103
104pub use err::Error;
105pub use weight::WeightRole;
106/// A Result using the Error type from the tor-netdir crate
107pub type Result<T> = std::result::Result<T, Error>;
108
109#[cfg(feature = "hs-common")]
110pub use err::{OnionDirLookupError, VerbatimCircTargetDecodeError};
111
112use params::NetParameters;
113#[cfg(feature = "geoip")]
114use tor_geoip::{CountryCode, GeoipDb, HasCountryCode};
115
116#[cfg(feature = "hs-common")]
117pub use hsdir_params::HsDirParams;
118
/// Index into the consensus relays
///
/// This is an index into the list of relays returned by
/// [`.c_relays()`](ConsensusRelays::c_relays)
/// (on the corresponding consensus or netdir).
///
/// This is just a `usize` inside, but using a newtype prevents getting a relay index
/// confused with other kinds of slice indices or counts.
///
/// If you are in a part of the code which needs to work with multiple consensuses,
/// the typechecking cannot tell if you try to index into the wrong consensus:
/// an index is only meaningful relative to the consensus it was created from.
#[derive(Debug, From, Into, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub(crate) struct RouterStatusIdx(usize);
132
/// Extension trait to provide index-type-safe `.c_relays()` method
//
// TODO: Really it would be better to have MdConsensus::relays() return TiSlice,
// but that would be an API break there.
pub(crate) trait ConsensusRelays {
    /// Obtain the list of relays in the consensus
    //
    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus>;
}
impl ConsensusRelays for MdConsensus {
    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
        // Wrap the plain slice from the consensus in a TiSlice, so that it can
        // only be indexed with `RouterStatusIdx`.
        TiSlice::from_ref(MdConsensus::relays(self))
    }
}
impl ConsensusRelays for NetDir {
    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
        // A NetDir's relay list is exactly its consensus's relay list.
        self.consensus.c_relays()
    }
}
152
/// Configuration for determining when two relays have addresses "too close" in
/// the network.
///
/// Used by `Relay::low_level_details().in_same_subnet()`.
#[derive(Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
#[serde(deny_unknown_fields)]
pub struct SubnetConfig {
    /// Consider IPv4 nodes in the same /x to be the same family.
    ///
    /// If this value is 0, all nodes with IPv4 addresses will be in the
    /// same family.  If this value is above 32, then no nodes will be
    /// placed in the same family based on their IPv4 addresses.
    subnets_family_v4: u8,
    /// Consider IPv6 nodes in the same /x to be the same family.
    ///
    /// If this value is 0, all nodes with IPv6 addresses will be in the
    /// same family.  If this value is above 128, then no nodes will be
    /// placed in the same family based on their IPv6 addresses.
    subnets_family_v6: u8,
}
173
174impl Default for SubnetConfig {
175    fn default() -> Self {
176        Self::new(16, 32)
177    }
178}
179
180impl SubnetConfig {
181    /// Construct a new SubnetConfig from a pair of bit prefix lengths.
182    ///
183    /// The values are clamped to the appropriate ranges if they are
184    /// out-of-bounds.
185    pub fn new(subnets_family_v4: u8, subnets_family_v6: u8) -> Self {
186        Self {
187            subnets_family_v4,
188            subnets_family_v6,
189        }
190    }
191
192    /// Construct a new SubnetConfig such that addresses are not in the same
193    /// family with anything--not even with themselves.
194    pub fn no_addresses_match() -> SubnetConfig {
195        SubnetConfig {
196            subnets_family_v4: 33,
197            subnets_family_v6: 129,
198        }
199    }
200
201    /// Return true if the two addresses in the same subnet, according to this
202    /// configuration.
203    pub fn addrs_in_same_subnet(&self, a: &IpAddr, b: &IpAddr) -> bool {
204        match (a, b) {
205            (IpAddr::V4(a), IpAddr::V4(b)) => {
206                let bits = self.subnets_family_v4;
207                if bits > 32 {
208                    return false;
209                }
210                let a = u32::from_be_bytes(a.octets());
211                let b = u32::from_be_bytes(b.octets());
212                (a >> (32 - bits)) == (b >> (32 - bits))
213            }
214            (IpAddr::V6(a), IpAddr::V6(b)) => {
215                let bits = self.subnets_family_v6;
216                if bits > 128 {
217                    return false;
218                }
219                let a = u128::from_be_bytes(a.octets());
220                let b = u128::from_be_bytes(b.octets());
221                (a >> (128 - bits)) == (b >> (128 - bits))
222            }
223            _ => false,
224        }
225    }
226
227    /// Return true if any of the addresses in `a` shares a subnet with any of
228    /// the addresses in `b`, according to this configuration.
229    pub fn any_addrs_in_same_subnet<T, U>(&self, a: &T, b: &U) -> bool
230    where
231        T: tor_linkspec::HasAddrs,
232        U: tor_linkspec::HasAddrs,
233    {
234        a.addrs().any(|aa| {
235            b.addrs()
236                .any(|bb| self.addrs_in_same_subnet(&aa.ip(), &bb.ip()))
237        })
238    }
239
240    /// Return a new subnet configuration that is the union of `self` and
241    /// `other`.
242    ///
243    /// That is, return a subnet configuration that puts all addresses in the
244    /// same subnet if and only if at least one of `self` and `other` would put
245    /// them in the same subnet.
246    pub fn union(&self, other: &Self) -> Self {
247        use std::cmp::min;
248        Self {
249            subnets_family_v4: min(self.subnets_family_v4, other.subnets_family_v4),
250            subnets_family_v6: min(self.subnets_family_v6, other.subnets_family_v6),
251        }
252    }
253}
254
/// Configuration for which listed family information to use when deciding
/// whether relays belong to the same family.
///
/// Derived from network parameters.
#[derive(Clone, Copy, Debug)]
pub struct FamilyRules {
    /// If true, we use family information from lists of family members.
    use_family_lists: bool,
    /// If true, we use family information from lists of family IDs and from family certs.
    use_family_ids: bool,
}
266
impl<'a> From<&'a NetParameters> for FamilyRules {
    fn from(params: &'a NetParameters) -> Self {
        // Both flags are taken directly from the corresponding consensus
        // parameters.
        FamilyRules {
            use_family_lists: bool::from(params.use_family_lists),
            use_family_ids: bool::from(params.use_family_ids),
        }
    }
}
275
276impl FamilyRules {
277    /// Return a `FamilyRules` that will use all recognized kinds of family information.
278    pub fn all_family_info() -> Self {
279        Self {
280            use_family_lists: true,
281            use_family_ids: true,
282        }
283    }
284
285    /// Return a `FamilyRules` that will ignore all family information declared by relays.
286    pub fn ignore_declared_families() -> Self {
287        Self {
288            use_family_lists: false,
289            use_family_ids: false,
290        }
291    }
292
293    /// Configure this `FamilyRules` to use (or not use) family information from
294    /// lists of family members.
295    pub fn use_family_lists(&mut self, val: bool) -> &mut Self {
296        self.use_family_lists = val;
297        self
298    }
299
300    /// Configure this `FamilyRules` to use (or not use) family information from
301    /// family IDs and family certs.
302    pub fn use_family_ids(&mut self, val: bool) -> &mut Self {
303        self.use_family_ids = val;
304        self
305    }
306
307    /// Return a `FamilyRules` that will look at every source of information
308    /// requested by `self` or by `other`.
309    pub fn union(&self, other: &Self) -> Self {
310        Self {
311            use_family_lists: self.use_family_lists || other.use_family_lists,
312            use_family_ids: self.use_family_ids || other.use_family_ids,
313        }
314    }
315}
316
/// An opaque type representing the weight with which a relay or set of
/// relays will be selected for a given role.
///
/// Most users should ignore this type, and just use pick_relay instead.
#[derive(
    Copy,
    Clone,
    Debug,
    derive_more::Add,
    derive_more::Sum,
    derive_more::AddAssign,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
)]
pub struct RelayWeight(u64);
334
335impl RelayWeight {
336    /// Try to divide this weight by `rhs`.
337    ///
338    /// Return a ratio on success, or None on division-by-zero.
339    pub fn checked_div(&self, rhs: RelayWeight) -> Option<f64> {
340        if rhs.0 == 0 {
341            None
342        } else {
343            Some((self.0 as f64) / (rhs.0 as f64))
344        }
345    }
346
347    /// Compute a ratio `frac` of this weight.
348    ///
349    /// Return None if frac is less than zero, since negative weights
350    /// are impossible.
351    pub fn ratio(&self, frac: f64) -> Option<RelayWeight> {
352        let product = (self.0 as f64) * frac;
353        if product >= 0.0 && product.is_finite() {
354            Some(RelayWeight(product as u64))
355        } else {
356            None
357        }
358    }
359}
360
361impl From<u64> for RelayWeight {
362    fn from(val: u64) -> Self {
363        RelayWeight(val)
364    }
365}
366
/// An operation for which we might be requesting a hidden service directory.
#[derive(Copy, Clone, Debug, PartialEq)]
// TODO: make this pub(crate) once NetDir::hs_dirs is removed
#[non_exhaustive]
pub enum HsDirOp {
    /// Uploading an onion service descriptor.
    ///
    /// (Only services upload descriptors, so this variant exists only with
    /// the `hs-service` feature.)
    #[cfg(feature = "hs-service")]
    Upload,
    /// Downloading an onion service descriptor.
    Download,
}
378
/// A view of the Tor directory, suitable for use in building circuits.
///
/// Abstractly, a [`NetDir`] is a set of usable public [`Relay`]s, each of which
/// has its own properties, identity, and correct weighted probability for use
/// under different circumstances.
///
/// A [`NetDir`] is constructed by making a [`PartialNetDir`] from a consensus
/// document, and then adding enough microdescriptors to that `PartialNetDir` so
/// that it can be used to build paths. (Thus, if you have a NetDir, it is
/// definitely adequate to build paths.)
///
/// # "Usable" relays
///
/// Many methods on NetDir are defined in terms of <a name="usable">"Usable"</a> relays.  Unless
/// otherwise stated, a relay is "usable" if it is listed in the consensus,
/// if we have full directory information for that relay (including a
/// microdescriptor), and if that relay does not have any flags indicating that
/// we should never use it. (Currently, `NoEdConsensus` is the only such flag.)
///
/// # Limitations
///
/// The current NetDir implementation assumes fairly strongly that every relay
/// has an Ed25519 identity and an RSA identity, that the consensus is indexed
/// by RSA identities, and that the Ed25519 identities are stored in
/// microdescriptors.
///
/// If these assumptions someday change, then we'll have to revise the
/// implementation.
#[derive(Debug, Clone)]
pub struct NetDir {
    /// A microdescriptor consensus that lists the members of the network,
    /// and maps each one to a 'microdescriptor' that has more information
    /// about it.
    consensus: Arc<MdConsensus>,
    /// A map from keys to integer values, distributed in the consensus,
    /// and clamped to certain defaults.
    params: NetParameters,
    /// Map from routerstatus index, to that routerstatus's microdescriptor (if we have one).
    mds: TiVec<RouterStatusIdx, Option<Arc<Microdesc>>>,
    /// Map from SHA256 of _missing_ microdescriptors to the index of their
    /// corresponding routerstatus.
    rsidx_by_missing: HashMap<MdDigest, RouterStatusIdx>,
    /// Map from ed25519 identity to index of the routerstatus.
    ///
    /// Note that we don't know the ed25519 identity of a relay until
    /// we get the microdescriptor for it, so this won't be filled in
    /// until we get the microdescriptors.
    ///
    /// # Implementation note
    ///
    /// For this field, and for `rsidx_by_rsa`,
    /// it might be cool to have references instead.
    /// But that would make this into a self-referential structure,
    /// which isn't possible in safe rust.
    rsidx_by_ed: HashMap<Ed25519Identity, RouterStatusIdx>,
    /// Map from RSA identity to index of the routerstatus.
    ///
    /// This is constructed at the same time as the NetDir object, so it
    /// can be immutable.
    rsidx_by_rsa: Arc<HashMap<RsaIdentity, RouterStatusIdx>>,

    /// Hash ring(s) describing the onion service directory.
    ///
    /// This is empty in a PartialNetDir, and is filled in before the NetDir is
    /// built.
    //
    // TODO hs: It is ugly to have this exist in a partially constructed state
    // in a PartialNetDir.
    // Ideally, a PartialNetDir would contain only an HsDirs<HsDirParams>,
    // or perhaps nothing at all, here.
    #[cfg(feature = "hs-common")]
    hsdir_rings: Arc<HsDirs<HsDirRing>>,

    /// Weight values to apply to a given relay when deciding how frequently
    /// to choose it for a given role.
    weights: weight::WeightSet,

    #[cfg(feature = "geoip")]
    /// Country codes for each router in our consensus.
    ///
    /// This is indexed by the `RouterStatusIdx` (i.e. a router idx of zero has
    /// the country code at position zero in this array).
    country_codes: Vec<Option<CountryCode>>,
}
463
/// Collection of hidden service directories (or parameters for them)
///
/// In [`NetDir`] this is used to store the actual hash rings.
/// (But, in a NetDir in a [`PartialNetDir`], it contains [`HsDirRing`]s
/// where only the `params` are populated, and the `ring` is empty.)
///
/// This same generic type is used as the return type from
/// [`HsDirParams::compute`](HsDirParams::compute),
/// where it contains the *parameters* for the primary and secondary rings.
#[derive(Debug, Clone)]
#[cfg(feature = "hs-common")]
pub(crate) struct HsDirs<D> {
    /// The current ring
    ///
    /// It corresponds to the time period containing the `valid-after` time in
    /// the consensus. Its SRV is whatever SRV was most current at the time when
    /// that time period began.
    ///
    /// This is the hash ring that we should use whenever we are fetching an
    /// onion service descriptor.
    current: D,

    /// Secondary rings (based on the parameters for the previous and next time periods)
    ///
    /// Onion services upload to positions on these rings as well, based on how
    /// far into the current time period this directory is, so that
    /// not-synchronized clients can still find their descriptor.
    ///
    /// Note that with the current (2023) network parameters, with
    /// `hsdir_interval = SRV lifetime = 24 hours` at most one of these
    /// secondary rings will be active at a time.  We have two here in order
    /// to conform with a more flexible regime in proposal 342.
    //
    // TODO: hs clients never need this; so I've made it not-present for them.
    // But does that risk too much with respect to side channels?
    //
    // TODO: Perhaps we should refactor this so that it is clear that these
    // are immutable?  On the other hand, the documentation for this type
    // declares that it is immutable, so we are likely okay.
    //
    // TODO: this `Vec` is only ever 0,1,2 elements.
    // Maybe it should be an ArrayVec or something.
    #[cfg(feature = "hs-service")]
    secondary: Vec<D>,
}
509
#[cfg(feature = "hs-common")]
impl<D> HsDirs<D> {
    /// Convert an `HsDirs<D>` to `HsDirs<D2>` by mapping each contained `D`
    pub(crate) fn map<D2>(self, mut f: impl FnMut(D) -> D2) -> HsDirs<D2> {
        HsDirs {
            current: f(self.current),
            #[cfg(feature = "hs-service")]
            secondary: self.secondary.into_iter().map(f).collect(),
        }
    }

    /// Iterate over some of the contained hsdirs, according to `secondary`
    ///
    /// The current ring is always included.
    /// Secondary rings are included iff `secondary` and the `hs-service` feature is enabled.
    fn iter_filter_secondary(&self, secondary: bool) -> impl Iterator<Item = &D> {
        let i = iter::once(&self.current);

        // With "hs-service" disabled, there are no secondary rings,
        // so we don't care.
        let _ = secondary;

        // When "hs-service" is enabled, shadow `i` with a chained iterator
        // that also yields the secondary rings (if `secondary` is set).
        #[cfg(feature = "hs-service")]
        let i = chain!(i, self.secondary.iter().filter(move |_| secondary));

        i
    }

    /// Iterate over all the contained hsdirs
    pub(crate) fn iter(&self) -> impl Iterator<Item = &D> {
        self.iter_filter_secondary(true)
    }

    /// Iterate over the hsdirs relevant for `op`
    ///
    /// Uploads go to the secondary rings too; downloads use only the current ring.
    pub(crate) fn iter_for_op(&self, op: HsDirOp) -> impl Iterator<Item = &D> {
        self.iter_filter_secondary(match op {
            #[cfg(feature = "hs-service")]
            HsDirOp::Upload => true,
            HsDirOp::Download => false,
        })
    }
}
552
/// An event that a [`NetDirProvider`] can broadcast to indicate a change in
/// the status of its directory.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, EnumIter, EnumCount, IntoPrimitive, TryFromPrimitive,
)]
#[non_exhaustive]
#[repr(u16)]
pub enum DirEvent {
    /// A new consensus has been received, and has enough information to be
    /// used.
    ///
    /// This event is also broadcast when a new set of consensus parameters is
    /// available, even if that set of parameters comes from a configuration
    /// change rather than from the latest consensus.
    NewConsensus,

    /// New descriptors have been received for the current consensus.
    ///
    /// (This event is _not_ broadcast when receiving new descriptors for a
    /// consensus which is not yet ready to replace the current consensus.)
    NewDescriptors,

    /// We have received updated recommendations and requirements
    /// for which subprotocols we should have to use the network.
    NewProtocolRecommendation,
}
579
/// The network directory provider is shutting down without giving us the
/// netdir we asked for.
#[derive(Clone, Copy, Debug, thiserror::Error)]
#[error("Network directory provider is shutting down")]
#[non_exhaustive]
pub struct NetdirProviderShutdown;

impl tor_error::HasKind for NetdirProviderShutdown {
    fn kind(&self) -> tor_error::ErrorKind {
        // A provider only shuts down when Arti itself is shutting down.
        tor_error::ErrorKind::ArtiShuttingDown
    }
}
592
/// How "timely" must a network directory be?
///
/// This enum is used as an argument when requesting a [`NetDir`] object from
/// [`NetDirProvider`] and other APIs, to specify how recent the information
/// must be in order to be useful.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[allow(clippy::exhaustive_enums)]
pub enum Timeliness {
    /// The network directory must be strictly timely.
    ///
    /// That is, it must be based on a consensus that is valid right now, with no
    /// tolerance for skew or consensus problems.
    ///
    /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
    Strict,
    /// The network directory must be roughly timely.
    ///
    /// That is, it must be based on a consensus that is not _too_ far in the
    /// future, and not _too_ far in the past.
    ///
    /// (The tolerances for "too far" will depend on configuration.)
    ///
    /// This is almost always the option that you want to use.
    Timely,
    /// Any network directory is permissible, regardless of how untimely.
    ///
    /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
    Unchecked,
}
622
/// An object that can provide [`NetDir`]s, as well as inform consumers when
/// they might have changed.
///
/// It is the responsibility of the implementor of `NetDirProvider`
/// to try to obtain an up-to-date `NetDir`,
/// and continuously to maintain and update it.
///
/// In usual configurations, Arti uses `tor_dirmgr::DirMgr`
/// as its `NetDirProvider`.
#[async_trait]
pub trait NetDirProvider: UpcastArcNetDirProvider + Send + Sync {
    /// Return a network directory that's live according to the provided
    /// `timeliness`.
    fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>>;

    /// Return a reasonable netdir for general usage.
    ///
    /// This is an alias for
    /// [`NetDirProvider::netdir`]`(`[`Timeliness::Timely`]`)`.
    fn timely_netdir(&self) -> Result<Arc<NetDir>> {
        self.netdir(Timeliness::Timely)
    }

    /// Return a new asynchronous stream that will receive notification
    /// whenever the consensus has changed.
    ///
    /// Multiple events may be batched up into a single item: each time
    /// this stream yields an event, all you can assume is that the event has
    /// occurred at least once.
    fn events(&self) -> BoxStream<'static, DirEvent>;

    /// Return the latest network parameters.
    ///
    /// If we have no directory, return a reasonable set of defaults.
    fn params(&self) -> Arc<dyn AsRef<NetParameters>>;

    /// Get a NetDir from this provider, waiting until one exists.
    async fn wait_for_netdir(
        &self,
        timeliness: Timeliness,
    ) -> std::result::Result<Arc<NetDir>, NetdirProviderShutdown> {
        // Fast path: a suitable netdir may already be available.
        if let Ok(nd) = self.netdir(timeliness) {
            return Ok(nd);
        }

        let mut stream = self.events();
        loop {
            // We need to retry `self.netdir()` before waiting for any stream events, to
            // avoid deadlock.
            //
            // We ignore all errors here: they can all potentially be fixed by
            // getting a fresh consensus, and they will all get warned about
            // by the NetDirProvider itself.
            if let Ok(nd) = self.netdir(timeliness) {
                return Ok(nd);
            }
            match stream.next().await {
                Some(_) => {}
                None => {
                    // The event stream closed: the provider has shut down.
                    return Err(NetdirProviderShutdown);
                }
            }
        }
    }

    /// Wait until this provider lists `target`.
    ///
    /// NOTE: This might potentially wait indefinitely, if `target` never actually
    /// becomes listed in the directory.  It will exit if the `NetDirProvider` shuts down.
    async fn wait_for_netdir_to_list(
        &self,
        target: &tor_linkspec::RelayIds,
        timeliness: Timeliness,
    ) -> std::result::Result<(), NetdirProviderShutdown> {
        let mut events = self.events();
        loop {
            // See if the desired relay is in the netdir.
            //
            // We do this before waiting for any events, to avoid race conditions.
            {
                let netdir = self.wait_for_netdir(timeliness).await?;
                if netdir.ids_listed(target) == Some(true) {
                    return Ok(());
                }
                // If we reach this point, then ids_listed returned `Some(false)`,
                // meaning "This relay is definitely not in the current directory";
                // or it returned `None`, meaning "waiting for more information
                // about this network directory".
                // In both cases, it's reasonable to just wait for another netdir
                // event and try again.
            }
            // We didn't find the relay; wait for the provider to have a new netdir
            // or more netdir information.
            if events.next().await.is_none() {
                // The event stream is closed; the provider has shut down.
                return Err(NetdirProviderShutdown);
            }
        }
    }

    /// Return the latest set of recommended and required protocols, if there is one.
    ///
    /// This may be more recent (or more available) than this provider's associated NetDir.
    fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)>;
}
728
// Blanket delegation: an `Arc<T>` is a provider by forwarding every
// required method to the inner `T`.  (The defaulted trait methods are
// inherited unchanged.)
impl<T> NetDirProvider for Arc<T>
where
    T: NetDirProvider,
{
    fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>> {
        self.deref().netdir(timeliness)
    }

    fn timely_netdir(&self) -> Result<Arc<NetDir>> {
        self.deref().timely_netdir()
    }

    fn events(&self) -> BoxStream<'static, DirEvent> {
        self.deref().events()
    }

    fn params(&self) -> Arc<dyn AsRef<NetParameters>> {
        self.deref().params()
    }

    fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)> {
        self.deref().protocol_statuses()
    }
}
753
/// Helper trait: allows any `Arc<X>` to be upcast to a `Arc<dyn
/// NetDirProvider>` if X is an implementation or supertrait of NetDirProvider.
///
/// This trait exists to work around a limitation in rust: when trait upcasting
/// coercion is stable, this will be unnecessary.
///
/// The Rust tracking issue is <https://github.com/rust-lang/rust/issues/65991>.
//
// NOTE(review): dyn-upcasting coercion has since stabilized (Rust 1.86);
// once the MSRV allows, this helper can likely be removed — confirm.
pub trait UpcastArcNetDirProvider {
    /// Return a view of this object as an `Arc<dyn NetDirProvider>`
    fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
    where
        Self: 'a;
}
767
impl<T> UpcastArcNetDirProvider for T
where
    T: NetDirProvider + Sized,
{
    fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
    where
        Self: 'a,
    {
        // `Arc<T>` coerces directly to `Arc<dyn NetDirProvider>` here,
        // since `T` implements the trait.
        self
    }
}
779
// Let a NetDir be used directly wherever a view of its network parameters
// is expected.
impl AsRef<NetParameters> for NetDir {
    fn as_ref(&self) -> &NetParameters {
        self.params()
    }
}
785
/// A partially built NetDir -- it can't be unwrapped until it has
/// enough information to build safe paths.
#[derive(Debug, Clone)]
pub struct PartialNetDir {
    /// The netdir that's under construction.
    netdir: NetDir,

    /// The previous netdir, if we had one
    ///
    /// Used as a cache, so we can reuse information
    #[cfg(feature = "hs-common")]
    prev_netdir: Option<Arc<NetDir>>,
}
799
/// A view of a relay on the Tor network, suitable for building circuits.
// TODO: This should probably be a more specific struct, with a trait
// that implements it.
#[derive(Clone)]
pub struct Relay<'a> {
    /// The consensus router status entry for this relay.
    rs: &'a netstatus::MdRouterStatus,
    /// A microdescriptor for this relay.
    md: &'a Microdesc,
    /// The country code this relay is in, if we know one.
    #[cfg(feature = "geoip")]
    cc: Option<CountryCode>,
}
813
/// A relay that we haven't checked for validity or usability in
/// routing.
#[derive(Debug)]
pub struct UncheckedRelay<'a> {
    /// The consensus router status entry for this relay.
    rs: &'a netstatus::MdRouterStatus,
    /// A microdescriptor for this relay, if there is one.
    md: Option<&'a Microdesc>,
    /// The country code this relay is in, if we know one.
    #[cfg(feature = "geoip")]
    cc: Option<CountryCode>,
}
826
/// A partial or full network directory that we can download
/// microdescriptors for.
pub trait MdReceiver {
    /// Return an iterator over the digests for all of the microdescriptors
    /// that this netdir is missing.
    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_>;
    /// Add a microdescriptor to this netdir, if it was wanted.
    ///
    /// Return true if it was indeed wanted.
    fn add_microdesc(&mut self, md: Microdesc) -> bool;
    /// Return the number of missing microdescriptors.
    fn n_missing(&self) -> usize;
}
840
impl PartialNetDir {
    /// Create a new PartialNetDir with a given consensus, and no
    /// microdescriptors loaded.
    ///
    /// If `replacement_params` is provided, override network parameters from
    /// the consensus with those from `replacement_params`.
    pub fn new(
        consensus: MdConsensus,
        replacement_params: Option<&netstatus::NetParams<i32>>,
    ) -> Self {
        Self::new_inner(
            consensus,
            replacement_params,
            #[cfg(feature = "geoip")]
            None,
        )
    }

    /// Create a new PartialNetDir with GeoIP support.
    ///
    /// This does the same thing as `new()`, except the provided GeoIP database is used to add
    /// country codes to relays.
    #[cfg(feature = "geoip")]
    pub fn new_with_geoip(
        consensus: MdConsensus,
        replacement_params: Option<&netstatus::NetParams<i32>>,
        geoip_db: &GeoipDb,
    ) -> Self {
        Self::new_inner(consensus, replacement_params, Some(geoip_db))
    }

    /// Implementation of the `new()` functions.
    fn new_inner(
        consensus: MdConsensus,
        replacement_params: Option<&netstatus::NetParams<i32>>,
        #[cfg(feature = "geoip")] geoip_db: Option<&GeoipDb>,
    ) -> Self {
        let mut params = NetParameters::default();

        // (We ignore unrecognized options here, since they come from
        // the consensus, and we don't expect to recognize everything
        // there.)
        let _ = params.saturating_update(consensus.params().iter());

        // Now see if the user has any parameters to override.
        // (We have to do this now, or else changes won't be reflected in our
        // weights.)
        if let Some(replacement) = replacement_params {
            for u in params.saturating_update(replacement.iter()) {
                warn!("Unrecognized option: override_net_params.{}", u);
            }
        }

        // Compute the weights we'll want to use for these relays.
        let weights = weight::WeightSet::from_consensus(&consensus, &params);

        let n_relays = consensus.c_relays().len();

        // Index from missing-microdescriptor digest to consensus position.
        let rsidx_by_missing = consensus
            .c_relays()
            .iter_enumerated()
            .map(|(rsidx, rs)| (*rs.md_digest(), rsidx))
            .collect();

        // Index from RSA identity to consensus position.
        let rsidx_by_rsa = consensus
            .c_relays()
            .iter_enumerated()
            .map(|(rsidx, rs)| (*rs.rsa_identity(), rsidx))
            .collect();

        // Per-relay country codes, looked up from all the relay's addresses.
        #[cfg(feature = "geoip")]
        let country_codes = if let Some(db) = geoip_db {
            consensus
                .c_relays()
                .iter()
                .map(|rs| {
                    db.lookup_country_code_multi(rs.addrs().map(|x| x.ip()))
                        .cloned()
                })
                .collect()
        } else {
            Default::default()
        };

        // Empty hash rings; filled in by `compute_rings()` once we have
        // enough microdescriptors.
        #[cfg(feature = "hs-common")]
        let hsdir_rings = Arc::new({
            let params = HsDirParams::compute(&consensus, &params).expect("Invalid consensus!");
            // TODO: It's a bit ugly to use expect above, but this function does
            // not return a Result. On the other hand, the error conditions under which
            // HsDirParams::compute can return Err are _very_ narrow and hard to
            // hit; see documentation in that function.  As such, we probably
            // don't need to have this return a Result.

            params.map(HsDirRing::empty_from_params)
        });

        let netdir = NetDir {
            consensus: Arc::new(consensus),
            params,
            mds: vec![None; n_relays].into(),
            rsidx_by_missing,
            rsidx_by_rsa: Arc::new(rsidx_by_rsa),
            rsidx_by_ed: HashMap::with_capacity(n_relays),
            #[cfg(feature = "hs-common")]
            hsdir_rings,
            weights,
            #[cfg(feature = "geoip")]
            country_codes,
        };

        PartialNetDir {
            netdir,
            #[cfg(feature = "hs-common")]
            prev_netdir: None,
        }
    }

    /// Return the declared lifetime of this PartialNetDir.
    pub fn lifetime(&self) -> &netstatus::Lifetime {
        self.netdir.lifetime()
    }

    /// Record a previous netdir, which can be used for reusing cached information
    //
    // Fills in as many missing microdescriptors as possible in this
    // netdir, using the microdescriptors from the previous netdir.
    //
    // With HS enabled, stores the netdir for reuse of relay hash ring index values.
    #[allow(clippy::needless_pass_by_value)] // prev might, or might not, be stored
    pub fn fill_from_previous_netdir(&mut self, prev: Arc<NetDir>) {
        for md in prev.mds.iter().flatten() {
            self.netdir.add_arc_microdesc(md.clone());
        }

        #[cfg(feature = "hs-common")]
        {
            self.prev_netdir = Some(prev);
        }
    }

    /// Compute the hash ring(s) for this NetDir
    #[cfg(feature = "hs-common")]
    fn compute_rings(&mut self) {
        let params = HsDirParams::compute(&self.netdir.consensus, &self.netdir.params)
            .expect("Invalid consensus");
        // TODO: see TODO by similar expect in new()

        self.netdir.hsdir_rings =
            Arc::new(params.map(|params| {
                HsDirRing::compute(params, &self.netdir, self.prev_netdir.as_deref())
            }));
    }

    /// Return true if there is enough information in this directory
    /// to build multihop paths.
    pub fn have_enough_paths(&self) -> bool {
        self.netdir.have_enough_paths()
    }
    /// If this directory has enough information to build multihop
    /// circuits, return it.
    pub fn unwrap_if_sufficient(
        #[allow(unused_mut)] mut self,
    ) -> std::result::Result<NetDir, PartialNetDir> {
        if self.netdir.have_enough_paths() {
            // The rings are computed here, just before unwrapping, so that
            // they see all the microdescriptors we have received.
            #[cfg(feature = "hs-common")]
            self.compute_rings();
            Ok(self.netdir)
        } else {
            Err(self)
        }
    }
}
1013
impl MdReceiver for PartialNetDir {
    // All three methods delegate to the NetDir under construction.
    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
        self.netdir.missing_microdescs()
    }
    fn add_microdesc(&mut self, md: Microdesc) -> bool {
        self.netdir.add_microdesc(md)
    }
    fn n_missing(&self) -> usize {
        self.netdir.n_missing()
    }
}
1025
1026impl NetDir {
    /// Return the declared lifetime of this NetDir.
    ///
    /// (This is the lifetime declared in the underlying consensus.)
    pub fn lifetime(&self) -> &netstatus::Lifetime {
        self.consensus.lifetime()
    }
1031
1032    /// Add `md` to this NetDir.
1033    ///
1034    /// Return true if we wanted it, and false otherwise.
1035    fn add_arc_microdesc(&mut self, md: Arc<Microdesc>) -> bool {
1036        if let Some(rsidx) = self.rsidx_by_missing.remove(md.digest()) {
1037            assert_eq!(self.c_relays()[rsidx].md_digest(), md.digest());
1038
1039            // There should never be two approved MDs in the same
1040            // consensus listing the same ID... but if there is,
1041            // we'll let the most recent one win.
1042            self.rsidx_by_ed.insert(*md.ed25519_id(), rsidx);
1043
1044            // Happy path: we did indeed want this one.
1045            self.mds[rsidx] = Some(md);
1046
1047            // Save some space in the missing-descriptor list.
1048            if self.rsidx_by_missing.len() < self.rsidx_by_missing.capacity() / 4 {
1049                self.rsidx_by_missing.shrink_to_fit();
1050            }
1051
1052            return true;
1053        }
1054
1055        // Either we already had it, or we never wanted it at all.
1056        false
1057    }
1058
    /// Construct a (possibly invalid) Relay object from a routerstatus and its
    /// index within the consensus.
    ///
    /// The caller must pass the index that actually corresponds to `rs`;
    /// debug builds verify this with `debug_assert`s.
    fn relay_from_rs_and_rsidx<'a>(
        &'a self,
        rs: &'a netstatus::MdRouterStatus,
        rsidx: RouterStatusIdx,
    ) -> UncheckedRelay<'a> {
        debug_assert_eq!(self.c_relays()[rsidx].rsa_identity(), rs.rsa_identity());
        // The microdescriptor may be absent; that is what makes the result
        // "unchecked".
        let md = self.mds[rsidx].as_deref();
        if let Some(md) = md {
            debug_assert_eq!(rs.md_digest(), md.digest());
        }

        UncheckedRelay {
            rs,
            md,
            #[cfg(feature = "geoip")]
            cc: self.country_codes.get(rsidx.0).copied().flatten(),
        }
    }
1079
1080    /// Return the value of the hsdir_n_replicas param.
1081    #[cfg(feature = "hs-common")]
1082    fn n_replicas(&self) -> u8 {
1083        self.params
1084            .hsdir_n_replicas
1085            .get()
1086            .try_into()
1087            .expect("BoundedInt did not enforce bounds")
1088    }
1089
1090    /// Return the spread parameter for the specified `op`.
1091    #[cfg(feature = "hs-common")]
1092    fn spread(&self, op: HsDirOp) -> usize {
1093        let spread = match op {
1094            HsDirOp::Download => self.params.hsdir_spread_fetch,
1095            #[cfg(feature = "hs-service")]
1096            HsDirOp::Upload => self.params.hsdir_spread_store,
1097        };
1098
1099        spread
1100            .get()
1101            .try_into()
1102            .expect("BoundedInt did not enforce bounds!")
1103    }
1104
    /// Select `spread` hsdir relays for the specified `hsid` from a given `ring`.
    ///
    /// Algorithm:
    ///
    /// for idx in 1..=n_replicas:
    ///       - let H = hsdir_ring::onion_service_index(id, replica, rand,
    ///         period).
    ///       - Find the position of H within hsdir_ring.
    ///       - Take elements from hsdir_ring starting at that position,
    ///         adding them to Dirs until we have added `spread` new elements
    ///         that were not there before.
    #[cfg(feature = "hs-common")]
    fn select_hsdirs<'h, 'r: 'h>(
        &'r self,
        hsid: HsBlindId,
        ring: &'h HsDirRing,
        spread: usize,
    ) -> impl Iterator<Item = Relay<'r>> + 'h {
        let n_replicas = self.n_replicas();

        (1..=n_replicas) // 1-indexed !
            .flat_map({
                // Ring positions chosen so far.  The set is captured by the
                // closure and so persists across replicas, which is what lets
                // later replicas skip earlier replicas' picks.
                let mut selected_nodes = HashSet::new();

                move |replica: u8| {
                    let hsdir_idx = hsdir_ring::service_hsdir_index(&hsid, replica, ring.params());

                    ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
                        // According to rend-spec 2.2.3:
                        //                                                  ... If any of those
                        // nodes have already been selected for a lower-numbered replica of the
                        // service, any nodes already chosen are disregarded (i.e. skipped over)
                        // when choosing a replica's hsdir_spread_store nodes.
                        selected_nodes.insert(*hsdir_idx)
                    })
                    .collect::<Vec<_>>()
                }
            })
            .filter_map(move |(_hsdir_idx, rs_idx)| {
                // This ought not to be None but let's not panic or bail if it is
                self.relay_by_rs_idx(*rs_idx)
            })
    }
1148
1149    /// Replace the overridden parameters in this netdir with `new_replacement`.
1150    ///
1151    /// After this function is done, the netdir's parameters will be those in
1152    /// the consensus, overridden by settings from `new_replacement`.  Any
1153    /// settings in the old replacement parameters will be discarded.
1154    pub fn replace_overridden_parameters(&mut self, new_replacement: &netstatus::NetParams<i32>) {
1155        // TODO(nickm): This is largely duplicate code from PartialNetDir::new().
1156        let mut new_params = NetParameters::default();
1157        let _ = new_params.saturating_update(self.consensus.params().iter());
1158        for u in new_params.saturating_update(new_replacement.iter()) {
1159            warn!("Unrecognized option: override_net_params.{}", u);
1160        }
1161
1162        self.params = new_params;
1163    }
1164
    /// Return an iterator over all Relay objects, including invalid ones
    /// that we can't use.
    pub fn all_relays(&self) -> impl Iterator<Item = UncheckedRelay<'_>> {
        // TODO: I'd like if we could memoize this so we don't have to
        // do so many hashtable lookups.
        self.c_relays()
            .iter_enumerated()
            .map(move |(rsidx, rs)| self.relay_from_rs_and_rsidx(rs, rsidx))
    }
1174    /// Return an iterator over all [usable](NetDir#usable) Relays.
1175    pub fn relays(&self) -> impl Iterator<Item = Relay<'_>> {
1176        self.all_relays().filter_map(UncheckedRelay::into_relay)
1177    }
1178
1179    /// Look up a relay's [`Microdesc`] by its [`RouterStatusIdx`]
1180    #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1181    pub(crate) fn md_by_rsidx(&self, rsidx: RouterStatusIdx) -> Option<&Microdesc> {
1182        self.mds.get(rsidx)?.as_deref()
1183    }
1184
    /// Return a relay matching a given identity, if we have a
    /// _usable_ relay with that key.
    ///
    /// (Does not return [unusable](NetDir#usable) relays.)
    ///
    /// Note that a `None` answer is not always permanent: if a microdescriptor
    /// is subsequently added for a relay with this ID, the ID may become usable
    /// even if it was not usable before.
    pub fn by_id<'a, T>(&self, id: T) -> Option<Relay<'_>>
    where
        T: Into<RelayIdRef<'a>>,
    {
        let id = id.into();
        let answer = match id {
            // Fast path: we maintain an index from Ed25519 identities to
            // consensus positions.
            RelayIdRef::Ed25519(ed25519) => {
                let rsidx = *self.rsidx_by_ed.get(ed25519)?;
                let rs = self.c_relays().get(rsidx).expect("Corrupt index");

                self.relay_from_rs_and_rsidx(rs, rsidx).into_relay()?
            }
            // Fast path: likewise for RSA identities.
            RelayIdRef::Rsa(rsa) => self
                .by_rsa_id_unchecked(rsa)
                .and_then(UncheckedRelay::into_relay)?,
            // Slow path: scan every usable relay for a matching identity.
            other_type => self.relays().find(|r| r.has_identity(other_type))?,
        };
        assert!(answer.has_identity(id));
        Some(answer)
    }
1214
    /// Obtain a `Relay` given a `RouterStatusIdx`
    ///
    /// Differs from `relay_from_rs_and_rsidx` as follows:
    ///  * That function expects the caller to already have an `MdRouterStatus`;
    ///    it checks with `debug_assert` that the relay in the netdir matches.
    ///  * That function panics if the `RouterStatusIdx` is invalid; this one returns `None`.
    ///  * That function returns an `UncheckedRelay`; this one a `Relay`.
    ///
    /// `None` could be returned here, even with a valid `rsi`,
    /// if `rsi` refers to an [unusable](NetDir#usable) relay.
    #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
    pub(crate) fn relay_by_rs_idx(&self, rs_idx: RouterStatusIdx) -> Option<Relay<'_>> {
        let rs = self.c_relays().get(rs_idx)?;
        let md = self.mds.get(rs_idx)?.as_deref();
        UncheckedRelay {
            rs,
            md,
            #[cfg(feature = "geoip")]
            cc: self.country_codes.get(rs_idx.0).copied().flatten(),
        }
        .into_relay()
    }
1237
1238    /// Return a relay with the same identities as those in `target`, if one
1239    /// exists.
1240    ///
1241    /// Does not return [unusable](NetDir#usable) relays.
1242    ///
1243    /// Note that a negative result from this method is not necessarily permanent:
1244    /// it may be the case that a relay exists,
1245    /// but we don't yet have enough information about it to know all of its IDs.
1246    /// To test whether a relay is *definitely* absent,
1247    /// use [`by_ids_detailed`](Self::by_ids_detailed)
1248    /// or [`ids_listed`](Self::ids_listed).
1249    ///
1250    /// # Limitations
1251    ///
1252    /// This will be very slow if `target` does not have an Ed25519 or RSA
1253    /// identity.
1254    pub fn by_ids<T>(&self, target: &T) -> Option<Relay<'_>>
1255    where
1256        T: HasRelayIds + ?Sized,
1257    {
1258        let mut identities = target.identities();
1259        // Don't try if there are no identities.
1260        let first_id = identities.next()?;
1261
1262        // Since there is at most one relay with each given ID type,
1263        // we only need to check the first relay we find.
1264        let candidate = self.by_id(first_id)?;
1265        if identities.all(|wanted_id| candidate.has_identity(wanted_id)) {
1266            Some(candidate)
1267        } else {
1268            None
1269        }
1270    }
1271
    /// Check whether there is a relay that has at least one identity from
    /// `target`, and which _could_ have every identity from `target`.
    /// If so, return such a relay.
    ///
    /// Return `Ok(None)` if we did not find a relay with any identity from `target`.
    ///
    /// Return `RelayLookupError::Impossible` if we found a relay with at least
    /// one identity from `target`, but that relay's other identities contradict
    /// what we learned from `target`.
    ///
    /// Does not return [unusable](NetDir#usable) relays.
    ///
    /// (This function is only useful if you need to distinguish the
    /// "impossible" case from the "no such relay known" case.)
    ///
    /// # Limitations
    ///
    /// This will be very slow if `target` does not have an Ed25519 or RSA
    /// identity.
    //
    // TODO HS: This function could use a better name.
    //
    // TODO: We could remove the feature restriction here once we think this API is
    // stable.
    #[cfg(feature = "hs-common")]
    pub fn by_ids_detailed<T>(
        &self,
        target: &T,
    ) -> std::result::Result<Option<Relay<'_>>, RelayLookupError>
    where
        T: HasRelayIds + ?Sized,
    {
        let candidate = target
            .identities()
            // Find all the relays that share any identity with this set of identities.
            .filter_map(|id| self.by_id(id))
            // We might find the same relay more than once under a different
            // identity, so we remove the duplicates.
            //
            // Since there is at most one relay per rsa identity per consensus,
            // this is a true uniqueness check under current construction rules.
            .unique_by(|r| r.rs.rsa_identity())
            // If we find two or more distinct relays, then have a contradiction.
            .at_most_one()
            .map_err(|_| RelayLookupError::Impossible)?;

        // If we have no candidate, return None early.
        let candidate = match candidate {
            Some(relay) => relay,
            None => return Ok(None),
        };

        // Now we know we have a single candidate.  Make sure that it does not have any
        // identity that does not match the target.
        //
        // (A missing identity on the candidate is not a contradiction: the
        // candidate merely _could_ have that identity.)
        if target
            .identities()
            .all(|wanted_id| match candidate.identity(wanted_id.id_type()) {
                None => true,
                Some(id) => id == wanted_id,
            })
        {
            Ok(Some(candidate))
        } else {
            Err(RelayLookupError::Impossible)
        }
    }
1338
1339    /// Return a boolean if this consensus definitely has (or does not have) a
1340    /// relay matching the listed identities.
1341    ///
1342    /// `Some(true)` indicates that the relay exists.
1343    /// `Some(false)` indicates that the relay definitely does not exist.
1344    /// `None` indicates that we can't yet tell whether such a relay exists,
1345    ///  due to missing information.
1346    fn id_pair_listed(&self, ed_id: &Ed25519Identity, rsa_id: &RsaIdentity) -> Option<bool> {
1347        let r = self.by_rsa_id_unchecked(rsa_id);
1348        match r {
1349            Some(unchecked) => {
1350                if !unchecked.rs.ed25519_id_is_usable() {
1351                    return Some(false);
1352                }
1353                // If md is present, then it's listed iff we have the right
1354                // ed id.  Otherwise we don't know if it's listed.
1355                unchecked.md.map(|md| md.ed25519_id() == ed_id)
1356            }
1357            None => {
1358                // Definitely not listed.
1359                Some(false)
1360            }
1361        }
1362    }
1363
1364    /// Check whether a relay exists (or may exist)
1365    /// with the same identities as those in `target`.
1366    ///
1367    /// `Some(true)` indicates that the relay exists.
1368    /// `Some(false)` indicates that the relay definitely does not exist.
1369    /// `None` indicates that we can't yet tell whether such a relay exists,
1370    ///  due to missing information.
1371    pub fn ids_listed<T>(&self, target: &T) -> Option<bool>
1372    where
1373        T: HasRelayIds + ?Sized,
1374    {
1375        let rsa_id = target.rsa_identity();
1376        let ed25519_id = target.ed_identity();
1377
1378        // TODO: If we later support more identity key types, this will
1379        // become incorrect.  This assertion might help us recognize that case.
1380        const _: () = assert!(RelayIdType::COUNT == 2);
1381
1382        match (rsa_id, ed25519_id) {
1383            (Some(r), Some(e)) => self.id_pair_listed(e, r),
1384            (Some(r), None) => Some(self.rsa_id_is_listed(r)),
1385            (None, Some(e)) => {
1386                if self.rsidx_by_ed.contains_key(e) {
1387                    Some(true)
1388                } else {
1389                    None
1390                }
1391            }
1392            (None, None) => None,
1393        }
1394    }
1395
    /// Return a (possibly [unusable](NetDir#usable)) relay with a given RSA identity.
    ///
    /// This API can be used to find information about a relay that is listed in
    /// the current consensus, even if we don't yet have enough information
    /// (like a microdescriptor) about the relay to use it.
    #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
    #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
    fn by_rsa_id_unchecked(&self, rsa_id: &RsaIdentity) -> Option<UncheckedRelay<'_>> {
        let rsidx = *self.rsidx_by_rsa.get(rsa_id)?;
        // The index maps into the consensus relay list, so a lookup failure
        // here would mean the index is corrupt.
        let rs = self.c_relays().get(rsidx).expect("Corrupt index");
        assert_eq!(rs.rsa_identity(), rsa_id);
        Some(self.relay_from_rs_and_rsidx(rs, rsidx))
    }
1409    /// Return the relay with a given RSA identity, if we have one
1410    /// and it is [usable](NetDir#usable).
1411    fn by_rsa_id(&self, rsa_id: &RsaIdentity) -> Option<Relay<'_>> {
1412        self.by_rsa_id_unchecked(rsa_id)?.into_relay()
1413    }
    /// Return true if `rsa_id` is listed in this directory, even if it isn't
    /// currently usable.
    ///
    /// (An "[unusable](NetDir#usable)" relay in this context is one for which we don't have full
    /// directory information.)
    #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
    #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
    fn rsa_id_is_listed(&self, rsa_id: &RsaIdentity) -> bool {
        self.by_rsa_id_unchecked(rsa_id).is_some()
    }
1424
1425    /// List the hsdirs in this NetDir, that should be in the HSDir rings
1426    ///
1427    /// The results are not returned in any particular order.
1428    #[cfg(feature = "hs-common")]
1429    fn all_hsdirs(&self) -> impl Iterator<Item = (RouterStatusIdx, Relay<'_>)> {
1430        self.c_relays().iter_enumerated().filter_map(|(rsidx, rs)| {
1431            let relay = self.relay_from_rs_and_rsidx(rs, rsidx);
1432            relay.is_hsdir_for_ring().then_some(())?;
1433            let relay = relay.into_relay()?;
1434            Some((rsidx, relay))
1435        })
1436    }
1437
    /// Return the parameters from the consensus, clamped to the
    /// correct ranges, with defaults filled in.
    ///
    /// NOTE: unsupported parameters aren't returned here; only those
    /// values configured in the `params` module are available.
    pub fn params(&self) -> &NetParameters {
        &self.params
    }
1446
    /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
    /// network's current requirements and recommendations for the list of
    /// protocols that every relay must implement.
    //
    // TODO HS: I am not sure this is the right API; other alternatives would be:
    //    * To expose the _required_ relay protocol list instead (since that's all that
    //      onion service implementations need).
    //    * To expose the client protocol list as well (for symmetry).
    //    * To expose the MdConsensus instead (since that's more general, although
    //      it restricts the future evolution of this API).
    //
    // I think that this is a reasonably good compromise for now, but I'm going
    // to put it behind the `hs-common` feature to give us time to consider more.
    #[cfg(feature = "hs-common")]
    pub fn relay_protocol_status(&self) -> &netstatus::ProtoStatus {
        self.consensus.relay_protocol_status()
    }
1464
    /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
    /// network's current requirements and recommendations for the list of
    /// protocols that every client must implement.
    ///
    /// (The previous doc said "relay" here, but this method delegates to the
    /// consensus's *client* protocol status.)
    //
    // TODO HS: See notes on relay_protocol_status above.
    #[cfg(feature = "hs-common")]
    pub fn client_protocol_status(&self) -> &netstatus::ProtoStatus {
        self.consensus.client_protocol_status()
    }
1474
    /// Construct a `CircTarget` from an externally provided list of link specifiers,
    /// and an externally provided onion key.
    ///
    /// This method is used in the onion service protocol,
    /// where introduction points and rendezvous points are specified using these inputs.
    ///
    /// This function is a member of `NetDir` so that it can provide a reasonable list of
    /// [`Protocols`](tor_protover::Protocols) capabilities for the generated `CircTarget`.
    /// It does not (and should not!) look up anything else from the directory.
    #[cfg(feature = "hs-common")]
    pub fn circ_target_from_verbatim_linkspecs(
        &self,
        linkspecs: &[tor_linkspec::EncodedLinkSpec],
        ntor_onion_key: &curve25519::PublicKey,
    ) -> StdResult<VerbatimLinkSpecCircTarget<OwnedCircTarget>, VerbatimCircTargetDecodeError> {
        use VerbatimCircTargetDecodeError as E;
        use tor_linkspec::CircTarget as _;
        use tor_linkspec::decode::Strictness;

        let mut bld = OwnedCircTarget::builder();
        use tor_error::into_internal;

        // Decode the link specifiers into a channel target; a decode failure
        // propagates via `?` (converted by `From` into the error type).
        *bld.chan_target() =
            OwnedChanTargetBuilder::from_encoded_linkspecs(Strictness::Standard, linkspecs)?;
        // Decide which protocol capabilities to advertise: if the directory
        // lists a relay with these identities, use its protocols; otherwise
        // fall back to the network-wide required relay protocols.
        let protocols = {
            let chan_target = bld.chan_target().build().map_err(into_internal!(
                "from_encoded_linkspecs gave an invalid output"
            ))?;
            match self
                .by_ids_detailed(&chan_target)
                .map_err(E::ImpossibleIds)?
            {
                Some(relay) => relay.protovers().clone(),
                None => self.relay_protocol_status().required_protocols().clone(),
            }
        };
        bld.protocols(protocols);
        bld.ntor_onion_key(*ntor_onion_key);
        // Keep the original linkspecs verbatim alongside the built target.
        Ok(VerbatimLinkSpecCircTarget::new(
            bld.build()
                .map_err(into_internal!("Failed to construct a valid circtarget"))?,
            linkspecs.to_vec(),
        ))
    }
1519
    /// Return the weighted fraction of relays we can use.  We only
    /// consider relays that match the predicate `usable`.  We weight
    /// this bandwidth according to the provided `role`.
    ///
    /// If _no_ matching relays in the consensus have a nonzero
    /// weighted bandwidth value, we fall back to looking at the
    /// unweighted fraction of matching relays.
    ///
    /// If there are no matching relays in the consensus, we return 0.0.
    fn frac_for_role<'a, F>(&'a self, role: WeightRole, usable: F) -> f64
    where
        F: Fn(&UncheckedRelay<'a>) -> bool,
    {
        let mut total_weight = 0_u64;
        let mut have_weight = 0_u64;
        let mut have_count = 0_usize;
        let mut total_count = 0_usize;

        // Tally every matching relay; relays that are additionally usable
        // (i.e. have their microdescriptor) count toward the "have" totals.
        for r in self.all_relays() {
            if !usable(&r) {
                continue;
            }
            let w = self.weights.weight_rs_for_role(r.rs, role);
            total_weight += w;
            total_count += 1;
            if r.is_usable() {
                have_weight += w;
                have_count += 1;
            }
        }

        if total_weight > 0 {
            // The consensus lists some weighted bandwidth so return the
            // fraction of the weighted bandwidth for which we have
            // descriptors.
            (have_weight as f64) / (total_weight as f64)
        } else if total_count > 0 {
            // The consensus lists no weighted bandwidth for these relays,
            // but at least it does list relays. Return the fraction of
            // relays for which we have descriptors.
            (have_count as f64) / (total_count as f64)
        } else {
            // There are no relays of this kind in the consensus.  Return
            // 0.0, to avoid dividing by zero and giving NaN.
            0.0
        }
    }
    /// Return the estimated fraction of possible paths that we have
    /// enough microdescriptors to build.
    ///
    /// (A three-hop path needs a guard, a middle, and an exit, so the
    /// estimate is the product of the per-role fractions.)
    fn frac_usable_paths(&self) -> f64 {
        // TODO #504, TODO SPEC: We may want to add a set of is_flagged_fast() and/or
        // is_flagged_stable() checks here.  This will require spec clarification.
        let f_g = self.frac_for_role(WeightRole::Guard, |u| {
            u.low_level_details().is_suitable_as_guard()
        });
        let f_m = self.frac_for_role(WeightRole::Middle, |_| true);
        let f_e = if self.all_relays().any(|u| u.rs.is_flagged_exit()) {
            self.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit())
        } else {
            // If there are no exits at all, we use f_m here.
            f_m
        };
        f_g * f_m * f_e
    }
1584    /// Return true if there is enough information in this NetDir to build
1585    /// multihop circuits.
1586    fn have_enough_paths(&self) -> bool {
1587        // TODO-A001: This should check for our guards as well, and
1588        // make sure that if they're listed in the consensus, we have
1589        // the descriptors for them.
1590
1591        // If we can build a randomly chosen path with at least this
1592        // probability, we know enough information to participate
1593        // on the network.
1594
1595        let min_frac_paths: f64 = self.params().min_circuit_path_threshold.as_fraction();
1596
1597        // What fraction of paths can we build?
1598        let available = self.frac_usable_paths();
1599
1600        available >= min_frac_paths
1601    }
    /// Choose a relay at random.
    ///
    /// Each relay is chosen with probability proportional to its weight
    /// in the role `role`, and is only selected if the predicate `usable`
    /// returns true for it.
    ///
    /// This function returns None if (and only if) there are no relays
    /// with nonzero weight where `usable` returned true.
    //
    // TODO this API, with the `usable` closure, invites mistakes where we fail to
    // check conditions that are implied by the role we have selected for the relay:
    // call sites must include a call to `Relay::is_polarity_inverter()` or whatever.
    // IMO the `WeightRole` ought to imply a condition (and it should therefore probably
    // be renamed.)  -Diziet
    pub fn pick_relay<'a, R, P>(
        &'a self,
        rng: &mut R,
        role: WeightRole,
        usable: P,
    ) -> Option<Relay<'a>>
    where
        R: rand::Rng,
        P: FnMut(&Relay<'a>) -> bool,
    {
        // Collect every usable candidate up front; weighting happens below.
        let relays: Vec<_> = self.relays().filter(usable).collect();
        // This algorithm uses rand::distr::WeightedIndex, and gives
        // O(n) time and space to build the index, plus O(log n)
        // sampling time.
        //
        // We might be better off building a WeightedIndex in advance
        // for each `role`, and then sampling it repeatedly until we
        // get a relay that satisfies `usable`.  Or we might not --
        // that depends heavily on the actual particulars of our
        // inputs.  We probably shouldn't make any changes there
        // unless profiling tells us that this function is in a hot
        // path.
        //
        // The C Tor sampling implementation goes through some trouble
        // here to try to make its path selection constant-time.  I
        // believe that there is no actual remotely exploitable
        // side-channel here however.  It could be worth analyzing in
        // the future.
        //
        // This code will give the wrong result if the total of all weights
        // can exceed u64::MAX.  We make sure that can't happen when we
        // set up `self.weights`.
        tracing::trace!("picking from {} relays for role {:?}", relays.len(), role);
        match relays[..].choose_weighted(rng, |r| {
            let weight = self.weights.weight_rs_for_role(r.rs, role);
            tracing::trace!("relay:{id:?} role:{role:?} weight:{weight}", id = r.id());
            weight
        }) {
            Ok(relay) => Some(relay.clone()),
            Err(WeightError::InsufficientNonZero) => {
                // Every candidate had zero weight (or there were no candidates).
                if relays.is_empty() {
                    None
                } else {
                    warn!(?self.weights, ?role,
                          "After filtering, all {} relays had zero weight. Choosing one at random. See bug #1907.",
                          relays.len());
                    relays.choose(rng).cloned()
                }
            }
            Err(e) => {
                // Any other sampling error is unexpected; log and give up.
                warn_report!(
                    e,
                    "Unexpected error while choosing from {} relays for role {:?}",
                    relays.len(),
                    role
                );
                None
            }
        }
    }
1676
    /// Choose `n` relays at random.
    ///
    /// Each relay is chosen with probability proportional to its weight
    /// in the role `role`, and is only selected if the predicate `usable`
    /// returns true for it.
    ///
    /// Relays are chosen without replacement: no relay will be
    /// returned twice. Therefore, the resulting vector may be smaller
    /// than `n` if we happen to have fewer than `n` appropriate relays.
    ///
    /// This function returns an empty vector if (and only if) there
    /// are no relays with nonzero weight where `usable` returned
    /// true.
    #[allow(clippy::cognitive_complexity)] // all due to tracing crate.
    pub fn pick_n_relays<'a, R, P>(
        &'a self,
        rng: &mut R,
        n: usize,
        role: WeightRole,
        usable: P,
    ) -> Vec<Relay<'a>>
    where
        R: rand::Rng,
        P: FnMut(&Relay<'a>) -> bool,
    {
        // Collect every usable candidate up front; weighting happens below.
        let relays: Vec<_> = self.relays().filter(usable).collect();
        // NOTE: See discussion in pick_relay().
        let mut relays = match relays[..].choose_multiple_weighted(rng, n, |r| {
            self.weights.weight_rs_for_role(r.rs, role) as f64
        }) {
            Err(WeightError::InsufficientNonZero) => {
                // Too few relays had nonzero weights: return all of those that are okay.
                // (This behavior used to come up with rand 0.9; it no longer does.
                // We still detect it.)
                let remaining: Vec<_> = relays
                    .iter()
                    .filter(|r| self.weights.weight_rs_for_role(r.rs, role) > 0)
                    .cloned()
                    .collect();
                if remaining.is_empty() {
                    warn!(?self.weights, ?role,
                          "After filtering, all {} relays had zero weight! Picking some at random. See bug #1907.",
                          relays.len());
                    if relays.len() >= n {
                        relays.choose_multiple(rng, n).cloned().collect()
                    } else {
                        relays
                    }
                } else {
                    warn!(?self.weights, ?role,
                          "After filtering, only had {}/{} relays with nonzero weight. Returning them all. See bug #1907.",
                           remaining.len(), relays.len());
                    remaining
                }
            }
            Err(e) => {
                // Any other sampling error is unexpected; log and return nothing.
                warn_report!(e, "Unexpected error while sampling a set of relays");
                Vec::new()
            }
            Ok(iter) => {
                let selection: Vec<_> = iter.map(Relay::clone).collect();
                if selection.len() < n && selection.len() < relays.len() {
                    warn!(?self.weights, ?role,
                          "choose_multiple_weighted returned only {returned}, despite requesting {n}, \
                          and having {filtered_len} available after filtering. See bug #1907.",
                          returned=selection.len(), filtered_len=relays.len());
                }
                selection
            }
        };
        // Randomize the order of the returned relays.
        relays.shuffle(rng);
        relays
    }
1750
1751    /// Compute the weight with which `relay` will be selected for a given
1752    /// `role`.
1753    pub fn relay_weight<'a>(&'a self, relay: &Relay<'a>, role: WeightRole) -> RelayWeight {
1754        RelayWeight(self.weights.weight_rs_for_role(relay.rs, role))
1755    }
1756
1757    /// Compute the total weight with which any relay matching `usable`
1758    /// will be selected for a given `role`.
1759    ///
1760    /// Note: because this function is used to assess the total
1761    /// properties of the consensus, the `usable` predicate takes a
1762    /// [`MdRouterStatus`] rather than a [`Relay`].
1763    pub fn total_weight<P>(&self, role: WeightRole, usable: P) -> RelayWeight
1764    where
1765        P: Fn(&UncheckedRelay<'_>) -> bool,
1766    {
1767        self.all_relays()
1768            .filter_map(|unchecked| {
1769                if usable(&unchecked) {
1770                    Some(RelayWeight(
1771                        self.weights.weight_rs_for_role(unchecked.rs, role),
1772                    ))
1773                } else {
1774                    None
1775                }
1776            })
1777            .sum()
1778    }
1779
1780    /// Compute the weight with which a relay with ID `rsa_id` would be
1781    /// selected for a given `role`.
1782    ///
1783    /// Note that weight returned by this function assumes that the
1784    /// relay with that ID is actually [usable](NetDir#usable); if it isn't usable,
1785    /// then other weight-related functions will call its weight zero.
1786    pub fn weight_by_rsa_id(&self, rsa_id: &RsaIdentity, role: WeightRole) -> Option<RelayWeight> {
1787        self.by_rsa_id_unchecked(rsa_id)
1788            .map(|unchecked| RelayWeight(self.weights.weight_rs_for_role(unchecked.rs, role)))
1789    }
1790
1791    /// Return all relays in this NetDir known to be in the same family as
1792    /// `relay`.
1793    ///
1794    /// This list of members will **not** necessarily include `relay` itself.
1795    ///
1796    /// # Limitations
1797    ///
1798    /// Two relays only belong to the same family if _each_ relay
1799    /// claims to share a family with the other.  But if we are
1800    /// missing a microdescriptor for one of the relays listed by this
1801    /// relay, we cannot know whether it acknowledges family
1802    /// membership with this relay or not.  Therefore, this function
1803    /// can omit family members for which there is not (as yet) any
1804    /// Relay object.
1805    pub fn known_family_members<'a>(
1806        &'a self,
1807        relay: &'a Relay<'a>,
1808    ) -> impl Iterator<Item = Relay<'a>> {
1809        let relay_rsa_id = relay.rsa_id();
1810        relay.md.family().members().filter_map(move |other_rsa_id| {
1811            self.by_rsa_id(other_rsa_id)
1812                .filter(|other_relay| other_relay.md.family().contains(relay_rsa_id))
1813        })
1814    }
1815
    /// Return the current hidden service directory "time period".
    ///
    /// Specifically, this returns the time period that contains the beginning
    /// of the validity period of this `NetDir`'s consensus.  That time period
    /// is the one we use when acting as an hidden service client.
    #[cfg(feature = "hs-common")]
    pub fn hs_time_period(&self) -> TimePeriod {
        // The "current" ring is the one whose period we report here.
        self.hsdir_rings.current.time_period()
    }
1825
1826    /// Return the [`HsDirParams`] of all the relevant hidden service directory "time periods"
1827    ///
1828    /// This includes the current time period (as from
1829    /// [`.hs_time_period`](NetDir::hs_time_period))
1830    /// plus additional time periods that we publish descriptors for when we are
1831    /// acting as a hidden service.
1832    #[cfg(feature = "hs-service")]
1833    pub fn hs_all_time_periods(&self) -> Vec<HsDirParams> {
1834        self.hsdir_rings
1835            .iter()
1836            .map(|r| r.params().clone())
1837            .collect()
1838    }
1839
    /// Return the relays in this network directory that will be used as hidden service directories
    ///
    /// These are suitable to retrieve a given onion service's descriptor at a given time period.
    #[cfg(feature = "hs-common")]
    pub fn hs_dirs_download<'r, R>(
        &'r self,
        hsid: HsBlindId,
        period: TimePeriod,
        rng: &mut R,
    ) -> std::result::Result<Vec<Relay<'r>>, Bug>
    where
        R: rand::Rng,
    {
        // Algorithm:
        //
        // 1. Determine which HsDirRing to use, based on the time period.
        // 2. Find the shared random value that's associated with that HsDirRing.
        // 3. Choose spread = the parameter `hsdir_spread_fetch`
        // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
        // 5. Initialize Dirs = []
        // 6. for idx in 1..=n_replicas:
        //       - let H = hsdir_ring::onion_service_index(id, replica, rand,
        //         period).
        //       - Find the position of H within hsdir_ring.
        //       - Take elements from hsdir_ring starting at that position,
        //         adding them to Dirs until we have added `spread` new elements
        //         that were not there before.
        // 7. Shuffle Dirs
        // 8. return Dirs.

        let spread = self.spread(HsDirOp::Download);

        // When downloading, only look at relays on current ring.
        let ring = &self.hsdir_rings.current;

        // Callers must ask about the period our current ring covers.
        if ring.params().time_period != period {
            return Err(internal!(
                "our current ring is not associated with the requested time period!"
            ));
        }

        let mut hs_dirs = self.select_hsdirs(hsid, ring, spread).collect_vec();

        // When downloading, the order of the returned relays is random.
        hs_dirs.shuffle(rng);

        Ok(hs_dirs)
    }
1888
1889    /// Return the relays in this network directory that will be used as hidden service directories
1890    ///
1891    /// Returns the relays that are suitable for storing a given onion service's descriptors at the
1892    /// given time period.
1893    #[cfg(feature = "hs-service")]
1894    pub fn hs_dirs_upload(
1895        &self,
1896        hsid: HsBlindId,
1897        period: TimePeriod,
1898    ) -> std::result::Result<impl Iterator<Item = Relay<'_>>, Bug> {
1899        // Algorithm:
1900        //
1901        // 1. Choose spread = the parameter `hsdir_spread_store`
1902        // 2. Determine which HsDirRing to use, based on the time period.
1903        // 3. Find the shared random value that's associated with that HsDirRing.
1904        // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1905        // 5. Initialize Dirs = []
1906        // 6. for idx in 1..=n_replicas:
1907        //       - let H = hsdir_ring::onion_service_index(id, replica, rand,
1908        //         period).
1909        //       - Find the position of H within hsdir_ring.
1910        //       - Take elements from hsdir_ring starting at that position,
1911        //         adding them to Dirs until we have added `spread` new elements
1912        //         that were not there before.
1913        // 3. return Dirs.
1914        let spread = self.spread(HsDirOp::Upload);
1915
1916        // For each HsBlindId, determine which HsDirRing to use.
1917        let rings = self
1918            .hsdir_rings
1919            .iter()
1920            .filter_map(move |ring| {
1921                // Make sure the ring matches the TP of the hsid it's matched with.
1922                (ring.params().time_period == period).then_some((ring, hsid, period))
1923            })
1924            .collect::<Vec<_>>();
1925
1926        // The specified period should have an associated ring.
1927        if !rings.iter().any(|(_, _, tp)| *tp == period) {
1928            return Err(internal!(
1929                "the specified time period does not have an associated ring"
1930            ));
1931        };
1932
1933        // Now that we've matched each `hsid` with the ring associated with its TP, we can start
1934        // selecting replicas from each ring.
1935        Ok(rings.into_iter().flat_map(move |(ring, hsid, period)| {
1936            assert_eq!(period, ring.params().time_period());
1937            self.select_hsdirs(hsid, ring, spread)
1938        }))
1939    }
1940
    /// Return the relays in this network directory that will be used as hidden service directories
    ///
    /// Depending on `op`,
    /// these are suitable to either store, or retrieve, a
    /// given onion service's descriptor at a given time period.
    ///
    /// When `op` is `Download`, the order is random.
    /// When `op` is `Upload`, the order is not specified.
    ///
    /// Return an error if the time period is not one returned by
    /// `onion_service_time_period` or `onion_service_secondary_time_periods`.
    //
    // TODO: make HsDirOp pub(crate) once this is removed
    #[cfg(feature = "hs-common")]
    #[deprecated(note = "Use hs_dirs_upload or hs_dirs_download instead")]
    pub fn hs_dirs<'r, R>(&'r self, hsid: &HsBlindId, op: HsDirOp, rng: &mut R) -> Vec<Relay<'r>>
    where
        R: rand::Rng,
    {
        // Algorithm:
        //
        // 1. Determine which HsDirRing to use, based on the time period.
        // 2. Find the shared random value that's associated with that HsDirRing.
        // 3. Choose spread = the parameter `hsdir_spread_store` or
        //    `hsdir_spread_fetch` based on `op`.
        // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
        // 5. Initialize Dirs = []
        // 6. for idx in 1..=n_replicas:
        //       - let H = hsdir_ring::onion_service_index(id, replica, rand,
        //         period).
        //       - Find the position of H within hsdir_ring.
        //       - Take elements from hsdir_ring starting at that position,
        //         adding them to Dirs until we have added `spread` new elements
        //         that were not there before.
        // 7. return Dirs.
        let n_replicas = self
            .params
            .hsdir_n_replicas
            .get()
            .try_into()
            .expect("BoundedInt did not enforce bounds");

        // Pick the spread parameter matching the requested operation.
        let spread = match op {
            HsDirOp::Download => self.params.hsdir_spread_fetch,
            #[cfg(feature = "hs-service")]
            HsDirOp::Upload => self.params.hsdir_spread_store,
        };

        let spread = spread
            .get()
            .try_into()
            .expect("BoundedInt did not enforce bounds!");

        // TODO: I may be wrong here but I suspect that this function may
        // need refactoring so that it does not look at _all_ of the HsDirRings,
        // but only at the ones that corresponds to time periods for which
        // HsBlindId is valid.  Or I could be mistaken, in which case we should
        // have a comment to explain why I am, since the logic is subtle.
        // (For clients, there is only one ring.) -nickm
        //
        // (Actually, there is no need to follow through with the above TODO,
        // since this function is deprecated, and not used anywhere but the
        // tests.)

        let mut hs_dirs = self
            .hsdir_rings
            .iter_for_op(op)
            .cartesian_product(1..=n_replicas) // 1-indexed !
            .flat_map({
                // Shared across every (ring, replica) pair so that later
                // replicas skip relays already chosen by earlier ones.
                let mut selected_nodes = HashSet::new();

                move |(ring, replica): (&HsDirRing, u8)| {
                    let hsdir_idx = hsdir_ring::service_hsdir_index(hsid, replica, ring.params());

                    ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
                        // According to rend-spec 2.2.3:
                        //                                                  ... If any of those
                        // nodes have already been selected for a lower-numbered replica of the
                        // service, any nodes already chosen are disregarded (i.e. skipped over)
                        // when choosing a replica's hsdir_spread_store nodes.
                        selected_nodes.insert(*hsdir_idx)
                    })
                    .collect::<Vec<_>>()
                }
            })
            .filter_map(|(_hsdir_idx, rs_idx)| {
                // This ought not to be None but let's not panic or bail if it is
                self.relay_by_rs_idx(*rs_idx)
            })
            .collect_vec();

        match op {
            HsDirOp::Download => {
                // When `op` is `Download`, the order is random.
                hs_dirs.shuffle(rng);
            }
            #[cfg(feature = "hs-service")]
            HsDirOp::Upload => {
                // When `op` is `Upload`, the order is not specified.
            }
        }

        hs_dirs
    }
2045}
2046
impl MdReceiver for NetDir {
    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
        // The keys of `rsidx_by_missing` are the digests of microdescriptors
        // we still lack.
        Box::new(self.rsidx_by_missing.keys())
    }
    fn add_microdesc(&mut self, md: Microdesc) -> bool {
        // Wrap the microdescriptor in an Arc and delegate to the Arc-based path.
        self.add_arc_microdesc(Arc::new(md))
    }
    fn n_missing(&self) -> usize {
        self.rsidx_by_missing.len()
    }
}
2058
2059impl<'a> UncheckedRelay<'a> {
2060    /// Return an [`UncheckedRelayDetails`](details::UncheckedRelayDetails) for this relay.
2061    ///
2062    /// Callers should generally avoid using this information directly if they can;
2063    /// it's better to use a higher-level function that exposes semantic information
2064    /// rather than these properties.
2065    pub fn low_level_details(&self) -> details::UncheckedRelayDetails<'_> {
2066        details::UncheckedRelayDetails(self)
2067    }
2068
2069    /// Return true if this relay is valid and [usable](NetDir#usable).
2070    ///
2071    /// This function should return `true` for every Relay we expose
2072    /// to the user.
2073    pub fn is_usable(&self) -> bool {
2074        // No need to check for 'valid' or 'running': they are implicit.
2075        self.md.is_some() && self.rs.ed25519_id_is_usable()
2076    }
2077    /// If this is [usable](NetDir#usable), return a corresponding Relay object.
2078    pub fn into_relay(self) -> Option<Relay<'a>> {
2079        if self.is_usable() {
2080            Some(Relay {
2081                rs: self.rs,
2082                md: self.md?,
2083                #[cfg(feature = "geoip")]
2084                cc: self.cc,
2085            })
2086        } else {
2087            None
2088        }
2089    }
2090
2091    /// Return true if this relay is a hidden service directory
2092    ///
2093    /// Ie, if it is to be included in the hsdir ring.
2094    #[cfg(feature = "hs-common")]
2095    pub(crate) fn is_hsdir_for_ring(&self) -> bool {
2096        // TODO are there any other flags should we check?
2097        // rend-spec-v3 2.2.3 says just
2098        //   "each node listed in the current consensus with the HSDir flag"
2099        // Do we need to check ed25519_id_is_usable ?
2100        // See also https://gitlab.torproject.org/tpo/core/arti/-/issues/504
2101        self.rs.is_flagged_hsdir()
2102    }
2103}
2104
impl<'a> Relay<'a> {
    /// Return a [`RelayDetails`](details::RelayDetails) for this relay.
    ///
    /// Callers should generally avoid using this information directly if they can;
    /// it's better to use a higher-level function that exposes semantic information
    /// rather than these properties.
    pub fn low_level_details(&self) -> details::RelayDetails<'_> {
        details::RelayDetails(self)
    }

    /// Return the Ed25519 ID for this relay.
    pub fn id(&self) -> &Ed25519Identity {
        // The Ed25519 identity comes from the microdescriptor.
        self.md.ed25519_id()
    }
    /// Return the RsaIdentity for this relay.
    pub fn rsa_id(&self) -> &RsaIdentity {
        // The RSA identity comes from the consensus routerstatus entry.
        self.rs.rsa_identity()
    }

    /// Return a reference to this relay's "router status" entry in
    /// the consensus.
    ///
    /// The router status entry contains information about the relay
    /// that the authorities voted on directly.  For most use cases,
    /// you shouldn't need them.
    ///
    /// This function is only available if the crate was built with
    /// its `experimental-api` feature.
    #[cfg(feature = "experimental-api")]
    pub fn rs(&self) -> &netstatus::MdRouterStatus {
        self.rs
    }
    /// Return a reference to this relay's "microdescriptor" entry in
    /// the consensus.
    ///
    /// A "microdescriptor" is a synopsis of the information about a relay,
    /// used to determine its capabilities and route traffic through it.
    /// For most use cases, you shouldn't need it.
    ///
    /// This function is only available if the crate was built with
    /// its `experimental-api` feature.
    #[cfg(feature = "experimental-api")]
    pub fn md(&self) -> &Microdesc {
        self.md
    }
}
2151
/// An error value returned from [`NetDir::by_ids_detailed`].
#[cfg(feature = "hs-common")]
#[derive(Clone, Debug, thiserror::Error)]
#[non_exhaustive]
pub enum RelayLookupError {
    /// We found a relay whose presence indicates that the provided set of
    /// identities is impossible to resolve.
    #[error("Provided set of identities is impossible according to consensus.")]
    Impossible,
}
2162
impl<'a> HasAddrs for Relay<'a> {
    fn addrs(&self) -> impl Iterator<Item = std::net::SocketAddr> {
        // Delegate to the addresses listed in the consensus routerstatus.
        self.rs.addrs()
    }
}
#[cfg(feature = "geoip")]
impl<'a> HasCountryCode for Relay<'a> {
    fn country_code(&self) -> Option<CountryCode> {
        // The country code is stored on the Relay itself; None if unknown.
        self.cc
    }
}
impl<'a> tor_linkspec::HasRelayIdsLegacy for Relay<'a> {
    fn ed_identity(&self) -> &Ed25519Identity {
        // Same value as `Relay::id` (from the microdescriptor).
        self.id()
    }
    fn rsa_identity(&self) -> &RsaIdentity {
        // Same value as `Relay::rsa_id` (from the consensus routerstatus).
        self.rsa_id()
    }
}
2182
impl<'a> HasRelayIds for UncheckedRelay<'a> {
    fn identity(&self, key_type: RelayIdType) -> Option<RelayIdRef<'_>> {
        match key_type {
            // Only report an Ed25519 identity if the consensus marks it as
            // usable, and we have the microdescriptor that carries it.
            RelayIdType::Ed25519 if self.rs.ed25519_id_is_usable() => {
                self.md.map(|m| m.ed25519_id().into())
            }
            RelayIdType::Rsa => Some(self.rs.rsa_identity().into()),
            // Anything else (including an unusable Ed25519 identity): None.
            _ => None,
        }
    }
}
#[cfg(feature = "geoip")]
impl<'a> HasCountryCode for UncheckedRelay<'a> {
    fn country_code(&self) -> Option<CountryCode> {
        // The country code is stored on the UncheckedRelay itself; None if unknown.
        self.cc
    }
}
2200
// Marker-style impls: both traits get all their behavior from supertraits
// and/or default methods, so the impl bodies are empty.
impl<'a> DirectChanMethodsHelper for Relay<'a> {}
impl<'a> ChanTarget for Relay<'a> {}
2203
impl<'a> tor_linkspec::CircTarget for Relay<'a> {
    fn ntor_onion_key(&self) -> &ll::pk::curve25519::PublicKey {
        // The ntor onion key comes from the microdescriptor.
        self.md.ntor_key()
    }
    fn protovers(&self) -> &tor_protover::Protocols {
        // Supported protocol versions, as listed in the consensus.
        self.rs.protovers()
    }
}
2212
2213#[cfg(test)]
2214mod test {
2215    // @@ begin test lint list maintained by maint/add_warning @@
2216    #![allow(clippy::bool_assert_comparison)]
2217    #![allow(clippy::clone_on_copy)]
2218    #![allow(clippy::dbg_macro)]
2219    #![allow(clippy::mixed_attributes_style)]
2220    #![allow(clippy::print_stderr)]
2221    #![allow(clippy::print_stdout)]
2222    #![allow(clippy::single_char_pattern)]
2223    #![allow(clippy::unwrap_used)]
2224    #![allow(clippy::unchecked_time_subtraction)]
2225    #![allow(clippy::useless_vec)]
2226    #![allow(clippy::needless_pass_by_value)]
2227    //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
2228    #![allow(clippy::cognitive_complexity)]
2229    use super::*;
2230    use crate::testnet::*;
2231    use float_eq::assert_float_eq;
2232    use std::collections::HashSet;
2233    use std::time::Duration;
2234    use tor_basic_utils::test_rng::{self, testing_rng};
2235    use tor_linkspec::{RelayIdType, RelayIds};
2236
2237    #[cfg(feature = "hs-common")]
2238    fn dummy_hs_blind_id() -> HsBlindId {
2239        let hsid = [2, 1, 1, 1].iter().cycle().take(32).cloned().collect_vec();
2240        let hsid = Ed25519Identity::new(hsid[..].try_into().unwrap());
2241        HsBlindId::from(hsid)
2242    }
2243
    // Basic functionality for a partial netdir: Add microdescriptors,
    // then you have a netdir.
    #[test]
    fn partial_netdir() {
        let (consensus, microdescs) = construct_network().unwrap();
        let dir = PartialNetDir::new(consensus, None);

        // Check the lifetime: the test consensus is valid for one day.
        let lifetime = dir.lifetime();
        assert_eq!(
            lifetime
                .valid_until()
                .duration_since(lifetime.valid_after())
                .unwrap(),
            Duration::new(86400, 0)
        );

        // No microdescriptors, so we don't have enough paths, and can't
        // advance.
        assert!(!dir.have_enough_paths());
        let mut dir = match dir.unwrap_if_sufficient() {
            Ok(_) => panic!(),
            Err(d) => d,
        };

        // All 40 relays from the consensus should be missing their mds.
        let missing: HashSet<_> = dir.missing_microdescs().collect();
        assert_eq!(missing.len(), 40);
        assert_eq!(missing.len(), dir.netdir.c_relays().len());
        for md in &microdescs {
            assert!(missing.contains(md.digest()));
        }

        // Now add all the mds and try again.
        for md in microdescs {
            let wanted = dir.add_microdesc(md);
            assert!(wanted);
        }

        // With every md present, the netdir is complete and sufficient.
        let missing: HashSet<_> = dir.missing_microdescs().collect();
        assert!(missing.is_empty());
        assert!(dir.have_enough_paths());
        let _complete = match dir.unwrap_if_sufficient() {
            Ok(d) => d,
            Err(_) => panic!(),
        };
    }
2290
    #[test]
    fn override_params() {
        let (consensus, _microdescs) = construct_network().unwrap();
        // Override two real parameters; the unknown "doesnotexist" should be
        // ignored rather than rejected.
        let override_p = "bwweightscale=2 doesnotexist=77 circwindow=500"
            .parse()
            .unwrap();
        let dir = PartialNetDir::new(consensus.clone(), Some(&override_p));
        let params = &dir.netdir.params;
        assert_eq!(params.bw_weight_scale.get(), 2);
        assert_eq!(params.circuit_window.get(), 500_i32);

        // try again without the override.
        let dir = PartialNetDir::new(consensus, None);
        let params = &dir.netdir.params;
        assert_eq!(params.bw_weight_scale.get(), 1_i32);
        assert_eq!(params.circuit_window.get(), 1000_i32);
    }
2308
    #[test]
    fn fill_from_previous() {
        let (consensus, microdescs) = construct_network().unwrap();

        // Build a first netdir that is missing the first two mds.
        let mut dir = PartialNetDir::new(consensus.clone(), None);
        for md in microdescs.iter().skip(2) {
            let wanted = dir.add_microdesc(md.clone());
            assert!(wanted);
        }
        let dir1 = dir.unwrap_if_sufficient().unwrap();
        assert_eq!(dir1.missing_microdescs().count(), 2);

        // A fresh partial netdir can borrow the 38 mds that dir1 holds,
        // leaving only the same 2 missing.
        let mut dir = PartialNetDir::new(consensus, None);
        assert_eq!(dir.missing_microdescs().count(), 40);
        dir.fill_from_previous_netdir(Arc::new(dir1));
        assert_eq!(dir.missing_microdescs().count(), 2);
    }
2326
    #[test]
    fn path_count() {
        // Check frac_for_role() and frac_usable_paths() against hand-computed
        // fractions, using two thresholds that bracket the expected result.
        let low_threshold = "min_paths_for_circs_pct=64".parse().unwrap();
        let high_threshold = "min_paths_for_circs_pct=65".parse().unwrap();

        let (consensus, microdescs) = construct_network().unwrap();

        let mut dir = PartialNetDir::new(consensus.clone(), Some(&low_threshold));
        for (pos, md) in microdescs.iter().enumerate() {
            if pos % 7 == 2 {
                continue; // Skip relays 2, 9, 16, 23, 30, and 37.
            }
            dir.add_microdesc(md.clone());
        }
        let dir = dir.unwrap_if_sufficient().unwrap();

        // We have 40 relays that we know about from the consensus.
        assert_eq!(dir.all_relays().count(), 40);

        // But only 34 are usable, since we withheld 6 microdescriptors above.
        assert_eq!(dir.relays().count(), 34);

        // For guards: mds 20..=39 correspond to Guard relays.
        // Their bandwidth is 2*(1000+2000+...10000) = 110_000.
        // We skipped 23, 30, and 37.  They have bandwidth
        // 4000 + 1000 + 8000 = 13_000.  So our fractional bandwidth
        // should be (110-13)/110.
        let f = dir.frac_for_role(WeightRole::Guard, |u| u.rs.is_flagged_guard());
        assert!(((97.0 / 110.0) - f).abs() < 0.000001);

        // For exits: mds 10..=19 and 30..=39 correspond to Exit relays.
        // We skipped 16, 30, and 37, with bandwidth 7000 + 1000 + 8000
        // = 16_000, so our fractional bandwidth is (110-16)/110.
        let f = dir.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit());
        assert!(((94.0 / 110.0) - f).abs() < 0.000001);

        // For middles: all relays are middles. We skipped 2, 9, 16,
        // 23, 30, and 37, with total bandwidth 33_000 out of 220_000,
        // so our fractional bandwidth is (220-33)/220.
        let f = dir.frac_for_role(WeightRole::Middle, |_| true);
        assert!(((187.0 / 220.0) - f).abs() < 0.000001);

        // Multiplying those together, we get the fraction of paths we can
        // build at ~0.64052066, which is above the threshold we set above for
        // MinPathsForCircsPct.
        let f = dir.frac_usable_paths();
        assert!((f - 0.64052066).abs() < 0.000001);

        // But if we try again with a slightly higher threshold (65%), the
        // same directory no longer counts as sufficient.
        let mut dir = PartialNetDir::new(consensus, Some(&high_threshold));
        for (pos, md) in microdescs.into_iter().enumerate() {
            if pos % 7 == 2 {
                continue; // Skip the same relays as before.
            }
            dir.add_microdesc(md);
        }
        assert!(dir.unwrap_if_sufficient().is_err());
    }
2385
2386    /// Return a 3-tuple for use by `test_pick_*()` of an Rng, a number of
2387    /// iterations, and a tolerance.
2388    ///
2389    /// If the Rng is deterministic (the default), we can use a faster setup,
2390    /// with a higher tolerance and fewer iterations.  But if you've explicitly
2391    /// opted into randomization (or are replaying a seed from an earlier
2392    /// randomized test), we give you more iterations and a tighter tolerance.
2393    fn testing_rng_with_tolerances() -> (impl rand::Rng, usize, f64) {
2394        // Use a deterministic RNG if none is specified, since this is slow otherwise.
2395        let config = test_rng::Config::from_env().unwrap_or(test_rng::Config::Deterministic);
2396        let (iters, tolerance) = match config {
2397            test_rng::Config::Deterministic => (5000, 0.02),
2398            _ => (50000, 0.01),
2399        };
2400        (config.into_rng(), iters, tolerance)
2401    }
2402
2403    #[test]
2404    fn test_pick() {
2405        let (consensus, microdescs) = construct_network().unwrap();
2406        let mut dir = PartialNetDir::new(consensus, None);
2407        for md in microdescs.into_iter() {
2408            let wanted = dir.add_microdesc(md.clone());
2409            assert!(wanted);
2410        }
2411        let dir = dir.unwrap_if_sufficient().unwrap();
2412
2413        let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2414
2415        let mut picked = [0_isize; 40];
2416        for _ in 0..total {
2417            let r = dir.pick_relay(&mut rng, WeightRole::Middle, |r| {
2418                r.low_level_details().supports_exit_port_ipv4(80)
2419            });
2420            let r = r.unwrap();
2421            let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2422            picked[id_byte as usize] += 1;
2423        }
2424        // non-exits should never get picked.
2425        picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2426        picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2427
2428        let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2429
2430        // We didn't we any non-default weights, so the other relays get
2431        // weighted proportional to their bandwidth.
2432        assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2433        assert_float_eq!(picked_f[38], (9.0 / 110.0), abs <= tolerance);
2434        assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2435    }
2436
2437    #[test]
2438    fn test_pick_multiple() {
2439        // This is mostly a copy of test_pick, except that it uses
2440        // pick_n_relays to pick several relays at once.
2441
2442        let dir = construct_netdir().unwrap_if_sufficient().unwrap();
2443
2444        let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2445
2446        let mut picked = [0_isize; 40];
2447        for _ in 0..total / 4 {
2448            let relays = dir.pick_n_relays(&mut rng, 4, WeightRole::Middle, |r| {
2449                r.low_level_details().supports_exit_port_ipv4(80)
2450            });
2451            assert_eq!(relays.len(), 4);
2452            for r in relays {
2453                let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2454                picked[id_byte as usize] += 1;
2455            }
2456        }
2457        // non-exits should never get picked.
2458        picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2459        picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2460
2461        let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2462
2463        // We didn't we any non-default weights, so the other relays get
2464        // weighted proportional to their bandwidth.
2465        assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2466        assert_float_eq!(picked_f[36], (7.0 / 110.0), abs <= tolerance);
2467        assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2468    }
2469
2470    #[test]
2471    fn subnets() {
2472        let cfg = SubnetConfig::default();
2473
2474        fn same_net(cfg: &SubnetConfig, a: &str, b: &str) -> bool {
2475            cfg.addrs_in_same_subnet(&a.parse().unwrap(), &b.parse().unwrap())
2476        }
2477
2478        assert!(same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2479        assert!(!same_net(&cfg, "127.15.3.3", "127.16.9.9"));
2480
2481        assert!(!same_net(&cfg, "127.15.3.3", "127::"));
2482
2483        assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2484        assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:fffe:91:34::"));
2485
2486        let cfg = SubnetConfig {
2487            subnets_family_v4: 32,
2488            subnets_family_v6: 128,
2489        };
2490        assert!(!same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2491        assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2492
2493        assert!(same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2494        assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.2"));
2495        assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:90:33::"));
2496
2497        let cfg = SubnetConfig {
2498            subnets_family_v4: 33,
2499            subnets_family_v6: 129,
2500        };
2501        assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2502        assert!(!same_net(&cfg, "::", "::"));
2503    }
2504
2505    #[test]
2506    fn subnet_union() {
2507        let cfg1 = SubnetConfig {
2508            subnets_family_v4: 16,
2509            subnets_family_v6: 64,
2510        };
2511        let cfg2 = SubnetConfig {
2512            subnets_family_v4: 24,
2513            subnets_family_v6: 32,
2514        };
2515        let a1 = "1.2.3.4".parse().unwrap();
2516        let a2 = "1.2.10.10".parse().unwrap();
2517
2518        let a3 = "ffff:ffff::7".parse().unwrap();
2519        let a4 = "ffff:ffff:1234::8".parse().unwrap();
2520
2521        assert_eq!(cfg1.addrs_in_same_subnet(&a1, &a2), true);
2522        assert_eq!(cfg2.addrs_in_same_subnet(&a1, &a2), false);
2523
2524        assert_eq!(cfg1.addrs_in_same_subnet(&a3, &a4), false);
2525        assert_eq!(cfg2.addrs_in_same_subnet(&a3, &a4), true);
2526
2527        let cfg_u = cfg1.union(&cfg2);
2528        assert_eq!(
2529            cfg_u,
2530            SubnetConfig {
2531                subnets_family_v4: 16,
2532                subnets_family_v6: 32,
2533            }
2534        );
2535        assert_eq!(cfg_u.addrs_in_same_subnet(&a1, &a2), true);
2536        assert_eq!(cfg_u.addrs_in_same_subnet(&a3, &a4), true);
2537
2538        assert_eq!(cfg1.union(&cfg1), cfg1);
2539
2540        assert_eq!(cfg1.union(&SubnetConfig::no_addresses_match()), cfg1);
2541    }
2542
2543    #[test]
2544    fn relay_funcs() {
2545        let (consensus, microdescs) = construct_custom_network(
2546            |pos, nb, _| {
2547                if pos == 15 {
2548                    nb.rs.add_or_port("[f0f0::30]:9001".parse().unwrap());
2549                } else if pos == 20 {
2550                    nb.rs.add_or_port("[f0f0::3131]:9001".parse().unwrap());
2551                }
2552            },
2553            None,
2554        )
2555        .unwrap();
2556        let subnet_config = SubnetConfig::default();
2557        let all_family_info = FamilyRules::all_family_info();
2558        let mut dir = PartialNetDir::new(consensus, None);
2559        for md in microdescs.into_iter() {
2560            let wanted = dir.add_microdesc(md.clone());
2561            assert!(wanted);
2562        }
2563        let dir = dir.unwrap_if_sufficient().unwrap();
2564
2565        // Pick out a few relays by ID.
2566        let k0 = Ed25519Identity::from([0; 32]);
2567        let k1 = Ed25519Identity::from([1; 32]);
2568        let k2 = Ed25519Identity::from([2; 32]);
2569        let k3 = Ed25519Identity::from([3; 32]);
2570        let k10 = Ed25519Identity::from([10; 32]);
2571        let k15 = Ed25519Identity::from([15; 32]);
2572        let k20 = Ed25519Identity::from([20; 32]);
2573
2574        let r0 = dir.by_id(&k0).unwrap();
2575        let r1 = dir.by_id(&k1).unwrap();
2576        let r2 = dir.by_id(&k2).unwrap();
2577        let r3 = dir.by_id(&k3).unwrap();
2578        let r10 = dir.by_id(&k10).unwrap();
2579        let r15 = dir.by_id(&k15).unwrap();
2580        let r20 = dir.by_id(&k20).unwrap();
2581
2582        assert_eq!(r0.id(), &[0; 32].into());
2583        assert_eq!(r0.rsa_id(), &[0; 20].into());
2584        assert_eq!(r1.id(), &[1; 32].into());
2585        assert_eq!(r1.rsa_id(), &[1; 20].into());
2586
2587        assert!(r0.same_relay_ids(&r0));
2588        assert!(r1.same_relay_ids(&r1));
2589        assert!(!r1.same_relay_ids(&r0));
2590
2591        assert!(r0.low_level_details().is_dir_cache());
2592        assert!(!r1.low_level_details().is_dir_cache());
2593        assert!(r2.low_level_details().is_dir_cache());
2594        assert!(!r3.low_level_details().is_dir_cache());
2595
2596        assert!(!r0.low_level_details().supports_exit_port_ipv4(80));
2597        assert!(!r1.low_level_details().supports_exit_port_ipv4(80));
2598        assert!(!r2.low_level_details().supports_exit_port_ipv4(80));
2599        assert!(!r3.low_level_details().supports_exit_port_ipv4(80));
2600
2601        assert!(!r0.low_level_details().policies_allow_some_port());
2602        assert!(!r1.low_level_details().policies_allow_some_port());
2603        assert!(!r2.low_level_details().policies_allow_some_port());
2604        assert!(!r3.low_level_details().policies_allow_some_port());
2605        assert!(r10.low_level_details().policies_allow_some_port());
2606
2607        assert!(r0.low_level_details().in_same_family(&r0, all_family_info));
2608        assert!(r0.low_level_details().in_same_family(&r1, all_family_info));
2609        assert!(r1.low_level_details().in_same_family(&r0, all_family_info));
2610        assert!(r1.low_level_details().in_same_family(&r1, all_family_info));
2611        assert!(!r0.low_level_details().in_same_family(&r2, all_family_info));
2612        assert!(!r2.low_level_details().in_same_family(&r0, all_family_info));
2613        assert!(r2.low_level_details().in_same_family(&r2, all_family_info));
2614        assert!(r2.low_level_details().in_same_family(&r3, all_family_info));
2615
2616        assert!(r0.low_level_details().in_same_subnet(&r10, &subnet_config));
2617        assert!(r10.low_level_details().in_same_subnet(&r10, &subnet_config));
2618        assert!(r0.low_level_details().in_same_subnet(&r0, &subnet_config));
2619        assert!(r1.low_level_details().in_same_subnet(&r1, &subnet_config));
2620        assert!(!r1.low_level_details().in_same_subnet(&r2, &subnet_config));
2621        assert!(!r2.low_level_details().in_same_subnet(&r3, &subnet_config));
2622
2623        // Make sure IPv6 families work.
2624        let subnet_config = SubnetConfig {
2625            subnets_family_v4: 128,
2626            subnets_family_v6: 96,
2627        };
2628        assert!(r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2629        assert!(!r15.low_level_details().in_same_subnet(&r1, &subnet_config));
2630
2631        // Make sure that subnet configs can be disabled.
2632        let subnet_config = SubnetConfig {
2633            subnets_family_v4: 255,
2634            subnets_family_v6: 255,
2635        };
2636        assert!(!r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2637    }
2638
    #[test]
    fn test_badexit() {
        // Make a netdir where relays 10-19 are flagged BadExit, and everybody
        // exits to 443 on IPv6.
        use tor_netdoc::types::relay_flags::RelayFlag;
        let netdir = construct_custom_netdir(|pos, nb, _| {
            if (10..20).contains(&pos) {
                nb.rs.add_flags(RelayFlag::BadExit);
            }
            nb.md.parse_ipv6_policy("accept 443").unwrap();
        })
        .unwrap()
        .unwrap_if_sufficient()
        .unwrap();

        // Relay 12 is flagged BadExit; relay 32 is a normal exit.
        let e12 = netdir.by_id(&Ed25519Identity::from([12; 32])).unwrap();
        let e32 = netdir.by_id(&Ed25519Identity::from([32; 32])).unwrap();

        // A BadExit relay reports that it supports no exit ports...
        assert!(!e12.low_level_details().supports_exit_port_ipv4(80));
        assert!(e32.low_level_details().supports_exit_port_ipv4(80));

        assert!(!e12.low_level_details().supports_exit_port_ipv6(443));
        assert!(e32.low_level_details().supports_exit_port_ipv6(443));
        assert!(!e32.low_level_details().supports_exit_port_ipv6(555));

        // ...and that its effective policies allow nothing...
        assert!(!e12.low_level_details().policies_allow_some_port());
        assert!(e32.low_level_details().policies_allow_some_port());

        assert!(!e12.low_level_details().ipv4_policy().allows_some_port());
        assert!(!e12.low_level_details().ipv6_policy().allows_some_port());
        assert!(e32.low_level_details().ipv4_policy().allows_some_port());
        assert!(e32.low_level_details().ipv6_policy().allows_some_port());

        // ...but its *declared* policies (what the relay itself published)
        // still allow some ports.
        assert!(
            e12.low_level_details()
                .ipv4_declared_policy()
                .allows_some_port()
        );
        assert!(
            e12.low_level_details()
                .ipv6_declared_policy()
                .allows_some_port()
        );
    }
2683
2684    #[cfg(feature = "experimental-api")]
2685    #[test]
2686    fn test_accessors() {
2687        let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2688
2689        let r4 = netdir.by_id(&Ed25519Identity::from([4; 32])).unwrap();
2690        let r16 = netdir.by_id(&Ed25519Identity::from([16; 32])).unwrap();
2691
2692        assert!(!r4.md().ipv4_policy().allows_some_port());
2693        assert!(r16.md().ipv4_policy().allows_some_port());
2694
2695        assert!(!r4.rs().is_flagged_exit());
2696        assert!(r16.rs().is_flagged_exit());
2697    }
2698
    #[test]
    fn test_by_id() {
        // Make a netdir that omits the microdescriptor for relay #13
        // (whose identity bytes are all 13, i.e. 0x0d0d…).
        let netdir = construct_custom_netdir(|pos, nb, _| {
            nb.omit_md = pos == 13;
        })
        .unwrap();

        let netdir = netdir.unwrap_if_sufficient().unwrap();

        // Lookup by Ed25519 identity finds relays that have an md...
        let r = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
        assert_eq!(r.id().as_bytes(), &[0; 32]);

        // ...but not relay 13, whose md we omitted above.
        assert!(netdir.by_id(&Ed25519Identity::from([13; 32])).is_none());

        // Lookup by RSA identity behaves the same way.
        let r = netdir.by_rsa_id(&[12; 20].into()).unwrap();
        assert_eq!(r.rsa_id().as_bytes(), &[12; 20]);
        assert!(netdir.rsa_id_is_listed(&[12; 20].into()));

        assert!(netdir.by_rsa_id(&[13; 20].into()).is_none());

        // An RSA identity that appears nowhere in the consensus:
        assert!(netdir.by_rsa_id_unchecked(&[99; 20].into()).is_none());
        assert!(!netdir.rsa_id_is_listed(&[99; 20].into()));

        // by_rsa_id_unchecked() finds relay 13 even without its md, and
        // the relay still counts as "listed".
        let r = netdir.by_rsa_id_unchecked(&[13; 20].into()).unwrap();
        assert_eq!(r.rs.rsa_identity().as_bytes(), &[13; 20]);
        assert!(netdir.rsa_id_is_listed(&[13; 20].into()));

        // Pairs of (Ed25519, RSA) identities for by_ids() lookups; the
        // name pair_A_B means Ed bytes all A and RSA bytes all B.
        let pair_13_13 = RelayIds::builder()
            .ed_identity([13; 32].into())
            .rsa_identity([13; 20].into())
            .build()
            .unwrap();
        let pair_14_14 = RelayIds::builder()
            .ed_identity([14; 32].into())
            .rsa_identity([14; 20].into())
            .build()
            .unwrap();
        // A pair whose RSA id does not belong to the relay with Ed id 14.
        let pair_14_99 = RelayIds::builder()
            .ed_identity([14; 32].into())
            .rsa_identity([99; 20].into())
            .build()
            .unwrap();

        // Relay 13 is unusable (no md), so by_ids() does not return it.
        let r = netdir.by_ids(&pair_13_13);
        assert!(r.is_none());
        let r = netdir.by_ids(&pair_14_14).unwrap();
        assert_eq!(r.identity(RelayIdType::Rsa).unwrap().as_bytes(), &[14; 20]);
        assert_eq!(
            r.identity(RelayIdType::Ed25519).unwrap().as_bytes(),
            &[14; 32]
        );
        // A mismatched pair yields no relay.
        let r = netdir.by_ids(&pair_14_99);
        assert!(r.is_none());

        // Relay 13's md is missing, so the pairing can't be confirmed: None.
        assert_eq!(
            netdir.id_pair_listed(&[13; 32].into(), &[13; 20].into()),
            None
        );
        // Relay 15 is present and the identities match: Some(true).
        assert_eq!(
            netdir.id_pair_listed(&[15; 32].into(), &[15; 20].into()),
            Some(true)
        );
        // The Ed identity is listed, but with a different RSA id: Some(false).
        assert_eq!(
            netdir.id_pair_listed(&[15; 32].into(), &[99; 20].into()),
            Some(false)
        );
    }
2767
    #[test]
    #[cfg(feature = "hs-common")]
    fn test_by_ids_detailed() {
        // Make a netdir that omits the microdescriptor for relay #13
        // (whose identity bytes are all 13, i.e. 0x0d0d…).
        let netdir = construct_custom_netdir(|pos, nb, _| {
            nb.omit_md = pos == 13;
        })
        .unwrap();

        let netdir = netdir.unwrap_if_sufficient().unwrap();

        // Naming convention: idA_B has Ed25519 identity bytes all A and RSA
        // identity bytes all B; "xx" means that identity is left unset.
        let id13_13 = RelayIds::builder()
            .ed_identity([13; 32].into())
            .rsa_identity([13; 20].into())
            .build()
            .unwrap();
        let id15_15 = RelayIds::builder()
            .ed_identity([15; 32].into())
            .rsa_identity([15; 20].into())
            .build()
            .unwrap();
        let id15_99 = RelayIds::builder()
            .ed_identity([15; 32].into())
            .rsa_identity([99; 20].into())
            .build()
            .unwrap();
        let id99_15 = RelayIds::builder()
            .ed_identity([99; 32].into())
            .rsa_identity([15; 20].into())
            .build()
            .unwrap();
        let id99_99 = RelayIds::builder()
            .ed_identity([99; 32].into())
            .rsa_identity([99; 20].into())
            .build()
            .unwrap();
        let id15_xx = RelayIds::builder()
            .ed_identity([15; 32].into())
            .build()
            .unwrap();
        let idxx_15 = RelayIds::builder()
            .rsa_identity([15; 20].into())
            .build()
            .unwrap();

        // Relay 13 is listed but unusable (no md): not found, but no conflict.
        assert!(matches!(netdir.by_ids_detailed(&id13_13), Ok(None)));
        // A fully matching pair is found.
        assert!(matches!(netdir.by_ids_detailed(&id15_15), Ok(Some(_))));
        // One identity belongs to a listed relay whose other identity
        // differs: the combination can never match any relay.
        assert!(matches!(
            netdir.by_ids_detailed(&id15_99),
            Err(RelayLookupError::Impossible)
        ));
        assert!(matches!(
            netdir.by_ids_detailed(&id99_15),
            Err(RelayLookupError::Impossible)
        ));
        // Neither identity is listed at all: simply not found.
        assert!(matches!(netdir.by_ids_detailed(&id99_99), Ok(None)));
        // A single identity (of either kind) is enough to find a relay.
        assert!(matches!(netdir.by_ids_detailed(&id15_xx), Ok(Some(_))));
        assert!(matches!(netdir.by_ids_detailed(&idxx_15), Ok(Some(_))));
    }
2827
2828    #[test]
2829    fn weight_type() {
2830        let r0 = RelayWeight(0);
2831        let r100 = RelayWeight(100);
2832        let r200 = RelayWeight(200);
2833        let r300 = RelayWeight(300);
2834        assert_eq!(r100 + r200, r300);
2835        assert_eq!(r100.checked_div(r200), Some(0.5));
2836        assert!(r100.checked_div(r0).is_none());
2837        assert_eq!(r200.ratio(0.5), Some(r100));
2838        assert!(r200.ratio(-1.0).is_none());
2839    }
2840
    #[test]
    fn weight_accessors() {
        // Build a complete testing netdir (nothing omitted) and check the
        // weight accessors against the known bandwidths of the testing
        // network.  (A previous comment here about omitting a
        // microdescriptor was a stale copy-paste.)
        let netdir = construct_netdir().unwrap_if_sufficient().unwrap();

        let g_total = netdir.total_weight(WeightRole::Guard, |r| r.rs.is_flagged_guard());
        // This is just the total guard bandwidth, since all our Wxy = 1.
        assert_eq!(g_total, RelayWeight(110_000));

        // A predicate that matches nothing yields a zero total.
        let g_total = netdir.total_weight(WeightRole::Guard, |_| false);
        assert_eq!(g_total, RelayWeight(0));

        // Relay 35 is a guard; its weight equals its testnet bandwidth.
        let relay = netdir.by_id(&Ed25519Identity::from([35; 32])).unwrap();
        assert!(relay.rs.is_flagged_guard());
        let w = netdir.relay_weight(&relay, WeightRole::Guard);
        assert_eq!(w, RelayWeight(6_000));

        // Weight lookup by RSA identity works for a listed relay...
        let w = netdir
            .weight_by_rsa_id(&[33; 20].into(), WeightRole::Guard)
            .unwrap();
        assert_eq!(w, RelayWeight(4_000));

        // ...and returns None for an unlisted one.
        assert!(
            netdir
                .weight_by_rsa_id(&[99; 20].into(), WeightRole::Guard)
                .is_none()
        );
    }
2869
    #[test]
    fn family_list() {
        // Give relay 0x0a a family declaration naming 0x0b, 0x0c, and 0x0d,
        // and give 0x0c a declaration naming 0x0a in return.  Only mutual
        // declarations should count as family membership.
        let netdir = construct_custom_netdir(|pos, n, _| {
            if pos == 0x0a {
                n.md.family(
                    "$0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B \
                     $0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C \
                     $0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D"
                        .parse()
                        .unwrap(),
                );
            } else if pos == 0x0c {
                n.md.family("$0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A".parse().unwrap());
            }
        })
        .unwrap()
        .unwrap_if_sufficient()
        .unwrap();

        // In the testing netdir, adjacent members are in the same family by
        // default, so relay 0's only family member is relay 1.
        let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
        let family: Vec<_> = netdir.known_family_members(&r0).collect();
        assert_eq!(family.len(), 1);
        assert_eq!(family[0].id(), &Ed25519Identity::from([1; 32]));

        // But we've made relay 10 (0x0a) claim membership with several others.
        let r10 = netdir.by_id(&Ed25519Identity::from([10; 32])).unwrap();
        let family: HashSet<_> = netdir.known_family_members(&r10).map(|r| *r.id()).collect();
        assert_eq!(family.len(), 2);
        assert!(family.contains(&Ed25519Identity::from([11; 32])));
        assert!(family.contains(&Ed25519Identity::from([12; 32])));
        // Note that 13 doesn't get put in, even though it's listed, since it
        // doesn't claim membership with 10.
    }
    #[test]
    #[cfg(feature = "geoip")]
    fn relay_has_country_code() {
        // A tiny legacy-format GeoIP database: each line is a
        // (range start, range end, country code) triple, IPv6 only.
        let src_v6 = r#"
        fe80:dead:beef::,fe80:dead:ffff::,US
        fe80:feed:eeee::1,fe80:feed:eeee::1,AT
        fe80:feed:eeee::2,fe80:feed:ffff::,DE
        "#;
        let db = GeoipDb::new_from_legacy_format("", src_v6, true).unwrap();

        // Add IPv6 ORPorts so that the relays' addresses resolve (or fail
        // to resolve) against the database above.
        let netdir = construct_custom_netdir_with_geoip(
            |pos, n, _| {
                if pos == 0x01 {
                    // One address, in the US range.
                    n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
                }
                if pos == 0x02 {
                    // Two addresses with conflicting lookups (AT vs DE).
                    n.rs.add_or_port("[fe80:feed:eeee::1]:42".parse().unwrap());
                    n.rs.add_or_port("[fe80:feed:eeee::2]:42".parse().unwrap());
                }
                if pos == 0x03 {
                    // Two addresses, both in the US range.
                    n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
                    n.rs.add_or_port("[fe80:dead:beef::2]:42".parse().unwrap());
                }
            },
            &db,
        )
        .unwrap()
        .unwrap_if_sufficient()
        .unwrap();

        // No GeoIP data available -> None
        let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
        assert_eq!(r0.cc, None);

        // Exactly one match -> Some
        let r1 = netdir.by_id(&Ed25519Identity::from([1; 32])).unwrap();
        assert_eq!(r1.cc.as_ref().map(|x| x.as_ref()), Some("US"));

        // Conflicting matches -> None
        let r2 = netdir.by_id(&Ed25519Identity::from([2; 32])).unwrap();
        assert_eq!(r2.cc, None);

        // Multiple agreeing matches -> Some
        let r3 = netdir.by_id(&Ed25519Identity::from([3; 32])).unwrap();
        assert_eq!(r3.cc.as_ref().map(|x| x.as_ref()), Some("US"));
    }
2950
    #[test]
    #[cfg(feature = "hs-common")]
    #[allow(deprecated)]
    fn hs_dirs_selection() {
        use tor_basic_utils::test_rng::testing_rng;

        // Override the consensus parameters that control how many HsDir
        // relays are used when storing or fetching a descriptor.
        const HSDIR_SPREAD_STORE: i32 = 6;
        const HSDIR_SPREAD_FETCH: i32 = 2;
        const PARAMS: [(&str, i32); 2] = [
            ("hsdir_spread_store", HSDIR_SPREAD_STORE),
            ("hsdir_spread_fetch", HSDIR_SPREAD_FETCH),
        ];

        let netdir: Arc<NetDir> =
            crate::testnet::construct_custom_netdir_with_params(|_, _, _| {}, PARAMS, None)
                .unwrap()
                .unwrap_if_sufficient()
                .unwrap()
                .into();
        let hsid = dummy_hs_blind_id();

        // Expected number of relays returned for each HsDir operation.
        const OP_RELAY_COUNT: &[(HsDirOp, usize)] = &[
            // We can't upload to (hsdir_n_replicas * hsdir_spread_store) = 12, relays because there
            // are only 10 relays with the HsDir flag in the consensus.
            #[cfg(feature = "hs-service")]
            (HsDirOp::Upload, 10),
            (HsDirOp::Download, 4),
        ];

        for (op, relay_count) in OP_RELAY_COUNT {
            let relays = netdir.hs_dirs(&hsid, *op, &mut testing_rng());

            assert_eq!(relays.len(), *relay_count);

            // There should be no duplicates (the filtering function passed to
            // HsDirRing::ring_items_at() ensures the relays that are already in use for
            // lower-numbered replicas aren't considered a second time for a higher-numbered
            // replica).
            let unique = relays
                .iter()
                .map(|relay| relay.ed_identity())
                .collect::<HashSet<_>>();
            assert_eq!(unique.len(), relays.len());
        }

        // TODO: come up with a test that checks that HsDirRing::ring_items_at() skips over the
        // expected relays.
        //
        // For example, let's say we have the following hsdir ring:
        //
        //         A  -  B
        //        /       \
        //       F         C
        //        \       /
        //         E  -  D
        //
        // Let's also assume that:
        //
        //   * hsdir_spread_store = 3
        //   * the ordering of the relays on the ring is [A, B, C, D, E, F]
        //
        // If we use relays [A, B, C] for replica 1, and hs_index(2) = E, then replica 2 _must_ get
        // relays [E, F, D]. We should have a test that checks this.
    }
3015
3016    #[test]
3017    fn zero_weights() {
3018        // Here we check the behavior of IndexedRandom::{choose_weighted, choose_multiple_weighted}
3019        // in the presence of items whose weight is 0.
3020        //
3021        // We think that the behavior is:
3022        //   - An item with weight 0 is never returned.
3023        //   - If all items have weight 0, choose_weighted returns an error.
3024        //   - If all items have weight 0, choose_multiple_weighted returns an empty list.
3025        //   - If we request n items from choose_multiple_weighted,
3026        //     but only m<n items have nonzero weight, we return all m of those items.
3027        //   - if the request for n items can't be completely satisfied with n items of weight >= 0,
3028        //     we get InsufficientNonZero.
3029        let items = vec![1, 2, 3];
3030        let mut rng = testing_rng();
3031
3032        let a = items.choose_weighted(&mut rng, |_| 0);
3033        assert!(matches!(a, Err(WeightError::InsufficientNonZero)));
3034
3035        let x = items.choose_multiple_weighted(&mut rng, 2, |_| 0);
3036        let xs: Vec<_> = x.unwrap().collect();
3037        assert!(xs.is_empty());
3038
3039        let only_one = |n: &i32| if *n == 1 { 1 } else { 0 };
3040        let x = items.choose_multiple_weighted(&mut rng, 2, only_one);
3041        let xs: Vec<_> = x.unwrap().collect();
3042        assert_eq!(&xs[..], &[&1]);
3043
3044        for _ in 0..100 {
3045            let a = items.choose_weighted(&mut rng, only_one);
3046            assert_eq!(a.unwrap(), &1);
3047
3048            let x = items
3049                .choose_multiple_weighted(&mut rng, 1, only_one)
3050                .unwrap()
3051                .collect::<Vec<_>>();
3052            assert_eq!(x, vec![&1]);
3053        }
3054    }
3055
3056    #[test]
3057    fn insufficient_but_nonzero() {
3058        // Here we check IndexedRandom::choose_multiple_weighted when there no zero values,
3059        // but there are insufficient values.
3060        // (If this behavior changes, we need to change our usage.)
3061
3062        let items = vec![1, 2, 3];
3063        let mut rng = testing_rng();
3064        let mut a = items
3065            .choose_multiple_weighted(&mut rng, 10, |_| 1)
3066            .unwrap()
3067            .copied()
3068            .collect::<Vec<_>>();
3069        a.sort();
3070        assert_eq!(a, items);
3071    }
3072}