Commit e0bc269

layout: Store inverse memory index in FieldsShape::Arbitrary
All usages of `memory_index` start by calling `invert_bijective_mapping`, so storing the inverted mapping directly saves some work and simplifies the code.
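For context: the old `memory_index` mapped a field's source-order index to its memory-order position, while the new `in_memory_order` lists source-order field indices sorted by increasing offset. The two are inverse permutations of one another. A minimal standalone sketch of that relationship, using plain `Vec<usize>` in place of rustc's `IndexVec` (the function below mirrors what `invert_bijective_mapping` does, but is illustrative rather than the compiler's code):

// Invert a permutation: if map[x] == y, then invert(map)[y] == x.
fn invert_bijective_mapping(map: &[usize]) -> Vec<usize> {
    let mut inverted = vec![0; map.len()];
    for (x, &y) in map.iter().enumerate() {
        inverted[y] = x;
    }
    inverted
}

fn main() {
    // Suppose field reordering placed source field 2 first in memory,
    // then field 0, then field 1.
    let in_memory_order = vec![2, 0, 1]; // memory slot -> source field
    let memory_index = invert_bijective_mapping(&in_memory_order);
    assert_eq!(memory_index, vec![1, 2, 0]); // source field -> memory slot
    // Inverting twice round-trips, since the mapping is bijective.
    assert_eq!(invert_bijective_mapping(&memory_index), in_memory_order);
}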
1 parent: ed0006a

39 files changed, +250 -304 lines

compiler/rustc_abi/src/layout.rs

Lines changed: 19 additions & 27 deletions
@@ -714,7 +714,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             },
             fields: FieldsShape::Arbitrary {
                 offsets: [niche_offset].into(),
-                memory_index: [0].into(),
+                in_memory_order: [FieldIdx::new(0)].into(),
             },
             backend_repr: abi,
             largest_niche,
@@ -1008,8 +1008,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 let pair =
                     LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
                 let pair_offsets = match pair.fields {
-                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                        assert_eq!(memory_index.raw, [0, 1]);
+                    FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
+                        assert_eq!(in_memory_order.raw, [FieldIdx::new(0), FieldIdx::new(1)]);
                         offsets
                     }
                     _ => panic!("encountered a non-arbitrary layout during enum layout"),
@@ -1061,7 +1061,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             },
             fields: FieldsShape::Arbitrary {
                 offsets: [Size::ZERO].into(),
-                memory_index: [0].into(),
+                in_memory_order: [FieldIdx::new(0)].into(),
             },
             largest_niche,
             uninhabited,
@@ -1110,10 +1110,10 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         let pack = repr.pack;
         let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
         let mut max_repr_align = repr.align;
-        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
+        let mut in_memory_order: IndexVec<u32, FieldIdx> = fields.indices().collect();
         let optimize_field_order = !repr.inhibit_struct_field_reordering();
         let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-        let optimizing = &mut inverse_memory_index.raw[..end];
+        let optimizing = &mut in_memory_order.raw[..end];
         let fields_excluding_tail = &fields.raw[..end];
         // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts.
         let field_seed = fields_excluding_tail
@@ -1248,12 +1248,10 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 // regardless of the status of `-Z randomize-layout`
             }
         }
-        // inverse_memory_index holds field indices by increasing memory offset.
-        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+        // in_memory_order holds field indices by increasing memory offset.
+        // That is, if field 5 has offset 0, the first element of in_memory_order is 5.
         // We now write field offsets to the corresponding offset slot;
         // field 5 with offset 0 puts 0 in offsets[5].
-        // At the bottom of this function, we invert `inverse_memory_index` to
-        // produce `memory_index` (see `invert_mapping`).
         let mut unsized_field = None::<&F>;
         let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
         let mut offset = Size::ZERO;
@@ -1265,7 +1263,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             align = align.max(prefix_align);
             offset = prefix_size.align_to(prefix_align);
         }
-        for &i in &inverse_memory_index {
+        for &i in &in_memory_order {
             let field = &fields[i];
             if let Some(unsized_field) = unsized_field {
                 return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
@@ -1322,18 +1320,6 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {

         debug!("univariant min_size: {:?}", offset);
         let min_size = offset;
-        // As stated above, inverse_memory_index holds field indices by increasing offset.
-        // This makes it an already-sorted view of the offsets vec.
-        // To invert it, consider:
-        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
-        // Field 5 would be the first element, so memory_index is i:
-        // Note: if we didn't optimize, it's already right.
-        let memory_index = if optimize_field_order {
-            inverse_memory_index.invert_bijective_mapping()
-        } else {
-            debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
-            inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
-        };
         let size = min_size.align_to(align);
         // FIXME(oli-obk): deduplicate and harden these checks
         if size.bytes() >= dl.obj_size_bound() {
@@ -1389,8 +1375,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 let pair =
                     LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                 let pair_offsets = match pair.fields {
-                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                        assert_eq!(memory_index.raw, [0, 1]);
+                    FieldsShape::Arbitrary { ref offsets, ref in_memory_order } => {
+                        assert_eq!(
+                            in_memory_order.raw,
+                            [FieldIdx::new(0), FieldIdx::new(1)]
+                        );
                         offsets
                     }
                     FieldsShape::Primitive
@@ -1434,7 +1423,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {

         Ok(LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Arbitrary { offsets, memory_index },
+            fields: FieldsShape::Arbitrary { offsets, in_memory_order },
             backend_repr: abi,
             largest_niche,
             uninhabited,
@@ -1530,7 +1519,10 @@ where

         Ok(LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() },
+            fields: FieldsShape::Arbitrary {
+                offsets: [Size::ZERO].into(),
+                in_memory_order: [FieldIdx::new(0)].into(),
+            },
             backend_repr: repr,
             largest_niche: elt.largest_niche,
             uninhabited: false,
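The hot loop above now consumes `in_memory_order` directly: it walks fields by increasing offset and writes each result into an `offsets` slot indexed by source order, so the final inversion step could be deleted. A simplified model of that loop, with sizes and alignments as plain byte counts instead of rustc's `Size`/`Align` types (names and signature are illustrative):

// Walk fields in memory order, aligning the running offset before
// placing each field; `offsets` stays indexed by source order.
fn assign_offsets(sizes: &[u64], aligns: &[u64], in_memory_order: &[usize]) -> (Vec<u64>, u64) {
    let mut offsets = vec![0u64; sizes.len()];
    let mut offset = 0u64;
    for &i in in_memory_order {
        let align = aligns[i]; // assumed nonzero (a power of two in practice)
        offset = (offset + align - 1) / align * align; // round up to alignment
        offsets[i] = offset;
        offset += sizes[i];
    }
    (offsets, offset) // per-field offsets plus the unaligned min_size
}

fn main() {
    // struct { a: u8, b: u32, c: u16 } reordered as [b, c, a] in memory.
    let (offsets, min_size) = assign_offsets(&[1, 4, 2], &[1, 4, 2], &[1, 2, 0]);
    assert_eq!(offsets, vec![6, 0, 4]); // a at 6, b at 0, c at 4
    assert_eq!(min_size, 7);
}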

compiler/rustc_abi/src/layout/coroutine.rs

Lines changed: 21 additions & 25 deletions
@@ -182,33 +182,29 @@ pub(super) fn layout<
     // CoroutineLayout.
     debug!("prefix = {:#?}", prefix);
     let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
-        FieldsShape::Arbitrary { mut offsets, memory_index } => {
-            let mut inverse_memory_index = memory_index.invert_bijective_mapping();
-
+        FieldsShape::Arbitrary { mut offsets, in_memory_order } => {
             // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
             // "outer" and "promoted" fields respectively.
             let b_start = tag_index.plus(1);
             let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
             let offsets_a = offsets;

-            // Disentangle the "a" and "b" components of `inverse_memory_index`
+            // Disentangle the "a" and "b" components of `in_memory_order`
             // by preserving the order but keeping only one disjoint "half" each.
             // FIXME(eddyb) build a better abstraction for permutations, if possible.
-            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
-                .iter()
-                .filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
-                .collect();
-            inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
-            let inverse_memory_index_a = inverse_memory_index;
-
-            // Since `inverse_memory_index_{a,b}` each only refer to their
-            // respective fields, they can be safely inverted
-            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
-            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
+            let mut in_memory_order_a = IndexVec::<u32, FieldIdx>::new();
+            let mut in_memory_order_b = IndexVec::<u32, FieldIdx>::new();
+            for i in in_memory_order {
+                if let Some(j) = i.index().checked_sub(b_start.index()) {
+                    in_memory_order_b.push(FieldIdx::new(j));
+                } else {
+                    in_memory_order_a.push(i);
+                }
+            }

             let outer_fields =
-                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
-            (outer_fields, offsets_b, memory_index_b)
+                FieldsShape::Arbitrary { offsets: offsets_a, in_memory_order: in_memory_order_a };
+            (outer_fields, offsets_b, in_memory_order_b.invert_bijective_mapping())
         }
         _ => unreachable!(),
     };
@@ -236,7 +232,7 @@ pub(super) fn layout<
         )?;
         variant.variants = Variants::Single { index };

-        let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
+        let FieldsShape::Arbitrary { offsets, in_memory_order } = variant.fields else {
             unreachable!();
         };

@@ -249,8 +245,9 @@ pub(super) fn layout<
         // promoted fields were being used, but leave the elements not in the
         // subset as `invalid_field_idx`, which we can filter out later to
         // obtain a valid (bijective) mapping.
+        let memory_index = in_memory_order.invert_bijective_mapping();
         let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
-        let mut combined_inverse_memory_index =
+        let mut combined_in_memory_order =
             IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);

         let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
@@ -268,19 +265,18 @@ pub(super) fn layout<
                     (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                 }
             };
-            combined_inverse_memory_index[memory_index] = i;
+            combined_in_memory_order[memory_index] = i;
             offset
         })
         .collect();

-        // Remove the unused slots and invert the mapping to obtain the
-        // combined `memory_index` (also see previous comment).
-        combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
-        let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
+        // Remove the unused slots to obtain the combined `in_memory_order`
+        // (also see previous comment).
+        combined_in_memory_order.raw.retain(|&i| i.index() != invalid_field_idx);

         variant.fields = FieldsShape::Arbitrary {
             offsets: combined_offsets,
-            memory_index: combined_memory_index,
+            in_memory_order: combined_in_memory_order,
         };

         size = size.max(variant.size);
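The rewritten "disentangle" step is easier to follow on plain integers: a single pass partitions one in-memory order over `0..n` into two orders over the disjoint halves `0..b_start` and `b_start..n` while preserving relative order, where the old code filtered, retained, and then inverted each half separately. A standalone sketch under those assumptions (not the compiler's code):

// Split an in-memory order into "outer" (indices below b_start) and
// "promoted" (indices at or above b_start, re-based to start at 0).
fn disentangle(in_memory_order: &[usize], b_start: usize) -> (Vec<usize>, Vec<usize>) {
    let mut a = Vec::new();
    let mut b = Vec::new();
    for &i in in_memory_order {
        match i.checked_sub(b_start) {
            Some(j) => b.push(j),
            None => a.push(i),
        }
    }
    (a, b)
}

fn main() {
    // Five prefix fields whose memory order interleaves both halves.
    let (a, b) = disentangle(&[3, 0, 4, 1, 2], 2);
    assert_eq!(a, vec![0, 1]);    // outer half keeps its relative order
    assert_eq!(b, vec![1, 2, 0]); // promoted half, re-based by b_start
}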

compiler/rustc_abi/src/layout/simple.rs

Lines changed: 3 additions & 3 deletions
@@ -16,7 +16,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Arbitrary {
                 offsets: IndexVec::new(),
-                memory_index: IndexVec::new(),
+                in_memory_order: IndexVec::new(),
             },
             backend_repr: BackendRepr::Memory { sized },
             largest_niche: None,
@@ -108,7 +108,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Arbitrary {
                 offsets: [Size::ZERO, b_offset].into(),
-                memory_index: [0, 1].into(),
+                in_memory_order: [FieldIdx::new(0), FieldIdx::new(1)].into(),
             },
             backend_repr: BackendRepr::ScalarPair(a, b),
             largest_niche,
@@ -133,7 +133,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
                 Some(fields) => FieldsShape::Union(fields),
                 None => FieldsShape::Arbitrary {
                     offsets: IndexVec::new(),
-                    memory_index: IndexVec::new(),
+                    in_memory_order: IndexVec::new(),
                 },
             },
             backend_repr: BackendRepr::Memory { sized: true },

compiler/rustc_abi/src/lib.rs

Lines changed: 4 additions & 43 deletions
@@ -1636,19 +1636,14 @@ pub enum FieldsShape<FieldIdx: Idx> {
         // FIXME(eddyb) use small vector optimization for the common case.
         offsets: IndexVec<FieldIdx, Size>,

-        /// Maps source order field indices to memory order indices,
+        /// Maps memory order field indices to source order indices,
         /// depending on how the fields were reordered (if at all).
         /// This is a permutation, with both the source order and the
         /// memory order using the same (0..n) index ranges.
         ///
-        /// Note that during computation of `memory_index`, sometimes
-        /// it is easier to operate on the inverse mapping (that is,
-        /// from memory order to source order), and that is usually
-        /// named `inverse_memory_index`.
-        ///
         // FIXME(eddyb) build a better abstraction for permutations, if possible.
         // FIXME(camlorn) also consider small vector optimization here.
-        memory_index: IndexVec<FieldIdx, u32>,
+        in_memory_order: IndexVec<u32, FieldIdx>,
     },
 }

@@ -1682,51 +1677,17 @@ impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
         }
     }

-    #[inline]
-    pub fn memory_index(&self, i: usize) -> usize {
-        match *self {
-            FieldsShape::Primitive => {
-                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
-            }
-            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
-            FieldsShape::Arbitrary { ref memory_index, .. } => {
-                memory_index[FieldIdx::new(i)].try_into().unwrap()
-            }
-        }
-    }
-
     /// Gets source indices of the fields by increasing offsets.
     #[inline]
     pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
-        let mut inverse_small = [0u8; 64];
-        let mut inverse_big = IndexVec::new();
-        let use_small = self.count() <= inverse_small.len();
-
-        // We have to write this logic twice in order to keep the array small.
-        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
-            if use_small {
-                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
-                    inverse_small[mem_idx as usize] = field_idx.index() as u8;
-                }
-            } else {
-                inverse_big = memory_index.invert_bijective_mapping();
-            }
-        }
-
         // Primitives don't really have fields in the way that structs do,
         // but having this return an empty iterator for them is unhelpful
         // since that makes them look kinda like ZSTs, which they're not.
         let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };

-        (0..pseudofield_count).map(move |i| match *self {
+        (0..pseudofield_count).map(move |i| match self {
             FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
-            FieldsShape::Arbitrary { .. } => {
-                if use_small {
-                    inverse_small[i] as usize
-                } else {
-                    inverse_big[i as u32].index()
-                }
-            }
+            FieldsShape::Arbitrary { in_memory_order, .. } => in_memory_order[i as u32].index(),
         })
     }
 }
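This is where storing the inverse mapping pays off most visibly: `index_by_increasing_offset` previously re-inverted `memory_index` on every call, maintaining a 64-entry scratch array to avoid heap allocation in the common case, whereas `in_memory_order` already is the desired iteration order. A trimmed-down model of the new behavior, with a plain slice standing in for `IndexVec`:

// With the inverse mapping stored directly, iterating fields by
// increasing offset is just reading it front to back.
fn index_by_increasing_offset(in_memory_order: &[usize]) -> impl ExactSizeIterator<Item = usize> + '_ {
    in_memory_order.iter().copied()
}

fn main() {
    let in_memory_order = [2, 0, 1];
    let visited: Vec<_> = index_by_increasing_offset(&in_memory_order).collect();
    assert_eq!(visited, vec![2, 0, 1]); // source indices, lowest offset first
}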

compiler/rustc_const_eval/src/interpret/visitor.rs

Lines changed: 10 additions & 8 deletions
@@ -4,7 +4,7 @@
 use std::num::NonZero;

 use rustc_abi::{FieldIdx, FieldsShape, VariantIdx, Variants};
-use rustc_index::IndexVec;
+use rustc_index::{Idx as _, IndexVec};
 use rustc_middle::mir::interpret::InterpResult;
 use rustc_middle::ty::{self, Ty};
 use tracing::trace;
@@ -27,13 +27,15 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
     /// This function provides the chance to reorder the order in which fields are visited for
     /// `FieldsShape::Aggregate`.
     ///
-    /// The default means we iterate in source declaration order; alternatively this can do some
-    /// work with `memory_index` to iterate in memory order.
+    /// The default means we iterate in source declaration order; alternatively this can use
+    /// `in_memory_order` to iterate in memory order.
     #[inline(always)]
     fn aggregate_field_iter(
-        memory_index: &IndexVec<FieldIdx, u32>,
-    ) -> impl Iterator<Item = FieldIdx> + 'static {
-        memory_index.indices()
+        in_memory_order: &IndexVec<u32, FieldIdx>,
+    ) -> impl Iterator<Item = FieldIdx> {
+        // Allow the optimizer to elide the bounds checking when creating each index.
+        let _ = FieldIdx::new(in_memory_order.len());
+        (0..in_memory_order.len()).map(FieldIdx::new)
     }

     // Recursive actions, ready to be overloaded.
@@ -168,8 +170,8 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
             &FieldsShape::Union(fields) => {
                 self.visit_union(v, fields)?;
             }
-            FieldsShape::Arbitrary { memory_index, .. } => {
-                for idx in Self::aggregate_field_iter(memory_index) {
+            FieldsShape::Arbitrary { in_memory_order, .. } => {
+                for idx in Self::aggregate_field_iter(in_memory_order) {
                     let field = self.ecx().project_field(v, idx)?;
                     self.visit_field(v, idx.as_usize(), &field)?;
                 }
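The `let _ = FieldIdx::new(in_memory_order.len());` line in the new `aggregate_field_iter` deserves a note: index constructors in rustc_index assert that the value fits the index type, so performing that check once on the largest possible value lets the optimizer prove every smaller value is also valid and drop the per-element check inside the `map`. A self-contained model of the pattern, with a simplified stand-in for rustc_index's `FieldIdx` (the real type is generated by `newtype_index!`):

#[derive(Clone, Copy, Debug, PartialEq)]
struct FieldIdx(u32);

impl FieldIdx {
    fn new(i: usize) -> Self {
        // Stand-in for the range assertion the real constructor performs.
        assert!(i <= u32::MAX as usize, "index overflowed FieldIdx");
        FieldIdx(i as u32)
    }
}

fn field_indices(len: usize) -> impl Iterator<Item = FieldIdx> {
    // Check the largest value once, up front; the optimizer can then see
    // that every i < len also passes and elide the assert in the closure.
    let _ = FieldIdx::new(len);
    (0..len).map(FieldIdx::new)
}

fn main() {
    let ids: Vec<_> = field_indices(3).collect();
    assert_eq!(ids, vec![FieldIdx(0), FieldIdx(1), FieldIdx(2)]);
}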

compiler/rustc_index/src/slice.rs

Lines changed: 0 additions & 3 deletions
@@ -181,9 +181,6 @@ impl<I: Idx, J: Idx> IndexSlice<I, J> {
     /// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`,
     /// assuming the values in `self` are a permutation of `0..self.len()`.
     ///
-    /// This is used to go between `memory_index` (source field order to memory order)
-    /// and `inverse_memory_index` (memory order to source field order).
-    /// See also `FieldsShape::Arbitrary::memory_index` for more details.
     // FIXME(eddyb) build a better abstraction for permutations, if possible.
     pub fn invert_bijective_mapping(&self) -> IndexVec<J, I> {
         debug_assert_eq!(

compiler/rustc_transmute/src/layout/tree.rs

Lines changed: 2 additions & 3 deletions
@@ -491,7 +491,7 @@ pub(crate) mod rustc {
         ) -> Result<Self, Err> {
             // This constructor does not support non-`FieldsShape::Arbitrary`
             // layouts.
-            let FieldsShape::Arbitrary { offsets, memory_index } = layout.fields() else {
+            let FieldsShape::Arbitrary { offsets, in_memory_order } = layout.fields() else {
                 return Err(Err::NotYetSupported);
             };

@@ -519,8 +519,7 @@ pub(crate) mod rustc {
             }

             // Append the fields, in memory order, to the layout.
-            let inverse_memory_index = memory_index.invert_bijective_mapping();
-            for &field_idx in inverse_memory_index.iter() {
+            for &field_idx in in_memory_order.iter() {
                 // Add interfield padding.
                 let padding_needed = offsets[field_idx] - size;
                 let padding = Self::padding(padding_needed.bytes_usize());
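Since this constructor already wanted fields in memory order, it can now iterate `in_memory_order` directly instead of inverting first, and the padding computation stays a running difference against each field's offset. A simplified model with byte counts as plain integers (illustrative, not the rustc_transmute code):

// Walk fields in memory order; the gap between the bytes emitted so far
// and the next field's offset is exactly the inter-field padding.
fn padding_between_fields(offsets: &[u64], sizes: &[u64], in_memory_order: &[usize]) -> Vec<u64> {
    let mut size = 0u64;
    let mut paddings = Vec::new();
    for &field_idx in in_memory_order {
        paddings.push(offsets[field_idx] - size);
        size = offsets[field_idx] + sizes[field_idx];
    }
    paddings
}

fn main() {
    // struct { a: u8, b: u32 } with no reordering: 3 bytes of padding before b.
    let paddings = padding_between_fields(&[0, 4], &[1, 4], &[0, 1]);
    assert_eq!(paddings, vec![0, 3]);
}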
