// miden_core/mast/node/basic_block_node/mod.rs
1use alloc::{boxed::Box, string::String, vec::Vec};
2use core::{fmt, iter::repeat_n};
3
4use crate::{
5    Felt, Word, ZERO,
6    chiplets::hasher,
7    crypto::hash::Blake3_256,
8    mast::{
9        DecoratedLinksIter, DecoratedOpLink, DecoratorId, DecoratorStore, MastForest,
10        MastForestError, MastNode, MastNodeFingerprint, MastNodeId,
11    },
12    operations::{DecoratorList, Operation},
13    prettier::PrettyPrint,
14    utils::LookupByIdx,
15};
16
17mod op_batch;
18pub use op_batch::OpBatch;
19use op_batch::OpBatchAccumulator;
20pub(crate) use op_batch::collect_immediate_placements;
21
22use super::{MastForestContributor, MastNodeExt};
23
24#[cfg(any(test, feature = "arbitrary"))]
25pub mod arbitrary;
26
27#[cfg(test)]
28mod tests;
29
// CONSTANTS
// ================================================================================================

/// Maximum number of operations per group.
pub const GROUP_SIZE: usize = 9;

/// Maximum number of groups per batch.
///
/// Must be a power of two: `num_op_groups` rounds the final batch's group count up to the next
/// power of two, and each full batch contributes exactly `BATCH_SIZE` elements to the hash.
pub const BATCH_SIZE: usize = 8;

// Compile-time guard: fail the build with a clear message if `BATCH_SIZE` is ever changed to a
// non-power-of-two value (replaces the older `[(); 1]` array-length trick).
const _: () = assert!(BATCH_SIZE.is_power_of_two(), "BATCH_SIZE must be a power of two");
39
40// BASIC BLOCK NODE
41// ================================================================================================
42
43/// Block for a linear sequence of operations (i.e., no branching or loops).
44///
45/// Executes its operations in order. Fails if any of the operations fails.
46///
47/// A basic block is composed of operation batches, operation batches are composed of operation
48/// groups, operation groups encode the VM's operations and immediate values. These values are
49/// created according to these rules:
50///
51/// - A basic block contains one or more batches.
52/// - A batch contains up to 8 groups, and the number of groups must be a power of 2.
53/// - A group contains up to 9 operations or 1 immediate value.
54/// - Last operation in a group cannot be an operation that requires an immediate value.
55/// - NOOPs are used to fill a group or batch when necessary.
56/// - An immediate value follows the operation that requires it, using the next available group in
57///   the batch. If there are no groups available in the batch, then both the operation and its
58///   immediate value are moved to the next batch.
59///
60/// Example: 8 pushes result in two operation batches:
61///
62/// - First batch: First group with 7 push opcodes and 2 zero-paddings packed together, followed by
63///   7 groups with their respective immediate values.
64/// - Second batch: First group with the last push opcode and 8 zero-paddings packed together,
65///   followed by one immediate and 6 padding groups.
66///
67/// The hash of a basic block is:
68///
69/// > hash(batches, domain=BASIC_BLOCK_DOMAIN)
70///
71/// Where `batches` is the concatenation of each `batch` in the basic block, and each batch is 8
72/// field elements (512 bits).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BasicBlockNode {
    /// The primitive operations contained in this basic block.
    ///
    /// The operations are broken up into batches of 8 groups, with each group containing up to 9
    /// operations, or a single immediate value. Thus the maximum size of each batch is 72
    /// operations. Multiple batches are used for blocks consisting of more than 72 operations.
    op_batches: Vec<OpBatch>,
    /// Commitment to this basic block: `hash(batches, domain=BASIC_BLOCK_DOMAIN)` (see the
    /// type-level docs above).
    digest: Word,
    /// Stores both operation-level and node-level decorators.
    ///
    /// Custom serialization is handled via Serialize/Deserialize impls.
    decorators: DecoratorStore,
}
86
87// ------------------------------------------------------------------------------------------------
88// SERIALIZATION
89// ================================================================================================
90
// ------------------------------------------------------------------------------------------------
/// Constants
impl BasicBlockNode {
    /// The domain of the basic block node (used for control block hashing).
    ///
    /// Basic blocks hash with the zero domain separator.
    pub const DOMAIN: Felt = ZERO;
}
97
98// ------------------------------------------------------------------------------------------------
99/// Constructors
100impl BasicBlockNode {
101    /// Returns a new [`BasicBlockNode`] instantiated with the specified operations and decorators.
102    ///
103    /// Returns an error if:
104    /// - `operations` vector is empty.
105    #[cfg(any(test, feature = "arbitrary"))]
106    pub(crate) fn new_owned_with_decorators(
107        operations: Vec<Operation>,
108        decorators: DecoratorList,
109    ) -> Result<Self, MastForestError> {
110        if operations.is_empty() {
111            return Err(MastForestError::EmptyBasicBlock);
112        }
113
114        // Validate decorators list (only in debug mode).
115        #[cfg(debug_assertions)]
116        validate_decorators(operations.len(), &decorators);
117
118        let (op_batches, digest) = batch_and_hash_ops(operations);
119        // the prior line may have inserted some padding Noops in the op_batches
120        // the decorator mapping should still point to the correct operation when that happens
121        let reflowed_decorators = BasicBlockNode::adjust_decorators(decorators, &op_batches);
122
123        Ok(Self {
124            op_batches,
125            digest,
126            decorators: DecoratorStore::Owned {
127                decorators: reflowed_decorators,
128                before_enter: Vec::new(),
129                after_exit: Vec::new(),
130            },
131        })
132    }
133
134    // Takes a `DecoratorList` which operation indexes are defined against un-padded operations, and
135    // adjusts those indexes to point into the padded `&[OpBatches]` passed as argument.
136    //
137    // IOW this makes its `decorators` padding-aware, or equivalently "adds" the padding to these
138    // decorators
139    pub fn adjust_decorators(decorators: DecoratorList, op_batches: &[OpBatch]) -> DecoratorList {
140        let raw2pad = RawToPaddedPrefix::new(op_batches);
141        decorators
142            .into_iter()
143            .map(|(raw_idx, dec_id)| (raw_idx + raw2pad[raw_idx], dec_id))
144            .collect()
145    }
146
147    /// Adjusts raw operation indices to padded indices for AssemblyOp mappings.
148    ///
149    /// Similar to `adjust_decorators`, but works with AssemblyOp mappings `(raw_idx, id)` pairs.
150    /// The op_batches contain padding NOOPs that shift operation indices. This method adjusts
151    /// the raw indices to account for this padding so lookups during execution use correct indices.
152    pub fn adjust_asm_op_indices<T: Copy>(
153        asm_ops: Vec<(usize, T)>,
154        op_batches: &[OpBatch],
155    ) -> Vec<(usize, T)> {
156        let raw2pad = RawToPaddedPrefix::new(op_batches);
157        asm_ops
158            .into_iter()
159            .map(|(raw_idx, id)| {
160                let padded = raw_idx + raw2pad[raw_idx];
161                (padded, id)
162            })
163            .collect()
164    }
165}
166
// ------------------------------------------------------------------------------------------------
/// Public accessors
impl BasicBlockNode {
    /// Returns a reference to the operation batches in this basic block.
    pub fn op_batches(&self) -> &[OpBatch] {
        &self.op_batches
    }

    /// Returns the number of operation batches in this basic block.
    pub fn num_op_batches(&self) -> usize {
        self.op_batches.len()
    }

    /// Returns the total number of operation groups in this basic block.
    ///
    /// The number of operation groups is computed as follows:
    /// - For all batches but the last one we set the number of groups to 8, regardless of the
    ///   actual number of groups in the batch. The reason for this is that when operation batches
    ///   are concatenated together each batch contributes 8 elements to the hash.
    /// - For the last batch, we take the number of actual groups and round it up to the next power
    ///   of two. The reason for rounding is that the VM always executes a number of operation
    ///   groups which is a power of two.
    pub fn num_op_groups(&self) -> usize {
        let last_batch_num_groups = self.op_batches.last().expect("no last group").num_groups();
        (self.op_batches.len() - 1) * BATCH_SIZE + last_batch_num_groups.next_power_of_two()
    }

    /// Returns the number of operations in this basic block.
    pub fn num_operations(&self) -> u32 {
        let num_ops: usize = self.op_batches.iter().map(|batch| batch.ops().len()).sum();
        num_ops.try_into().expect("basic block contains more than 2^32 operations")
    }

    /// Returns a [`DecoratorOpLinkIterator`] which allows us to iterate through the op-indexed
    /// decorators of this basic block node.
    ///
    /// This method borrows from the forest's storage, avoiding unnecessary Arc clones and providing
    /// efficient access to decorators.
    ///
    /// This iterator is intended for e.g. processor consumption and provides access to only the
    /// operation-indexed decorators (excluding `before_enter` and `after_exit` decorators).
    pub fn indexed_decorator_iter<'a>(
        &'a self,
        forest: &'a MastForest,
    ) -> DecoratorOpLinkIterator<'a> {
        match &self.decorators {
            DecoratorStore::Owned { decorators, .. } => {
                DecoratorOpLinkIterator::from_slice(decorators)
            },
            DecoratorStore::Linked { id } => {
                // This is used in MastForestMerger::merge_nodes, which strips the `MastForest` of
                // some nodes before remapping decorators, so calling
                // verify_node_in_forest will not work here.

                // For linked nodes, borrow from forest storage.
                // Check if the node has any decorators at all.
                let has_decorators = forest
                    .decorator_links_for_node(*id)
                    .map(|links| links.into_iter().next().is_some())
                    .unwrap_or(false);

                if !has_decorators {
                    return DecoratorOpLinkIterator::from_slice(&[]);
                }

                // Second lookup is expected to succeed since the first one did above.
                let view = forest.decorator_links_for_node(*id).expect(
                    "linked node decorators should be available; forest may be inconsistent",
                );

                DecoratorOpLinkIterator::from_linked(view.into_iter())
            },
        }
    }

    /// Returns an iterator which allows us to iterate through the decorator list of
    /// this basic block node with op indexes aligned to the "raw" (un-padded) op
    /// batches of the basic block node.
    ///
    /// This method borrows from the forest's storage, avoiding unnecessary Arc clones and
    /// providing efficient access to decorators.
    ///
    /// Though this adjusts the indexation of op-indexed decorators, this iterator returns all
    /// decorators of the [`BasicBlockNode`] in the order in which they appear in the program.
    /// This includes `before_enter`, op-indexed decorators, and `after_exit`.
    pub fn raw_decorator_iter<'a>(
        &'a self,
        forest: &'a MastForest,
    ) -> RawDecoratorOpLinkIterator<'a> {
        match &self.decorators {
            DecoratorStore::Owned { decorators, before_enter, after_exit } => {
                RawDecoratorOpLinkIterator::from_slice_iters(
                    before_enter,
                    decorators,
                    after_exit,
                    &self.op_batches,
                )
            },
            DecoratorStore::Linked { id } => {
                #[cfg(debug_assertions)]
                self.verify_node_in_forest(forest);
                // For linked nodes, borrow from forest storage.
                // Check if the node has any decorators at all.
                let has_decorators = forest
                    .decorator_links_for_node(*id)
                    .map(|links| links.into_iter().next().is_some())
                    .unwrap_or(false);

                if !has_decorators {
                    // No operation-level decorators, but still need node-level decorators
                    let before_enter = forest.before_enter_decorators(*id);
                    let after_exit = forest.after_exit_decorators(*id);
                    return RawDecoratorOpLinkIterator::from_slice_iters(
                        before_enter,
                        &[],
                        after_exit,
                        &self.op_batches,
                    );
                }

                let view = forest.decorator_links_for_node(*id).expect(
                    "linked node decorators should be available; forest may be inconsistent",
                );

                // Get node-level decorators from NodeToDecoratorIds
                let before_enter = forest.before_enter_decorators(*id);
                let after_exit = forest.after_exit_decorators(*id);

                RawDecoratorOpLinkIterator::from_linked(
                    before_enter,
                    view.into_iter(),
                    after_exit,
                    &self.op_batches,
                )
            },
        }
    }

    /// Returns only the raw op-indexed decorators (without `before_enter`/`after_exit`)
    /// with indices based on raw (un-padded) operations.
    ///
    /// This method borrows from the forest's storage, avoiding unnecessary Arc clones and
    /// providing efficient access to decorators.
    ///
    /// Stores decorators with raw operation indices for serialization.
    pub fn raw_op_indexed_decorators(&self, forest: &MastForest) -> Vec<(usize, DecoratorId)> {
        match &self.decorators {
            DecoratorStore::Owned { decorators, .. } => {
                RawDecoratorOpLinkIterator::from_slice_iters(&[], decorators, &[], &self.op_batches)
                    .collect()
            },
            DecoratorStore::Linked { id } => {
                // This is used in MastForest::remove_nodes, which strips the `MastForest` of its
                // nodes before remapping decorators, so calling
                // verify_node_in_forest will not work here.

                let pad2raw = PaddedToRawPrefix::new(self.op_batches());
                match forest.decorator_links_for_node(*id) {
                    Ok(links) => links
                        .into_iter()
                        .map(|(padded_idx, dec_id)| {
                            // Subtract the number of padding NOOPs preceding this index to
                            // recover the raw (un-padded) operation index.
                            let raw_idx = padded_idx - pad2raw[padded_idx];
                            (raw_idx, dec_id)
                        })
                        .collect(),
                    Err(_) => Vec::new(), // Return empty if error
                }
            },
        }
    }

    /// Returns an iterator over the operations in the order in which they appear in the program.
    pub fn operations(&self) -> impl Iterator<Item = &Operation> {
        self.op_batches.iter().flat_map(|batch| batch.ops())
    }

    /// Returns an iterator over the un-padded operations in the order in which they
    /// appear in the program.
    pub fn raw_operations(&self) -> impl Iterator<Item = &Operation> {
        self.op_batches.iter().flat_map(|batch| batch.raw_ops())
    }

    /// Returns the total number of operations and decorators in this basic block.
    ///
    /// Only operation-indexed decorators are counted; `before_enter`/`after_exit` node-level
    /// decorators are not included in the tally.
    pub fn num_operations_and_decorators(&self, forest: &MastForest) -> u32 {
        let num_ops: usize = self.num_operations() as usize;
        let num_decorators = match &self.decorators {
            DecoratorStore::Owned { decorators, .. } => decorators.len(),
            DecoratorStore::Linked { id } => {
                #[cfg(debug_assertions)]
                self.verify_node_in_forest(forest);
                // For linked nodes, count from forest storage
                forest
                    .decorator_links_for_node(*id)
                    .map(|links| links.into_iter().count())
                    .unwrap_or(0)
            },
        };

        (num_ops + num_decorators)
            .try_into()
            .expect("basic block contains more than 2^32 operations and decorators")
    }

    /// Returns an iterator over all operations and decorators, in the order in which they appear
    /// in the program.
    ///
    /// This method requires access to the forest to properly handle linked nodes.
    fn iter<'a>(
        &'a self,
        forest: &'a MastForest,
    ) -> impl Iterator<Item = OperationOrDecorator<'a>> + 'a {
        OperationOrDecoratorIterator::new_with_forest(self, forest)
    }

    /// Performs semantic equality comparison with another BasicBlockNode.
    ///
    /// This method compares two blocks for logical equality by comparing:
    /// - Operations (exact equality)
    /// - Before-enter decorators (by ID)
    /// - After-exit decorators (by ID)
    /// - Operation-indexed decorators (by iterating and comparing their contents)
    ///
    /// Unlike the derived PartialEq, this method works correctly with both owned and linked
    /// decorator storage by accessing the actual decorator data from the forest when needed.
    #[cfg(test)]
    pub fn semantic_eq(&self, other: &BasicBlockNode, forest: &MastForest) -> bool {
        // Compare operations by collecting and comparing
        let self_ops: Vec<_> = self.operations().collect();
        let other_ops: Vec<_> = other.operations().collect();
        if self_ops != other_ops {
            return false;
        }

        // Compare before-enter decorators
        if self.before_enter(forest) != other.before_enter(forest) {
            return false;
        }

        // Compare after-exit decorators
        if self.after_exit(forest) != other.after_exit(forest) {
            return false;
        }

        // Compare operation-indexed decorators by collecting and comparing
        let self_decorators: Vec<_> = self.indexed_decorator_iter(forest).collect();
        let other_decorators: Vec<_> = other.indexed_decorator_iter(forest).collect();

        if self_decorators != other_decorators {
            return false;
        }

        true
    }

    /// Return the MastNodeId of this `BasicBlockNode`, if in `Linked` state.
    pub fn linked_id(&self) -> Option<MastNodeId> {
        self.decorators.linked_id()
    }
}
438
// BATCH VALIDATION
// ================================================================================================

impl BasicBlockNode {
    /// Validates that this BasicBlockNode satisfies the core invariants:
    /// 1. Non-final batches must be full (BATCH_SIZE groups), final batch must be power-of-two
    /// 2. No operation group ends with an operation requiring an immediate value
    /// 3. The last operation group in a batch cannot contain operations requiring immediate values
    /// 4. OpBatch structural consistency (num_groups <= BATCH_SIZE, group size <= GROUP_SIZE)
    /// 5. Immediate values are committed to empty groups and match group contents
    /// 6. OpBatch padding semantics (no padding on empty groups; padded groups end with NOOP)
    ///
    /// Returns an error string describing which invariant was violated if validation fails.
    pub fn validate_batch_invariants(&self) -> Result<(), String> {
        // Check invariant 1: Power-of-two groups in each batch
        self.validate_power_of_two_groups()?;

        // Check invariant 4: OpBatch structural consistency
        // This needs to be done early on as it will validate indptr indexes used in later checks.
        self.validate_batch_structure()?;

        // Control-flow opcodes are expected to be filtered upstream and enforced centrally via
        // MastForest::validate.

        // Check invariants 2 and 3: immediate-ending constraints
        self.validate_no_immediate_endings()?;

        // Check invariant 5: Immediate values must be committed to empty groups
        self.validate_immediate_commitment()?;

        // Check invariant 6: OpBatch padding semantics
        self.validate_padding_semantics()?;

        Ok(())
    }

    /// Validates that non-final batches are full and the final batch is power-of-two.
    ///
    /// This invariant is required by trace generation (see `num_op_groups`) and is expected to
    /// hold for all serialized forests produced by the assembler; violations indicate corrupted
    /// or malformed input.
    fn validate_power_of_two_groups(&self) -> Result<(), String> {
        for (batch_idx, batch) in self.op_batches.iter().enumerate() {
            let num_groups = batch.num_groups();
            if batch_idx + 1 < self.op_batches.len() {
                // Non-final batch: must contain exactly BATCH_SIZE groups.
                if num_groups != BATCH_SIZE {
                    return Err(format!(
                        "Batch {}: {} groups is not full batch size {}",
                        batch_idx, num_groups, BATCH_SIZE
                    ));
                }
            } else if !num_groups.is_power_of_two() {
                // Final batch: group count must be a power of two.
                return Err(format!(
                    "Batch {}: {} groups is not power of two",
                    batch_idx, num_groups
                ));
            }
        }
        Ok(())
    }

    /// Validates that no operation group ends with an operation that has an immediate value.
    /// Also validates that the last operation group in a batch cannot contain operations
    /// requiring immediate values.
    fn validate_no_immediate_endings(&self) -> Result<(), String> {
        for (batch_idx, batch) in self.op_batches.iter().enumerate() {
            let num_groups = batch.num_groups();
            let indptr = batch.indptr();
            let ops = batch.ops();

            // Check each group in the batch
            for group_idx in 0..num_groups {
                let group_start = indptr[group_idx];
                let group_end = indptr[group_idx + 1];

                // Skip empty groups (they contain immediate values, not operations)
                if group_start == group_end {
                    continue;
                }

                let group_ops = &ops[group_start..group_end];

                // Check if this is the last group in the batch
                let is_last_group = group_idx == num_groups - 1;

                if is_last_group {
                    // Last group in a batch cannot contain ANY operations requiring immediate
                    // values (there is no following group left to hold the immediate).
                    for (op_idx, op) in group_ops.iter().enumerate() {
                        if op.imm_value().is_some() {
                            return Err(format!(
                                "Batch {}, group {}: operation at index {} requires immediate value, but this is the last group in batch",
                                batch_idx, group_idx, op_idx
                            ));
                        }
                    }
                } else {
                    // Non-last groups: check that the last operation doesn't require an immediate
                    if let Some(last_op) = group_ops.last()
                        && last_op.imm_value().is_some()
                    {
                        return Err(format!(
                            "Batch {}, group {}: ends with operation requiring immediate value",
                            batch_idx, group_idx
                        ));
                    }
                }
            }
        }
        Ok(())
    }

    /// Validates that OpBatch structure is consistent and won't cause panics during access.
    /// Checks:
    /// - num_groups <= BATCH_SIZE
    /// - indptr array is monotonic non-decreasing
    /// - indptr values are within ops bounds
    /// - each group has at most GROUP_SIZE operations
    fn validate_batch_structure(&self) -> Result<(), String> {
        for (batch_idx, batch) in self.op_batches.iter().enumerate() {
            // Check num_groups is within bounds
            if batch.num_groups() > BATCH_SIZE {
                return Err(format!(
                    "Batch {}: num_groups {} exceeds maximum {}",
                    batch_idx,
                    batch.num_groups(),
                    BATCH_SIZE
                ));
            }

            // Check indptr array consistency
            let indptr = batch.indptr();
            let ops = batch.ops();

            // Full array must be monotonic for serialization (delta encoding)
            for i in 0..indptr.len() - 1 {
                if indptr[i] > indptr[i + 1] {
                    return Err(format!(
                        "Batch {}: indptr[{}] {} > indptr[{}] {} - full array not monotonic (required for serialization)",
                        batch_idx,
                        i,
                        indptr[i],
                        i + 1,
                        indptr[i + 1]
                    ));
                }
            }

            // The final indptr entry must equal ops.len() so slicing by group is always in-bounds.
            let ops_len = ops.len();
            if indptr[indptr.len() - 1] != ops_len {
                return Err(format!(
                    "Batch {}: final indptr value {} doesn't match ops.len() {}",
                    batch_idx,
                    indptr[indptr.len() - 1],
                    ops_len
                ));
            }

            // Check that each group has at most GROUP_SIZE operations
            for group_idx in 0..batch.num_groups() {
                let group_start = indptr[group_idx];
                let group_end = indptr[group_idx + 1];
                let group_size = group_end - group_start;

                if group_size > GROUP_SIZE {
                    return Err(format!(
                        "Batch {}, group {}: contains {} operations, exceeds maximum {}",
                        batch_idx, group_idx, group_size, GROUP_SIZE
                    ));
                }
            }
        }
        Ok(())
    }

    /// Validates that immediate values are committed to empty groups and match group contents.
    /// Checks:
    /// - operation group encodings match committed group values
    /// - each immediate maps to an empty group slot
    /// - immediate group values equal the push immediate
    /// - immediate placement does not exceed num_groups or batch size
    fn validate_immediate_commitment(&self) -> Result<(), String> {
        for (batch_idx, batch) in self.op_batches.iter().enumerate() {
            let num_groups = batch.num_groups();
            let indptr = batch.indptr();
            let ops = batch.ops();
            let groups = batch.groups();

            // Tracks which group slots are claimed by immediate values, so the empty-group check
            // below can distinguish immediate slots from genuinely unused (zero) slots.
            let mut immediate_slots = [false; BATCH_SIZE];

            for group_idx in 0..num_groups {
                let group_start = indptr[group_idx];
                let group_end = indptr[group_idx + 1];

                if group_start == group_end {
                    continue;
                }

                // Re-encode this group's opcodes (OP_BITS bits per op, in op order) and compare
                // against the committed group value.
                let mut group_value: u64 = 0;
                for (local_op_idx, op) in ops[group_start..group_end].iter().enumerate() {
                    let opcode = op.op_code() as u64;
                    group_value |= opcode << (Operation::OP_BITS * local_op_idx);
                }
                if groups[group_idx] != Felt::new(group_value) {
                    return Err(format!(
                        "Batch {}, group {}: committed opcode group does not match operations",
                        batch_idx, group_idx
                    ));
                }

                let (placements, _next_group_idx) = collect_immediate_placements(
                    ops,
                    indptr,
                    group_idx,
                    BATCH_SIZE,
                    Some(num_groups),
                )
                .map_err(|err| format!("Batch {}: {}", batch_idx, err))?;

                // Each immediate must be committed verbatim in its assigned group slot.
                for (imm_group_idx, imm_value) in placements {
                    if groups[imm_group_idx] != imm_value {
                        return Err(format!(
                            "Batch {}: push immediate value mismatch at index {}",
                            batch_idx, imm_group_idx
                        ));
                    }
                    immediate_slots[imm_group_idx] = true;
                }
            }

            // Any empty group not claimed by an immediate must commit to ZERO.
            for group_idx in 0..num_groups {
                if indptr[group_idx] == indptr[group_idx + 1]
                    && !immediate_slots[group_idx]
                    && groups[group_idx] != ZERO
                {
                    return Err(format!(
                        "Batch {}, group {}: empty group must be zero",
                        batch_idx, group_idx
                    ));
                }
            }
        }

        Ok(())
    }

    /// Validates that padding metadata matches batch contents.
    /// - Empty groups cannot be marked as padded.
    /// - Padded groups must end with a NOOP operation.
    fn validate_padding_semantics(&self) -> Result<(), String> {
        for (batch_idx, batch) in self.op_batches.iter().enumerate() {
            // Delegate per-batch checking to OpBatch; prefix errors with the batch index.
            batch
                .validate_padding_semantics()
                .map_err(|err| format!("Batch {}: {}", batch_idx, err))?;
        }

        Ok(())
    }
}
698
699// PRETTY PRINTING
700// ================================================================================================
701
702impl BasicBlockNode {
703    pub(super) fn to_display<'a>(&'a self, mast_forest: &'a MastForest) -> impl fmt::Display + 'a {
704        BasicBlockNodePrettyPrint { block_node: self, mast_forest }
705    }
706
707    pub(super) fn to_pretty_print<'a>(
708        &'a self,
709        mast_forest: &'a MastForest,
710    ) -> impl PrettyPrint + 'a {
711        BasicBlockNodePrettyPrint { block_node: self, mast_forest }
712    }
713}
714
715// MAST NODE TRAIT IMPLEMENTATION
716// ================================================================================================
717
impl MastNodeExt for BasicBlockNode {
    /// Returns a commitment to this basic block.
    fn digest(&self) -> Word {
        self.digest
    }

    /// Returns the decorators to be executed before entering this block.
    ///
    /// `Owned` storage keeps the slice on the node itself; `Linked` storage resolves it
    /// through the forest's centralized decorator info.
    fn before_enter<'a>(&'a self, forest: &'a MastForest) -> &'a [DecoratorId] {
        match &self.decorators {
            DecoratorStore::Owned { before_enter, .. } => before_enter,
            DecoratorStore::Linked { id } => {
                // For linked nodes, get the decorators from the forest's NodeToDecoratorIds
                #[cfg(debug_assertions)]
                self.verify_node_in_forest(forest);
                forest.before_enter_decorators(*id)
            },
        }
    }

    /// Returns the decorators to be executed after exiting this block.
    ///
    /// Mirrors [`Self::before_enter`]: owned storage is read directly, linked storage is
    /// resolved through the forest.
    fn after_exit<'a>(&'a self, forest: &'a MastForest) -> &'a [DecoratorId] {
        match &self.decorators {
            DecoratorStore::Owned { after_exit, .. } => after_exit,
            DecoratorStore::Linked { id } => {
                // For linked nodes, get the decorators from the forest's NodeToDecoratorIds
                #[cfg(debug_assertions)]
                self.verify_node_in_forest(forest);
                forest.after_exit_decorators(*id)
            },
        }
    }

    /// Returns a boxed `Display` implementation for this block.
    fn to_display<'a>(&'a self, mast_forest: &'a MastForest) -> Box<dyn fmt::Display + 'a> {
        Box::new(BasicBlockNode::to_display(self, mast_forest))
    }

    /// Returns a boxed pretty-printer for this block.
    fn to_pretty_print<'a>(&'a self, mast_forest: &'a MastForest) -> Box<dyn PrettyPrint + 'a> {
        Box::new(BasicBlockNode::to_pretty_print(self, mast_forest))
    }

    /// Basic blocks never have child nodes.
    fn has_children(&self) -> bool {
        false
    }

    fn append_children_to(&self, _target: &mut Vec<MastNodeId>) {
        // No children for basic blocks
    }

    fn for_each_child<F>(&self, _f: F)
    where
        F: FnMut(MastNodeId),
    {
        // BasicBlockNode has no children
    }

    /// Returns the domain constant associated with basic block nodes.
    fn domain(&self) -> Felt {
        Self::DOMAIN
    }

    type Builder = BasicBlockNodeBuilder;

    /// Converts this node back into a builder, preserving its batches, digest, and decorators.
    fn to_builder(self, forest: &MastForest) -> Self::Builder {
        // Extract padded decorators and before_enter/after_exit based on storage type
        let (padded_decorators, before_enter, after_exit) = match self.decorators {
            DecoratorStore::Owned { decorators, before_enter, after_exit } => {
                // Decorators are already padded in Owned storage
                (decorators, before_enter, after_exit)
            },
            DecoratorStore::Linked { id } => {
                // For linked nodes, get decorators from forest's centralized storage
                // The decorators are already padded in the centralized storage
                let padded_decorators: DecoratorList = forest
                    .debug_info
                    .decorator_links_for_node(id)
                    .expect("node must exist in forest")
                    .into_iter()
                    .collect();
                let before_enter = forest.before_enter_decorators(id).to_vec();
                let after_exit = forest.after_exit_decorators(id).to_vec();
                (padded_decorators, before_enter, after_exit)
            },
        };

        // Use from_op_batches to avoid re-batching and re-adjusting decorators
        BasicBlockNodeBuilder::from_op_batches(self.op_batches, padded_decorators, self.digest)
            .with_before_enter(before_enter)
            .with_after_exit(after_exit)
    }

    /// Debug-only check that `self` is the exact node stored in `forest` at its linked ID.
    ///
    /// Compares raw pointers, so it catches accidental use of a node with the wrong forest.
    #[cfg(debug_assertions)]
    fn verify_node_in_forest(&self, forest: &MastForest) {
        if let DecoratorStore::Linked { id } = &self.decorators {
            // Verify that this node is the one stored at the given ID in the forest
            let self_ptr = self as *const Self;
            let forest_node = &forest.nodes[*id];
            let forest_node_ptr = match forest_node {
                MastNode::Block(block_node) => block_node as *const BasicBlockNode as *const (),
                _ => panic!("Node type mismatch at {:?}", id),
            };
            let self_as_void = self_ptr as *const ();
            debug_assert_eq!(
                self_as_void, forest_node_ptr,
                "Node pointer mismatch: expected node at {:?} to be self",
                id
            );
        }
    }
}
824
/// Helper pairing a [`BasicBlockNode`] with the [`MastForest`] needed to resolve its decorator
/// IDs during rendering.
struct BasicBlockNodePrettyPrint<'a> {
    // The block being rendered.
    block_node: &'a BasicBlockNode,
    // Forest used to look up decorators referenced by the block.
    mast_forest: &'a MastForest,
}
829
830impl PrettyPrint for BasicBlockNodePrettyPrint<'_> {
831    #[rustfmt::skip]
832    fn render(&self) -> crate::prettier::Document {
833        use crate::prettier::*;
834
835        // e.g. `basic_block a b c end`
836        let single_line = const_text("basic_block")
837            + const_text(" ")
838            + self.
839                block_node
840                .iter(self.mast_forest)
841                .map(|op_or_dec| match op_or_dec {
842                    OperationOrDecorator::Operation(op) => op.render(),
843                    OperationOrDecorator::Decorator(decorator_id) => {
844                        self.mast_forest.decorator_by_id(decorator_id)
845                            .map(|decorator| decorator.render())
846                            .unwrap_or_else(|| const_text("<invalid_decorator_id>"))
847                    },
848                })
849                .reduce(|acc, doc| acc + const_text(" ") + doc)
850                .unwrap_or_default()
851            + const_text(" ")
852            + const_text("end");
853
854        // e.g. `
855        // basic_block
856        //     a
857        //     b
858        //     c
859        // end
860        // `
861
862        let multi_line = indent(
863            4,
864            const_text("basic_block")
865                + nl()
866                + self
867                    .block_node
868                    .iter(self.mast_forest)
869                    .map(|op_or_dec| match op_or_dec {
870                        OperationOrDecorator::Operation(op) => op.render(),
871                        OperationOrDecorator::Decorator(decorator_id) => {
872                            self.mast_forest.decorator_by_id(decorator_id)
873                                .map(|decorator| decorator.render())
874                                .unwrap_or_else(|| const_text("<invalid_decorator_id>"))
875                        },
876                    })
877                    .reduce(|acc, doc| acc + nl() + doc)
878                    .unwrap_or_default(),
879        ) + nl()
880            + const_text("end");
881
882        single_line | multi_line
883    }
884}
885
impl fmt::Display for BasicBlockNodePrettyPrint<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the pretty-printer, which chooses between the single- and
        // multi-line layouts produced by `render`.
        self.pretty_print(f)
    }
}
891
/// Internal source of `(op_index, DecoratorId)` pairs for [`DecoratorOpLinkIterator`]:
/// either a slice of pre-computed links or a live iterator over forest-linked decorators.
enum OpIndexed<'a> {
    Slice(core::slice::Iter<'a, (usize, DecoratorId)>),
    Linked(DecoratedLinksIter<'a>),
}
896
897// DECORATOR ITERATION
898// ================================================================================================
899
/// Iterator used to iterate through the op-indexed decorators of a basic block.
///
/// This lets the caller iterate through operation-indexed decorators with indexes that match the
/// standard (padded) representation of a basic block.
///
/// The wrapped [`OpIndexed`] abstracts over slice-backed and forest-linked decorator sources.
pub struct DecoratorOpLinkIterator<'a>(OpIndexed<'a>);
905
906impl<'a> DecoratorOpLinkIterator<'a> {
907    /// Create a new iterator from a slice of decorator links.
908    pub fn from_slice(decorators: &'a [DecoratedOpLink]) -> Self {
909        Self(OpIndexed::Slice(decorators.iter()))
910    }
911
912    /// Create a new iterator from a linked decorator iterator.
913    pub fn from_linked(decorators: DecoratedLinksIter<'a>) -> Self {
914        Self(OpIndexed::Linked(decorators.into_iter()))
915    }
916}
917
918impl<'a> Iterator for DecoratorOpLinkIterator<'a> {
919    type Item = (usize, DecoratorId);
920
921    fn next(&mut self) -> Option<Self::Item> {
922        match &mut self.0 {
923            OpIndexed::Slice(slice_iter) => slice_iter.next().copied(),
924            OpIndexed::Linked(linked_iter) => linked_iter.next(),
925        }
926    }
927}
928
929impl<'a> ExactSizeIterator for DecoratorOpLinkIterator<'a> {
930    #[inline]
931    fn len(&self) -> usize {
932        match &self.0 {
933            OpIndexed::Slice(slice_iter) => slice_iter.len(),
934            OpIndexed::Linked(linked_iter) => linked_iter.len(),
935        }
936    }
937}
938
// Driver of the iterators' state machines below: tracks which segment of a block's decorators
// (before-enter, operation-indexed middle, after-exit) is currently being emitted.
enum Segment {
    /// Emitting before-enter decorators.
    Before,
    /// Emitting operation-indexed decorators and/or operations.
    Middle,
    /// Emitting after-exit decorators.
    After,
    /// All segments exhausted.
    Done,
}
946
947// RAW DECORATOR ITERATION
948// ================================================================================================
949
/// Iterator used to iterate through the decorator list of a span block
/// while executing operation batches of a span block.
///
/// This lets the caller iterate through a Decorator list with indexes that match the
/// raw (unpadded) representation of a basic block.
///
/// In other words, this makes `BasicBlockNode::raw_decorator_iter` padding-unaware, or
/// equivalently, it "removes" the padding from the decorators' operation indices.
pub struct RawDecoratorOpLinkIterator<'a> {
    before: core::slice::Iter<'a, DecoratorId>, // before-enter decorators, emitted at raw index 0
    middle: RawMid<'a>,                         // op-indexed decorators carrying padded indices
    after: core::slice::Iter<'a, DecoratorId>,  // after-exit decorators, emitted at the sentinel
    pad2raw: PaddedToRawPrefix, // indexed by padded indices
    total_raw_ops: usize,       // count of raw ops
    seg: Segment,               // current segment of the state machine
}
966
/// Source of the middle (operation-indexed) decorators for [`RawDecoratorOpLinkIterator`]:
/// either a slice of `(padded_index, DecoratorId)` pairs or a forest-linked iterator.
enum RawMid<'a> {
    Slice(core::slice::Iter<'a, (usize, DecoratorId)>),
    Linked(DecoratedLinksIter<'a>),
}
971
972impl<'a> RawDecoratorOpLinkIterator<'a> {
973    pub fn from_slice_iters(
974        before_enter: &'a [DecoratorId],
975        decorators: &'a [(usize, DecoratorId)], // contains adjusted indices
976        after_exit: &'a [DecoratorId],
977        op_batches: &'a [OpBatch],
978    ) -> Self {
979        let pad2raw = PaddedToRawPrefix::new(op_batches);
980        let raw2pad = RawToPaddedPrefix::new(op_batches);
981        let total_raw_ops = raw2pad.raw_ops();
982
983        Self {
984            before: before_enter.iter(),
985            middle: RawMid::Slice(decorators.iter()),
986            after: after_exit.iter(),
987            pad2raw,
988            total_raw_ops,
989            seg: Segment::Before,
990        }
991    }
992
993    pub fn from_linked(
994        before_enter: &'a [DecoratorId],
995        decorators: DecoratedLinksIter<'a>,
996        after_exit: &'a [DecoratorId],
997        op_batches: &'a [OpBatch],
998    ) -> Self {
999        let pad2raw = PaddedToRawPrefix::new(op_batches);
1000        let raw2pad = RawToPaddedPrefix::new(op_batches);
1001        let total_raw_ops = raw2pad.raw_ops();
1002
1003        Self {
1004            before: before_enter.iter(),
1005            middle: RawMid::Linked(decorators.into_iter()),
1006            after: after_exit.iter(),
1007            pad2raw,
1008            total_raw_ops,
1009            seg: Segment::Before,
1010        }
1011    }
1012
1013    fn middle_next(&mut self) -> Option<(usize, DecoratorId)> {
1014        match &mut self.middle {
1015            RawMid::Slice(slice_iter) => slice_iter.next().copied(),
1016            RawMid::Linked(linked_iter) => linked_iter.next(),
1017        }
1018    }
1019}
1020
impl<'a> Iterator for RawDecoratorOpLinkIterator<'a> {
    type Item = (usize, DecoratorId);

    /// Yields `(raw_op_index, decorator_id)` pairs in three phases: before-enter decorators at
    /// raw index 0, then the middle decorators with their padded indices translated to raw
    /// indices, then after-exit decorators at the sentinel index `total_raw_ops`.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match self.seg {
                Segment::Before => {
                    if let Some(&id) = self.before.next() {
                        // Before-enter decorators attach to the first operation (raw index 0).
                        return Some((0, id));
                    }
                    self.seg = Segment::Middle;
                },
                Segment::Middle => {
                    if let Some((padded_idx, id)) = self.middle_next() {
                        // Subtract the number of padding ops preceding `padded_idx` to
                        // recover the raw index.
                        let raw_idx = padded_idx - self.pad2raw[padded_idx];
                        return Some((raw_idx, id));
                    }
                    self.seg = Segment::After;
                },
                Segment::After => {
                    if let Some(&id) = self.after.next() {
                        // After-exit decorators attach to the sentinel raw index
                        return Some((self.total_raw_ops, id));
                    }
                    self.seg = Segment::Done;
                },
                Segment::Done => return None,
            }
        }
    }
}
1052
1053// OPERATION OR DECORATOR
1054// ================================================================================================
1055
/// Encodes either an [`Operation`] or a [`crate::operations::Decorator`].
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum OperationOrDecorator<'a> {
    /// A reference to an operation inside a basic block.
    Operation(&'a Operation),
    /// The ID of a decorator attached to the block.
    Decorator(DecoratorId),
}
1062
/// Iterator over a basic block's contents in execution order: before-enter decorators,
/// then operations interleaved with their attached decorators, then after-exit decorators.
struct OperationOrDecoratorIterator<'a> {
    node: &'a BasicBlockNode,
    // Forest used to resolve decorators for `Linked` storage; `None` means linked
    // decorators cannot be retrieved and are silently skipped.
    forest: Option<&'a MastForest>,

    // extra segments
    before: core::slice::Iter<'a, DecoratorId>,
    after: core::slice::Iter<'a, DecoratorId>,

    // operation traversal
    batch_index: usize,
    op_index_in_batch: usize,
    op_index: usize, // across all batches

    // decorators inside the block (sorted by op index)
    decorator_list_next_index: usize,
    seg: Segment,
}
1080
impl<'a> OperationOrDecoratorIterator<'a> {
    /// Creates an iterator over `node`'s contents, resolving decorators through `forest`.
    fn new_with_forest(node: &'a BasicBlockNode, forest: &'a MastForest) -> Self {
        Self {
            node,
            forest: Some(forest),
            before: node.before_enter(forest).iter(),
            after: node.after_exit(forest).iter(),
            batch_index: 0,
            op_index_in_batch: 0,
            op_index: 0,
            decorator_list_next_index: 0,
            seg: Segment::Before,
        }
    }

    /// Returns the next decorator attached to the current `op_index`, if any.
    ///
    /// `decorator_list_next_index` has different meanings per storage variant: for `Owned` it
    /// indexes into the node's single sorted decorator list, for `Linked` it indexes into the
    /// per-operation list returned by the forest.
    /// NOTE(review): these two interpretations interact with how the caller resets the index
    /// between operations — confirm the reset policy matches the storage variant in use.
    #[inline]
    fn next_decorator_if_due(&mut self) -> Option<OperationOrDecorator<'a>> {
        match &self.node.decorators {
            DecoratorStore::Owned { decorators, .. } => {
                // Simple case for owned decorators - use index lookup
                if let Some((op_idx, deco)) = decorators.get(self.decorator_list_next_index)
                    && *op_idx == self.op_index
                {
                    self.decorator_list_next_index += 1;
                    Some(OperationOrDecorator::Decorator(*deco))
                } else {
                    None
                }
            },
            DecoratorStore::Linked { id } => {
                // For linked nodes, use forest access if available
                if let Some(forest) = self.forest {
                    // Get decorators for the current operation from the forest
                    let decorator_ids = forest.decorator_indices_for_op(*id, self.op_index);

                    if self.decorator_list_next_index < decorator_ids.len() {
                        let decorator_id = decorator_ids[self.decorator_list_next_index];
                        self.decorator_list_next_index += 1;
                        Some(OperationOrDecorator::Decorator(decorator_id))
                    } else {
                        None
                    }
                } else {
                    // No forest access available, can't retrieve decorators
                    None
                }
            },
        }
    }
}
1131
1132impl<'a> Iterator for OperationOrDecoratorIterator<'a> {
1133    type Item = OperationOrDecorator<'a>;
1134
1135    fn next(&mut self) -> Option<Self::Item> {
1136        loop {
1137            match self.seg {
1138                Segment::Before => {
1139                    if let Some(id) = self.before.next() {
1140                        return Some(OperationOrDecorator::Decorator(*id));
1141                    }
1142                    self.seg = Segment::Middle;
1143                },
1144
1145                Segment::Middle => {
1146                    // 1) emit any decorators for the current op_index
1147                    if let Some(d) = self.next_decorator_if_due() {
1148                        return Some(d);
1149                    }
1150
1151                    // 2) otherwise emit the operation at current indices
1152                    if let Some(batch) = self.node.op_batches.get(self.batch_index) {
1153                        if let Some(op) = batch.ops.get(self.op_index_in_batch) {
1154                            self.op_index_in_batch += 1;
1155                            self.op_index += 1;
1156                            // Reset decorator index when moving to a new operation
1157                            self.decorator_list_next_index = 0;
1158                            return Some(OperationOrDecorator::Operation(op));
1159                        }
1160                        // advance to next batch and retry
1161                        self.batch_index += 1;
1162                        self.op_index_in_batch = 0;
1163                    } else {
1164                        // no more ops, decorators flushed through the operation index
1165                        // and next_decorator_if_due
1166                        self.seg = Segment::After;
1167                    }
1168                },
1169
1170                Segment::After => {
1171                    if let Some(id) = self.after.next() {
1172                        return Some(OperationOrDecorator::Decorator(*id));
1173                    }
1174                    self.seg = Segment::Done;
1175                },
1176
1177                Segment::Done => return None,
1178            }
1179        }
1180    }
1181}
1182
1183// HELPER FUNCTIONS
1184// ================================================================================================
1185
1186/// Checks if a given decorators list is valid (only checked in debug mode)
1187/// - Assert the decorator list is in ascending order.
1188/// - Assert the last op index in decorator list is less than or equal to the number of operations.
1189#[cfg(debug_assertions)]
1190pub(crate) fn validate_decorators(operations_len: usize, decorators: &DecoratorList) {
1191    if !decorators.is_empty() {
1192        // check if decorator list is sorted
1193        for i in 0..(decorators.len() - 1) {
1194            debug_assert!(decorators[i + 1].0 >= decorators[i].0, "unsorted decorators list");
1195        }
1196        // assert the last index in decorator list is less than or equal to operations vector length
1197        debug_assert!(
1198            operations_len >= decorators.last().expect("empty decorators list").0,
1199            "last op index in decorator list should be less than or equal to the number of ops"
1200        );
1201    }
1202}
1203
/// Raw-indexed prefix: how many paddings strictly before raw index r
///
/// This struct provides O(1) lookup for converting raw operation indices to padded indices.
/// For any raw index r, `raw_to_padded[r] = count of padding ops strictly before raw index r`.
///
/// Length: `raw_ops + 1` (includes sentinel entry at `r == raw_ops`)
/// Usage: `padded_idx = r + raw_to_padded[r]` (addition)
#[derive(Debug, Clone)]
pub struct RawToPaddedPrefix(Vec<usize>);
1213
1214impl RawToPaddedPrefix {
1215    /// Build a raw-indexed prefix array from op batches.
1216    ///
1217    /// For each raw index r, records how many padding operations have been inserted before r.
1218    /// Includes a sentinel entry at `r == raw_ops`.
1219    pub fn new(op_batches: &[OpBatch]) -> Self {
1220        let mut v = Vec::new();
1221        let mut pads_so_far = 0usize;
1222
1223        for b in op_batches {
1224            let n = b.num_groups();
1225            let indptr = b.indptr();
1226            let padding = b.padding();
1227
1228            for g in 0..n {
1229                let group_len = indptr[g + 1] - indptr[g];
1230                let has_pad = padding[g] as usize;
1231                let raw_in_g = group_len - has_pad;
1232
1233                // For each raw op, record how many paddings were before it.
1234                v.extend(repeat_n(pads_so_far, raw_in_g));
1235
1236                // After the group's raw ops, account for the (optional) padding op.
1237                pads_so_far += has_pad; // adds 1 if there is a padding, else 0
1238            }
1239        }
1240
1241        // Sentinel for r == raw_ops
1242        v.push(pads_so_far);
1243        RawToPaddedPrefix(v)
1244    }
1245
1246    /// Get the total number of raw operations (excluding sentinel).
1247    #[inline]
1248    pub fn raw_ops(&self) -> usize {
1249        self.0.len() - 1
1250    }
1251}
1252
/// Get the number of padding operations before raw index r.
///
/// ## Sentinel Access
///
/// Some decorators have an operation index equal to the length of the
/// operations array, to ensure they are executed at the end of the block
/// (since the semantics of the decorator index is that it must be executed
/// before the operation index it points to). Indexing at `r == raw_ops` is
/// therefore valid and returns the total padding count.
impl core::ops::Index<usize> for RawToPaddedPrefix {
    type Output = usize;
    #[inline]
    fn index(&self, idx: usize) -> &Self::Output {
        // Panics if `idx > raw_ops` (out of bounds of the prefix array).
        &self.0[idx]
    }
}
1268
/// Padded-indexed prefix: how many paddings strictly before padded index p
///
/// This struct provides O(1) lookup for converting padded operation indices to raw indices.
/// For any padded index p, `padded_to_raw[p] = count of padding ops strictly before padded index
/// p`.
///
/// Length: `padded_ops + 1` (includes sentinel entry at `p == padded_ops`)
/// Usage: `raw_idx = p - padded_to_raw[p]` (subtraction)
#[derive(Debug, Clone)]
pub struct PaddedToRawPrefix(Vec<usize>);
1279
1280impl PaddedToRawPrefix {
1281    /// Build a padded-indexed prefix array from op batches.
1282    ///
1283    /// Simulates emission of the padded sequence, recording padding count before each position.
1284    /// Includes a sentinel entry at `p == padded_ops`.
1285    pub fn new(op_batches: &[OpBatch]) -> Self {
1286        // Exact capacity to avoid reallocations: sum of per-group lengths across all batches.
1287        let padded_ops = op_batches
1288            .iter()
1289            .map(|b| {
1290                let n = b.num_groups();
1291                let indptr = b.indptr();
1292                indptr[1..=n]
1293                    .iter()
1294                    .zip(&indptr[..n])
1295                    .map(|(end, start)| end - start)
1296                    .sum::<usize>()
1297            })
1298            .sum::<usize>();
1299
1300        let mut v = Vec::with_capacity(padded_ops + 1);
1301        let mut pads_so_far = 0usize;
1302
1303        for b in op_batches {
1304            let n = b.num_groups();
1305            let indptr = b.indptr();
1306            let padding = b.padding();
1307
1308            for g in 0..n {
1309                let group_len = indptr[g + 1] - indptr[g];
1310                let has_pad = padding[g] as usize;
1311                let raw_in_g = group_len - has_pad;
1312
1313                // Emit raw ops of the group.
1314                v.extend(repeat_n(pads_so_far, raw_in_g));
1315
1316                // Emit the optional padding op.
1317                if has_pad == 1 {
1318                    v.push(pads_so_far);
1319                    pads_so_far += 1; // subsequent positions see one more padding before them
1320                }
1321            }
1322        }
1323
1324        // Sentinel at p == padded_ops
1325        v.push(pads_so_far);
1326
1327        PaddedToRawPrefix(v)
1328    }
1329}
1330
/// Get the number of padding operations before padded index p.
///
/// ## Sentinel Access
///
/// Some decorators have an operation index equal to the length of the
/// operations array, to ensure they are executed at the end of the block
/// (since the semantics of the decorator index is that it must be executed
/// before the operation index it points to). Indexing at `p == padded_ops` is
/// therefore valid and returns the total padding count.
impl core::ops::Index<usize> for PaddedToRawPrefix {
    type Output = usize;
    #[inline]
    fn index(&self, idx: usize) -> &Self::Output {
        // Panics if `idx > padded_ops` (out of bounds of the prefix array).
        &self.0[idx]
    }
}
1346
1347/// Groups the provided operations into batches and computes the hash of the block.
1348fn batch_and_hash_ops(ops: Vec<Operation>) -> (Vec<OpBatch>, Word) {
1349    // Group the operations into batches.
1350    let batches = batch_ops(ops);
1351
1352    // Compute the hash of all operation groups.
1353    let op_groups: Vec<Felt> = batches.iter().flat_map(|batch| batch.groups).collect();
1354    let hash = hasher::hash_elements(&op_groups);
1355
1356    (batches, hash)
1357}
1358
1359/// Groups the provided operations into batches as described in the docs for this module (i.e., up
1360/// to 9 operations per group, and 8 groups per batch).
1361fn batch_ops(ops: Vec<Operation>) -> Vec<OpBatch> {
1362    let mut batches = Vec::<OpBatch>::new();
1363    let mut batch_acc = OpBatchAccumulator::new();
1364
1365    for op in ops {
1366        // If the operation cannot be accepted into the current accumulator, add the contents of
1367        // the accumulator to the list of batches and start a new accumulator.
1368        if !batch_acc.can_accept_op(op) {
1369            let batch = batch_acc.into_batch();
1370            batch_acc = OpBatchAccumulator::new();
1371
1372            batches.push(batch);
1373        }
1374
1375        // Add the operation to the accumulator.
1376        batch_acc.add_op(op);
1377    }
1378
1379    // Make sure we finished processing the last batch.
1380    if !batch_acc.is_empty() {
1381        let batch = batch_acc.into_batch();
1382        batches.push(batch);
1383    }
1384
1385    batches
1386}
1387
1388// ------------------------------------------------------------------------------------------------
/// Represents the operation data for a [`BasicBlockNodeBuilder`].
///
/// The decorators are bundled with the operation data to maintain the invariant that
/// decorator indices match the format of the operations:
/// - `Raw`: decorators have raw (unpadded) indices
/// - `Batched`: decorators have padded indices
#[derive(Debug)]
enum OperationData {
    /// Raw operations with raw decorator indices
    Raw {
        operations: Vec<Operation>,
        decorators: DecoratorList,
    },
    /// Pre-batched operations with padded decorator indices
    Batched {
        op_batches: Vec<OpBatch>,
        decorators: DecoratorList,
    },
}
1408
/// Builder for creating [`BasicBlockNode`] instances with decorators.
#[derive(Debug)]
pub struct BasicBlockNodeBuilder {
    // Operations plus their operation-indexed decorators (raw or pre-batched).
    operation_data: OperationData,
    // Decorators to execute before entering the block.
    before_enter: Vec<DecoratorId>,
    // Decorators to execute after exiting the block.
    after_exit: Vec<DecoratorId>,
    // Forced digest; `None` means the digest is computed from the operations.
    digest: Option<Word>,
}
1417
1418impl BasicBlockNodeBuilder {
1419    /// Creates a new builder for a BasicBlockNode with the specified operations and decorators.
1420    ///
1421    /// The decorators must use raw (unpadded) operation indices.
1422    pub fn new(operations: Vec<Operation>, decorators: DecoratorList) -> Self {
1423        Self {
1424            operation_data: OperationData::Raw { operations, decorators },
1425            before_enter: Vec::new(),
1426            after_exit: Vec::new(),
1427            digest: None,
1428        }
1429    }
1430
1431    /// Creates a builder from pre-existing OpBatches with padded decorator indices.
1432    ///
1433    /// This constructor is used during deserialization where operations are already batched
1434    /// and decorators already use padded indices. The digest must also be provided.
1435    ///
1436    /// The decorators must use padded operation indices that match the batched operations.
1437    pub(crate) fn from_op_batches(
1438        op_batches: Vec<OpBatch>,
1439        decorators: DecoratorList,
1440        digest: Word,
1441    ) -> Self {
1442        Self {
1443            operation_data: OperationData::Batched { op_batches, decorators },
1444            before_enter: Vec::new(),
1445            after_exit: Vec::new(),
1446            digest: Some(digest),
1447        }
1448    }
1449
1450    /// Builds the BasicBlockNode with the specified decorators.
1451    pub fn build(self) -> Result<BasicBlockNode, MastForestError> {
1452        let (op_batches, digest, padded_decorators) = match self.operation_data {
1453            OperationData::Raw { operations, decorators } => {
1454                if operations.is_empty() {
1455                    return Err(MastForestError::EmptyBasicBlock);
1456                }
1457
1458                // Validate decorators list (only in debug mode).
1459                #[cfg(debug_assertions)]
1460                validate_decorators(operations.len(), &decorators);
1461
1462                let (op_batches, computed_digest) = batch_and_hash_ops(operations);
1463                // Batch operations (adds padding NOOPs)
1464                // Adjust decorators from raw to padded indices
1465                let padded_decorators = BasicBlockNode::adjust_decorators(decorators, &op_batches);
1466
1467                // Use the forced digest if provided, otherwise use the computed digest
1468                let digest = self.digest.unwrap_or(computed_digest);
1469
1470                (op_batches, digest, padded_decorators)
1471            },
1472            OperationData::Batched { op_batches, decorators } => {
1473                if op_batches.is_empty() {
1474                    return Err(MastForestError::EmptyBasicBlock);
1475                }
1476
1477                // Decorators are already padded - no adjustment needed!
1478                let digest = self.digest.expect("digest must be set for batched operations");
1479
1480                (op_batches, digest, decorators)
1481            },
1482        };
1483
1484        Ok(BasicBlockNode {
1485            op_batches,
1486            digest,
1487            decorators: DecoratorStore::Owned {
1488                decorators: padded_decorators,
1489                before_enter: self.before_enter.clone(),
1490                after_exit: self.after_exit.clone(),
1491            },
1492        })
1493    }
1494
1495    /// Add this node to a forest using relaxed validation.
1496    ///
1497    /// This method is used during deserialization where nodes may reference child nodes
1498    /// that haven't been added to the forest yet. The child node IDs have already been
1499    /// validated against the expected final node count during the `try_into_mast_node_builder`
1500    /// step, so we can safely skip validation here.
1501    ///
1502    /// Note: This is not part of the `MastForestContributor` trait because it's only
1503    /// intended for internal use during deserialization.
1504    ///
1505    /// For BasicBlockNode, this is equivalent to the normal `add_to_forest` since basic blocks
1506    /// don't have child nodes to validate.
1507    pub(in crate::mast) fn add_to_forest_relaxed(
1508        self,
1509        forest: &mut MastForest,
1510    ) -> Result<MastNodeId, MastForestError> {
1511        // For deserialization: decorators are already in forest.debug_info,
1512        // so we don't register them again. We just create the node.
1513
1514        let future_node_id = MastNodeId::new_unchecked(forest.nodes.len() as u32);
1515
1516        // Process based on operation data type
1517        let (op_batches, digest) = match self.operation_data {
1518            OperationData::Raw { operations, decorators: _ } => {
1519                if operations.is_empty() {
1520                    return Err(MastForestError::EmptyBasicBlock);
1521                }
1522
1523                // Batch operations (adds padding NOOPs)
1524                let (op_batches, computed_digest) = batch_and_hash_ops(operations);
1525
1526                // Use the forced digest if provided, otherwise use the computed digest
1527                let digest = self.digest.unwrap_or(computed_digest);
1528
1529                (op_batches, digest)
1530            },
1531            OperationData::Batched { op_batches, decorators: _ } => {
1532                if op_batches.is_empty() {
1533                    return Err(MastForestError::EmptyBasicBlock);
1534                }
1535
1536                // For batched operations, digest must be set
1537                let digest = self.digest.expect("digest must be set for batched operations");
1538
1539                (op_batches, digest)
1540            },
1541        };
1542
1543        // Create the node in the forest with Linked variant
1544        // Note: Decorators are already in forest.debug_info from deserialization
1545        let node_id = forest
1546            .nodes
1547            .push(MastNode::Block(BasicBlockNode {
1548                op_batches,
1549                digest,
1550                decorators: DecoratorStore::Linked { id: future_node_id },
1551            }))
1552            .map_err(|_| MastForestError::TooManyNodes)?;
1553
1554        Ok(node_id)
1555    }
1556}
1557
1558impl MastForestContributor for BasicBlockNodeBuilder {
1559    fn add_to_forest(self, forest: &mut MastForest) -> Result<MastNodeId, MastForestError> {
1560        // Determine the node ID that will be assigned
1561        let future_node_id = MastNodeId::new_unchecked(forest.nodes.len() as u32);
1562
1563        // Process based on operation data type
1564        let (op_batches, digest, padded_decorators) = match self.operation_data {
1565            OperationData::Raw { operations, decorators } => {
1566                if operations.is_empty() {
1567                    return Err(MastForestError::EmptyBasicBlock);
1568                }
1569
1570                // Validate decorators list (only in debug mode).
1571                #[cfg(debug_assertions)]
1572                validate_decorators(operations.len(), &decorators);
1573
1574                // Batch operations (adds padding NOOPs)
1575                let (op_batches, computed_digest) = batch_and_hash_ops(operations);
1576
1577                // Use the forced digest if provided, otherwise use the computed digest
1578                let digest = self.digest.unwrap_or(computed_digest);
1579
1580                // Adjust decorator indices from raw to padded
1581                let padded_decorators = BasicBlockNode::adjust_decorators(decorators, &op_batches);
1582
1583                (op_batches, digest, padded_decorators)
1584            },
1585            OperationData::Batched { op_batches, decorators } => {
1586                if op_batches.is_empty() {
1587                    return Err(MastForestError::EmptyBasicBlock);
1588                }
1589
1590                // Decorators are already padded - no adjustment needed!
1591                let digest = self.digest.expect("digest must be set for batched operations");
1592
1593                (op_batches, digest, decorators)
1594            },
1595        };
1596
1597        // Add decorator info to the forest storage
1598        forest
1599            .debug_info
1600            .register_op_indexed_decorators(future_node_id, padded_decorators)
1601            .map_err(MastForestError::DecoratorError)?;
1602
1603        // Add node-level decorators to the centralized NodeToDecoratorIds for efficient access
1604        forest.register_node_decorators(future_node_id, &self.before_enter, &self.after_exit);
1605
1606        // Create the node in the forest with Linked variant from the start
1607        let node_id = forest
1608            .nodes
1609            .push(MastNode::Block(BasicBlockNode {
1610                op_batches,
1611                digest,
1612                decorators: DecoratorStore::Linked { id: future_node_id },
1613            }))
1614            .map_err(|_| MastForestError::TooManyNodes)?;
1615
1616        Ok(node_id)
1617    }
1618
1619    fn fingerprint_for_node(
1620        &self,
1621        forest: &MastForest,
1622        _hash_by_node_id: &impl LookupByIdx<MastNodeId, MastNodeFingerprint>,
1623    ) -> Result<MastNodeFingerprint, MastForestError> {
1624        // For BasicBlockNode, we need to implement custom logic because BasicBlock has special
1625        // decorator handling with operation indices that other nodes don't have
1626
1627        // Process based on operation data type
1628        let (op_batches, digest, raw_decorators) = match &self.operation_data {
1629            OperationData::Raw { operations, decorators } => {
1630                // Compute digest - use forced digest if available, otherwise compute normally
1631                let (op_batches, computed_digest) = batch_and_hash_ops(operations.clone());
1632                let digest = self.digest.unwrap_or(computed_digest);
1633
1634                // Decorators are already in raw form - no conversion needed
1635                #[cfg(debug_assertions)]
1636                {
1637                    validate_decorators(operations.len(), decorators);
1638                }
1639
1640                (op_batches, digest, decorators.clone())
1641            },
1642            OperationData::Batched { op_batches, decorators } => {
1643                let digest = self.digest.expect("digest must be set for batched operations");
1644
1645                // Convert from padded to raw indices for fingerprinting
1646                let pad2raw = PaddedToRawPrefix::new(op_batches);
1647                let raw_decorators: Vec<(usize, DecoratorId)> = decorators
1648                    .iter()
1649                    .map(|(padded_idx, decorator_id)| {
1650                        let raw_idx = padded_idx - pad2raw[*padded_idx];
1651                        (raw_idx, *decorator_id)
1652                    })
1653                    .collect();
1654
1655                (op_batches.clone(), digest, raw_decorators)
1656            },
1657        };
1658
1659        // Collect before_enter decorator fingerprints
1660        let before_enter_bytes: Vec<[u8; 32]> = self
1661            .before_enter
1662            .iter()
1663            .map(|&id| *forest[id].fingerprint().as_bytes())
1664            .collect();
1665
1666        // Collect op-indexed decorator data (using raw indices)
1667        let adjusted_decorators = raw_decorators;
1668
1669        // Collect op-indexed decorator data
1670        let mut op_decorator_data = Vec::with_capacity(adjusted_decorators.len() * 33);
1671        for (raw_op_idx, decorator_id) in &adjusted_decorators {
1672            op_decorator_data.extend_from_slice(&raw_op_idx.to_le_bytes());
1673            op_decorator_data.extend_from_slice(forest[*decorator_id].fingerprint().as_bytes());
1674        }
1675
1676        // Collect after_exit decorator fingerprints
1677        let after_exit_bytes: Vec<[u8; 32]> =
1678            self.after_exit.iter().map(|&id| *forest[id].fingerprint().as_bytes()).collect();
1679
1680        // Collect assert operation data
1681        let mut assert_data = Vec::new();
1682        for (op_idx, op) in op_batches.iter().flat_map(|batch| batch.ops()).enumerate() {
1683            if let Operation::U32assert2(inner_value)
1684            | Operation::Assert(inner_value)
1685            | Operation::MpVerify(inner_value) = op
1686            {
1687                let op_idx: u32 = op_idx
1688                    .try_into()
1689                    .expect("there are more than 2^{32}-1 operations in basic block");
1690
1691                // we include the opcode to differentiate between `Assert` and `U32assert2`
1692                assert_data.push(op.op_code());
1693                // we include the operation index to distinguish between basic blocks that
1694                // would have the same assert instructions, but in a different order
1695                assert_data.extend_from_slice(&op_idx.to_le_bytes());
1696                let inner_value = inner_value.as_canonical_u64();
1697                assert_data.extend_from_slice(&inner_value.to_le_bytes());
1698            }
1699        }
1700
1701        // Create iterator of slices from all collected data
1702        let decorator_bytes_iter = before_enter_bytes
1703            .iter()
1704            .map(|bytes| bytes.as_slice())
1705            .chain(core::iter::once(op_decorator_data.as_slice()))
1706            .chain(after_exit_bytes.iter().map(|bytes| bytes.as_slice()))
1707            .chain(core::iter::once(assert_data.as_slice()));
1708
1709        if self.before_enter.is_empty()
1710            && self.after_exit.is_empty()
1711            && adjusted_decorators.is_empty()
1712            && assert_data.is_empty()
1713        {
1714            Ok(MastNodeFingerprint::new(digest))
1715        } else {
1716            let decorator_root = Blake3_256::hash_iter(decorator_bytes_iter);
1717            Ok(MastNodeFingerprint::with_decorator_root(digest, decorator_root))
1718        }
1719    }
1720
1721    fn remap_children(self, _remapping: &impl LookupByIdx<MastNodeId, MastNodeId>) -> Self {
1722        // BasicBlockNode has no children to remap
1723        self
1724    }
1725
1726    fn with_before_enter(mut self, decorators: impl Into<Vec<DecoratorId>>) -> Self {
1727        self.before_enter = decorators.into();
1728        self
1729    }
1730
1731    fn with_after_exit(mut self, decorators: impl Into<Vec<DecoratorId>>) -> Self {
1732        self.after_exit = decorators.into();
1733        self
1734    }
1735
1736    fn append_before_enter(&mut self, decorators: impl IntoIterator<Item = DecoratorId>) {
1737        self.before_enter.extend(decorators);
1738    }
1739
1740    fn append_after_exit(&mut self, decorators: impl IntoIterator<Item = DecoratorId>) {
1741        self.after_exit.extend(decorators);
1742    }
1743
1744    fn with_digest(mut self, digest: Word) -> Self {
1745        self.digest = Some(digest);
1746        self
1747    }
1748}
1749
#[cfg(any(test, feature = "arbitrary"))]
impl proptest::prelude::Arbitrary for BasicBlockNodeBuilder {
    type Parameters = super::arbitrary::BasicBlockNodeParams;
    type Strategy = proptest::strategy::BoxedStrategy<Self>;

    // Generates a random `BasicBlockNodeBuilder`: a random non-control operation sequence
    // paired with a list of `(op_index, DecoratorId)` pairs sorted by op index, bounded by
    // the provided `params`.
    fn arbitrary_with(params: Self::Parameters) -> Self::Strategy {
        use proptest::prelude::*;

        use super::arbitrary::{decorator_id_strategy, op_non_control_sequence_strategy};

        (op_non_control_sequence_strategy(params.max_ops_len),)
            .prop_flat_map(move |(ops,)| {
                let ops_len = ops.len().max(1); // ensure at least 1 op
                // For builders, decorator indices must be strictly less than ops_len
                // because they reference actual operation positions
                // NOTE(review): the `max(1)` keeps the `0..ops_len` range non-empty
                // (proptest panics on empty ranges), but if `ops` were empty any generated
                // decorator would reference op index 0 of an empty op list — confirm the
                // ops strategy never yields an empty sequence.
                prop::collection::vec(
                    (0..ops_len, decorator_id_strategy(params.max_decorator_id_u32)),
                    0..=params.max_pairs,
                )
                .prop_map(move |mut decorators| {
                    // Decorator lists are kept sorted by op index, matching the order
                    // `BasicBlockNodeBuilder::new` expects.
                    decorators.sort_by_key(|(i, _)| *i);
                    (ops.clone(), decorators)
                })
            })
            .prop_map(|(ops, decorators)| Self::new(ops, decorators))
            .boxed()
    }
}