use alloc::vec::Vec;
use core::{
    borrow::{Borrow, BorrowMut},
    ops::Range,
};

use miden_core::{
    Felt, ONE, WORD_SIZE, Word, ZERO,
    field::PrimeCharacteristicRing,
    utils::{Matrix, RowMajorMatrix, range},
};

use super::{
    CHIPLETS_OFFSET, CHIPLETS_WIDTH, CLK_COL_IDX, CTX_COL_IDX, DECODER_TRACE_OFFSET,
    DECODER_TRACE_WIDTH, FN_HASH_OFFSET, PADDED_TRACE_WIDTH, RANGE_CHECK_TRACE_OFFSET,
    RANGE_CHECK_TRACE_WIDTH, RowIndex, STACK_TRACE_OFFSET, STACK_TRACE_WIDTH,
    chiplets::{
        BITWISE_A_COL_IDX, BITWISE_B_COL_IDX, BITWISE_OUTPUT_COL_IDX, HASHER_NODE_INDEX_COL_IDX,
        HASHER_STATE_COL_RANGE, MEMORY_CLK_COL_IDX, MEMORY_CTX_COL_IDX, MEMORY_IDX0_COL_IDX,
        MEMORY_IDX1_COL_IDX, MEMORY_V_COL_RANGE, MEMORY_WORD_COL_IDX, NUM_ACE_SELECTORS,
        ace::{
            CLK_IDX, CTX_IDX, EVAL_OP_IDX, ID_0_IDX, ID_1_IDX, ID_2_IDX, M_0_IDX, M_1_IDX, PTR_IDX,
            READ_NUM_EVAL_IDX, SELECTOR_BLOCK_IDX, SELECTOR_START_IDX, V_0_0_IDX, V_0_1_IDX,
            V_1_0_IDX, V_1_1_IDX, V_2_0_IDX, V_2_1_IDX,
        },
        hasher::{DIGEST_LEN, HASH_CYCLE_LEN, LAST_CYCLE_ROW, STATE_WIDTH},
    },
    decoder::{
        GROUP_COUNT_COL_IDX, HASHER_STATE_OFFSET, IN_SPAN_COL_IDX, IS_CALL_FLAG_COL_IDX,
        IS_LOOP_BODY_FLAG_COL_IDX, IS_LOOP_FLAG_COL_IDX, IS_SYSCALL_FLAG_COL_IDX,
        NUM_HASHER_COLUMNS, NUM_OP_BATCH_FLAGS, OP_BATCH_FLAGS_OFFSET, OP_BITS_EXTRA_COLS_OFFSET,
        USER_OP_HELPERS_OFFSET,
    },
    stack::{B0_COL_IDX, B1_COL_IDX, H0_COL_IDX},
};

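/// Range of main trace columns containing the decoder's hasher state.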
const DECODER_HASHER_RANGE: Range<usize> =
    range(DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET, NUM_HASHER_COLUMNS);

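/// A typed, `#[repr(C)]` view over a single row of the main trace.
///
/// Because the struct contains only `T`-typed fields laid out in declaration order, a `[T]`
/// slice of length `TRACE_WIDTH` can be reinterpreted as a `MainTraceRow<T>` via the
/// `Borrow`/`BorrowMut` impls below.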
#[derive(Debug)]
#[repr(C)]
pub struct MainTraceRow<T> {
    /// Clock cycle of this row.
    pub clk: T,
    /// Execution context ID.
    pub ctx: T,
    /// Hash of the function associated with the current execution context.
    pub fn_hash: [T; WORD_SIZE],

    /// Decoder trace columns.
    pub decoder: [T; DECODER_TRACE_WIDTH],

    /// Stack trace columns.
    pub stack: [T; STACK_TRACE_WIDTH],

    /// Range checker trace columns.
    pub range: [T; RANGE_CHECK_TRACE_WIDTH],

    /// Chiplets trace columns.
    pub chiplets: [T; CHIPLETS_WIDTH],
}

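// Reinterprets a row slice as a `MainTraceRow`. `MainTraceRow<T>` is `#[repr(C)]` and consists
// exclusively of `T`-typed fields, so it has the same alignment as `T`, and `align_to` yields a
// single element with empty prefix and suffix for a slice of length `TRACE_WIDTH`.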
impl<T> Borrow<MainTraceRow<T>> for [T] {
    fn borrow(&self) -> &MainTraceRow<T> {
        debug_assert_eq!(self.len(), crate::TRACE_WIDTH);
        let (prefix, shorts, suffix) = unsafe { self.align_to::<MainTraceRow<T>>() };
        debug_assert!(prefix.is_empty(), "Alignment should match");
        debug_assert!(suffix.is_empty(), "Alignment should match");
        debug_assert_eq!(shorts.len(), 1);
        &shorts[0]
    }
}

impl<T> BorrowMut<MainTraceRow<T>> for [T] {
    fn borrow_mut(&mut self) -> &mut MainTraceRow<T> {
        debug_assert_eq!(self.len(), crate::TRACE_WIDTH);
        let (prefix, shorts, suffix) = unsafe { self.align_to_mut::<MainTraceRow<T>>() };
        debug_assert!(prefix.is_empty(), "Alignment should match");
        debug_assert!(suffix.is_empty(), "Alignment should match");
        debug_assert_eq!(shorts.len(), 1);
        &mut shorts[0]
    }
}

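/// Backing storage for the main trace.
///
/// The trace can be held in one of three layouts:
/// - `Parts`: the core (system, decoder, and stack) columns and the chiplets columns in separate
///   row-major buffers, plus the two range checker columns stored column-wise.
/// - `RowMajor`: the full padded trace as a single row-major matrix.
/// - `Transposed`: a row-major matrix whose rows are the columns of the trace.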
#[derive(Debug)]
enum TraceStorage {
    Parts {
        core_rm: Vec<Felt>,
        chiplets_rm: Vec<Felt>,
        range_checker_cols: [Vec<Felt>; 2],
        num_rows: usize,
    },
    RowMajor(RowMajorMatrix<Felt>),
    Transposed {
        matrix: RowMajorMatrix<Felt>,
        num_cols: usize,
        num_rows: usize,
    },
}

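/// Main execution trace of the VM.
///
/// All cell accesses go through [`MainTrace::get`], which dispatches on the underlying
/// [`TraceStorage`] layout, so callers do not need to know how the trace is stored.
///
/// A minimal usage sketch (assuming a `RowMajorMatrix<Felt>` of width `PADDED_TRACE_WIDTH` and
/// the index of the last program row are already at hand; `matrix` and `last_row` are
/// placeholder names):
///
/// ```ignore
/// let width = matrix.width();
/// let trace = MainTrace::new(matrix, last_row);
/// assert_eq!(trace.width(), width);
/// let clk_at_start = trace.clk(RowIndex::from(0_usize));
/// ```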
#[derive(Debug)]
pub struct MainTrace {
    storage: TraceStorage,
    last_program_row: RowIndex,
}

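/// Number of columns stored in the row-major "core" buffer of [`TraceStorage::Parts`]: all
/// columns preceding the range checker section (system, decoder, and stack columns).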
const CORE_WIDTH: usize = RANGE_CHECK_TRACE_OFFSET;

/// Number of rows processed per parallel chunk when materializing row-major matrices.
#[cfg(feature = "concurrent")]
const ROW_MAJOR_CHUNK_SIZE: usize = 512;

impl MainTrace {
    /// Creates a new [MainTrace] backed by the provided row-major matrix.
    pub fn new(matrix: RowMajorMatrix<Felt>, last_program_row: RowIndex) -> Self {
        Self {
            storage: TraceStorage::RowMajor(matrix),
            last_program_row,
        }
    }

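    /// Creates a new [MainTrace] from separate row-major buffers for the core and chiplets
    /// columns plus the two range checker columns.
    ///
    /// # Panics
    /// Panics if any of the provided buffers has a length inconsistent with `num_rows`.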
    pub fn from_parts(
        core_rm: Vec<Felt>,
        chiplets_rm: Vec<Felt>,
        range_checker_cols: [Vec<Felt>; 2],
        num_rows: usize,
        last_program_row: RowIndex,
    ) -> Self {
        assert_eq!(core_rm.len(), num_rows * CORE_WIDTH);
        assert_eq!(chiplets_rm.len(), num_rows * CHIPLETS_WIDTH);
        assert_eq!(range_checker_cols[0].len(), num_rows);
        assert_eq!(range_checker_cols[1].len(), num_rows);
        Self {
            storage: TraceStorage::Parts {
                core_rm,
                chiplets_rm,
                range_checker_cols,
                num_rows,
            },
            last_program_row,
        }
    }

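    /// Creates a new [MainTrace] from a transposed matrix, i.e. a row-major matrix in which each
    /// row holds one column of the main trace.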
    pub fn from_transposed(transposed: RowMajorMatrix<Felt>, last_program_row: RowIndex) -> Self {
        let num_cols = transposed.height();
        let num_rows = transposed.width();
        Self {
            storage: TraceStorage::Transposed { matrix: transposed, num_cols, num_rows },
            last_program_row,
        }
    }

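    /// Returns the value at the specified row and column of the main trace, dispatching on the
    /// underlying storage layout. For a `Parts` trace, columns beyond the chiplets section read
    /// as ZERO (padding).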
    #[inline]
    pub fn get(&self, row: RowIndex, col: usize) -> Felt {
        let r = row.as_usize();
        match &self.storage {
            TraceStorage::Parts {
                core_rm,
                chiplets_rm,
                range_checker_cols,
                num_rows,
            } => {
                assert!(r < *num_rows, "main trace row index in bounds");
                assert!(col < PADDED_TRACE_WIDTH, "main trace column index in bounds");

                if col < CORE_WIDTH {
                    core_rm[r * CORE_WIDTH + col]
                } else {
                    let nc = col - CORE_WIDTH;
                    if nc < RANGE_CHECK_TRACE_WIDTH {
                        range_checker_cols[nc][r]
                    } else if nc < RANGE_CHECK_TRACE_WIDTH + CHIPLETS_WIDTH {
                        chiplets_rm[r * CHIPLETS_WIDTH + (nc - RANGE_CHECK_TRACE_WIDTH)]
                    } else {
                        ZERO
                    }
                }
            },
            TraceStorage::RowMajor(matrix) => {
                let row_slice = matrix.row_slice(r).expect("main trace row index in bounds");
                assert!(col < row_slice.len(), "main trace column index in bounds");
                row_slice[col]
            },
            TraceStorage::Transposed { matrix, num_cols, .. } => {
                let col_slice = matrix.row_slice(col).expect("main trace column index in bounds");
                assert!(r < col_slice.len(), "main trace row index in bounds");
                debug_assert_eq!(col_slice.len(), matrix.width());
                debug_assert_eq!(matrix.height(), *num_cols);
                col_slice[r]
            },
        }
    }

    /// Returns the number of columns in the main trace.
    #[inline]
    pub fn width(&self) -> usize {
        match &self.storage {
            TraceStorage::Parts { .. } => PADDED_TRACE_WIDTH,
            TraceStorage::RowMajor(matrix) => matrix.width(),
            TraceStorage::Transposed { num_cols, .. } => *num_cols,
        }
    }

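    /// Materializes the full padded main trace as a row-major matrix of width
    /// `PADDED_TRACE_WIDTH`, converting from whichever storage layout is currently in use. With
    /// the `concurrent` feature enabled, rows are filled in parallel chunks of
    /// `ROW_MAJOR_CHUNK_SIZE` rows.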
    pub fn to_row_major(&self) -> RowMajorMatrix<Felt> {
        match &self.storage {
            TraceStorage::RowMajor(matrix) => matrix.clone(),
            TraceStorage::Transposed { matrix, .. } => matrix.transpose(),
            TraceStorage::Parts {
                core_rm,
                chiplets_rm,
                range_checker_cols,
                num_rows,
            } => {
                let h = *num_rows;
                let w = PADDED_TRACE_WIDTH;
                let cw = CHIPLETS_WIDTH;
                let num_pad = PADDED_TRACE_WIDTH - CORE_WIDTH - 2 - cw;

                let total = h * w;
                let mut data = Vec::with_capacity(total);
                // The uninitialized buffer is fully overwritten by `fill_rows` below before any
                // element is read.
                #[allow(clippy::uninit_vec)]
                unsafe {
                    data.set_len(total);
                }

                let fill_rows = |chunk: &mut [Felt], start_row: usize| {
                    let chunk_rows = chunk.len() / w;
                    for i in 0..chunk_rows {
                        let row = start_row + i;
                        let dst = &mut chunk[i * w..(i + 1) * w];
                        dst[..CORE_WIDTH]
                            .copy_from_slice(&core_rm[row * CORE_WIDTH..(row + 1) * CORE_WIDTH]);
                        dst[CORE_WIDTH] = range_checker_cols[0][row];
                        dst[CORE_WIDTH + 1] = range_checker_cols[1][row];
                        dst[CORE_WIDTH + 2..CORE_WIDTH + 2 + cw]
                            .copy_from_slice(&chiplets_rm[row * cw..(row + 1) * cw]);
                        for p in 0..num_pad {
                            dst[CORE_WIDTH + 2 + cw + p] = ZERO;
                        }
                    }
                };

                #[cfg(not(feature = "concurrent"))]
                fill_rows(&mut data, 0);

                #[cfg(feature = "concurrent")]
                {
                    use miden_crypto::parallel::*;
                    let rows_per_chunk = ROW_MAJOR_CHUNK_SIZE;
                    data.par_chunks_mut(rows_per_chunk * w).enumerate().for_each(
                        |(chunk_idx, chunk)| {
                            fill_rows(chunk, chunk_idx * rows_per_chunk);
                        },
                    );
                }

                RowMajorMatrix::new(data, w)
            },
        }
    }

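    /// Materializes the main trace as a row-major matrix containing only the first
    /// `target_width` columns, dropping any trailing (padding) columns.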
    pub fn to_row_major_stripped(&self, target_width: usize) -> RowMajorMatrix<Felt> {
        match &self.storage {
            TraceStorage::RowMajor(matrix) => {
                let h = matrix.height();
                let w = matrix.width();
                debug_assert!(target_width <= w);
                if target_width == w {
                    return matrix.clone();
                }
                let total = h * target_width;
                let mut data = Vec::with_capacity(total);
                // The uninitialized buffer is fully overwritten below before any element is read.
                #[allow(clippy::uninit_vec)]
                unsafe {
                    data.set_len(total);
                }

                #[cfg(not(feature = "concurrent"))]
                for row in 0..h {
                    let src = matrix.row_slice(row).expect("row index in bounds");
                    data[row * target_width..(row + 1) * target_width]
                        .copy_from_slice(&src[..target_width]);
                }

                #[cfg(feature = "concurrent")]
                {
                    use miden_crypto::parallel::*;
                    let rows_per_chunk = ROW_MAJOR_CHUNK_SIZE;
                    data.par_chunks_mut(rows_per_chunk * target_width).enumerate().for_each(
                        |(chunk_idx, chunk)| {
                            let chunk_rows = chunk.len() / target_width;
                            for i in 0..chunk_rows {
                                let row = chunk_idx * rows_per_chunk + i;
                                let src = matrix.row_slice(row).expect("row index in bounds");
                                chunk[i * target_width..(i + 1) * target_width]
                                    .copy_from_slice(&src[..target_width]);
                            }
                        },
                    );
                }

                RowMajorMatrix::new(data, target_width)
            },
            TraceStorage::Transposed { .. } => {
                let full = self.to_row_major();
                if target_width == full.width() {
                    return full;
                }
                let h = full.height();
                let total = h * target_width;
                let mut data = Vec::with_capacity(total);
                // The uninitialized buffer is fully overwritten below before any element is read.
                #[allow(clippy::uninit_vec)]
                unsafe {
                    data.set_len(total);
                }
                for row in 0..h {
                    let src = full.row_slice(row).expect("row index in bounds");
                    data[row * target_width..(row + 1) * target_width]
                        .copy_from_slice(&src[..target_width]);
                }
                RowMajorMatrix::new(data, target_width)
            },
            TraceStorage::Parts {
                core_rm,
                chiplets_rm,
                range_checker_cols,
                num_rows,
            } => {
                let h = *num_rows;
                let cw = CHIPLETS_WIDTH;
                debug_assert!(target_width >= CORE_WIDTH);
                let nc_needed = target_width - CORE_WIDTH;

                let total = h * target_width;
                let mut data = Vec::with_capacity(total);
                // The uninitialized buffer is fully overwritten by `fill_rows` below before any
                // element is read.
                #[allow(clippy::uninit_vec)]
                unsafe {
                    data.set_len(total);
                }

                let fill_rows = |chunk: &mut [Felt], start_row: usize| {
                    let chunk_rows = chunk.len() / target_width;
                    for i in 0..chunk_rows {
                        let row = start_row + i;
                        let dst = &mut chunk[i * target_width..(i + 1) * target_width];
                        dst[..CORE_WIDTH]
                            .copy_from_slice(&core_rm[row * CORE_WIDTH..(row + 1) * CORE_WIDTH]);
                        let nc_dst = &mut dst[CORE_WIDTH..];
                        let mut nc_pos = 0;
                        for col in &range_checker_cols[..RANGE_CHECK_TRACE_WIDTH.min(nc_needed)] {
                            nc_dst[nc_pos] = col[row];
                            nc_pos += 1;
                        }
                        if nc_pos < nc_needed {
                            let chip_cols = (nc_needed - nc_pos).min(cw);
                            nc_dst[nc_pos..nc_pos + chip_cols]
                                .copy_from_slice(&chiplets_rm[row * cw..row * cw + chip_cols]);
                            nc_pos += chip_cols;
                        }
                        // Zero out any remaining (padding) columns of the non-core section.
                        for pad in &mut nc_dst[nc_pos..nc_needed] {
                            *pad = ZERO;
                        }
                    }
                };

                #[cfg(not(feature = "concurrent"))]
                fill_rows(&mut data, 0);

                #[cfg(feature = "concurrent")]
                {
                    use miden_crypto::parallel::*;
                    let rows_per_chunk = ROW_MAJOR_CHUNK_SIZE;
                    data.par_chunks_mut(rows_per_chunk * target_width).enumerate().for_each(
                        |(chunk_idx, chunk)| {
                            fill_rows(chunk, chunk_idx * rows_per_chunk);
                        },
                    );
                }

                RowMajorMatrix::new(data, target_width)
            },
        }
    }

    /// Returns the number of rows in the main trace.
    pub fn num_rows(&self) -> usize {
        match &self.storage {
            TraceStorage::Parts { num_rows, .. } => *num_rows,
            TraceStorage::RowMajor(matrix) => matrix.height(),
            TraceStorage::Transposed { num_rows, .. } => *num_rows,
        }
    }

    /// Returns the index of the last program row.
    pub fn last_program_row(&self) -> RowIndex {
        self.last_program_row
    }

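    /// Copies the main trace row at `row_idx` into the first `self.width()` cells of the
    /// provided buffer.
    ///
    /// # Panics
    /// Panics if the buffer is shorter than the trace width.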
    pub fn read_row_into(&self, row_idx: usize, row: &mut [Felt]) {
        let w = self.width();
        assert!(row.len() >= w, "row buffer too small for main trace");
        match &self.storage {
            TraceStorage::RowMajor(matrix) => {
                let slice = matrix.row_slice(row_idx).expect("row index in bounds");
                row[..w].copy_from_slice(&slice);
            },
            TraceStorage::Parts {
                core_rm, chiplets_rm, range_checker_cols, ..
            } => {
                row[..CORE_WIDTH]
                    .copy_from_slice(&core_rm[row_idx * CORE_WIDTH..(row_idx + 1) * CORE_WIDTH]);
                row[CORE_WIDTH] = range_checker_cols[0][row_idx];
                row[CORE_WIDTH + 1] = range_checker_cols[1][row_idx];
                row[CORE_WIDTH + 2..CORE_WIDTH + 2 + CHIPLETS_WIDTH].copy_from_slice(
                    &chiplets_rm[row_idx * CHIPLETS_WIDTH..(row_idx + 1) * CHIPLETS_WIDTH],
                );
                for dst in &mut row[CORE_WIDTH + 2 + CHIPLETS_WIDTH..w] {
                    *dst = ZERO;
                }
            },
            TraceStorage::Transposed { matrix, num_cols, .. } => {
                for (col_idx, cell) in row[..*num_cols].iter_mut().enumerate() {
                    *cell = unsafe { matrix.get_unchecked(col_idx, row_idx) };
                }
            },
        }
    }

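    /// Returns a copy of the column at `col_idx` as a vector of field elements; for a `Parts`
    /// trace, padding columns are returned as all-ZERO vectors.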
    pub fn get_column(&self, col_idx: usize) -> Vec<Felt> {
        let h = self.num_rows();
        match &self.storage {
            TraceStorage::Parts {
                core_rm, chiplets_rm, range_checker_cols, ..
            } => {
                if col_idx < CORE_WIDTH {
                    (0..h).map(|r| core_rm[r * CORE_WIDTH + col_idx]).collect()
                } else {
                    let nc = col_idx - CORE_WIDTH;
                    if nc < 2 {
                        range_checker_cols[nc].clone()
                    } else if nc < 2 + CHIPLETS_WIDTH {
                        let cc = nc - 2;
                        (0..h).map(|r| chiplets_rm[r * CHIPLETS_WIDTH + cc]).collect()
                    } else {
                        vec![ZERO; h]
                    }
                }
            },
            TraceStorage::RowMajor(_) => {
                (0..h).map(|r| self.get(RowIndex::from(r), col_idx)).collect()
            },
            TraceStorage::Transposed { matrix, .. } => {
                let row_slice = matrix.row_slice(col_idx).expect("column index in bounds");
                row_slice[..h].to_vec()
            },
        }
    }

    /// Returns an iterator over the columns of the main trace, each materialized as a vector.
    pub fn columns(&self) -> impl Iterator<Item = Vec<Felt>> + '_ {
        (0..self.width()).map(|c| self.get_column(c))
    }

    /// Returns copies of the columns in the specified range.
    #[cfg(any(test, feature = "testing"))]
    pub fn get_column_range(&self, range: Range<usize>) -> Vec<Vec<Felt>> {
        range.fold(vec![], |mut acc, col_idx| {
            acc.push(self.get_column(col_idx));
            acc
        })
    }

    /// Returns the value of the `clk` column at row `i`.
    pub fn clk(&self, i: RowIndex) -> Felt {
        self.get(i, CLK_COL_IDX)
    }

    /// Returns the value of the `ctx` column at row `i`.
    pub fn ctx(&self, i: RowIndex) -> Felt {
        self.get(i, CTX_COL_IDX)
    }

    /// Returns the value of the decoder block address column at row `i`.
    pub fn addr(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET)
    }

    /// Returns true if the block address changes between rows `i` and `i + 1`.
    pub fn is_addr_change(&self, i: RowIndex) -> bool {
        self.addr(i) != self.addr(i + 1)
    }

    /// Returns the value of the `i`-th user operation helper register at the specified row.
    pub fn helper_register(&self, i: usize, row: RowIndex) -> Felt {
        self.get(row, DECODER_TRACE_OFFSET + USER_OP_HELPERS_OFFSET + i)
    }

    /// Returns the full decoder hasher state at row `i`.
    pub fn decoder_hasher_state(&self, i: RowIndex) -> [Felt; NUM_HASHER_COLUMNS] {
        let mut state = [ZERO; NUM_HASHER_COLUMNS];
        for (idx, col_idx) in DECODER_HASHER_RANGE.enumerate() {
            state[idx] = self.get(i, col_idx);
        }
        state
    }

    /// Returns the first word of the decoder hasher state at row `i`.
    pub fn decoder_hasher_state_first_half(&self, i: RowIndex) -> Word {
        let mut state = [ZERO; DIGEST_LEN];
        for (col, s) in state.iter_mut().enumerate() {
            *s = self.get(i, DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + col);
        }
        state.into()
    }

    /// Returns the second word of the decoder hasher state at row `i`.
    pub fn decoder_hasher_state_second_half(&self, i: RowIndex) -> Word {
        const SECOND_WORD_OFFSET: usize = 4;
        let mut state = [ZERO; DIGEST_LEN];
        for (col, s) in state.iter_mut().enumerate() {
            *s = self.get(i, DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + SECOND_WORD_OFFSET + col);
        }
        state.into()
    }

    /// Returns a single element of the decoder hasher state at row `i`.
    pub fn decoder_hasher_state_element(&self, element: usize, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + element)
    }

    /// Returns the hash of the function associated with the current execution context at row `i`.
    pub fn fn_hash(&self, i: RowIndex) -> [Felt; DIGEST_LEN] {
        let mut state = [ZERO; DIGEST_LEN];
        for (col, s) in state.iter_mut().enumerate() {
            *s = self.get(i, FN_HASH_OFFSET + col);
        }
        state
    }

    /// Returns the value of the loop-body flag column at row `i`.
    pub fn is_loop_body_flag(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IS_LOOP_BODY_FLAG_COL_IDX)
    }

    /// Returns the value of the loop flag column at row `i`.
    pub fn is_loop_flag(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IS_LOOP_FLAG_COL_IDX)
    }

    /// Returns the value of the call flag column at row `i`.
    pub fn is_call_flag(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IS_CALL_FLAG_COL_IDX)
    }

    /// Returns the value of the syscall flag column at row `i`.
    pub fn is_syscall_flag(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IS_SYSCALL_FLAG_COL_IDX)
    }

    /// Returns the values of the operation batch flag columns at row `i`.
    pub fn op_batch_flag(&self, i: RowIndex) -> [Felt; NUM_OP_BATCH_FLAGS] {
        [
            self.get(i, DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET),
            self.get(i, DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 1),
            self.get(i, DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 2),
        ]
    }

    /// Returns the value of the operation group count column at row `i`.
    pub fn group_count(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + GROUP_COUNT_COL_IDX)
    }

    /// Returns the change in the group count between rows `i` and `i + 1`.
    pub fn delta_group_count(&self, i: RowIndex) -> Felt {
        self.group_count(i) - self.group_count(i + 1)
    }

    /// Returns the value of the `in_span` column at row `i`.
    pub fn is_in_span(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IN_SPAN_COL_IDX)
    }

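    /// Reconstructs the opcode executed at row `i` from the seven decoder op-bit columns,
    /// interpreted as a binary encoding: `b0 + 2 * b1 + 4 * b2 + ... + 64 * b6`.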
    pub fn get_op_code(&self, i: RowIndex) -> Felt {
        let b0 = self.get(i, DECODER_TRACE_OFFSET + 1);
        let b1 = self.get(i, DECODER_TRACE_OFFSET + 2);
        let b2 = self.get(i, DECODER_TRACE_OFFSET + 3);
        let b3 = self.get(i, DECODER_TRACE_OFFSET + 4);
        let b4 = self.get(i, DECODER_TRACE_OFFSET + 5);
        let b5 = self.get(i, DECODER_TRACE_OFFSET + 6);
        let b6 = self.get(i, DECODER_TRACE_OFFSET + 7);
        b0 + b1 * Felt::from_u64(2)
            + b2 * Felt::from_u64(4)
            + b3 * Felt::from_u64(8)
            + b4 * Felt::from_u64(16)
            + b5 * Felt::from_u64(32)
            + b6 * Felt::from_u64(64)
    }

    /// Returns an iterator over all row indices of the main trace.
    pub fn row_iter(&self) -> impl Iterator<Item = RowIndex> {
        (0..self.num_rows()).map(RowIndex::from)
    }

    /// Returns true if the operation executed at row `i` shifts the stack to the left.
    pub fn is_left_shift(&self, i: RowIndex) -> bool {
        let b0 = self.get(i, DECODER_TRACE_OFFSET + 1);
        let b1 = self.get(i, DECODER_TRACE_OFFSET + 2);
        let b2 = self.get(i, DECODER_TRACE_OFFSET + 3);
        let b3 = self.get(i, DECODER_TRACE_OFFSET + 4);
        let b4 = self.get(i, DECODER_TRACE_OFFSET + 5);
        let b5 = self.get(i, DECODER_TRACE_OFFSET + 6);
        let b6 = self.get(i, DECODER_TRACE_OFFSET + 7);
        let e0 = self.get(i, DECODER_TRACE_OFFSET + OP_BITS_EXTRA_COLS_OFFSET);
        let h5 = self.get(i, DECODER_TRACE_OFFSET + IS_LOOP_FLAG_COL_IDX);

        ([b6, b5, b4] == [ZERO, ONE, ZERO])
            || ([b6, b5, b4, b3, b2] == [ONE, ZERO, ZERO, ONE, ONE])
            || ([e0, b3, b2, b1] == [ONE, ZERO, ONE, ZERO])
            || ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ONE, ONE, ZERO, ONE, ZERO, ZERO])
            || ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ZERO, ONE, ONE, ZERO, ZERO, ZERO])
            || ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ONE, ONE, ZERO, ZERO, ZERO, ZERO]
                && h5 == ONE)
    }

    /// Returns true if the operation executed at row `i` shifts the stack to the right.
    pub fn is_right_shift(&self, i: RowIndex) -> bool {
        let b0 = self.get(i, DECODER_TRACE_OFFSET + 1);
        let b1 = self.get(i, DECODER_TRACE_OFFSET + 2);
        let b2 = self.get(i, DECODER_TRACE_OFFSET + 3);
        let b3 = self.get(i, DECODER_TRACE_OFFSET + 4);
        let b4 = self.get(i, DECODER_TRACE_OFFSET + 5);
        let b5 = self.get(i, DECODER_TRACE_OFFSET + 6);
        let b6 = self.get(i, DECODER_TRACE_OFFSET + 7);

        ([b6, b5, b4] == [ZERO, ONE, ONE])
            || ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ZERO, ZERO, ONE, ZERO, ZERO, ZERO])
            || ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ZERO, ONE, ONE, ZERO, ONE, ONE])
    }

    /// Returns the stack depth (the `b0` bookkeeping column) at row `i`.
    pub fn stack_depth(&self, i: RowIndex) -> Felt {
        self.get(i, STACK_TRACE_OFFSET + B0_COL_IDX)
    }

    /// Returns the stack element in the specified column at row `i`.
    pub fn stack_element(&self, column: usize, i: RowIndex) -> Felt {
        self.get(i, STACK_TRACE_OFFSET + column)
    }

    /// Returns the word built from four consecutive stack elements, starting at `start`, at
    /// row `i`.
    pub fn stack_word(&self, start: usize, i: RowIndex) -> Word {
        Word::from([
            self.stack_element(start, i),
            self.stack_element(start + 1, i),
            self.stack_element(start + 2, i),
            self.stack_element(start + 3, i),
        ])
    }

    /// Returns the value of the `b1` bookkeeping column (parent overflow table address) at
    /// row `i`.
    pub fn parent_overflow_address(&self, i: RowIndex) -> Felt {
        self.get(i, STACK_TRACE_OFFSET + B1_COL_IDX)
    }

    /// Returns true if the overflow table is non-empty at row `i`, i.e. the stack depth is
    /// greater than 16.
    pub fn is_non_empty_overflow(&self, i: RowIndex) -> bool {
        let b0 = self.get(i, STACK_TRACE_OFFSET + B0_COL_IDX);
        let h0 = self.get(i, STACK_TRACE_OFFSET + H0_COL_IDX);
        (b0 - Felt::from_u64(16)) * h0 == ONE
    }

    /// Returns the value of the first chiplet selector column at row `i`.
    pub fn chiplet_selector_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET)
    }

    /// Returns the value of the second chiplet selector column at row `i`.
    pub fn chiplet_selector_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 1)
    }

    /// Returns the value of the third chiplet selector column at row `i`.
    pub fn chiplet_selector_2(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 2)
    }

    /// Returns the value of the fourth chiplet selector column at row `i`.
    pub fn chiplet_selector_3(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 3)
    }

    /// Returns the value of the fifth chiplet selector column at row `i`.
    pub fn chiplet_selector_4(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 4)
    }

    /// Returns the value of the sixth chiplet selector column at row `i`.
    pub fn chiplet_selector_5(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 5)
    }

    /// Returns true if row `i` belongs to the hasher chiplet (selector 0 is ZERO).
    pub fn is_hash_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ZERO
    }

    /// Returns the full state of the hasher chiplet at row `i`.
    pub fn chiplet_hasher_state(&self, i: RowIndex) -> [Felt; STATE_WIDTH] {
        let mut state = [ZERO; STATE_WIDTH];
        for (idx, col_idx) in HASHER_STATE_COL_RANGE.enumerate() {
            state[idx] = self.get(i, col_idx);
        }
        state
    }

    /// Returns the value of the hasher chiplet's node index column at row `i`.
    pub fn chiplet_node_index(&self, i: RowIndex) -> Felt {
        self.get(i, HASHER_NODE_INDEX_COL_IDX)
    }

    /// Returns true if row `i` belongs to the bitwise chiplet (selectors are (1, 0)).
    pub fn is_bitwise_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ONE && self.chiplet_selector_1(i) == ZERO
    }

    /// Returns the value of the bitwise chiplet's `a` column at row `i`.
    pub fn chiplet_bitwise_a(&self, i: RowIndex) -> Felt {
        self.get(i, BITWISE_A_COL_IDX)
    }

    /// Returns the value of the bitwise chiplet's `b` column at row `i`.
    pub fn chiplet_bitwise_b(&self, i: RowIndex) -> Felt {
        self.get(i, BITWISE_B_COL_IDX)
    }

    /// Returns the value of the bitwise chiplet's output column at row `i`.
    pub fn chiplet_bitwise_z(&self, i: RowIndex) -> Felt {
        self.get(i, BITWISE_OUTPUT_COL_IDX)
    }

    /// Returns true if row `i` belongs to the memory chiplet (selectors are (1, 1, 0)).
    pub fn is_memory_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ONE
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ZERO
    }

    /// Returns the value of the memory chiplet's context column at row `i`.
    pub fn chiplet_memory_ctx(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_CTX_COL_IDX)
    }

    /// Returns the value of the memory chiplet's `word` column at row `i`.
    pub fn chiplet_memory_word(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_WORD_COL_IDX)
    }

    /// Returns the value of the memory chiplet's `idx0` column at row `i`.
    pub fn chiplet_memory_idx0(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_IDX0_COL_IDX)
    }

    /// Returns the value of the memory chiplet's `idx1` column at row `i`.
    pub fn chiplet_memory_idx1(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_IDX1_COL_IDX)
    }

    /// Returns the value of the memory chiplet's clock column at row `i`.
    pub fn chiplet_memory_clk(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_CLK_COL_IDX)
    }

    /// Returns the first value element of the memory chiplet at row `i`.
    pub fn chiplet_memory_value_0(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_V_COL_RANGE.start)
    }

    /// Returns the second value element of the memory chiplet at row `i`.
    pub fn chiplet_memory_value_1(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_V_COL_RANGE.start + 1)
    }

    /// Returns the third value element of the memory chiplet at row `i`.
    pub fn chiplet_memory_value_2(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_V_COL_RANGE.start + 2)
    }

    /// Returns the fourth value element of the memory chiplet at row `i`.
    pub fn chiplet_memory_value_3(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_V_COL_RANGE.start + 3)
    }

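    /// Returns true if row `i` belongs to the ACE (arithmetic circuit evaluation) chiplet, i.e.
    /// the chiplet selectors are (1, 1, 1, 0).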
    pub fn is_ace_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ONE
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ZERO
    }

    /// Returns the value of the ACE chiplet's start selector column at row `i`.
    pub fn chiplet_ace_start_selector(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + SELECTOR_START_IDX)
    }

    /// Returns the value of the ACE chiplet's block selector column at row `i`.
    pub fn chiplet_ace_block_selector(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + SELECTOR_BLOCK_IDX)
    }

    /// Returns the value of the ACE chiplet's context column at row `i`.
    pub fn chiplet_ace_ctx(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + CTX_IDX)
    }

    /// Returns the value of the ACE chiplet's pointer column at row `i`.
    pub fn chiplet_ace_ptr(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + PTR_IDX)
    }

    /// Returns the value of the ACE chiplet's clock column at row `i`.
    pub fn chiplet_ace_clk(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + CLK_IDX)
    }

    /// Returns the value of the ACE chiplet's evaluation-op column at row `i`.
    pub fn chiplet_ace_eval_op(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + EVAL_OP_IDX)
    }

    /// Returns the value of the ACE chiplet's read-num-eval column at row `i`.
    pub fn chiplet_ace_num_eval_rows(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + READ_NUM_EVAL_IDX)
    }

    /// Returns the ID of wire 0 of the ACE chiplet at row `i`.
    pub fn chiplet_ace_id_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + ID_0_IDX)
    }

    /// Returns the first value element of wire 0 of the ACE chiplet at row `i`.
    pub fn chiplet_ace_v_0_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_0_0_IDX)
    }

    /// Returns the second value element of wire 0 of the ACE chiplet at row `i`.
    pub fn chiplet_ace_v_0_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_0_1_IDX)
    }

    /// Returns wire 0 of the ACE chiplet at row `i` as `[id, v_0, v_1]`.
    pub fn chiplet_ace_wire_0(&self, i: RowIndex) -> [Felt; 3] {
        let id_0 = self.chiplet_ace_id_0(i);
        let v_0_0 = self.chiplet_ace_v_0_0(i);
        let v_0_1 = self.chiplet_ace_v_0_1(i);

        [id_0, v_0_0, v_0_1]
    }

    /// Returns the ID of wire 1 of the ACE chiplet at row `i`.
    pub fn chiplet_ace_id_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + ID_1_IDX)
    }

    /// Returns the first value element of wire 1 of the ACE chiplet at row `i`.
    pub fn chiplet_ace_v_1_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_1_0_IDX)
    }

    /// Returns the second value element of wire 1 of the ACE chiplet at row `i`.
    pub fn chiplet_ace_v_1_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_1_1_IDX)
    }

    /// Returns wire 1 of the ACE chiplet at row `i` as `[id, v_0, v_1]`.
    pub fn chiplet_ace_wire_1(&self, i: RowIndex) -> [Felt; 3] {
        let id_1 = self.chiplet_ace_id_1(i);
        let v_1_0 = self.chiplet_ace_v_1_0(i);
        let v_1_1 = self.chiplet_ace_v_1_1(i);

        [id_1, v_1_0, v_1_1]
    }

    /// Returns the ID of wire 2 of the ACE chiplet at row `i`.
    pub fn chiplet_ace_id_2(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + ID_2_IDX)
    }

    /// Returns the first value element of wire 2 of the ACE chiplet at row `i`.
    pub fn chiplet_ace_v_2_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_2_0_IDX)
    }

    /// Returns the second value element of wire 2 of the ACE chiplet at row `i`.
    pub fn chiplet_ace_v_2_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_2_1_IDX)
    }

    /// Returns wire 2 of the ACE chiplet at row `i` as `[id, v_0, v_1]`.
    pub fn chiplet_ace_wire_2(&self, i: RowIndex) -> [Felt; 3] {
        let id_2 = self.chiplet_ace_id_2(i);
        let v_2_0 = self.chiplet_ace_v_2_0(i);
        let v_2_1 = self.chiplet_ace_v_2_1(i);

        [id_2, v_2_0, v_2_1]
    }

    /// Returns the value of the ACE chiplet's `m_1` column at row `i`.
    pub fn chiplet_ace_m_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + M_1_IDX)
    }

    /// Returns the value of the ACE chiplet's `m_0` column at row `i`.
    pub fn chiplet_ace_m_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + M_0_IDX)
    }

    /// Returns true if row `i` is a read row of the ACE chiplet.
    pub fn chiplet_ace_is_read_row(&self, i: RowIndex) -> bool {
        self.is_ace_row(i) && self.chiplet_ace_block_selector(i) == ZERO
    }

    /// Returns true if row `i` is an evaluation row of the ACE chiplet.
    pub fn chiplet_ace_is_eval_row(&self, i: RowIndex) -> bool {
        self.is_ace_row(i) && self.chiplet_ace_block_selector(i) == ONE
    }

    /// Returns true if row `i` belongs to the kernel ROM chiplet (selectors are (1, 1, 1, 1, 0)).
    pub fn is_kernel_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ONE
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ONE
            && self.chiplet_selector_4(i) == ZERO
    }

    /// Returns true if the kernel ROM chiplet's first-hash-row flag is set at row `i`.
    pub fn chiplet_kernel_is_first_hash_row(&self, i: RowIndex) -> bool {
        self.get(i, CHIPLETS_OFFSET + 5) == ONE
    }

    /// Returns the first element of the kernel procedure root at row `i`.
    pub fn chiplet_kernel_root_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 6)
    }

    /// Returns the second element of the kernel procedure root at row `i`.
    pub fn chiplet_kernel_root_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 7)
    }

    /// Returns the third element of the kernel procedure root at row `i`.
    pub fn chiplet_kernel_root_2(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 8)
    }

    /// Returns the fourth element of the kernel procedure root at row `i`.
    pub fn chiplet_kernel_root_3(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 9)
    }

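    /// Returns true if row `i` is the first row of a hash cycle and the chiplet selectors are
    /// (0, 1, 1, 0); this flag, together with `f_mva`, `f_mu`, and `f_mua`, covers the hasher
    /// chiplet's Merkle-path-related operations.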
    pub fn f_mv(&self, i: RowIndex) -> bool {
        i.as_usize().is_multiple_of(HASH_CYCLE_LEN)
            && self.chiplet_selector_0(i) == ZERO
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ZERO
    }

    /// Returns true if row `i` is the last row of a hash cycle and the chiplet selectors are
    /// (0, 1, 1, 0).
    pub fn f_mva(&self, i: RowIndex) -> bool {
        (i.as_usize() % HASH_CYCLE_LEN == LAST_CYCLE_ROW)
            && self.chiplet_selector_0(i) == ZERO
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ZERO
    }

    /// Returns true if row `i` is the first row of a hash cycle and the chiplet selectors are
    /// (0, 1, 1, 1).
    pub fn f_mu(&self, i: RowIndex) -> bool {
        i.as_usize().is_multiple_of(HASH_CYCLE_LEN)
            && self.chiplet_selector_0(i) == ZERO
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ONE
    }

    /// Returns true if row `i` is the last row of a hash cycle and the chiplet selectors are
    /// (0, 1, 1, 1).
    pub fn f_mua(&self, i: RowIndex) -> bool {
        (i.as_usize() % HASH_CYCLE_LEN == LAST_CYCLE_ROW)
            && self.chiplet_selector_0(i) == ZERO
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ONE
    }
}