diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 422940b2b8..85366f1983 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -60,7 +60,7 @@ jobs:
       - name: Install cargo-spec for specifications
         run: |
           eval $(opam env)
-          cargo install cargo-spec
+          cargo install --locked cargo-spec
       - name: Build the kimchi specification
         run: |
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3d3c479436..ed881acc75 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -48,3 +48,24 @@ cargo fmt
 ```
 These are enforced by GitHub PR checks, so be sure to have any errors produced by the above tools fixed before pushing the code to your pull request branch. Refer to `.github/workflows` for all PR checks.
+
+## Branching policy
+
+Generally, proof-systems intends to stay synchronized with the mina repository (see their [README-branching.md](https://github.com/MinaProtocol/mina/blob/develop/README-branching.md)), so its branching policy is quite similar. However, several important (some temporary) distinctions exist:
+
+- `compatible`:
+  - Compatible with `rampup` in `mina`.
+  - Mina's `compatible`, similarly to mina's `master`, does not depend on `proof-systems`.
+- `berkeley`: the future hardfork release that will go out to Berkeley.
+  - This is where hotfixes go.
+- `develop`: matches mina's `develop`; soft-fork compatible.
+  - Also used by `mina/o1js-main` and `o1js/main`.
+- `master`: future feature work development, containing breaking changes. Anything that does not need to be released alongside mina.
+  - Note that `mina`'s `master` does not depend on `proof-systems` at all.
+- `izmir`: the next hardfork release after Berkeley.
+- In the future:
+  - `master`/`develop` will reverse roles and become something like gitflow.
+  - After the Berkeley release, `compatible` will become properly synced with `mina/compatible`.
+- Direction of merge:
+  - Back-merging: `compatible` into `berkeley` into `develop` into `master`.
+  - Front-merging (introducing new features): the other direction, but where you start depends on where the feature belongs.
diff --git a/Cargo.toml b/Cargo.toml
index 851713bf6b..0c415c6888 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,6 +15,7 @@ members = [
   "utils",
   "internal-tracing",
 ]
+resolver = "2"
 
 [profile.release]
 lto = true
diff --git a/book/Cargo.toml b/book/Cargo.toml
index 8bf747f208..2a0d222bc7 100644
--- a/book/Cargo.toml
+++ b/book/Cargo.toml
@@ -10,4 +10,6 @@ edition = "2021"
 license = "Apache-2.0"
 
 [build-dependencies]
-cargo-spec = { version = "0.5.0" }
\ No newline at end of file
+cargo-spec = { version = "0.5.0" }
+time = { version = "~0.3.23" } # This crate is a known bad actor for breaking rust version support.
+plist = { version = "~1.5.0" } # This crate improperly constrains its bad-actor dependency (`time`).
diff --git a/book/src/specs/kimchi.md b/book/src/specs/kimchi.md
index cd717f01d6..06c1353235 100644
--- a/book/src/specs/kimchi.md
+++ b/book/src/specs/kimchi.md
@@ -2202,7 +2202,7 @@ The prover then follows the following steps to create the proof:
     * $s_i$
     * $w_i$
     * $z$
-    * lookup (TODO)
+    * lookup (TODO, see [this issue](https://github.com/MinaProtocol/mina/issues/13886))
     * generic selector
     * poseidon selector
 
@@ -2284,11 +2284,11 @@ We run the following algorithm:
     * Derive the scalar joint combiner challenge $j$ from $j'$ using the endomorphism. (TODO: specify endomorphism)
     * absorb the commitments to the sorted polynomials.
-1. Sample $\beta$ with the Fq-Sponge.
-1. Sample $\gamma$ with the Fq-Sponge.
+1. Sample the first permutation challenge $\beta$ with the Fq-Sponge.
+1. Sample the second permutation challenge $\gamma$ with the Fq-Sponge.
 1. If using lookup, absorb the commitment to the aggregation lookup polynomial.
 1. Absorb the commitment to the permutation trace with the Fq-Sponge.
-1. Sample $\alpha'$ with the Fq-Sponge.
+1. Sample the quotient challenge $\alpha'$ with the Fq-Sponge.
 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details).
 1. Enforce that the length of the $t$ commitment is of size 7.
 1. Absorb the commitment to the quotient polynomial $t$ into the argument.
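The renamed spec steps above map directly onto the verifier's sampling code later in this diff. A minimal sketch of the round-2/round-3 ordering, borrowing `fq_sponge`, `ScalarChallenge`, and `endo_r` from the kimchi verifier (an illustration of the flow, not a new API):

```rust
// Round 2: the two permutation challenges come straight from the Fq-sponge.
let beta = fq_sponge.challenge(); // first permutation challenge
let gamma = fq_sponge.challenge(); // second permutation challenge
// ... absorb the lookup aggregation and permutation commitments here ...
// Round 3: the quotient challenge is sampled as a 128-bit scalar challenge,
// then expanded to a full field element via the curve endomorphism.
let alpha_chal = ScalarChallenge(fq_sponge.challenge());
let alpha = alpha_chal.to_field(endo_r);
```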
diff --git a/kimchi/Cargo.toml b/kimchi/Cargo.toml
index 9f200b2821..992cc7144b 100644
--- a/kimchi/Cargo.toml
+++ b/kimchi/Cargo.toml
@@ -25,7 +25,7 @@ num-derive = "0.3"
 num-integer = "0.1.45"
 num-traits = "0.2"
 itertools = "0.10.3"
-rand = "0.8.0"
+rand = { version = "0.8.0", features = ["std_rng"] }
 rand_core = "0.6.3"
 rayon = "1.5.0"
 rmp-serde = "1.1.1"
diff --git a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs
index 31fdf5c4e0..6cef21f258 100644
--- a/kimchi/src/circuits/constraints.rs
+++ b/kimchi/src/circuits/constraints.rs
@@ -683,7 +683,9 @@ impl Builder {
     /// If not invoked, it is `vec![]` by default.
     ///
     /// **Warning:** you have to make sure that the IDs of the lookup tables,
-    /// are unique and not colliding with IDs of built-in lookup tables
+    /// are unique and not colliding with IDs of built-in lookup tables;
+    /// otherwise an error will be raised.
+    ///
     /// (see [crate::circuits::lookup::tables]).
     pub fn lookup(mut self, lookup_tables: Vec<LookupTable<F>>) -> Self {
         self.lookup_tables = lookup_tables;
@@ -693,8 +695,9 @@ impl Builder {
     /// Set up the runtime tables.
     /// If not invoked, it is `None` by default.
     ///
-    /// **Warning:** you have to make sure that the IDs of the runtime lookup tables,
-    /// are unique and not colliding with IDs of built-in lookup tables
+    /// **Warning:** you have to make sure that the IDs of the runtime
+    /// lookup tables are unique, i.e. not colliding internally (with other runtime tables);
+    /// otherwise an error will be raised.
     /// (see [crate::circuits::lookup::tables]).
     pub fn runtime(mut self, runtime_tables: Option<Vec<RuntimeTableCfg<F>>>) -> Self {
         self.runtime_tables = runtime_tables;
@@ -736,9 +739,25 @@ impl Builder {
         let lookup_domain_size = {
             // First we sum over the lookup table size
-            let mut lookup_domain_size: usize = lookup_tables.iter().map(|lt| lt.len()).sum();
+            let mut has_table_with_id_0 = false;
+            let mut lookup_domain_size: usize = lookup_tables
+                .iter()
+                .map(|LookupTable { id, data }| {
+                    // See below for the reason
+                    if *id == 0_i32 {
+                        has_table_with_id_0 = true
+                    }
+                    if data.is_empty() {
+                        0
+                    } else {
+                        data[0].len()
+                    }
+                })
+                .sum();
             // After that on the runtime tables
             if let Some(runtime_tables) = runtime_tables.as_ref() {
+                // FIXME: Check that a runtime table with ID 0 is enforced to
+                // contain a zero entry row.
                 for runtime_table in runtime_tables.iter() {
                     lookup_domain_size += runtime_table.len();
                 }
@@ -757,7 +776,14 @@ impl Builder {
             for gate_table in gate_lookup_tables.into_iter() {
                 lookup_domain_size += gate_table.table_size();
             }
-            lookup_domain_size
+
+            // A dummy zero entry will be added if there is no table with ID
+            // zero. Therefore we must count this in the size.
+            if has_table_with_id_0 {
+                lookup_domain_size
+            } else {
+                lookup_domain_size + 1
+            }
         };

         //~ 1.
Compute the number of zero-knowledge rows (`zk_rows`) that will be required to @@ -784,6 +810,9 @@ impl Builder { //~ ``` //~ let (zk_rows, domain_size_lower_bound) = { + // We add 1 to the lookup domain size because there is one element + // used to close the permutation argument (the polynomial Z is of + // degree n + 1 where n is the order of the subgroup H). let circuit_lower_bound = std::cmp::max(gates.len(), lookup_domain_size + 1); let get_domain_size_lower_bound = |zk_rows: u64| circuit_lower_bound + zk_rows as usize; diff --git a/kimchi/src/circuits/gate.rs b/kimchi/src/circuits/gate.rs index ec5b2f9377..82e3c76031 100644 --- a/kimchi/src/circuits/gate.rs +++ b/kimchi/src/circuits/gate.rs @@ -209,6 +209,7 @@ impl CircuitGate { EndoMul => self.verify_endomul::(row, witness, &index.cs), EndoMulScalar => self.verify_endomul_scalar::(row, witness, &index.cs), // TODO: implement the verification for the lookup gate + // See https://github.com/MinaProtocol/mina/issues/14011 Lookup => Ok(()), CairoClaim | CairoInstruction | CairoFlags | CairoTransition => { self.verify_cairo_gate::(row, witness, &index.cs) @@ -303,6 +304,7 @@ impl CircuitGate { } GateType::Lookup => { // TODO: implement the verification for the lookup gate + // See https://github.com/MinaProtocol/mina/issues/14011 vec![] } GateType::CairoClaim => turshi::Claim::constraint_checks(&env, &mut cache), diff --git a/kimchi/src/circuits/lookup/index.rs b/kimchi/src/circuits/lookup/index.rs index 41a9709737..3f81759ddd 100644 --- a/kimchi/src/circuits/lookup/index.rs +++ b/kimchi/src/circuits/lookup/index.rs @@ -1,10 +1,10 @@ +use super::runtime_tables::{RuntimeTableCfg, RuntimeTableSpec}; use crate::circuits::{ domains::EvaluationDomains, gate::CircuitGate, lookup::{ constraints::LookupConfiguration, lookups::{LookupInfo, LookupPattern}, - runtime_tables::{RuntimeTableCfg, RuntimeTableSpec}, tables::LookupTable, }, }; @@ -23,11 +23,15 @@ use thiserror::Error; /// Represents an error found when computing the lookup constraint system #[derive(Debug, Error, Clone)] pub enum LookupError { + #[error("One of the lookup tables has columns of different lengths")] + InconsistentTableLength, #[error("The combined lookup table is larger than allowed by the domain size. Observed: {length}, expected: {maximum_allowed}")] LookupTableTooLong { length: usize, maximum_allowed: usize, }, + #[error("The table with id 0 must have an entry of all zeros")] + TableIDZeroMustHaveZeroEntry, #[error("Cannot create a combined table since ids for sub-tables are colliding. The collision type is: {collision_type}")] LookupTableIdCollision { collision_type: String }, } @@ -239,8 +243,8 @@ impl LookupConstraintSystem { // explicitly to the constraint system. let fixed_gate_joint_ids: Vec = fixed_lookup_tables .iter() - .map(|lt| lt.id()) - .chain(gate_lookup_tables.iter().map(|lt| lt.id())) + .map(|lt| lt.id) + .chain(gate_lookup_tables.iter().map(|lt| lt.id)) .collect(); check_id_duplicates( fixed_gate_joint_ids.iter(), @@ -248,30 +252,22 @@ impl LookupConstraintSystem { )?; //~ 3. Concatenate explicit runtime lookup tables with the ones (implicitly) used by gates. 
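        // Illustration of the concatenation below (a sketch with ad hoc names,
        // not part of the diff): with explicit fixed tables [t1 (id 1), t2 (id 2)]
        // and a gate-implied XOR table (id 0),
        //     let all: Vec<_> = vec![t1, t2].into_iter().chain(vec![xor]).collect();
        // yields [t1, t2, xor], and the ID-duplicate check above has already
        // ensured the three IDs are pairwise distinct.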
-        let mut lookup_tables: Vec<LookupTable<F>> = fixed_lookup_tables
+        let mut lookup_tables: Vec<_> = fixed_lookup_tables
             .into_iter()
             .chain(gate_lookup_tables)
             .collect();

-        let fixed_lookup_tables_ids: Vec<i32> =
-            lookup_tables.iter().map(|lt| lt.id()).collect();
-        check_id_duplicates(
-            fixed_lookup_tables_ids.iter(),
-            "fixed lookup table duplicates",
-        )?;
+        let mut has_table_id_0 = false;

         // if we are using runtime tables
         let (runtime_table_offset, runtime_selector) =
             if let Some(runtime_tables) = &runtime_tables {
+                // Check duplicates in runtime table ids
                 let runtime_tables_ids: Vec<i32> =
                     runtime_tables.iter().map(|rt| rt.id).collect();
                 check_id_duplicates(runtime_tables_ids.iter(), "runtime table duplicates")?;
-                check_id_duplicates(
-                    runtime_tables_ids
-                        .iter()
-                        .chain(fixed_lookup_tables_ids.iter()),
-                    "duplicates between runtime and fixed tables",
-                )?;
+                // Runtime table IDs /may/ collide with fixed lookup
+                // table IDs, so we intentionally do not check for
+                // collisions between the two sets here.

                 // save the offset of the end of the table
                 let mut runtime_table_offset = 0;
@@ -313,15 +309,18 @@ impl LookupConstraintSystem {
                     let (id, first_column) =
                         (runtime_table.id, runtime_table.first_column.clone());

+                    // record if table ID 0 is used in one of the runtime tables
+                    // note: the check later will still force you to have a fixed table with ID 0
+                    if id == 0 {
+                        has_table_id_0 = true;
+                    }
+
                     // important: we still need a placeholder column to make sure that
                     // if all other tables have a single column
                     // we don't use the second table as table ID column.
                     let placeholders = vec![F::zero(); first_column.len()];
                     let data = vec![first_column, placeholders];
-                    // TODO Check it does not fail actually. Maybe this should throw a different error.
-                    let table = LookupTable::create(id, data)
-                        .expect("Runtime table creation must succeed");
-
+                    let table = LookupTable { id, data };
                     lookup_tables.push(table);
                 }
@@ -383,21 +382,31 @@ impl LookupConstraintSystem {
                 let mut table_ids: Vec<F> = Vec::with_capacity(d1_size);

                 let mut non_zero_table_id = false;
+                let mut has_table_id_0_with_zero_entry = false;

                 for table in &lookup_tables {
                     let table_len = table.len();

-                    if table.id() != 0 {
+                    if table.id == 0 {
+                        has_table_id_0 = true;
+                        if table.has_zero_entry() {
+                            has_table_id_0_with_zero_entry = true;
+                        }
+                    } else {
                         non_zero_table_id = true;
                     }

                     //~~ * Update the corresponding entries in a table id vector (of size the domain as well)
                     //~    with the table ID of the table.
-                    let table_id: F = i32_to_field(table.id());
+                    let table_id: F = i32_to_field(table.id);
                     table_ids.extend(repeat_n(table_id, table_len));

                     //~~ * Copy the entries from the table to new rows in the corresponding columns of the concatenated table.
-                    for (i, col) in table.data().iter().enumerate() {
+                    for (i, col) in table.data.iter().enumerate() {
+                        // See GH issue: https://github.com/MinaProtocol/mina/issues/14097
+                        if col.len() != table_len {
+                            return Err(LookupError::InconsistentTableLength);
+                        }
                         lookup_table[i].extend(col);
                     }
@@ -407,6 +416,12 @@ impl LookupConstraintSystem {
                     }
                 }

+                // If a table has ID 0, then it must have a zero entry.
+                // This is for the dummy lookups to work.
+                if has_table_id_0 && !has_table_id_0_with_zero_entry {
+                    return Err(LookupError::TableIDZeroMustHaveZeroEntry);
+                }
+
                 // Note: we use `>=` here to leave space for the dummy value.
                 if lookup_table[0].len() >= max_num_entries {
                     return Err(LookupError::LookupTableTooLong {
@@ -416,6 +431,15 @@ impl LookupConstraintSystem {
                 }

                 //~ 6. Pad the end of the concatenated table with the dummy value.
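                // Worked example (illustrative): if `max_num_entries` is 8 and
                // the concatenated columns currently have length 5, the padding
                // below appends three `F::zero()` rows, so every column ends up
                // with length 8.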
+                // By padding with 0, we constrain the table with ID 0 to
+                // have a zero entry.
+                // This is for the rows which do not have a lookup selector,
+                // see ../../../../book/src/kimchi/lookup.md.
+                // The zero entry row is contained in the built-in XOR table.
+                // An error is raised when creating the CS if a user-defined
+                // table is defined with ID 0 without a row containing zeroes.
+                // If no such table is used, we artificially add a dummy
+                // table with ID 0 and a row containing only zeroes.
                 lookup_table
                     .iter_mut()
                     .for_each(|col| col.extend(repeat_n(F::zero(), max_num_entries - col.len())));
@@ -433,8 +457,6 @@ impl LookupConstraintSystem {
                     lookup_table8.push(eval);
                 }

-                // @volhovm: Do we need to enforce that there is at least one table
-                // with id 0?
                 //~ 9. pre-compute polynomial and evaluation form for the table IDs,
                 //~    only if a table with an ID different from zero was used.
                 let (table_ids, table_ids8) = if non_zero_table_id {
@@ -479,7 +501,7 @@ mod tests {
     use mina_curves::pasta::Fp;

     #[test]
-    fn colliding_table_ids() {
+    fn test_colliding_table_ids() {
         let (_, gates) = CircuitGate::<Fp>::create_multi_range_check(0);
         let collision_id: i32 = 5;
@@ -508,8 +530,14 @@
         let cs = ConstraintSystem::<Fp>::create(gates.clone())
             .lookup(vec![
-                LookupTable::create(collision_id, vec![vec![From::from(0); 16]]).unwrap(),
-                LookupTable::create(collision_id, vec![vec![From::from(1); 16]]).unwrap(),
+                LookupTable {
+                    id: collision_id,
+                    data: vec![vec![From::from(0); 16]],
+                },
+                LookupTable {
+                    id: collision_id,
+                    data: vec![vec![From::from(1); 16]],
+                },
             ])
             .build();
@@ -547,11 +575,10 @@
         );

         let cs = ConstraintSystem::<Fp>::create(gates.clone())
-            .lookup(vec![LookupTable::create(
-                collision_id,
-                vec![vec![From::from(0); 16]],
-            )
-            .unwrap()])
+            .lookup(vec![LookupTable {
+                id: collision_id,
+                data: vec![vec![From::from(0); 16]],
+            }])
             .runtime(Some(vec![RuntimeTableCfg {
                 id: collision_id,
                 first_column: vec![From::from(1); 16],
             }]))
             .build();

         assert!(
-            matches!(
-                cs,
-                Err(SetupError::LookupCreation(
-                    LookupError::LookupTableIdCollision { .. }
-                ))
-            ),
-            "LookupConstraintSystem::create(...) must fail, collision between runtime and lookup ids"
+            cs.is_ok(),
+            "LookupConstraintSystem::create(...) must not fail when there is a collision between runtime and lookup ids"
         );
     }
 }
diff --git a/kimchi/src/circuits/lookup/lookups.rs b/kimchi/src/circuits/lookup/lookups.rs
index 12e4bd058d..002b040828 100644
--- a/kimchi/src/circuits/lookup/lookups.rs
+++ b/kimchi/src/circuits/lookup/lookups.rs
@@ -222,7 +222,7 @@ impl LookupInfo {
         };

         // TODO: is take(n) useful here? I don't see why we need this
-        for (i, gate) in gates.iter().take(n).enumerate() {
+        for (i, gate) in gates.iter().enumerate().take(n) {
             let typ = gate.typ;

             if let Some(lookup_pattern) = LookupPattern::from_gate(typ, CurrOrNext::Curr) {
@@ -323,7 +323,6 @@ pub type JointLookupSpec<F> = JointLookup<SingleLookup<F>, LookupTableID>;
 pub type JointLookupValue<F> = JointLookup<F, F>;

 impl<F: Zero + One + Clone + Neg<Output = F> + From<u64>> JointLookupValue<F> {
-    // TODO: Support multiple tables
     /// Evaluate the combined value of a joint-lookup.
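    /// For instance (an illustrative sketch, not part of the crate docs): for
    /// an entry `[v0, v1, v2]` in the table with id `t`, with joint combiner
    /// `j` and table-id combiner `c`, the combined value is
    /// `v0 + j*v1 + j^2*v2 + c*t`.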
pub fn evaluate(&self, joint_combiner: &F, table_id_combiner: &F) -> F { combine_table_entry( diff --git a/kimchi/src/circuits/lookup/runtime_tables.rs b/kimchi/src/circuits/lookup/runtime_tables.rs index f952ee8a32..f8123d75d8 100644 --- a/kimchi/src/circuits/lookup/runtime_tables.rs +++ b/kimchi/src/circuits/lookup/runtime_tables.rs @@ -15,8 +15,6 @@ pub struct RuntimeTableSpec { pub len: usize, } -/// A configuration of the runtime table as known at setup time. -/// /// Use this type at setup time, to list all the runtime tables. /// /// Note: care must be taken as table IDs can collide with IDs of other types of lookup tables. @@ -24,8 +22,7 @@ pub struct RuntimeTableSpec { pub struct RuntimeTableCfg { /// The table ID. pub id: i32, - /// The content of the first column of the runtime table, i.e. - /// keys when a table is viewed as a (k,v) array. + /// The content of the first column of the runtime table. pub first_column: Vec, } @@ -56,13 +53,12 @@ impl From> for RuntimeTableSpec { } /// A runtime table. Runtime tables must match the configuration -/// specified in [`RuntimeTableCfg`]. +/// that was specified in [`RuntimeTableCfg`]. #[derive(Debug, Clone)] pub struct RuntimeTable { /// The table id. pub id: i32, - /// A single column. Represents runtime table values, where - /// ['RuntimeTableCfg'] defines compile-time keys. + /// A single column. pub data: Vec, } diff --git a/kimchi/src/circuits/lookup/tables/mod.rs b/kimchi/src/circuits/lookup/tables/mod.rs index 7760086512..cd183cb714 100644 --- a/kimchi/src/circuits/lookup/tables/mod.rs +++ b/kimchi/src/circuits/lookup/tables/mod.rs @@ -1,109 +1,10 @@ -use ark_ff::{Field, One, Zero}; +use ark_ff::{FftField, One, Zero}; use poly_commitment::PolyComm; use serde::{Deserialize, Serialize}; -use thiserror::Error; pub mod range_check; pub mod xor; -/// A table of values that can be used for a lookup, along with the ID for the table. -#[derive(Debug, Clone)] -pub struct LookupTable { - id: i32, - data: Vec>, -} - -/// Represents inconsistency errors during table construction and composition. -#[derive(Debug, Error)] -pub enum LookupTableError { - #[error("Table must be nonempty")] - InputTableDataEmpty, - #[error("One of the lookup tables has columns of different lengths")] - InconsistentTableLength, - #[error("The table with id 0 must have an entry of all zeros")] - TableIDZeroMustHaveZeroEntry, -} - -impl LookupTable -where - F: Field, -{ - pub fn create(id: i32, data: Vec>) -> Result { - let res = LookupTable { id, data }; - - // Empty tables are not allowed - if res.data.is_empty() { - return Err(LookupTableError::InputTableDataEmpty); - } - - // All columns in the table must have same length - let table_len = res.len(); - for col in res.data.iter() { - if col.len() != table_len { - return Err(LookupTableError::InconsistentTableLength); - } - } - - // If a table has ID 0, then it must have a zero entry. - // This is for the dummy lookups to work. - if id == 0 && !res.has_zero_entry() { - return Err(LookupTableError::TableIDZeroMustHaveZeroEntry); - } - - Ok(res) - } - - /// Return true if the table has an entry (row) containing all zeros. - fn has_zero_entry(&self) -> bool { - // reminder: a table is written as a list of columns, - // not as a list of row entries. - for row in 0..self.len() { - let mut row_zero = true; - for col in &self.data { - if !col[row].is_zero() { - row_zero = false; - break; - } - } - if row_zero { - return true; - } - } - - false - } - - /// Returns the number of columns, i.e. the width of the table. 
- /// It is less error prone to introduce this method than using the public - /// field data. - pub fn width(&self) -> usize { - self.data.len() - } - - /// Returns the length (height) of the table. - pub fn len(&self) -> usize { - if self.is_empty() { - panic!("LookupTable#len() is called on an empty table") - } - self.data[0].len() - } - - /// Returns `true` if the lookup table is empty, `false` otherwise. - pub fn is_empty(&self) -> bool { - self.data.is_empty() - } - - /// Returns table id. - pub fn id(&self) -> i32 { - self.id - } - - /// Returns table data. - pub fn data(&self) -> &Vec> { - &self.data - } -} - //~ spec:startcode /// The table ID associated with the XOR lookup table. pub const XOR_TABLE_ID: i32 = 0; @@ -166,8 +67,49 @@ impl IntoIterator for GateLookupTables { } } +/// A table of values that can be used for a lookup, along with the ID for the table. +#[derive(Debug, Clone)] +pub struct LookupTable { + pub id: i32, + pub data: Vec>, +} + +impl LookupTable +where + F: FftField, +{ + /// Return true if the table has an entry (row) containing all zeros. + pub fn has_zero_entry(&self) -> bool { + // reminder: a table is written as a list of columns, + // not as a list of row entries. + for row in 0..self.len() { + if self.data.iter().all(|col| col[row].is_zero()) { + return true; + } + } + false + } + + /// Returns the number of columns, i.e. the width of the table. + /// It is less error prone to introduce this method than using the public + /// field data. + pub fn width(&self) -> usize { + self.data.len() + } + + /// Returns the length of the table. + pub fn len(&self) -> usize { + self.data[0].len() + } + + /// Returns `true` if the lookup table is empty, `false` otherwise. + pub fn is_empty(&self) -> bool { + self.data.is_empty() + } +} + /// Returns the lookup table associated to a [`GateLookupTable`]. -pub fn get_table(table_name: GateLookupTable) -> LookupTable { +pub fn get_table(table_name: GateLookupTable) -> LookupTable { match table_name { GateLookupTable::Xor => xor::xor_table(), GateLookupTable::RangeCheck => range_check::range_check_table(), @@ -209,7 +151,6 @@ where F: Zero + One + Clone, I: DoubleEndedIterator, { - // TODO: unnecessary cloning if binops between F and &F are supported v.rev() .fold(F::zero(), |acc, x| joint_combiner.clone() * acc + x.clone()) + table_id_combiner.clone() * table_id.clone() @@ -310,50 +251,3 @@ pub mod caml { } } } - -#[cfg(test)] -mod tests { - - use super::*; - use mina_curves::pasta::Fp; - - #[test] - fn test_zero_table_zero_row() { - let lookup_r: u64 = 32; - // Table column that /does not/ contain zeros - let lookup_table_values_1: Vec<_> = (1..lookup_r + 1).map(From::from).collect(); - // Another table column that /does/ contain zeros. - let lookup_table_values_2: Vec<_> = (0..lookup_r).map(From::from).collect(); - - // Jointly two columns /do not/ have a full zero now. - let table: Result, _> = - LookupTable::create(0, vec![lookup_table_values_1, lookup_table_values_2]); - - assert!( - matches!(table, Err(LookupTableError::TableIDZeroMustHaveZeroEntry)), - "LookupTable::create(...) must fail when zero table has no zero rows" - ); - } - - #[test] - fn test_invalid_data_inputs() { - let table: Result, _> = LookupTable::create(0, vec![]); - assert!( - matches!(table, Err(LookupTableError::InputTableDataEmpty)), - "LookupTable::create(...) 
must fail when empty table creation is attempted" - ); - - let lookup_r: u64 = 32; - // Two columns of different lengths - let lookup_table_values_1: Vec<_> = (0..2 * lookup_r).map(From::from).collect(); - let lookup_table_values_2: Vec<_> = (0..lookup_r).map(From::from).collect(); - - let table: Result, _> = - LookupTable::create(0, vec![lookup_table_values_1, lookup_table_values_2]); - - assert!( - matches!(table, Err(LookupTableError::InconsistentTableLength)), - "LookupTable::create(...) must fail when columns are not of the same length" - ); - } -} diff --git a/kimchi/src/circuits/lookup/tables/range_check.rs b/kimchi/src/circuits/lookup/tables/range_check.rs index 2bf00e64f7..001ffedd52 100644 --- a/kimchi/src/circuits/lookup/tables/range_check.rs +++ b/kimchi/src/circuits/lookup/tables/range_check.rs @@ -14,9 +14,11 @@ pub fn range_check_table() -> LookupTable where F: Field, { - let data = vec![(0..RANGE_CHECK_UPPERBOUND).map(F::from).collect()]; - LookupTable::create(RANGE_CHECK_TABLE_ID, data) - .expect("range_check_table creation must succeed") + let table = vec![(0..RANGE_CHECK_UPPERBOUND).map(F::from).collect()]; + LookupTable { + id: RANGE_CHECK_TABLE_ID, + data: table, + } } pub const TABLE_SIZE: usize = RANGE_CHECK_UPPERBOUND as usize; diff --git a/kimchi/src/circuits/lookup/tables/xor.rs b/kimchi/src/circuits/lookup/tables/xor.rs index 6f87377d96..d846942a31 100644 --- a/kimchi/src/circuits/lookup/tables/xor.rs +++ b/kimchi/src/circuits/lookup/tables/xor.rs @@ -37,8 +37,10 @@ pub fn xor_table() -> LookupTable { // Just to be safe. assert!(r[r.len() - 1].is_zero()); } - - LookupTable::create(XOR_TABLE_ID, data).expect("xor_table creation must succeed") + LookupTable { + id: XOR_TABLE_ID, + data, + } } pub const TABLE_SIZE: usize = 256; diff --git a/kimchi/src/precomputed_srs.rs b/kimchi/src/precomputed_srs.rs index 11f60edec3..116554e262 100644 --- a/kimchi/src/precomputed_srs.rs +++ b/kimchi/src/precomputed_srs.rs @@ -33,9 +33,6 @@ where let file = File::open(srs_path.clone()).unwrap_or_else(|_| panic!("missing SRS file: {srs_path:?}")); let reader = BufReader::new(file); - // Note: In tests, this read takes significant amount of time (2 min) due - // to -O0 optimisation level. Compile tests with -O1 or --release flag. - // See: https://github.com/o1-labs/proof-systems/blob/develop/CONTRIBUTING.md#development rmp_serde::from_read(reader).unwrap() } diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index 3b0bc69a1c..bce05308d9 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -260,7 +260,7 @@ where .interpolate(); //~ 1. Commit (non-hiding) to the negated public input polynomial. - let public_comm = index.srs.commit_non_hiding(&public_poly, num_chunks, None); + let public_comm = index.srs.commit_non_hiding(&public_poly, num_chunks); let public_comm = { index .srs @@ -393,7 +393,7 @@ where let runtime_table_comm = index .srs - .commit(&runtime_table_contribution, num_chunks, None, rng); + .commit(&runtime_table_contribution, num_chunks, rng); // absorb the commitment absorb_commitment(&mut fq_sponge, &runtime_table_comm.commitment); @@ -607,7 +607,7 @@ where let z_poly = index.perm_aggreg(&witness, &beta, &gamma, rng)?; //~ 1. Commit (hidding) to the permutation aggregation polynomial $z$. - let z_comm = index.srs.commit(&z_poly, num_chunks, None, rng); + let z_comm = index.srs.commit(&z_poly, num_chunks, rng); //~ 1. Absorb the permutation aggregation polynomial $z$ with the Fq-Sponge. 
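    // With the `shifted` part of commitments removed in this diff, absorbing a
    // commitment boils down to absorbing its chunk vector; roughly:
    //     fq_sponge.absorb_g(&z_comm.commitment.elems);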
absorb_commitment(&mut fq_sponge, &z_comm.commitment); @@ -867,7 +867,7 @@ where }; //~ 1. commit (hiding) to the quotient polynomial $t$ - let t_comm = { index.srs.commit("ient_poly, 7 * num_chunks, None, rng) }; + let t_comm = { index.srs.commit("ient_poly, 7 * num_chunks, rng) }; //~ 1. Absorb the the commitment of the quotient polynomial with the Fq-Sponge. absorb_commitment(&mut fq_sponge, &t_comm.commitment); @@ -937,7 +937,7 @@ where //~~ * $s_i$ //~~ * $w_i$ //~~ * $z$ - //~~ * lookup (TODO) + //~~ * lookup (TODO, see [this issue](https://github.com/MinaProtocol/mina/issues/13886)) //~~ * generic selector //~~ * poseidon selector //~ @@ -1152,10 +1152,9 @@ where PolyComm { // blinding_f - Z_H(zeta) * blinding_t - unshifted: vec![ + elems: vec![ blinding_f - (zeta_to_domain_size - G::ScalarField::one()) * blinding_t, ], - shifted: None, } }; @@ -1189,7 +1188,7 @@ where .map(|RecursionChallenge { chals, comm }| { ( DensePolynomial::from_coefficients_vec(b_poly_coefficients(chals)), - comm.unshifted.len(), + comm.elems.len(), ) }) .collect::>(); @@ -1224,8 +1223,7 @@ where //~ (and evaluation proofs) in the protocol. //~ First, include the previous challenges, in case we are in a recursive prover. let non_hiding = |d1_size: usize| PolyComm { - unshifted: vec![G::ScalarField::zero(); d1_size], - shifted: None, + elems: vec![G::ScalarField::zero(); d1_size], }; let coefficients_form = DensePolynomialOrEvaluations::DensePolynomial; @@ -1233,12 +1231,11 @@ where let mut polynomials = polys .iter() - .map(|(p, d1_size)| (coefficients_form(p), None, non_hiding(*d1_size))) + .map(|(p, d1_size)| (coefficients_form(p), non_hiding(*d1_size))) .collect::>(); let fixed_hiding = |d1_size: usize| PolyComm { - unshifted: vec![G::ScalarField::one(); d1_size], - shifted: None, + elems: vec![G::ScalarField::one(); d1_size], }; //~ 1. 
Then, include: @@ -1249,48 +1246,38 @@ where //~~ * the poseidon selector //~~ * the 15 registers/witness columns //~~ * the 6 sigmas - polynomials.push(( - coefficients_form(&public_poly), - None, - fixed_hiding(num_chunks), - )); - polynomials.push((coefficients_form(&ft), None, blinding_ft)); - polynomials.push((coefficients_form(&z_poly), None, z_comm.blinders)); + polynomials.push((coefficients_form(&public_poly), fixed_hiding(num_chunks))); + polynomials.push((coefficients_form(&ft), blinding_ft)); + polynomials.push((coefficients_form(&z_poly), z_comm.blinders)); polynomials.push(( evaluations_form(&index.column_evaluations.generic_selector4), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.poseidon_selector8), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.complete_add_selector4), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.mul_selector8), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.emul_selector8), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.endomul_scalar_selector8), - None, fixed_hiding(num_chunks), )); polynomials.extend( witness_poly .iter() .zip(w_comm.iter()) - .map(|(w, c)| (coefficients_form(w), None, c.blinders.clone())) + .map(|(w, c)| (coefficients_form(w), c.blinders.clone())) .collect::>(), ); polynomials.extend( @@ -1298,13 +1285,13 @@ where .column_evaluations .coefficients8 .iter() - .map(|coefficientm| (evaluations_form(coefficientm), None, non_hiding(num_chunks))) + .map(|coefficientm| (evaluations_form(coefficientm), non_hiding(num_chunks))) .collect::>(), ); polynomials.extend( index.column_evaluations.permutation_coefficients8[0..PERMUTS - 1] .iter() - .map(|w| (evaluations_form(w), None, non_hiding(num_chunks))) + .map(|w| (evaluations_form(w), non_hiding(num_chunks))) .collect::>(), ); @@ -1314,7 +1301,6 @@ where { polynomials.push(( evaluations_form(range_check0_selector8), - None, non_hiding(num_chunks), )); } @@ -1323,7 +1309,6 @@ where { polynomials.push(( evaluations_form(range_check1_selector8), - None, non_hiding(num_chunks), )); } @@ -1334,7 +1319,6 @@ where { polynomials.push(( evaluations_form(foreign_field_add_selector8), - None, non_hiding(num_chunks), )); } @@ -1345,23 +1329,14 @@ where { polynomials.push(( evaluations_form(foreign_field_mul_selector8), - None, non_hiding(num_chunks), )); } if let Some(xor_selector8) = index.column_evaluations.xor_selector8.as_ref() { - polynomials.push(( - evaluations_form(xor_selector8), - None, - non_hiding(num_chunks), - )); + polynomials.push((evaluations_form(xor_selector8), non_hiding(num_chunks))); } if let Some(rot_selector8) = index.column_evaluations.rot_selector8.as_ref() { - polynomials.push(( - evaluations_form(rot_selector8), - None, - non_hiding(num_chunks), - )); + polynomials.push((evaluations_form(rot_selector8), non_hiding(num_chunks))); } //~~ * optionally, the runtime table @@ -1372,41 +1347,58 @@ where let sorted_comms = lookup_context.sorted_comms.as_ref().unwrap(); for (poly, comm) in sorted_poly.iter().zip(sorted_comms) { - polynomials.push((coefficients_form(poly), None, comm.blinders.clone())); + polynomials.push((coefficients_form(poly), comm.blinders.clone())); } //~~ * add the lookup aggreg polynomial let aggreg_poly = lookup_context.aggreg_coeffs.as_ref().unwrap(); let aggreg_comm = 
lookup_context.aggreg_comm.as_ref().unwrap(); - polynomials.push(( - coefficients_form(aggreg_poly), - None, - aggreg_comm.blinders.clone(), - )); + polynomials.push((coefficients_form(aggreg_poly), aggreg_comm.blinders.clone())); //~~ * add the combined table polynomial - let table_blinding = if lcs.runtime_selector.is_some() { - let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); + let table_blinding = { let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); + let table_id_combiner = lookup_context.table_id_combiner.as_ref().unwrap(); + let max_fixed_lookup_table_size = { + // CAUTION: This is not `lcs.configuration.lookup_info.max_joint_size` because + // the lookup table may be strictly narrower, and as such will not contribute + // the associated blinders. + // For example, using a runtime table with the lookup gate (width 2), but only + // width-1 fixed tables (e.g. range check), it would be incorrect to use the + // wider width (2) because there are no such contributing commitments! + // Note that lookup_table8 is a list of polynomials + lcs.lookup_table8.len() + }; + let base_blinding = { + let fixed_table_blinding = if max_fixed_lookup_table_size == 0 { + G::ScalarField::zero() + } else { + (1..max_fixed_lookup_table_size).fold(G::ScalarField::one(), |acc, _| { + G::ScalarField::one() + *joint_combiner * acc + }) + }; + fixed_table_blinding + *table_id_combiner + }; + if lcs.runtime_selector.is_some() { + let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); - let unshifted = runtime_comm - .blinders - .unshifted - .iter() - .map(|blinding| *joint_combiner * blinding) - .collect(); + let elems = runtime_comm + .blinders + .elems + .iter() + .map(|blinding| *joint_combiner * blinding + base_blinding) + .collect(); - PolyComm { - unshifted, - shifted: None, + PolyComm { elems } + } else { + let elems = vec![base_blinding; num_chunks]; + PolyComm { elems } } - } else { - non_hiding(num_chunks) }; let joint_lookup_table = lookup_context.joint_lookup_table.as_ref().unwrap(); - polynomials.push((coefficients_form(joint_lookup_table), None, table_blinding)); + polynomials.push((coefficients_form(joint_lookup_table), table_blinding)); //~~ * if present, add the runtime table polynomial if lcs.runtime_selector.is_some() { @@ -1415,7 +1407,6 @@ where polynomials.push(( coefficients_form(runtime_table), - None, runtime_table_comm.blinders.clone(), )); } @@ -1425,27 +1416,21 @@ where if let Some(runtime_lookup_table_selector) = lcs.runtime_selector.as_ref() { polynomials.push(( evaluations_form(runtime_lookup_table_selector), - None, non_hiding(1), )) } if let Some(xor_lookup_selector) = lcs.lookup_selectors.xor.as_ref() { - polynomials.push((evaluations_form(xor_lookup_selector), None, non_hiding(1))) + polynomials.push((evaluations_form(xor_lookup_selector), non_hiding(1))) } if let Some(lookup_gate_selector) = lcs.lookup_selectors.lookup.as_ref() { - polynomials.push((evaluations_form(lookup_gate_selector), None, non_hiding(1))) + polynomials.push((evaluations_form(lookup_gate_selector), non_hiding(1))) } if let Some(range_check_lookup_selector) = lcs.lookup_selectors.range_check.as_ref() { - polynomials.push(( - evaluations_form(range_check_lookup_selector), - None, - non_hiding(1), - )) + polynomials.push((evaluations_form(range_check_lookup_selector), non_hiding(1))) } if let Some(foreign_field_mul_lookup_selector) = lcs.lookup_selectors.ffmul.as_ref() { polynomials.push(( evaluations_form(foreign_field_mul_lookup_selector), - None, 
non_hiding(1), )) } diff --git a/kimchi/src/prover_index.rs b/kimchi/src/prover_index.rs index e01cf19e90..05db4f5b6b 100644 --- a/kimchi/src/prover_index.rs +++ b/kimchi/src/prover_index.rs @@ -211,7 +211,6 @@ pub mod testing { override_srs_size, |d1: D, size: usize| { let log2_size = size.ilog2(); - // Run test_srs_serialization test to generate test SRS & enable this let mut srs = if log2_size <= precomputed_srs::SERIALIZED_SRS_SIZE { // TODO: we should trim it if it's smaller precomputed_srs::get_srs() diff --git a/kimchi/src/tests/and.rs b/kimchi/src/tests/and.rs index 9c1a86fbcd..e344af6da4 100644 --- a/kimchi/src/tests/and.rs +++ b/kimchi/src/tests/and.rs @@ -255,7 +255,6 @@ fn verify_bad_and_decomposition( ); witness[col][and_row] += G::ScalarField::one(); } - if col == 2 { assert_eq!( cs.gates[0].verify_witness::(0, witness, &cs, &witness[0][0..cs.public]), diff --git a/kimchi/src/tests/lookup.rs b/kimchi/src/tests/lookup.rs index 269235d35e..d92f62e24b 100644 --- a/kimchi/src/tests/lookup.rs +++ b/kimchi/src/tests/lookup.rs @@ -14,6 +14,7 @@ use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, sponge::{DefaultFqSponge, DefaultFrSponge}, }; +use rand::prelude::*; use rand::Rng; use std::array; @@ -35,8 +36,10 @@ fn setup_lookup_proof(use_values_from_table: bool, num_lookups: usize, table_siz let index_column = (0..lookup_table_values.len() as u64) .map(Into::into) .collect(); - LookupTable::create(id as i32, vec![index_column, lookup_table_values.clone()]) - .expect("setup_lookup_proof: Table creation must succeed") + LookupTable { + id: id as i32, + data: vec![index_column, lookup_table_values.clone()], + } }) .collect(); @@ -131,7 +134,9 @@ fn setup_successfull_runtime_table_test( runtime_tables: Vec>, lookups: Vec, ) { - let mut rng = rand::thread_rng(); + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); let nb_lookups = lookups.len(); // circuit @@ -194,7 +199,9 @@ fn setup_successfull_runtime_table_test( #[test] fn test_runtime_table() { let num = 5; - let mut rng = rand::thread_rng(); + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); let first_column = [8u32, 9, 8, 7, 1]; let len = first_column.len(); @@ -448,7 +455,9 @@ fn test_negative_test_runtime_table_prover_uses_undefined_id_in_index_and_witnes #[test] fn test_runtime_table_with_more_than_one_runtime_table_data_given_by_prover() { - let mut rng = rand::thread_rng(); + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); let first_column = [0, 1, 2, 3, 4]; let len = first_column.len(); @@ -551,7 +560,9 @@ fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_fixed_va #[test] fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_random_values() { - let mut rng = rand::thread_rng(); + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); let len = rng.gen_range(1usize..1000); let first_column: Vec = (0..len as i32).collect(); @@ -573,3 +584,147 @@ fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_random_v setup_successfull_runtime_table_test(vec![cfg], vec![runtime_table], lookups); } + +// This test verifies that if there is a table with ID 0, it contains a row with only zeroes. +// This is to enforce the constraint we have on the so-called "dummy value". 
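// For example (illustrative): a fixed table with id 0 and columns
//     indices: [1, 2], values: [5, 7]
// has no all-zero row, so `setup()` must fail; adding the row (0, 0) would
// make it a valid home for the dummy lookup entry.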
+// FIXME: see https://github.com/o1-labs/proof-systems/issues/1460
+// We should test the error message; the "expected" argument of the macro won't
+// be allowed anymore in a future release, see clippy output.
+#[test]
+#[should_panic]
+fn test_lookup_with_a_table_with_id_zero_but_no_zero_entry() {
+    let max_len: u32 = 100u32;
+    let seed: [u8; 32] = thread_rng().gen();
+    eprintln!("Seed: {:?}", seed);
+    let mut rng = StdRng::from_seed(seed);
+
+    // Non-zero-length table
+    let len = 1u32 + rng.gen_range(0u32..max_len);
+    // Table id is 0
+    let table_id: i32 = 0;
+    // Always include index 0 in the table. Maybe even a few.
+    let indices: Vec<Fp> = (0..len)
+        .map(|i| {
+            if i == 0 {
+                0u32
+            } else {
+                rng.gen_range(0u32..max_len)
+            }
+        })
+        .map(Into::into)
+        .collect();
+    // But no zero values!
+    // So we'll get rows with zeroes that are not full zero rows.
+    let values: Vec<Fp> = (0..len)
+        .map(|_| rng.gen_range(1u32..max_len))
+        .map(Into::into)
+        .collect();
+    let lookup_table = LookupTable {
+        id: table_id,
+        data: vec![indices, values],
+    };
+    let lookup_tables = vec![lookup_table];
+    let num_lookups = 20;
+
+    // circuit gates
+    let gates = (0..num_lookups)
+        .map(|i| CircuitGate::new(GateType::Lookup, Wire::for_row(i), vec![]))
+        .collect();
+
+    // 0 everywhere, it should handle the case (0, 0, 0). We simulate a lot of
+    // lookups with (0, 0, 0).
+    let witness = array::from_fn(|_col| vec![Fp::zero(); num_lookups]);
+
+    let _ = TestFramework::<Vesta>::default()
+        .gates(gates)
+        .witness(witness)
+        .lookup_tables(lookup_tables)
+        .setup();
+}
+
+#[test]
+fn test_dummy_value_is_added_in_an_arbitrarily_created_table_when_no_table_with_id_0() {
+    let seed: [u8; 32] = thread_rng().gen();
+    eprintln!("Seed: {:?}", seed);
+    let mut rng = StdRng::from_seed(seed);
+    let max_len: u32 = 100u32;
+    let max_table_id: i32 = 100;
+
+    // No zero-length table
+    let len = rng.gen_range(1u32..max_len);
+    // No table of ID 0
+    let table_id: i32 = rng.gen_range(1i32..max_table_id);
+    // No index 0 in the table.
+    let indices: Vec<Fp> = (0..len)
+        .map(|_| rng.gen_range(1u32..max_len))
+        .map(Into::into)
+        .collect();
+    // No zero value
+    let values: Vec<Fp> = (0..len)
+        .map(|_| rng.gen_range(1u32..max_len))
+        .map(Into::into)
+        .collect();
+    let lookup_table = LookupTable {
+        id: table_id,
+        data: vec![indices, values],
+    };
+    let lookup_tables = vec![lookup_table];
+    let num_lookups = 20;
+
+    // circuit gates
+    let gates = (0..num_lookups)
+        .map(|i| CircuitGate::new(GateType::Lookup, Wire::for_row(i), vec![]))
+        .collect();
+
+    // 0 everywhere, it should handle the case (0, 0, 0). We simulate a lot of
+    // lookups with (0, 0, 0).
+    let witness = array::from_fn(|_col| vec![Fp::zero(); num_lookups]);
+
+    TestFramework::<Vesta>::default()
+        .gates(gates)
+        .witness(witness)
+        .lookup_tables(lookup_tables)
+        .setup()
+        .prove_and_verify::<BaseSponge, ScalarSponge>()
+        .unwrap();
+}
+
+#[test]
+fn test_dummy_zero_entry_is_counted_while_computing_domain_size() {
+    let seed: [u8; 32] = thread_rng().gen();
+    eprintln!("Seed: {:?}", seed);
+    let mut rng = StdRng::from_seed(seed);
+
+    let power_of_2: u32 = rng.gen_range(3..16);
+    // 4 = zk_rows + 1 for the closing constraint on the polynomial.
+    let len = (1 << power_of_2) - 3 - 1;
+    // We want to create a table with an ID different from 0.
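    // Worked example of the arithmetic below (assuming zk_rows = 3): with
    // power_of_2 = 3, len = 8 - 3 - 1 = 4; the lookup domain then needs
    // 4 table rows + 1 dummy zero entry = 5 rows, the circuit lower bound is
    // 5 + 1 = 6, and 6 + zk_rows = 9 > 8, so the domain doubles to 16.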
+ let table_id: i32 = rng.gen_range(1..1_000); + let idx: Vec = (1..(len + 1) as i32).map(Into::into).collect(); + let values: Vec = (1..(len + 1)) + .map(|_| UniformRand::rand(&mut rng)) + .collect(); + let lt = LookupTable { + id: table_id, + data: vec![idx, values], + }; + + // Dummy, used for the setup. Only the number of gates must be lower than + // the length of the table to avoid having a bigger circuit than the table + // size, and therefore use it as the main component for the domain size + // computation. + let num_lookups = rng.gen_range(2..len); + let gates = (0..num_lookups) + .map(|i| CircuitGate::new(GateType::Lookup, Wire::for_row(i), vec![])) + .collect(); + let witness = array::from_fn(|_col| vec![Fp::zero(); num_lookups]); + + let setup = TestFramework::::default() + .gates(gates) + .witness(witness) + .lookup_tables(vec![lt]) + .setup(); + let domain_size = setup.prover_index().cs.domain.d1.size; + // As the dummy entry has been added, we reached the next power of two + assert!(domain_size == (1 << (power_of_2 + 1))); +} diff --git a/kimchi/src/tests/range_check.rs b/kimchi/src/tests/range_check.rs index 577e2aa4f7..8e46962add 100644 --- a/kimchi/src/tests/range_check.rs +++ b/kimchi/src/tests/range_check.rs @@ -64,18 +64,7 @@ fn create_test_prover_index( CircuitGate::::create_multi_range_check(0) }; - new_index_for_test_with_lookups( - gates, - public_size, - 0, - // specifying lookup table is not necessary, - // since it's already passed through patterns implicitly - //vec![range_check::gadget::lookup_table()], - vec![], - None, - false, - None, - ) + new_index_for_test_with_lookups(gates, public_size, 0, vec![], None, false, None) } #[test] diff --git a/kimchi/src/tests/recursion.rs b/kimchi/src/tests/recursion.rs index d7f028acb5..719318eb96 100644 --- a/kimchi/src/tests/recursion.rs +++ b/kimchi/src/tests/recursion.rs @@ -43,7 +43,7 @@ fn test_recursion() { let comm = { let coeffs = b_poly_coefficients(&chals); let b = DensePolynomial::from_coefficients_vec(coeffs); - index.srs.commit_non_hiding(&b, 1, None) + index.srs.commit_non_hiding(&b, 1) }; RecursionChallenge::new(chals, comm) }; diff --git a/kimchi/src/verifier.rs b/kimchi/src/verifier.rs index f831b05cd3..722a17e53a 100644 --- a/kimchi/src/verifier.rs +++ b/kimchi/src/verifier.rs @@ -208,38 +208,47 @@ where } } - //~ 1. Sample $\beta$ with the Fq-Sponge. + // --- PlonK - Round 2 + //~ 1. Sample the first permutation challenge $\beta$ with the Fq-Sponge. let beta = fq_sponge.challenge(); - //~ 1. Sample $\gamma$ with the Fq-Sponge. + //~ 1. Sample the second permutation challenge $\gamma$ with the Fq-Sponge. let gamma = fq_sponge.challenge(); //~ 1. If using lookup, absorb the commitment to the aggregation lookup polynomial. - self.commitments.lookup.iter().for_each(|l| { - absorb_commitment(&mut fq_sponge, &l.aggreg); - }); + if index.lookup_index.is_some() { + // Should not fail, as the lookup index is present + let lookup_commits = self + .commitments + .lookup + .as_ref() + .ok_or(VerifyError::LookupCommitmentMissing)?; + absorb_commitment(&mut fq_sponge, &lookup_commits.aggreg); + } //~ 1. Absorb the commitment to the permutation trace with the Fq-Sponge. absorb_commitment(&mut fq_sponge, &self.commitments.z_comm); - //~ 1. Sample $\alpha'$ with the Fq-Sponge. + // --- PlonK - Round 3 + //~ 1. Sample the quotient challenge $\alpha'$ with the Fq-Sponge. let alpha_chal = ScalarChallenge(fq_sponge.challenge()); //~ 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details). 
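    // Roughly: `to_field` expands the 128-bit scalar challenge into a full
    // field element using the curve endomorphism (the Halo-style technique),
    // with `endo_r` the endomorphism coefficient.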
let alpha = alpha_chal.to_field(endo_r); //~ 1. Enforce that the length of the $t$ commitment is of size 7. - if self.commitments.t_comm.unshifted.len() > chunk_size * 7 { + if self.commitments.t_comm.elems.len() > chunk_size * 7 { return Err(VerifyError::IncorrectCommitmentLength( "t", chunk_size * 7, - self.commitments.t_comm.unshifted.len(), + self.commitments.t_comm.elems.len(), )); } //~ 1. Absorb the commitment to the quotient polynomial $t$ into the argument. absorb_commitment(&mut fq_sponge, &self.commitments.t_comm); + // --- PlonK - Round 4 //~ 1. Sample $\zeta'$ with the Fq-Sponge. let zeta_chal = ScalarChallenge(fq_sponge.challenge()); @@ -456,10 +465,10 @@ where let ft_eval1 = vec![self.ft_eval1]; #[allow(clippy::type_complexity)] - let mut es: Vec<(Vec>, Option)> = - polys.iter().map(|(_, e)| (e.clone(), None)).collect(); - es.push((public_evals.to_vec(), None)); - es.push((vec![ft_eval0, ft_eval1], None)); + let mut es: Vec>> = + polys.iter().map(|(_, e)| e.clone()).collect(); + es.push(public_evals.to_vec()); + es.push(vec![ft_eval0, ft_eval1]); for col in [ Column::Z, Column::Index(GateType::Generic), @@ -554,19 +563,16 @@ where .into_iter() .flatten(), ) { - es.push(( - { - let evals = self - .evals - .get_column(col) - .ok_or(VerifyError::MissingEvaluation(col))?; - vec![evals.zeta.clone(), evals.zeta_omega.clone()] - }, - None, - )) + es.push({ + let evals = self + .evals + .get_column(col) + .ok_or(VerifyError::MissingEvaluation(col))?; + vec![evals.zeta.clone(), evals.zeta_omega.clone()] + }) } - combined_inner_product(&evaluation_points, &v, &u, &es, index.srs().max_poly_size()) + combined_inner_product(&v, &u, &es) }; let oracles = RandomOracles { @@ -798,10 +804,7 @@ where .expect("pre-computed committed lagrange bases not found"); let com: Vec<_> = lgr_comm.iter().take(verifier_index.public).collect(); if public_input.is_empty() { - PolyComm::new( - vec![verifier_index.srs().blinding_commitment(); chunk_size], - None, - ) + PolyComm::new(vec![verifier_index.srs().blinding_commitment(); chunk_size]) } else { let elm: Vec<_> = public_input.iter().map(|s| -*s).collect(); let public_comm = PolyComm::::multi_scalar_mul(&com, &elm); @@ -920,21 +923,18 @@ where evaluations.extend(polys.into_iter().map(|(c, e)| Evaluation { commitment: c, evaluations: e, - degree_bound: None, })); //~~ * public input commitment evaluations.push(Evaluation { commitment: public_comm, evaluations: public_evals.to_vec(), - degree_bound: None, }); //~~ * ft commitment (chunks of it) evaluations.push(Evaluation { commitment: ft_comm, evaluations: vec![vec![ft_eval0], vec![proof.ft_eval1]], - degree_bound: None, }); for col in [ @@ -1018,7 +1018,6 @@ where .ok_or(VerifyError::MissingCommitment(col))? .clone(), evaluations: vec![evals.zeta.clone(), evals.zeta_omega.clone()], - degree_bound: None, }); } @@ -1041,6 +1040,9 @@ where let joint_combiner = oracles .joint_combiner .expect("joint_combiner should be present if lookups are used"); + // The table ID is added as the last column of the vector. + // Therefore, the exponent for the combiner for the table ID is the + // width of the concatenated table, i.e. max_joint_size. 
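            // Example (illustrative): with max_joint_size = 3 and joint
            // combiner j, a width-3 entry (a, b, c) of table t combines to
            //     a + j*b + j^2*c + j^3*t,
            // hence table_id_combiner = j^3 below.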
let table_id_combiner = joint_combiner .1 .pow([u64::from(li.lookup_info.max_joint_size)]); @@ -1060,7 +1062,6 @@ where evaluations.push(Evaluation { commitment: table_comm, evaluations: vec![lookup_table.zeta.clone(), lookup_table.zeta_omega.clone()], - degree_bound: None, }); // add evaluation of the runtime table polynomial @@ -1077,7 +1078,6 @@ where evaluations.push(Evaluation { commitment: runtime.clone(), evaluations: vec![runtime_eval.zeta, runtime_eval.zeta_omega], - degree_bound: None, }); } } @@ -1128,7 +1128,6 @@ where .ok_or(VerifyError::MissingCommitment(col))? .clone(), evaluations: vec![evals.zeta.clone(), evals.zeta_omega.clone()], - degree_bound: None, }); } diff --git a/kimchi/src/verifier_index.rs b/kimchi/src/verifier_index.rs index ff4aaca48a..1c82ef69a5 100644 --- a/kimchi/src/verifier_index.rs +++ b/kimchi/src/verifier_index.rs @@ -193,10 +193,10 @@ where lookup_table: cs .lookup_table8 .iter() - .map(|e| self.srs.commit_evaluations_non_hiding(domain, e)) + .map(|e| mask_fixed(self.srs.commit_evaluations_non_hiding(domain, e))) .collect(), table_ids: cs.table_ids8.as_ref().map(|table_ids8| { - self.srs.commit_evaluations_non_hiding(domain, table_ids8) + mask_fixed(self.srs.commit_evaluations_non_hiding(domain, table_ids8)) }), runtime_tables_selector: cs .runtime_selector @@ -440,42 +440,42 @@ impl> VerifierIndex // Always present for comm in sigma_comm.iter() { - fq_sponge.absorb_g(&comm.unshifted); + fq_sponge.absorb_g(&comm.elems); } for comm in coefficients_comm.iter() { - fq_sponge.absorb_g(&comm.unshifted); + fq_sponge.absorb_g(&comm.elems); } - fq_sponge.absorb_g(&generic_comm.unshifted); - fq_sponge.absorb_g(&psm_comm.unshifted); - fq_sponge.absorb_g(&complete_add_comm.unshifted); - fq_sponge.absorb_g(&mul_comm.unshifted); - fq_sponge.absorb_g(&emul_comm.unshifted); - fq_sponge.absorb_g(&endomul_scalar_comm.unshifted); + fq_sponge.absorb_g(&generic_comm.elems); + fq_sponge.absorb_g(&psm_comm.elems); + fq_sponge.absorb_g(&complete_add_comm.elems); + fq_sponge.absorb_g(&mul_comm.elems); + fq_sponge.absorb_g(&emul_comm.elems); + fq_sponge.absorb_g(&endomul_scalar_comm.elems); // Optional gates if let Some(range_check0_comm) = range_check0_comm { - fq_sponge.absorb_g(&range_check0_comm.unshifted); + fq_sponge.absorb_g(&range_check0_comm.elems); } if let Some(range_check1_comm) = range_check1_comm { - fq_sponge.absorb_g(&range_check1_comm.unshifted); + fq_sponge.absorb_g(&range_check1_comm.elems); } if let Some(foreign_field_mul_comm) = foreign_field_mul_comm { - fq_sponge.absorb_g(&foreign_field_mul_comm.unshifted); + fq_sponge.absorb_g(&foreign_field_mul_comm.elems); } if let Some(foreign_field_add_comm) = foreign_field_add_comm { - fq_sponge.absorb_g(&foreign_field_add_comm.unshifted); + fq_sponge.absorb_g(&foreign_field_add_comm.elems); } if let Some(xor_comm) = xor_comm { - fq_sponge.absorb_g(&xor_comm.unshifted); + fq_sponge.absorb_g(&xor_comm.elems); } if let Some(rot_comm) = rot_comm { - fq_sponge.absorb_g(&rot_comm.unshifted); + fq_sponge.absorb_g(&rot_comm.elems); } // Lookup index; optional @@ -497,26 +497,26 @@ impl> VerifierIndex }) = lookup_index { for entry in lookup_table { - fq_sponge.absorb_g(&entry.unshifted); + fq_sponge.absorb_g(&entry.elems); } if let Some(table_ids) = table_ids { - fq_sponge.absorb_g(&table_ids.unshifted); + fq_sponge.absorb_g(&table_ids.elems); } if let Some(runtime_tables_selector) = runtime_tables_selector { - fq_sponge.absorb_g(&runtime_tables_selector.unshifted); + fq_sponge.absorb_g(&runtime_tables_selector.elems); } 
if let Some(xor) = xor { - fq_sponge.absorb_g(&xor.unshifted); + fq_sponge.absorb_g(&xor.elems); } if let Some(lookup) = lookup { - fq_sponge.absorb_g(&lookup.unshifted); + fq_sponge.absorb_g(&lookup.elems); } if let Some(range_check) = range_check { - fq_sponge.absorb_g(&range_check.unshifted); + fq_sponge.absorb_g(&range_check.elems); } if let Some(ffmul) = ffmul { - fq_sponge.absorb_g(&ffmul.unshifted); + fq_sponge.absorb_g(&ffmul.elems); } } fq_sponge.digest_fq() diff --git a/poly-commitment/src/chunked.rs b/poly-commitment/src/chunked.rs index 32cb5e1408..9c3ee5c294 100644 --- a/poly-commitment/src/chunked.rs +++ b/poly-commitment/src/chunked.rs @@ -9,21 +9,19 @@ where C: CommitmentCurve, { /// Multiplies each commitment chunk of f with powers of zeta^n - /// Note that it ignores the shifted part. // TODO(mimoo): better name for this function pub fn chunk_commitment(&self, zeta_n: C::ScalarField) -> Self { let mut res = C::Projective::zero(); // use Horner's to compute chunk[0] + z^n chunk[1] + z^2n chunk[2] + ... // as ( chunk[-1] * z^n + chunk[-2] ) * z^n + chunk[-3] // (https://en.wikipedia.org/wiki/Horner%27s_method) - for chunk in self.unshifted.iter().rev() { + for chunk in self.elems.iter().rev() { res *= zeta_n; res.add_assign_mixed(chunk); } PolyComm { - unshifted: vec![res.into_affine()], - shifted: self.shifted, + elems: vec![res.into_affine()], } } } @@ -33,14 +31,13 @@ where F: Field, { /// Multiplies each blinding chunk of f with powers of zeta^n - /// Note that it ignores the shifted part. // TODO(mimoo): better name for this function pub fn chunk_blinding(&self, zeta_n: F) -> F { let mut res = F::zero(); // use Horner's to compute chunk[0] + z^n chunk[1] + z^2n chunk[2] + ... // as ( chunk[-1] * z^n + chunk[-2] ) * z^n + chunk[-3] // (https://en.wikipedia.org/wiki/Horner%27s_method) - for chunk in self.unshifted.iter().rev() { + for chunk in self.elems.iter().rev() { res *= zeta_n; res += chunk } diff --git a/poly-commitment/src/commitment.rs b/poly-commitment/src/commitment.rs index fe83d44dbf..31751d9a6d 100644 --- a/poly-commitment/src/commitment.rs +++ b/poly-commitment/src/commitment.rs @@ -39,9 +39,7 @@ use super::evaluation_proof::*; #[serde(bound = "C: CanonicalDeserialize + CanonicalSerialize")] pub struct PolyComm { #[serde_as(as = "Vec")] - pub unshifted: Vec, - #[serde_as(as = "Option")] - pub shifted: Option, + pub elems: Vec, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -54,8 +52,8 @@ where } impl PolyComm { - pub fn new(unshifted: Vec, shifted: Option) -> Self { - Self { unshifted, shifted } + pub fn new(elems: Vec) -> Self { + Self { elems } } } @@ -68,19 +66,18 @@ where F: FnMut(A) -> B, B: CanonicalDeserialize + CanonicalSerialize, { - let unshifted = self.unshifted.iter().map(|x| f(x.clone())).collect(); - let shifted = self.shifted.as_ref().map(|x| f(x.clone())); - PolyComm { unshifted, shifted } + let elems = self.elems.iter().map(|x| f(x.clone())).collect(); + PolyComm { elems } } - /// Returns the length of the unshifted commitment. + /// Returns the length of the commitment. pub fn len(&self) -> usize { - self.unshifted.len() + self.elems.len() } /// Returns `true` if the commitment is empty. 
pub fn is_empty(&self) -> bool { - self.unshifted.is_empty() && self.shifted.is_none() + self.elems.is_empty() } } @@ -90,21 +87,16 @@ impl PolyComm { &self, other: &PolyComm, ) -> Option> { - if self.unshifted.len() != other.unshifted.len() { + if self.elems.len() != other.elems.len() { return None; } - let unshifted = self - .unshifted + let elems = self + .elems .iter() - .zip(other.unshifted.iter()) + .zip(other.elems.iter()) .map(|(x, y)| (*x, *y)) .collect(); - let shifted = match (self.shifted, other.shifted) { - (Some(x), Some(y)) => Some((x, y)), - (None, None) => None, - (Some(_), None) | (None, Some(_)) => return None, - }; - Some(PolyComm { unshifted, shifted }) + Some(PolyComm { elems }) } } @@ -159,25 +151,20 @@ impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn add(self, other: &'a PolyComm) -> PolyComm { - let mut unshifted = vec![]; - let n1 = self.unshifted.len(); - let n2 = other.unshifted.len(); + let mut elems = vec![]; + let n1 = self.elems.len(); + let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.unshifted[i] + other.unshifted[i] + self.elems[i] + other.elems[i] } else if i < n1 { - self.unshifted[i] + self.elems[i] } else { - other.unshifted[i] + other.elems[i] }; - unshifted.push(pt); + elems.push(pt); } - let shifted = match (self.shifted, other.shifted) { - (None, _) => other.shifted, - (_, None) => self.shifted, - (Some(p1), Some(p2)) => Some(p1 + p2), - }; - PolyComm { unshifted, shifted } + PolyComm { elems } } } @@ -185,37 +172,27 @@ impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn sub(self, other: &'a PolyComm) -> PolyComm { - let mut unshifted = vec![]; - let n1 = self.unshifted.len(); - let n2 = other.unshifted.len(); + let mut elems = vec![]; + let n1 = self.elems.len(); + let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.unshifted[i] + (-other.unshifted[i]) + self.elems[i] + (-other.elems[i]) } else if i < n1 { - self.unshifted[i] + self.elems[i] } else { - other.unshifted[i] + other.elems[i] }; - unshifted.push(pt); + elems.push(pt); } - let shifted = match (self.shifted, other.shifted) { - (None, _) => other.shifted, - (_, None) => self.shifted, - (Some(p1), Some(p2)) => Some(p1 + (-p2)), - }; - PolyComm { unshifted, shifted } + PolyComm { elems } } } impl PolyComm { pub fn scale(&self, c: C::ScalarField) -> PolyComm { PolyComm { - unshifted: self - .unshifted - .iter() - .map(|g| g.mul(c).into_affine()) - .collect(), - shifted: self.shifted.map(|g| g.mul(c).into_affine()), + elems: self.elems.iter().map(|g| g.mul(c).into_affine()).collect(), } } @@ -229,41 +206,27 @@ impl PolyComm { assert_eq!(com.len(), elm.len()); if com.is_empty() || elm.is_empty() { - return Self::new(vec![C::zero()], None); + return Self::new(vec![C::zero()]); } let all_scalars: Vec<_> = elm.iter().map(|s| s.into_repr()).collect(); - let unshifted_size = Iterator::max(com.iter().map(|c| c.unshifted.len())).unwrap(); - let mut unshifted = Vec::with_capacity(unshifted_size); + let elems_size = Iterator::max(com.iter().map(|c| c.elems.len())).unwrap(); + let mut elems = Vec::with_capacity(elems_size); - for chunk in 0..unshifted_size { + for chunk in 0..elems_size { let (points, scalars): (Vec<_>, Vec<_>) = com .iter() .zip(&all_scalars) // get rid of scalars that don't have an associated chunk - .filter_map(|(com, scalar)| com.unshifted.get(chunk).map(|c| (c, scalar))) + .filter_map(|(com, 
scalar)| com.elems.get(chunk).map(|c| (c, scalar))) .unzip(); let chunk_msm = VariableBaseMSM::multi_scalar_mul::(&points, &scalars); - unshifted.push(chunk_msm.into_affine()); + elems.push(chunk_msm.into_affine()); } - let mut shifted_pairs = com - .iter() - .zip(all_scalars) - // get rid of commitments without a `shifted` part - .filter_map(|(c, s)| c.shifted.map(|c| (c, s))) - .peekable(); - - let shifted = if shifted_pairs.peek().is_none() { - None - } else { - let (points, scalars): (Vec<_>, Vec<_>) = shifted_pairs.unzip(); - Some(VariableBaseMSM::multi_scalar_mul(&points, &scalars).into_affine()) - }; - - Self::new(unshifted, shifted) + Self::new(elems) } } @@ -343,10 +306,7 @@ pub fn absorb_commitment< sponge: &mut EFqSponge, commitment: &PolyComm, ) { - sponge.absorb_g(&commitment.unshifted); - if let Some(shifted) = commitment.shifted.as_ref() { - sponge.absorb_g(&[shifted.clone()]); - } + sponge.absorb_g(&commitment.elems); } /// A useful trait extending AffineCurve for commitments. @@ -443,21 +403,17 @@ pub fn to_group(m: &G::Map, t: ::BaseField /// Computes the linearization of the evaluations of a (potentially split) polynomial. /// Each given `poly` is associated to a matrix where the rows represent the number of evaluated points, /// and the columns represent potential segments (if a polynomial was split in several parts). -/// Note that if one of the polynomial comes specified with a degree bound, -/// the evaluation for the last segment is potentially shifted to meet the proof. #[allow(clippy::type_complexity)] pub fn combined_inner_product( - evaluation_points: &[F], polyscale: &F, evalscale: &F, // TODO(mimoo): needs a type that can get you evaluations or segments - polys: &[(Vec>, Option)], - srs_length: usize, + polys: &[Vec>], ) -> F { let mut res = F::zero(); let mut xi_i = F::one(); - for (evals_tr, shifted) in polys.iter().filter(|(evals_tr, _)| !evals_tr[0].is_empty()) { + for evals_tr in polys.iter().filter(|evals_tr| !evals_tr[0].is_empty()) { // transpose the evaluations let evals = (0..evals_tr[0].len()) .map(|i| evals_tr.iter().map(|v| v[i]).collect::>()) @@ -470,23 +426,6 @@ pub fn combined_inner_product( res += &(xi_i * term); xi_i *= polyscale; } - - if let Some(m) = shifted { - // polyscale^i sum_j evalscale^j elm_j^{N - m} f(elm_j) - let last_evals = if *m >= evals.len() * srs_length { - vec![F::zero(); evaluation_points.len()] - } else { - evals[evals.len() - 1].clone() - }; - let shifted_evals: Vec<_> = evaluation_points - .iter() - .zip(&last_evals) - .map(|(elm, f_elm)| elm.pow([(srs_length - (*m) % srs_length) as u64]) * f_elm) - .collect(); - - res += &(xi_i * DensePolynomial::::eval_polynomial(&shifted_evals, *evalscale)); - xi_i *= polyscale; - } } res } @@ -501,9 +440,6 @@ where /// Contains an evaluation table pub evaluations: Vec>, - - /// optional degree bound - pub degree_bound: Option, } /// Contains the batch evaluation @@ -535,33 +471,17 @@ pub fn combine_commitments( ) { let mut xi_i = G::ScalarField::one(); - for Evaluation { - commitment, - degree_bound, - .. - } in evaluations + for Evaluation { commitment, .. 
} in evaluations .iter() - .filter(|x| !x.commitment.unshifted.is_empty()) + .filter(|x| !x.commitment.elems.is_empty()) { // iterating over the polynomial segments - for comm_ch in &commitment.unshifted { + for comm_ch in &commitment.elems { scalars.push(rand_base * xi_i); points.push(*comm_ch); xi_i *= polyscale; } - - if let Some(_m) = degree_bound { - if let Some(comm_ch) = commitment.shifted { - if !comm_ch.is_zero() { - // polyscale^i sum_j evalscale^j elm_j^{N - m} f(elm_j) - scalars.push(rand_base * xi_i); - points.push(comm_ch); - - xi_i *= polyscale; - } - } - } } } @@ -579,13 +499,9 @@ pub fn combine_evaluations( vec![G::ScalarField::zero(); num_evals] }; - for Evaluation { - evaluations, - degree_bound, - .. - } in evaluations + for Evaluation { evaluations, .. } in evaluations .iter() - .filter(|x| !x.commitment.unshifted.is_empty()) + .filter(|x| !x.commitment.elems.is_empty()) { // iterating over the polynomial segments for j in 0..evaluations[0].len() { @@ -594,10 +510,6 @@ pub fn combine_evaluations( } xi_i *= polyscale; } - - if let Some(_m) = degree_bound { - todo!("Misaligned chunked commitments are not supported") - } } acc @@ -622,10 +534,9 @@ impl SRSTrait for SRS { &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, rng: &mut (impl RngCore + CryptoRng), ) -> BlindedCommitment { - self.mask(self.commit_non_hiding(plnm, num_chunks, max), rng) + self.mask(self.commit_non_hiding(plnm, num_chunks), rng) } /// Turns a non-hiding polynomial commitment into a hiding polynomial commitment. Transforms each given `<comm>` into `(<comm> + wH, w)` with a random `w` per commitment. @@ -660,62 +571,34 @@ impl SRSTrait for SRS { /// This function commits a polynomial using the SRS' basis of size `n`. /// - `plnm`: polynomial to commit to with max size of sections - /// - `num_chunks`: the number of unshifted commitments to be included in the output polynomial commitment - /// - `max`: maximal degree of the polynomial (not inclusive), if none, no degree bound - /// The function returns an unbounded commitment vector (which splits the commitment into several commitments of size at most `n`), - /// as well as an optional bounded commitment (if `max` is set). - /// Note that a maximum degree cannot (and doesn't need to) be enforced via a shift if `max` is a multiple of `n`. + /// - `num_chunks`: the number of commitments to be included in the output polynomial commitment + /// The function returns a commitment vector + /// (which splits the commitment into several commitments of size at most `n`).
fn commit_non_hiding( &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, ) -> PolyComm { let is_zero = plnm.is_zero(); - let basis_len = self.g.len(); - let coeffs_len = plnm.coeffs.len(); - let coeffs: Vec<_> = plnm.iter().map(|c| c.into_repr()).collect(); // chunk while committing - let mut unshifted = vec![]; + let mut elems = vec![]; if is_zero { - unshifted.push(G::zero()); + elems.push(G::zero()); } else { coeffs.chunks(self.g.len()).for_each(|coeffs_chunk| { let chunk = VariableBaseMSM::multi_scalar_mul(&self.g, coeffs_chunk); - unshifted.push(chunk.into_affine()); + elems.push(chunk.into_affine()); }); } - for _ in unshifted.len()..num_chunks { - unshifted.push(G::zero()); + for _ in elems.len()..num_chunks { + elems.push(G::zero()); } - // committing only last chunk shifted to the right edge of SRS - let shifted = match max { - None => None, - Some(max) => { - let start = max - (max % basis_len); - if is_zero || start >= coeffs_len { - // polynomial is small, nothing was shifted - Some(G::zero()) - } else if max % basis_len == 0 { - // the number of chunks should tell the verifier everything they need to know - None - } else { - // we shift the last chunk to the right as proof of the degree bound - let shifted = VariableBaseMSM::multi_scalar_mul( - &self.g[basis_len - (max % basis_len)..], - &coeffs[start..], - ); - Some(shifted.into_affine()) - } - } - }; - - PolyComm:: { unshifted, shifted } + PolyComm:: { elems } } fn commit_evaluations_non_hiding( @@ -968,7 +851,7 @@ mod tests { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, num_chunks, None) + srs.commit_non_hiding(&p, num_chunks) }) .collect(); @@ -982,21 +865,24 @@ mod tests { } #[test] + // This test uses four chunks. fn test_chunked_lagrange_commitments() { let n = 64; + let divisor = 4; let domain = D::::new(n).unwrap(); - let mut srs = SRS::::create(n / 2); + let mut srs = SRS::::create(n / divisor); srs.add_lagrange_basis(domain); let num_chunks = domain.size() / srs.g.len(); + assert!(num_chunks == divisor); let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, num_chunks, None) + srs.commit_non_hiding(&p, num_chunks) }) .collect(); @@ -1010,6 +896,10 @@ mod tests { } #[test] + // TODO @volhovm I don't understand what this test does and + // whether it is worth leaving. + /// Same as test_chunked_lagrange_commitments, but with a slight + /// offset in the SRS fn test_offset_chunked_lagrange_commitments() { let n = 64; let domain = D::::new(n).unwrap(); let mut srs = SRS::::create(n / 2 + 1); srs.add_lagrange_basis(domain); + // Is this even taken into account?... 
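+ // A worked instance of the ceiling division below: with n = 64 and
+ // srs.g.len() = 64 / 2 + 1 = 33, num_chunks = (64 + 33 - 1) / 33 = 96 / 33 = 2
+ // under integer division, matching the assert that follows.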
let num_chunks = (domain.size() + srs.g.len() - 1) / srs.g.len(); + assert!(num_chunks == 2); let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, num_chunks, Some(64)) + srs.commit_non_hiding(&p, num_chunks) // this requires max = Some(64) }) .collect(); @@ -1048,10 +940,9 @@ mod tests { let srs = SRS::::create(20); let rng = &mut StdRng::from_seed([0u8; 32]); - // commit the two polynomials (and upperbound the second one) - let commitment = srs.commit(&poly1, 1, None, rng); - let upperbound = poly2.degree() + 1; - let bounded_commitment = srs.commit(&poly2, 1, Some(upperbound), rng); + // commit the two polynomials + let commitment1 = srs.commit(&poly1, 1, rng); + let commitment2 = srs.commit(&poly2, 1, rng); // create an aggregated opening proof let (u, v) = (Fp::rand(rng), Fp::rand(rng)); @@ -1061,18 +952,15 @@ mod tests { let polys: Vec<( DensePolynomialOrEvaluations<_, Radix2EvaluationDomain<_>>, - Option, PolyComm<_>, )> = vec![ ( DensePolynomialOrEvaluations::DensePolynomial(&poly1), - None, - commitment.blinders, + commitment1.blinders, ), ( DensePolynomialOrEvaluations::DensePolynomial(&poly2), - Some(upperbound), - bounded_commitment.blinders, + commitment2.blinders, ), ]; let elm = vec![Fp::rand(rng), Fp::rand(rng)]; @@ -1110,40 +998,21 @@ mod tests { let evaluations = vec![ Evaluation { - commitment: commitment.commitment, + commitment: commitment1.commitment, evaluations: poly1_chunked_evals, - degree_bound: None, }, Evaluation { - commitment: bounded_commitment.commitment, + commitment: commitment2.commitment, evaluations: poly2_chunked_evals, - degree_bound: Some(upperbound), }, ]; let combined_inner_product = { let es: Vec<_> = evaluations .iter() - .map( - |Evaluation { - commitment, - evaluations, - degree_bound, - }| { - let bound: Option = (|| { - let b = (*degree_bound)?; - let x = commitment.shifted?; - if x.is_zero() { - None - } else { - Some(b) - } - })(); - (evaluations.clone(), bound) - }, - ) + .map(|Evaluation { evaluations, .. 
}| evaluations.clone()) .collect(); - combined_inner_product(&elm, &v, &u, &es, srs.g.len()) + combined_inner_product(&v, &u, &es) }; // verify the proof @@ -1191,8 +1060,8 @@ pub mod caml { { fn from(polycomm: PolyComm) -> Self { Self { - unshifted: polycomm.unshifted.into_iter().map(Into::into).collect(), - shifted: polycomm.shifted.map(Into::into), + unshifted: polycomm.elems.into_iter().map(Into::into).collect(), + shifted: None, } } } @@ -1204,8 +1073,8 @@ pub mod caml { { fn from(polycomm: &'a PolyComm) -> Self { Self { - unshifted: polycomm.unshifted.iter().map(Into::into).collect(), - shifted: polycomm.shifted.as_ref().map(Into::into), + unshifted: polycomm.elems.iter().map(Into::into).collect(), + shifted: None, } } } @@ -1215,9 +1084,12 @@ pub mod caml { G: AffineCurve + From, { fn from(camlpolycomm: CamlPolyComm) -> PolyComm { + assert!( + camlpolycomm.shifted.is_none(), + "mina#14628: Shifted commitments are deprecated and must not be used" + ); PolyComm { - unshifted: camlpolycomm.unshifted.into_iter().map(Into::into).collect(), - shifted: camlpolycomm.shifted.map(Into::into), + elems: camlpolycomm.unshifted.into_iter().map(Into::into).collect(), } } } @@ -1227,9 +1099,13 @@ pub mod caml { G: AffineCurve + From<&'a CamlG> + From, { fn from(camlpolycomm: &'a CamlPolyComm) -> PolyComm { + assert!( + camlpolycomm.shifted.is_none(), + "mina#14628: Shifted commitments are deprecated and must not be used" + ); PolyComm { - unshifted: camlpolycomm.unshifted.iter().map(Into::into).collect(), - shifted: camlpolycomm.shifted.as_ref().map(Into::into), + //FIXME something with as_ref() + elems: camlpolycomm.unshifted.iter().map(Into::into).collect(), } } } diff --git a/poly-commitment/src/evaluation_proof.rs b/poly-commitment/src/evaluation_proof.rs index 0b15615b66..5b42c53206 100644 --- a/poly-commitment/src/evaluation_proof.rs +++ b/poly-commitment/src/evaluation_proof.rs @@ -12,16 +12,11 @@ use serde::{Deserialize, Serialize}; use serde_with::serde_as; use std::iter::Iterator; -enum OptShiftedPolynomial
<P>
{ - Unshifted(P), - Shifted(P, usize), -} - // A formal sum of the form // `s_0 * p_0 + ... s_n * p_n` -// where each `s_i` is a scalar and each `p_i` is an optionally shifted polynomial. +// where each `s_i` is a scalar and each `p_i` is a polynomial. #[derive(Default)] -struct ScaledChunkedPolynomial(Vec<(F, OptShiftedPolynomial
<P>
)>); +struct ScaledChunkedPolynomial(Vec<(F, P)>); pub enum DensePolynomialOrEvaluations<'a, F: FftField, D: EvaluationDomain> { DensePolynomial(&'a DensePolynomial), @@ -29,13 +24,8 @@ pub enum DensePolynomialOrEvaluations<'a, F: FftField, D: EvaluationDomain> { } impl ScaledChunkedPolynomial { - fn add_unshifted(&mut self, scale: F, p: P) { - self.0.push((scale, OptShiftedPolynomial::Unshifted(p))) - } - - fn add_shifted(&mut self, scale: F, shift: usize, p: P) { - self.0 - .push((scale, OptShiftedPolynomial::Shifted(p, shift))) + fn add_poly(&mut self, scale: F, p: P) { + self.0.push((scale, p)) } } @@ -48,18 +38,8 @@ impl<'a, F: Field> ScaledChunkedPolynomial { .par_iter() .map(|(scale, segment)| { let scale = *scale; - match segment { - OptShiftedPolynomial::Unshifted(segment) => { - let v = segment.par_iter().map(|x| scale * *x).collect(); - DensePolynomial::from_coefficients_vec(v) - } - OptShiftedPolynomial::Shifted(segment, shift) => { - let mut v: Vec<_> = segment.par_iter().map(|x| scale * *x).collect(); - let mut res = vec![F::zero(); *shift]; - res.append(&mut v); - DensePolynomial::from_coefficients_vec(res) - } - } + let v = segment.par_iter().map(|x| scale * *x).collect(); + DensePolynomial::from_coefficients_vec(v) }) .collect(); @@ -85,7 +65,7 @@ pub fn combine_polys>( // If/when we change this, we can add more complicated code to handle different degrees. let degree = plnms .iter() - .fold(None, |acc, (p, _, _)| match p { + .fold(None, |acc, (p, _)| match p { DensePolynomialOrEvaluations::DensePolynomial(_) => acc, DensePolynomialOrEvaluations::Evaluations(_, d) => { if let Some(n) = acc { @@ -97,13 +77,12 @@ pub fn combine_polys>( .unwrap_or(0); vec![G::ScalarField::zero(); degree] }; - // let mut plnm_chunks: Vec<(G::ScalarField, OptShiftedPolynomial<_>)> = vec![]; let mut omega = G::ScalarField::zero(); let mut scale = G::ScalarField::one(); // iterating over polynomials in the batch - for (p_i, degree_bound, omegas) in plnms { + for (p_i, omegas) in plnms { match p_i { DensePolynomialOrEvaluations::Evaluations(evals_i, sub_domain) => { let stride = evals_i.evals.len() / sub_domain.size(); @@ -114,41 +93,23 @@ pub fn combine_polys>( .for_each(|(i, x)| { *x += scale * evals[i * stride]; }); - for j in 0..omegas.unshifted.len() { - omega += &(omegas.unshifted[j] * scale); + for j in 0..omegas.elems.len() { + omega += &(omegas.elems[j] * scale); scale *= &polyscale; } - // We assume here that we have no shifted segment. 
- // TODO: Remove shifted } DensePolynomialOrEvaluations::DensePolynomial(p_i) => { let mut offset = 0; // iterating over chunks of the polynomial - if let Some(m) = degree_bound { - assert!(p_i.coeffs.len() <= m + 1); - } else { - assert!(omegas.shifted.is_none()); - } - for j in 0..omegas.unshifted.len() { + for j in 0..omegas.elems.len() { let segment = &p_i.coeffs[std::cmp::min(offset, p_i.coeffs.len()) ..std::cmp::min(offset + srs_length, p_i.coeffs.len())]; - // always mixing in the unshifted segments - plnm.add_unshifted(scale, segment); + plnm.add_poly(scale, segment); - omega += &(omegas.unshifted[j] * scale); + omega += &(omegas.elems[j] * scale); scale *= &polyscale; offset += srs_length; - if let Some(m) = degree_bound { - if offset >= *m { - if offset > *m { - // mixing in the shifted segment since degree is bounded - plnm.add_shifted(scale, srs_length - m % srs_length, segment); - } - omega += &(omegas.shifted.unwrap() * scale); - scale *= &polyscale; - } - } } } } @@ -183,15 +144,11 @@ impl SRS { &self, group_map: &G::Map, // TODO(mimoo): create a type for that entry - plnms: &[( - DensePolynomialOrEvaluations, - Option, - PolyComm, - )], // vector of polynomial with optional degree bound and commitment randomness - elm: &[G::ScalarField], // vector of evaluation points - polyscale: G::ScalarField, // scaling factor for polynoms - evalscale: G::ScalarField, // scaling factor for evaluation point powers - mut sponge: EFqSponge, // sponge + plnms: PolynomialsToCombine, // vector of polynomial with commitment randomness + elm: &[G::ScalarField], // vector of evaluation points + polyscale: G::ScalarField, // scaling factor for polynoms + evalscale: G::ScalarField, // scaling factor for evaluation point powers + mut sponge: EFqSponge, // sponge rng: &mut RNG, ) -> OpeningProof where @@ -362,11 +319,7 @@ impl SRS { #[allow(clippy::many_single_char_names)] pub fn prover_polynomials_to_verifier_evaluations>( &self, - plnms: &[( - DensePolynomialOrEvaluations, - Option, - PolyComm, - )], // vector of polynomial with optional degree bound and commitment randomness + plnms: PolynomialsToCombine, elm: &[G::ScalarField], // vector of evaluation points ) -> Vec> where @@ -375,7 +328,7 @@ impl SRS { plnms .iter() .enumerate() - .map(|(i, (poly_or_evals, degree_bound, blinders))| { + .map(|(i, (poly_or_evals, blinders))| { let poly = match poly_or_evals { DensePolynomialOrEvaluations::DensePolynomial(poly) => (*poly).clone(), DensePolynomialOrEvaluations::Evaluations(evals, _) => { @@ -383,9 +336,8 @@ impl SRS { } }; let chunked_polynomial = - poly.to_chunked_polynomial(blinders.unshifted.len(), self.g.len()); - let chunked_commitment = - { self.commit_non_hiding(&poly, blinders.unshifted.len(), None) }; + poly.to_chunked_polynomial(blinders.elems.len(), self.g.len()); + let chunked_commitment = { self.commit_non_hiding(&poly, blinders.elems.len()) }; let masked_commitment = match self.mask_custom(chunked_commitment, blinders) { Ok(comm) => comm, Err(err) => panic!("Error at index {i}: {err}"), @@ -398,8 +350,6 @@ impl SRS { commitment: masked_commitment.commitment, evaluations: chunked_evals, - - degree_bound: *degree_bound, } }) .collect() @@ -433,11 +383,7 @@ impl< fn open::ScalarField>>( srs: &Self::SRS, group_map: &::Map, - plnms: &[( - DensePolynomialOrEvaluations<::ScalarField, D>, - Option, - PolyComm<::ScalarField>, - )], // vector of polynomial with optional degree bound and commitment randomness + plnms: PolynomialsToCombine, elm: &[::ScalarField], // vector of evaluation points 
polyscale: ::ScalarField, // scaling factor for polynoms evalscale: ::ScalarField, // scaling factor for evaluation point powers diff --git a/poly-commitment/src/lib.rs b/poly-commitment/src/lib.rs index 4d7bac7913..fb7f7491ca 100644 --- a/poly-commitment/src/lib.rs +++ b/poly-commitment/src/lib.rs @@ -37,7 +37,6 @@ pub trait SRS { &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, rng: &mut (impl RngCore + CryptoRng), ) -> BlindedCommitment; @@ -60,15 +59,13 @@ pub trait SRS { /// This function commits a polynomial using the SRS' basis of size `n`. /// - `plnm`: polynomial to commit to with max size of sections - /// - `max`: maximal degree of the polynomial (not inclusive), if none, no degree bound - /// The function returns an unbounded commitment vector (which splits the commitment into several commitments of size at most `n`), - /// as well as an optional bounded commitment (if `max` is set). - /// Note that a maximum degree cannot (and doesn't need to) be enforced via a shift if `max` is a multiple of `n`. + /// - `num_chunks`: the number of commitments to be included in the output polynomial commitment + /// The function returns a commitment vector + /// (which splits the commitment into several commitments of size at most `n`). fn commit_non_hiding( &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, ) -> PolyComm; fn commit_evaluations_non_hiding( @@ -86,9 +83,9 @@ pub trait SRS { } #[allow(type_alias_bounds)] +/// Vector of pairs (polynomial itself, omegas aka commitment blinders). type PolynomialsToCombine<'a, G: CommitmentCurve, D: EvaluationDomain> = &'a [( DensePolynomialOrEvaluations<'a, G::ScalarField, D>, - Option, PolyComm, )]; diff --git a/poly-commitment/src/pairing_proof.rs b/poly-commitment/src/pairing_proof.rs index 913cf15d0f..1a581e538b 100644 --- a/poly-commitment/src/pairing_proof.rs +++ b/poly-commitment/src/pairing_proof.rs @@ -1,5 +1,5 @@ use crate::commitment::*; -use crate::evaluation_proof::{combine_polys, DensePolynomialOrEvaluations}; +use crate::evaluation_proof::combine_polys; use crate::srs::SRS; use crate::{CommitmentError, PolynomialsToCombine, SRS as SRSTrait}; use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine}; @@ -94,11 +94,7 @@ impl< fn open::ScalarField>>( srs: &Self::SRS, _group_map: &::Map, - plnms: &[( - DensePolynomialOrEvaluations<::ScalarField, D>, - Option, - PolyComm<::ScalarField>, - )], // vector of polynomial with optional degree bound and commitment randomness + plnms: PolynomialsToCombine, elm: &[::ScalarField], // vector of evaluation points polyscale: ::ScalarField, // scaling factor for polynoms _evalscale: ::ScalarField, // scaling factor for evaluation point powers @@ -164,10 +160,9 @@ impl< &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, rng: &mut (impl RngCore + CryptoRng), ) -> BlindedCommitment { - self.full_srs.commit(plnm, num_chunks, max, rng) + self.full_srs.commit(plnm, num_chunks, rng) } fn mask_custom( @@ -190,9 +185,8 @@ impl< &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, ) -> PolyComm { - self.full_srs.commit_non_hiding(plnm, num_chunks, max) + self.full_srs.commit_non_hiding(plnm, num_chunks) } fn commit_evaluations_non_hiding( @@ -282,10 +276,7 @@ impl< quotient }; - let quotient = srs - .full_srs - .commit_non_hiding(&quotient_poly, 1, None) - .unshifted[0]; + let quotient = srs.full_srs.commit_non_hiding(&quotient_poly, 1).elems[0]; Some(PairingProof { quotient, @@ -317,12 +308,12 @@ impl< let blinding_commitment =
srs.full_srs.h.mul(self.blinding); let divisor_commitment = srs .verifier_srs - .commit_non_hiding(&divisor_polynomial(elm), 1, None) - .unshifted[0]; + .commit_non_hiding(&divisor_polynomial(elm), 1) + .elems[0]; let eval_commitment = srs .full_srs - .commit_non_hiding(&eval_polynomial(elm, &evals), 1, None) - .unshifted[0] + .commit_non_hiding(&eval_polynomial(elm, &evals), 1) + .elems[0] .into_projective(); let numerator_commitment = { poly_commitment - eval_commitment - blinding_commitment }; @@ -380,18 +371,17 @@ mod tests { let comms: Vec<_> = polynomials .iter() - .map(|p| srs.full_srs.commit(p, 1, None, rng)) + .map(|p| srs.full_srs.commit(p, 1, rng)) .collect(); - let polynomials_and_blinders: Vec<(DensePolynomialOrEvaluations<_, D<_>>, _, _)> = - polynomials - .iter() - .zip(comms.iter()) - .map(|(p, comm)| { - let p = DensePolynomialOrEvaluations::DensePolynomial(p); - (p, None, comm.blinders.clone()) - }) - .collect(); + let polynomials_and_blinders: Vec<(DensePolynomialOrEvaluations<_, D<_>>, _)> = polynomials + .iter() + .zip(comms.iter()) + .map(|(p, comm)| { + let p = DensePolynomialOrEvaluations::DensePolynomial(p); + (p, comm.blinders.clone()) + }) + .collect(); let evaluation_points = vec![ScalarField::rand(rng), ScalarField::rand(rng)]; @@ -409,7 +399,6 @@ mod tests { Evaluation { commitment: commitment.commitment, evaluations, - degree_bound: None, } }) .collect(); diff --git a/poly-commitment/src/srs.rs b/poly-commitment/src/srs.rs index c4cd29251b..355c420b66 100644 --- a/poly-commitment/src/srs.rs +++ b/poly-commitment/src/srs.rs @@ -8,6 +8,7 @@ use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use blake2::{Blake2b512, Digest}; use groupmap::GroupMap; +use rayon::prelude::*; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use std::array; @@ -177,11 +178,11 @@ impl SRS { // By computing each of these, and recollecting the terms as a vector of polynomial // commitments, we obtain a chunked commitment to the L_i polynomials. 
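+ // Illustrative numbers (an assumption, not taken from the surrounding code): with a
+ // domain of size n = 8 and an SRS of size 4, each L_i splits into ceil(8 / 4) = 2
+ // chunks, so every Lagrange-basis PolyComm built below carries 2 `elems`.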
let srs_size = self.g.len(); - let num_unshifteds = (n + srs_size - 1) / srs_size; - let mut unshifted = Vec::with_capacity(num_unshifteds); + let num_elems = (n + srs_size - 1) / srs_size; + let mut elems = Vec::with_capacity(num_elems); // For each chunk - for i in 0..num_unshifteds { + for i in 0..num_elems { // Initialize the vector with zero curve points let mut lg: Vec<::Projective> = vec![::Projective::zero(); n]; @@ -194,36 +195,13 @@ impl SRS { // Apply the IFFT domain.ifft_in_place(&mut lg); ::Projective::batch_normalization(lg.as_mut_slice()); - // Append the 'partial Langrange polynomials' to the vector of unshifted chunks - unshifted.push(lg) + // Append the 'partial Lagrange polynomials' to the vector of chunks + elems.push(lg) } - // If the srs size does not exactly divide the domain size - let shifted: Option::Projective>> = - if n < srs_size || num_unshifteds * srs_size == n { - None - } else { - // Initialize the vector to zero - let mut lg: Vec<::Projective> = - vec![::Projective::zero(); n]; - // Overwrite the terms corresponding to the final chunk with the SRS curve points - // shifted to the right - let start_offset = (num_unshifteds - 1) * srs_size; - let num_terms = n - start_offset; - let srs_start_offset = srs_size - num_terms; - for j in 0..num_terms { - lg[start_offset + j] = self.g[srs_start_offset + j].into_projective() } - // Apply the IFFT - domain.ifft_in_place(&mut lg); - ::Projective::batch_normalization(lg.as_mut_slice()); - Some(lg) - }; - let chunked_commitments: Vec<_> = (0..n) .map(|i| PolyComm { - unshifted: unshifted.iter().map(|v| v[i].into_affine()).collect(), - shifted: shifted.as_ref().map(|v| v[i].into_affine()), + elems: elems.iter().map(|v| v[i].into_affine()).collect(), }) .collect(); self.lagrange_bases.insert(n, chunked_commitments); @@ -289,3 +267,37 @@ where } } } + +impl SRS +where + ::Map: Sync, + G::BaseField: PrimeField, +{ + /// This function creates an SRS instance for circuits with at most `depth` rows. + pub fn create_parallel(depth: usize) -> Self { + let m = G::Map::setup(); + + let g: Vec<_> = (0..depth) + .into_par_iter() + .map(|i| { + let mut h = Blake2b512::new(); + h.update((i as u32).to_be_bytes()); + point_of_random_bytes(&m, &h.finalize()) + }) + .collect(); + + const MISC: usize = 1; + let [h]: [G; MISC] = array::from_fn(|i| { + let mut h = Blake2b512::new(); + h.update("srs_misc".as_bytes()); + h.update((i as u32).to_be_bytes()); + point_of_random_bytes(&m, &h.finalize()) + }); + + SRS { + g, + h, + lagrange_bases: HashMap::new(), + } + } +} diff --git a/poly-commitment/src/tests/batch_15_wires.rs b/poly-commitment/src/tests/batch_15_wires.rs index 570e8e8752..4acf9d1e5d 100644 --- a/poly-commitment/src/tests/batch_15_wires.rs +++ b/poly-commitment/src/tests/batch_15_wires.rs @@ -62,6 +62,8 @@ where } }) .collect::>(); + + // TODO @volhovm remove? 
let bounds = a .iter() .enumerate() @@ -82,7 +84,7 @@ where let comm = (0..a.len()) .map(|i| { ( - srs.commit(&a[i].clone(), num_chunks, bounds[i], rng), + srs.commit(&a[i].clone(), num_chunks, rng), x.iter() .map(|xx| a[i].to_chunked_polynomial(1, size).evaluate_chunks(*xx)) .collect::>(), @@ -96,12 +98,10 @@ where let polys: Vec<( DensePolynomialOrEvaluations<_, Radix2EvaluationDomain<_>>, _, - _, )> = (0..a.len()) .map(|i| { ( DensePolynomialOrEvaluations::DensePolynomial(&a[i]), - bounds[i], (comm[i].0).blinders.clone(), ) }) @@ -120,20 +120,9 @@ where let combined_inner_product = { let es: Vec<_> = comm .iter() - .map(|(commitment, evaluations, degree_bound)| { - let bound: Option = (|| { - let b = (*degree_bound)?; - let x = commitment.commitment.shifted?; - if x.is_zero() { - None - } else { - Some(b) - } - })(); - (evaluations.clone(), bound) - }) + .map(|(_, evaluations, _)| evaluations.clone()) .collect(); - combined_inner_product(&x, &polymask, &evalmask, &es, srs.g.len()) + combined_inner_product(&polymask, &evalmask, &es) }; ( @@ -161,7 +150,6 @@ where .map(|poly| Evaluation { commitment: (poly.0).commitment.clone(), evaluations: poly.1.clone(), - degree_bound: poly.2, }) .collect::>(), opening: &proof.5, diff --git a/poly-commitment/src/tests/commitment.rs b/poly-commitment/src/tests/commitment.rs index dedcd0ad6e..7971b99401 100644 --- a/poly-commitment/src/tests/commitment.rs +++ b/poly-commitment/src/tests/commitment.rs @@ -27,8 +27,6 @@ use std::time::{Duration, Instant}; pub struct Commitment { /// the commitment itself, potentially in chunks chunked_commitment: PolyComm, - /// an optional degree bound - bound: Option, } /// An evaluated commitment (given a number of evaluation points) @@ -76,7 +74,6 @@ impl AggregatedEvaluationProof { /// This function converts an aggregated evaluation proof into something the verify API understands pub fn verify_type( &self, - srs: &SRS, ) -> BatchEvaluationProof, OpeningProof> { let mut coms = vec![]; @@ -85,39 +82,15 @@ impl AggregatedEvaluationProof { coms.push(Evaluation { commitment: eval_com.commit.chunked_commitment.clone(), evaluations: eval_com.chunked_evals.clone(), - degree_bound: eval_com.commit.bound, }); } let combined_inner_product = { let es: Vec<_> = coms .iter() - .map( - |Evaluation { - commitment, - evaluations, - degree_bound, - }| { - let bound: Option = (|| { - let b = (*degree_bound)?; - let x = commitment.shifted?; - if x.is_zero() { - None - } else { - Some(b) - } - })(); - (evaluations.clone(), bound) - }, - ) + .map(|Evaluation { evaluations, .. }| evaluations.clone()) .collect(); - combined_inner_product( - &self.eval_points, - &self.polymask, - &self.evalmask, - &es, - srs.g.len(), - ) + combined_inner_product(&self.polymask, &self.evalmask, &es) }; BatchEvaluationProof { @@ -156,28 +129,23 @@ fn test_randomised(mut rng: &mut RNG) { // create 11 polynomials of random degree (of at most 500) // and commit to them let mut commitments = vec![]; - for i in 0..11 { + for _ in 0..11 { let len: usize = rng.gen(); let len = len % 500; + // TODO @volhovm maybe remove the second case. 
+ // every other polynomial is upperbounded let poly = if len == 0 { DensePolynomial::::zero() } else { DensePolynomial::::rand(len, &mut rng) }; - // every other polynomial is upperbounded - let bound = if i % 2 == 0 { - Some(poly.coeffs.len()) - } else { - None - }; - // create commitments for each polynomial, and evaluate each polynomial at the 7 random points let timer = Instant::now(); let BlindedCommitment { commitment: chunked_commitment, blinders: chunked_blinding, - } = srs.commit(&poly, num_chunks, bound, &mut rng); + } = srs.commit(&poly, num_chunks, &mut rng); time_commit += timer.elapsed(); let mut chunked_evals = vec![]; @@ -188,10 +156,7 @@ fn test_randomised(mut rng: &mut RNG) { ); } - let commit = Commitment { - chunked_commitment, - bound, - }; + let commit = Commitment { chunked_commitment }; let eval_commit = EvaluatedCommitment { commit, @@ -209,13 +174,11 @@ fn test_randomised(mut rng: &mut RNG) { #[allow(clippy::type_complexity)] let mut polynomials: Vec<( DensePolynomialOrEvaluations>, - Option, PolyComm<_>, )> = vec![]; for c in &commitments { polynomials.push(( DensePolynomialOrEvaluations::DensePolynomial(&c.poly), - c.eval_commit.commit.bound, c.chunked_blinding.clone(), )); } @@ -257,7 +220,7 @@ fn test_randomised(mut rng: &mut RNG) { let timer = Instant::now(); // batch verify all the proofs - let mut batch: Vec<_> = proofs.iter().map(|p| p.verify_type(&srs)).collect(); + let mut batch: Vec<_> = proofs.iter().map(|p| p.verify_type()).collect(); assert!(srs.verify::, _>(&group_map, &mut batch, &mut rng)); // TODO: move to bench
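+ // A minimal sketch of the simplified commit/open flow after this change; the
+ // concrete names (`VestaG`, `Fp`, SRS size 16) are illustrative assumptions,
+ // not taken from this diff:
+ //
+ //     let rng = &mut StdRng::from_seed([0u8; 32]);
+ //     let srs = SRS::<VestaG>::create(16);
+ //     let poly = DensePolynomial::<Fp>::rand(31, rng); // 32 coeffs -> 2 chunks of 16
+ //     let BlindedCommitment { commitment, blinders } = srs.commit(&poly, 2, rng);
+ //     assert_eq!(commitment.elems.len(), 2); // one group element per chunk, no `shifted` part
+ //     // entries for `open` are now (polynomial, blinders) pairs, with no degree bound
+ //     let plnms = vec![(DensePolynomialOrEvaluations::DensePolynomial(&poly), blinders)];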