From 0b016bcbad85c21ede00ea1f6c2ef805665b0254 Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Wed, 9 Oct 2024 09:52:56 +0200 Subject: [PATCH 01/30] copy past verif from msm --- o1vm/src/pickles/verifier.rs | 335 +++++++++++++++++++++++++++++++++++ 1 file changed, 335 insertions(+) create mode 100644 o1vm/src/pickles/verifier.rs diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs new file mode 100644 index 0000000000..74d0cd0769 --- /dev/null +++ b/o1vm/src/pickles/verifier.rs @@ -0,0 +1,335 @@ +#![allow(clippy::type_complexity)] +#![allow(clippy::boxed_local)] + +use crate::logup::LookupTableID; +use ark_ff::{Field, Zero}; +use ark_poly::{ + univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, + Radix2EvaluationDomain as R2D, +}; +use rand::thread_rng; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +use kimchi::{ + circuits::{ + berkeley_columns::BerkeleyChallenges, + domains::EvaluationDomains, + expr::{Constants, Expr, PolishToken}, + }, + curve::KimchiCurve, + groupmap::GroupMap, + plonk_sponge::FrSponge, + proof::PointEvaluations, +}; +use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; +use poly_commitment::{ + commitment::{ + absorb_commitment, combined_inner_product, BatchEvaluationProof, Evaluation, PolyComm, + }, + OpenProof, SRS, +}; + +use crate::{expr::E, proof::Proof, witness::Witness}; + +pub fn verify< + G: KimchiCurve, + OpeningProof: OpenProof, + EFqSponge: Clone + FqSponge, + EFrSponge: FrSponge, + const N_WIT: usize, + const N_REL: usize, + const N_DSEL: usize, + const N_FSEL: usize, + const NPUB: usize, + ID: LookupTableID, +>( + domain: EvaluationDomains, + srs: &OpeningProof::SRS, + constraints: &Vec>, + fixed_selectors: Box<[Vec; N_FSEL]>, + proof: &Proof, + public_inputs: Witness>, +) -> bool +where + OpeningProof::SRS: Sync, +{ + let Proof { + proof_comms, + proof_evals, + opening_proof, + } = proof; + + //////////////////////////////////////////////////////////////////////////// + // Re-evaluating public inputs + //////////////////////////////////////////////////////////////////////////// + + let fixed_selectors_evals_d1: Box<[Evaluations>; N_FSEL]> = { + o1_utils::array::vec_to_boxed_array( + fixed_selectors + .into_par_iter() + .map(|evals| Evaluations::from_vec_and_domain(evals, domain.d1)) + .collect(), + ) + }; + + let fixed_selectors_polys: Box<[DensePolynomial; N_FSEL]> = { + o1_utils::array::vec_to_boxed_array( + fixed_selectors_evals_d1 + .into_par_iter() + .map(|evals| evals.interpolate()) + .collect(), + ) + }; + + let fixed_selectors_comms: Box<[PolyComm; N_FSEL]> = { + let comm = |poly: &DensePolynomial| srs.commit_non_hiding(poly, 1); + o1_utils::array::vec_to_boxed_array( + fixed_selectors_polys + .as_ref() + .into_par_iter() + .map(comm) + .collect(), + ) + }; + + // Interpolate public input columns on d1, using trait Into. 
+ let public_input_evals_d1: Witness>> = + public_inputs + .into_par_iter() + .map(|evals| { + Evaluations::>::from_vec_and_domain( + evals, domain.d1, + ) + }) + .collect::>>>(); + + let public_input_polys: Witness> = { + let interpolate = + |evals: Evaluations>| evals.interpolate(); + public_input_evals_d1 + .into_par_iter() + .map(interpolate) + .collect::>>() + }; + + let public_input_comms: Witness> = { + let comm = |poly: &DensePolynomial| srs.commit_non_hiding(poly, 1); + (&public_input_polys) + .into_par_iter() + .map(comm) + .collect::>>() + }; + + assert!( + NPUB <= N_WIT, + "Number of public inputs exceeds number of witness columns" + ); + for i in 0..NPUB { + assert!(public_input_comms.cols[i] == proof_comms.witness_comms.cols[i]); + } + + //////////////////////////////////////////////////////////////////////////// + // Absorbing all the commitments to the columns + //////////////////////////////////////////////////////////////////////////// + + let mut fq_sponge = EFqSponge::new(G::other_curve_sponge_params()); + + fixed_selectors_comms + .as_ref() + .iter() + .chain(&proof_comms.witness_comms) + .for_each(|comm| absorb_commitment(&mut fq_sponge, comm)); + + //////////////////////////////////////////////////////////////////////////// + // Logup + //////////////////////////////////////////////////////////////////////////// + + let (joint_combiner, beta) = { + if let Some(logup_comms) = &proof_comms.logup_comms { + // First, we absorb the multiplicity polynomials + logup_comms.m.values().for_each(|comms| { + comms + .iter() + .for_each(|comm| absorb_commitment(&mut fq_sponge, comm)) + }); + + // FIXME @volhovm it seems that the verifier does not + // actually check that the fixed tables used in the proof + // are the fixed tables defined in the code. In other + // words, all the currently used "fixed" tables are + // runtime and can be chosen freely by the prover. + + // To generate the challenges + let joint_combiner = fq_sponge.challenge(); + let beta = fq_sponge.challenge(); + + // And now, we absorb the commitments to the other polynomials + logup_comms.h.values().for_each(|comms| { + comms + .iter() + .for_each(|comm| absorb_commitment(&mut fq_sponge, comm)) + }); + + logup_comms + .fixed_tables + .values() + .for_each(|comm| absorb_commitment(&mut fq_sponge, comm)); + + // And at the end, the aggregation + absorb_commitment(&mut fq_sponge, &logup_comms.sum); + (Some(joint_combiner), beta) + } else { + (None, G::ScalarField::zero()) + } + }; + + // Sample α with the Fq-Sponge. 
+ let alpha = fq_sponge.challenge(); + + //////////////////////////////////////////////////////////////////////////// + // Quotient polynomial + //////////////////////////////////////////////////////////////////////////// + + absorb_commitment(&mut fq_sponge, &proof_comms.t_comm); + + // -- Preparing for opening proof verification + let zeta_chal = ScalarChallenge(fq_sponge.challenge()); + let (_, endo_r) = G::endos(); + let zeta: G::ScalarField = zeta_chal.to_field(endo_r); + let omega = domain.d1.group_gen; + let zeta_omega = zeta * omega; + + let mut coms_and_evaluations: Vec> = vec![]; + + coms_and_evaluations.extend( + (&proof_comms.witness_comms) + .into_iter() + .zip(&proof_evals.witness_evals) + .map(|(commitment, point_eval)| Evaluation { + commitment: commitment.clone(), + evaluations: vec![vec![point_eval.zeta], vec![point_eval.zeta_omega]], + }), + ); + + coms_and_evaluations.extend( + (fixed_selectors_comms) + .into_iter() + .zip(proof_evals.fixed_selectors_evals.iter()) + .map(|(commitment, point_eval)| Evaluation { + commitment: commitment.clone(), + evaluations: vec![vec![point_eval.zeta], vec![point_eval.zeta_omega]], + }), + ); + + if let Some(logup_comms) = &proof_comms.logup_comms { + coms_and_evaluations.extend( + logup_comms + .into_iter() + .zip(proof_evals.logup_evals.as_ref().unwrap()) + .map(|(commitment, point_eval)| Evaluation { + commitment: commitment.clone(), + evaluations: vec![vec![point_eval.zeta], vec![point_eval.zeta_omega]], + }) + .collect::>(), + ); + } + + // -- Absorb all coms_and_evaluations + let fq_sponge_before_coms_and_evaluations = fq_sponge.clone(); + let mut fr_sponge = EFrSponge::new(G::sponge_params()); + fr_sponge.absorb(&fq_sponge.digest()); + + for PointEvaluations { zeta, zeta_omega } in (&proof_evals.witness_evals).into_iter() { + fr_sponge.absorb(zeta); + fr_sponge.absorb(zeta_omega); + } + + for PointEvaluations { zeta, zeta_omega } in proof_evals.fixed_selectors_evals.as_ref().iter() { + fr_sponge.absorb(zeta); + fr_sponge.absorb(zeta_omega); + } + + if proof_comms.logup_comms.is_some() { + // Logup FS + for PointEvaluations { zeta, zeta_omega } in + proof_evals.logup_evals.as_ref().unwrap().into_iter() + { + fr_sponge.absorb(zeta); + fr_sponge.absorb(zeta_omega); + } + }; + + // Compute [ft(X)] = \ + // (1 - ζ^n) \ + // ([t_0(X)] + ζ^n [t_1(X)] + ... + ζ^{kn} [t_{k}(X)]) + let ft_comm = { + let evaluation_point_to_domain_size = zeta.pow([domain.d1.size]); + let chunked_t_comm = proof_comms + .t_comm + .chunk_commitment(evaluation_point_to_domain_size); + // (1 - ζ^n) + let minus_vanishing_poly_at_zeta = -domain.d1.vanishing_polynomial().evaluate(&zeta); + chunked_t_comm.scale(minus_vanishing_poly_at_zeta) + }; + + let challenges = BerkeleyChallenges:: { + alpha, + beta, + gamma: G::ScalarField::zero(), + joint_combiner: joint_combiner.unwrap_or(G::ScalarField::zero()), + }; + + let constants = Constants { + endo_coefficient: *endo_r, + mds: &G::sponge_params().mds, + zk_rows: 0, + }; + + let combined_expr = + Expr::combine_constraints(0..(constraints.len() as u32), constraints.clone()); + // Note the minus! ft polynomial at zeta (ft_eval0) is minus evaluation of the expression. 
+ let ft_eval0 = -PolishToken::evaluate( + combined_expr.to_polish().as_slice(), + domain.d1, + zeta, + proof_evals, + &constants, + &challenges, + ) + .unwrap(); + + coms_and_evaluations.push(Evaluation { + commitment: ft_comm, + evaluations: vec![vec![ft_eval0], vec![proof_evals.ft_eval1]], + }); + + fr_sponge.absorb(&proof_evals.ft_eval1); + // -- End absorb all coms_and_evaluations + + let v_chal = fr_sponge.challenge(); + let v = v_chal.to_field(endo_r); + let u_chal = fr_sponge.challenge(); + let u = u_chal.to_field(endo_r); + + let combined_inner_product = { + let es: Vec<_> = coms_and_evaluations + .iter() + .map(|Evaluation { evaluations, .. }| evaluations.clone()) + .collect(); + + combined_inner_product(&v, &u, es.as_slice()) + }; + + let batch = BatchEvaluationProof { + sponge: fq_sponge_before_coms_and_evaluations, + evaluations: coms_and_evaluations, + evaluation_points: vec![zeta, zeta_omega], + polyscale: v, + evalscale: u, + opening: opening_proof, + combined_inner_product, + }; + + let group_map = G::Map::setup(); + OpeningProof::verify(srs, &group_map, &mut [batch], &mut thread_rng()) +} From 97f3e044cdd64a15f0c3afcf5e4a6ba5ed3e742b Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Wed, 9 Oct 2024 13:54:50 -0400 Subject: [PATCH 02/30] msm: make pub(crate) fields pub --- msm/src/logup.rs | 8 ++++---- msm/src/proof.rs | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/msm/src/logup.rs b/msm/src/logup.rs index 19ba65a191..117fc603a4 100644 --- a/msm/src/logup.rs +++ b/msm/src/logup.rs @@ -275,13 +275,13 @@ pub struct LogupWitness { #[derive(Debug, Clone)] pub struct LookupProof { /// The multiplicity polynomials - pub(crate) m: BTreeMap>, + pub m: BTreeMap>, /// The polynomial keeping the sum of each row - pub(crate) h: BTreeMap>, + pub h: BTreeMap>, /// The "running-sum" over the rows, coined `φ` - pub(crate) sum: T, + pub sum: T, /// All fixed lookup tables values, indexed by their ID - pub(crate) fixed_tables: BTreeMap, + pub fixed_tables: BTreeMap, } /// Iterator implementation to abstract the content of the structure. diff --git a/msm/src/proof.rs b/msm/src/proof.rs index 4700450cf5..fc0cc8cead 100644 --- a/msm/src/proof.rs +++ b/msm/src/proof.rs @@ -74,13 +74,13 @@ pub struct ProofEvaluations< ID: LookupTableID, > { /// Witness evaluations, including public inputs - pub(crate) witness_evals: Witness>, + pub witness_evals: Witness>, /// Evaluations of fixed selectors. - pub(crate) fixed_selectors_evals: Box<[PointEvaluations; N_FSEL]>, + pub fixed_selectors_evals: Box<[PointEvaluations; N_FSEL]>, /// Logup argument evaluations - pub(crate) logup_evals: Option, ID>>, + pub logup_evals: Option, ID>>, /// Evaluation of Z_H(ζ) (t_0(X) + ζ^n t_1(X) + ...) at ζω. - pub(crate) ft_eval1: F, + pub ft_eval1: F, } /// The trait ColumnEvaluations is used by the verifier. @@ -150,13 +150,13 @@ impl< pub struct ProofCommitments { /// Commitments to the N columns of the circuits, also called the 'witnesses'. /// If some columns are considered as public inputs, it is counted in the witness. - pub(crate) witness_comms: Witness>, + pub witness_comms: Witness>, /// Commitments to the polynomials used by the lookup argument, coined "logup". /// The values contains the chunked polynomials. - pub(crate) logup_comms: Option, ID>>, + pub logup_comms: Option, ID>>, /// Commitments to the quotient polynomial. /// The value contains the chunked polynomials. 
- pub(crate) t_comm: PolyComm,
+ pub t_comm: PolyComm,
 }

 #[derive(Debug, Clone)]
 pub struct Proof<
 OpeningProof: OpenProof,
 ID: LookupTableID,
 > {
- pub(crate) proof_comms: ProofCommitments,
- pub(crate) proof_evals: ProofEvaluations,
- pub(crate) opening_proof: OpeningProof,
+ pub proof_comms: ProofCommitments,
+ pub proof_evals: ProofEvaluations,
+ pub opening_proof: OpeningProof,
 }

From 4dacfa309337079e41a84b2ebfb264e470f310e1 Mon Sep 17 00:00:00 2001
From: Matt Walker
Date: Wed, 9 Oct 2024 14:49:30 -0400
Subject: [PATCH 03/30] o1vm: Add o1_utils to Cargo.toml and update Cargo.lock

---
 Cargo.lock | 1 +
 o1vm/Cargo.toml | 1 +
 2 files changed, 2 insertions(+)

diff --git a/Cargo.lock b/Cargo.lock
index 124a72ffd2..f2958269c2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1907,6 +1907,7 @@ dependencies = [
 "log",
 "mina-curves",
 "mina-poseidon",
+ "o1-utils",
 "os_pipe",
 "poly-commitment",
 "rand",
diff --git a/o1vm/Cargo.toml b/o1vm/Cargo.toml
index 2109281914..1c14712ece 100644
--- a/o1vm/Cargo.toml
+++ b/o1vm/Cargo.toml
@@ -25,6 +25,7 @@ name = "pickles_o1vm"
 path = "src/pickles/main.rs"

 [dependencies]
+o1-utils.workspace = true
 # FIXME: Only activate this when legacy_o1vm is built
 ark-bn254.workspace = true
 # FIXME: Only activate this when legacy_o1vm is built

From cab1271c6b7713f8390dbce84a35b6f5f2e4e545 Mon Sep 17 00:00:00 2001
From: Matt Walker
Date: Wed, 9 Oct 2024 14:49:59 -0400
Subject: [PATCH 04/30] o1vm/pickles: Add verifier to mod.rs and make compile.

---
 o1vm/src/pickles/mod.rs | 1 +
 o1vm/src/pickles/verifier.rs | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/o1vm/src/pickles/mod.rs b/o1vm/src/pickles/mod.rs
index 328d7d368a..193162e90b 100644
--- a/o1vm/src/pickles/mod.rs
+++ b/o1vm/src/pickles/mod.rs
@@ -15,6 +15,7 @@
 pub mod column_env;
 pub mod proof;
 pub mod prover;
+pub mod verifier;

 /// Maximum degree of the constraints.
/// It does include the additional degree induced by the multiplication of the diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 74d0cd0769..42d42d262a 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -1,7 +1,6 @@ #![allow(clippy::type_complexity)] #![allow(clippy::boxed_local)] -use crate::logup::LookupTableID; use ark_ff::{Field, Zero}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, @@ -29,7 +28,7 @@ use poly_commitment::{ OpenProof, SRS, }; -use crate::{expr::E, proof::Proof, witness::Witness}; +use kimchi_msm::{expr::E, logup::LookupTableID, proof::Proof, witness::Witness}; pub fn verify< G: KimchiCurve, @@ -52,6 +51,7 @@ pub fn verify< ) -> bool where OpeningProof::SRS: Sync, + { let Proof { proof_comms, From 021790e3dee5425daa5a418b7acefd373b2639b5 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Wed, 9 Oct 2024 14:50:34 -0400 Subject: [PATCH 05/30] o1vm/pickles: format verifier.rs --- o1vm/src/pickles/verifier.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 42d42d262a..cb3dfd4f9f 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -51,7 +51,6 @@ pub fn verify< ) -> bool where OpeningProof::SRS: Sync, - { let Proof { proof_comms, From f4ae20d9dab50936cde0e874f3b9d28e1fcae752 Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Thu, 10 Oct 2024 11:18:48 +0200 Subject: [PATCH 06/30] o1vm:pickles: Use correct imports for verifier.rs --- o1vm/src/pickles/verifier.rs | 234 ++++++++--------------------------- 1 file changed, 49 insertions(+), 185 deletions(-) diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index cb3dfd4f9f..baf180ecb4 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -28,159 +28,48 @@ use poly_commitment::{ OpenProof, SRS, }; -use kimchi_msm::{expr::E, logup::LookupTableID, proof::Proof, witness::Witness}; +use kimchi_msm::{logup::LookupTableID, witness::Witness}; +use super::proof::Proof; +use crate::E; pub fn verify< G: KimchiCurve, - OpeningProof: OpenProof, + OpeningProof: Proof, EFqSponge: Clone + FqSponge, EFrSponge: FrSponge, - const N_WIT: usize, - const N_REL: usize, - const N_DSEL: usize, - const N_FSEL: usize, - const NPUB: usize, - ID: LookupTableID, >( domain: EvaluationDomains, - srs: &OpeningProof::SRS, + srs: &SRS, constraints: &Vec>, - fixed_selectors: Box<[Vec; N_FSEL]>, - proof: &Proof, - public_inputs: Witness>, + proof: &Proof, ) -> bool where - OpeningProof::SRS: Sync, + SRS: Sync, { let Proof { - proof_comms, - proof_evals, + commitments, + zeta_evaluations, + zeta_omega_evaluations, opening_proof, } = proof; //////////////////////////////////////////////////////////////////////////// - // Re-evaluating public inputs + // TODO : public inputs //////////////////////////////////////////////////////////////////////////// - let fixed_selectors_evals_d1: Box<[Evaluations>; N_FSEL]> = { - o1_utils::array::vec_to_boxed_array( - fixed_selectors - .into_par_iter() - .map(|evals| Evaluations::from_vec_and_domain(evals, domain.d1)) - .collect(), - ) - }; - - let fixed_selectors_polys: Box<[DensePolynomial; N_FSEL]> = { - o1_utils::array::vec_to_boxed_array( - fixed_selectors_evals_d1 - .into_par_iter() - .map(|evals| evals.interpolate()) - .collect(), - ) - }; - - let fixed_selectors_comms: Box<[PolyComm; N_FSEL]> = { - let comm = |poly: &DensePolynomial| srs.commit_non_hiding(poly, 1); - 
o1_utils::array::vec_to_boxed_array( - fixed_selectors_polys - .as_ref() - .into_par_iter() - .map(comm) - .collect(), - ) - }; - - // Interpolate public input columns on d1, using trait Into. - let public_input_evals_d1: Witness>> = - public_inputs - .into_par_iter() - .map(|evals| { - Evaluations::>::from_vec_and_domain( - evals, domain.d1, - ) - }) - .collect::>>>(); - - let public_input_polys: Witness> = { - let interpolate = - |evals: Evaluations>| evals.interpolate(); - public_input_evals_d1 - .into_par_iter() - .map(interpolate) - .collect::>>() - }; - - let public_input_comms: Witness> = { - let comm = |poly: &DensePolynomial| srs.commit_non_hiding(poly, 1); - (&public_input_polys) - .into_par_iter() - .map(comm) - .collect::>>() - }; - - assert!( - NPUB <= N_WIT, - "Number of public inputs exceeds number of witness columns" - ); - for i in 0..NPUB { - assert!(public_input_comms.cols[i] == proof_comms.witness_comms.cols[i]); - } - //////////////////////////////////////////////////////////////////////////// // Absorbing all the commitments to the columns //////////////////////////////////////////////////////////////////////////// let mut fq_sponge = EFqSponge::new(G::other_curve_sponge_params()); - - fixed_selectors_comms - .as_ref() - .iter() - .chain(&proof_comms.witness_comms) - .for_each(|comm| absorb_commitment(&mut fq_sponge, comm)); - - //////////////////////////////////////////////////////////////////////////// - // Logup - //////////////////////////////////////////////////////////////////////////// - - let (joint_combiner, beta) = { - if let Some(logup_comms) = &proof_comms.logup_comms { - // First, we absorb the multiplicity polynomials - logup_comms.m.values().for_each(|comms| { - comms - .iter() - .for_each(|comm| absorb_commitment(&mut fq_sponge, comm)) - }); - - // FIXME @volhovm it seems that the verifier does not - // actually check that the fixed tables used in the proof - // are the fixed tables defined in the code. In other - // words, all the currently used "fixed" tables are - // runtime and can be chosen freely by the prover. - - // To generate the challenges - let joint_combiner = fq_sponge.challenge(); - let beta = fq_sponge.challenge(); - - // And now, we absorb the commitments to the other polynomials - logup_comms.h.values().for_each(|comms| { - comms - .iter() - .for_each(|comm| absorb_commitment(&mut fq_sponge, comm)) - }); - - logup_comms - .fixed_tables - .values() - .for_each(|comm| absorb_commitment(&mut fq_sponge, comm)); - - // And at the end, the aggregation - absorb_commitment(&mut fq_sponge, &logup_comms.sum); - (Some(joint_combiner), beta) - } else { - (None, G::ScalarField::zero()) - } - }; + for comm in commitments.scratch.iter() { + absorb_commitment(&mut fq_sponge, comm) + } + absorb_commitment(&mut fq_sponge, &commitments.instruction_counter); + absorb_commitment(&mut fq_sponge, &commitments.error); + for comm in commitments.selectors.iter() { + absorb_commitment(&mut fq_sponge, comm) + } // Sample α with the Fq-Sponge. 
let alpha = fq_sponge.challenge(); @@ -189,7 +78,7 @@ where // Quotient polynomial //////////////////////////////////////////////////////////////////////////// - absorb_commitment(&mut fq_sponge, &proof_comms.t_comm); + absorb_commitment(&mut fq_sponge, &commitments.t_comm); // -- Preparing for opening proof verification let zeta_chal = ScalarChallenge(fq_sponge.challenge()); @@ -201,82 +90,56 @@ where let mut coms_and_evaluations: Vec> = vec![]; coms_and_evaluations.extend( - (&proof_comms.witness_comms) + (&commitments) .into_iter() - .zip(&proof_evals.witness_evals) - .map(|(commitment, point_eval)| Evaluation { + .zip(&zeta_evaluations) + .zip(zeta_omega_evaluations) + .map(|(commitment, (eval_zeta, eval_zeta_omega))| Evaluation { commitment: commitment.clone(), - evaluations: vec![vec![point_eval.zeta], vec![point_eval.zeta_omega]], + evaluations: vec![vec![eval_zeta], vec![eval_zeta_omega]], }), ); - coms_and_evaluations.extend( - (fixed_selectors_comms) - .into_iter() - .zip(proof_evals.fixed_selectors_evals.iter()) - .map(|(commitment, point_eval)| Evaluation { - commitment: commitment.clone(), - evaluations: vec![vec![point_eval.zeta], vec![point_eval.zeta_omega]], - }), - ); - - if let Some(logup_comms) = &proof_comms.logup_comms { - coms_and_evaluations.extend( - logup_comms - .into_iter() - .zip(proof_evals.logup_evals.as_ref().unwrap()) - .map(|(commitment, point_eval)| Evaluation { - commitment: commitment.clone(), - evaluations: vec![vec![point_eval.zeta], vec![point_eval.zeta_omega]], - }) - .collect::>(), - ); - } - // -- Absorb all coms_and_evaluations let fq_sponge_before_coms_and_evaluations = fq_sponge.clone(); let mut fr_sponge = EFrSponge::new(G::sponge_params()); fr_sponge.absorb(&fq_sponge.digest()); - for PointEvaluations { zeta, zeta_omega } in (&proof_evals.witness_evals).into_iter() { - fr_sponge.absorb(zeta); - fr_sponge.absorb(zeta_omega); - } - - for PointEvaluations { zeta, zeta_omega } in proof_evals.fixed_selectors_evals.as_ref().iter() { - fr_sponge.absorb(zeta); - fr_sponge.absorb(zeta_omega); + for (zeta_eval, zeta_omega_eval) in zeta_evaluations + .scratch + .iter() + .zip(zeta_omega_evaluations.scratch.iter()) + { + fr_sponge.absorb(zeta_eval); + fr_sponge.absorb(zeta_omega_eval); } - - if proof_comms.logup_comms.is_some() { - // Logup FS - for PointEvaluations { zeta, zeta_omega } in - proof_evals.logup_evals.as_ref().unwrap().into_iter() - { - fr_sponge.absorb(zeta); - fr_sponge.absorb(zeta_omega); - } - }; + fr_sponge.absorb(&zeta_evaluations.instruction_counter); + fr_sponge.absorb(&zeta_omega_evaluations.instruction_counter); + fr_sponge.absorb(&zeta_evaluations.error); + fr_sponge.absorb(&zeta_omega_evaluations.error); // Compute [ft(X)] = \ - // (1 - ζ^n) \ + // (1 - ζ^n) * // ([t_0(X)] + ζ^n [t_1(X)] + ... 
+ ζ^{kn} [t_{k}(X)]) let ft_comm = { let evaluation_point_to_domain_size = zeta.pow([domain.d1.size]); - let chunked_t_comm = proof_comms + let chunked_t_comm = commitments .t_comm .chunk_commitment(evaluation_point_to_domain_size); // (1 - ζ^n) let minus_vanishing_poly_at_zeta = -domain.d1.vanishing_polynomial().evaluate(&zeta); chunked_t_comm.scale(minus_vanishing_poly_at_zeta) }; - - let challenges = BerkeleyChallenges:: { + // FIXME: use a proper Challenge structure + let challenges = BerkeleyChallenges { alpha, - beta, + // No permutation argument for the moment + beta: G::ScalarField::zero(), gamma: G::ScalarField::zero(), - joint_combiner: joint_combiner.unwrap_or(G::ScalarField::zero()), + // No lookup for the moment + joint_combiner: G::ScalarField::zero(), }; + let (_, endo_r) = G::endos(); let constants = Constants { endo_coefficient: *endo_r, @@ -291,18 +154,19 @@ where combined_expr.to_polish().as_slice(), domain.d1, zeta, - proof_evals, + evaluations, &constants, &challenges, ) .unwrap(); + // Fixme add ft eval to the proof coms_and_evaluations.push(Evaluation { commitment: ft_comm, - evaluations: vec![vec![ft_eval0], vec![proof_evals.ft_eval1]], + evaluations: vec![vec![ft_eval0], vec![zeta_omega_evaluations.ft]], }); - fr_sponge.absorb(&proof_evals.ft_eval1); + fr_sponge.absorb(zeta_omega_evaluations.ft_eval1); // -- End absorb all coms_and_evaluations let v_chal = fr_sponge.challenge(); From 28d2091fcb2064eb17bc0826567023069ff456d0 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Thu, 10 Oct 2024 11:18:57 -0400 Subject: [PATCH 07/30] Revert "msm: make pub(crate) fields pub" This reverts commit 28399218a5f787fc207c919bbfa49e9583b35f8c. --- msm/src/logup.rs | 8 ++++---- msm/src/proof.rs | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/msm/src/logup.rs b/msm/src/logup.rs index 117fc603a4..19ba65a191 100644 --- a/msm/src/logup.rs +++ b/msm/src/logup.rs @@ -275,13 +275,13 @@ pub struct LogupWitness { #[derive(Debug, Clone)] pub struct LookupProof { /// The multiplicity polynomials - pub m: BTreeMap>, + pub(crate) m: BTreeMap>, /// The polynomial keeping the sum of each row - pub h: BTreeMap>, + pub(crate) h: BTreeMap>, /// The "running-sum" over the rows, coined `φ` - pub sum: T, + pub(crate) sum: T, /// All fixed lookup tables values, indexed by their ID - pub fixed_tables: BTreeMap, + pub(crate) fixed_tables: BTreeMap, } /// Iterator implementation to abstract the content of the structure. diff --git a/msm/src/proof.rs b/msm/src/proof.rs index fc0cc8cead..4700450cf5 100644 --- a/msm/src/proof.rs +++ b/msm/src/proof.rs @@ -74,13 +74,13 @@ pub struct ProofEvaluations< ID: LookupTableID, > { /// Witness evaluations, including public inputs - pub witness_evals: Witness>, + pub(crate) witness_evals: Witness>, /// Evaluations of fixed selectors. - pub fixed_selectors_evals: Box<[PointEvaluations; N_FSEL]>, + pub(crate) fixed_selectors_evals: Box<[PointEvaluations; N_FSEL]>, /// Logup argument evaluations - pub logup_evals: Option, ID>>, + pub(crate) logup_evals: Option, ID>>, /// Evaluation of Z_H(ζ) (t_0(X) + ζ^n t_1(X) + ...) at ζω. - pub ft_eval1: F, + pub(crate) ft_eval1: F, } /// The trait ColumnEvaluations is used by the verifier. @@ -150,13 +150,13 @@ impl< pub struct ProofCommitments { /// Commitments to the N columns of the circuits, also called the 'witnesses'. /// If some columns are considered as public inputs, it is counted in the witness. 
- pub witness_comms: Witness>,
+ pub(crate) witness_comms: Witness>,
 /// Commitments to the polynomials used by the lookup argument, coined "logup".
 /// The values contains the chunked polynomials.
- pub logup_comms: Option, ID>>,
+ pub(crate) logup_comms: Option, ID>>,
 /// Commitments to the quotient polynomial.
 /// The value contains the chunked polynomials.
- pub t_comm: PolyComm,
+ pub(crate) t_comm: PolyComm,
 }

 #[derive(Debug, Clone)]
 pub struct Proof<
 OpeningProof: OpenProof,
 ID: LookupTableID,
 > {
- pub proof_comms: ProofCommitments,
- pub proof_evals: ProofEvaluations,
- pub opening_proof: OpeningProof,
+ pub(crate) proof_comms: ProofCommitments,
+ pub(crate) proof_evals: ProofEvaluations,
+ pub(crate) opening_proof: OpeningProof,
 }

From 031acd53f81742699b6ed9c5b74d10da15011c45 Mon Sep 17 00:00:00 2001
From: Matt Walker
Date: Wed, 16 Oct 2024 16:56:27 -0400
Subject: [PATCH 08/30] o1vm/pickles: Make prover and verifier compile

---
 o1vm/src/pickles/column_env.rs | 48 ++++++---
 o1vm/src/pickles/proof.rs | 7 +-
 o1vm/src/pickles/prover.rs | 49 +++++----
 o1vm/src/pickles/verifier.rs | 181 +++++++++++++++++++++++----------
 4 files changed, 191 insertions(+), 94 deletions(-)

diff --git a/o1vm/src/pickles/column_env.rs b/o1vm/src/pickles/column_env.rs
index 801965689e..d91a0bc17d 100644
--- a/o1vm/src/pickles/column_env.rs
+++ b/o1vm/src/pickles/column_env.rs
@@ -1,5 +1,6 @@
 use ark_ff::FftField;
 use ark_poly::{Evaluations, Radix2EvaluationDomain};
+use kimchi_msm::columns::Column;

 use crate::{
 interpreters::mips::{column::N_MIPS_SEL_COLS, witness::SCRATCH_SIZE},
@@ -13,12 +14,12 @@ use kimchi::circuits::{
 type Evals = Evaluations>;

-/// The collection of polynomials (all in evaluation form) and constants
+/// The collection f polynomials (all in evaluation form) and constants
 /// required to evaluate an expression as a polynomial.
 ///
 /// All are evaluations.
 pub struct ColumnEnvironment<'a, F: FftField> {
- /// The witness column polynomials. Includes relation columns and dynamic
+ /// The witness coluomn polynomials. Includes relation columns and dynamic
 /// selector columns.
 pub witness: &'a WitnessColumns, [Evals; N_MIPS_SEL_COLS]>,
 /// The value `prod_{j != 1} (1 - ω^j)`, used for efficiently
@@ -34,35 +35,40 @@ pub struct ColumnEnvironment<'a, F: FftField> {
 pub domain: EvaluationDomains,
 }

-impl<'a, F: FftField> TColumnEnvironment<'a, F, BerkeleyChallengeTerm, BerkeleyChallenges>
- for ColumnEnvironment<'a, F>
-{
- // FIXME: do we change to the MIPS column type?
- // We do not want to keep kimchi_msm/generic prover - type Column = kimchi_msm::columns::Column; +pub fn get_all_columns() -> Vec { + let mut cols = Vec::::with_capacity(SCRATCH_SIZE + N_MIPS_SEL_COLS); + for i in 0..SCRATCH_SIZE { + cols.push(Column::Relation(i)); + } + for i in 0..N_MIPS_SEL_COLS { + cols.push(Column::DynamicSelector(i)); + } + cols +} - fn get_column(&self, col: &Self::Column) -> Option<&'a Evals> { +impl WitnessColumns { + pub fn get_column(&self, col: &Column) -> Option<&G> { match *col { - Self::Column::Relation(i) => { + Column::Relation(i) => { if i < SCRATCH_SIZE { - let res = &self.witness.scratch[i]; + let res = &self.scratch[i]; Some(res) } else if i == SCRATCH_SIZE { - let res = &self.witness.instruction_counter; + let res = &self.instruction_counter; Some(res) } else if i == SCRATCH_SIZE + 1 { - let res = &self.witness.error; + let res = &self.error; Some(res) } else { panic!("We should not have that many relation columns"); } } - Self::Column::DynamicSelector(i) => { + Column::DynamicSelector(i) => { assert!( i < N_MIPS_SEL_COLS, "We do not have that many dynamic selector columns" ); - let res = &self.witness.selector[i]; + let res = &self.selector[i]; Some(res) } _ => { @@ -70,6 +76,18 @@ impl<'a, F: FftField> TColumnEnvironment<'a, F, BerkeleyChallengeTerm, BerkeleyC } } } +} + +impl<'a, F: FftField> TColumnEnvironment<'a, F, BerkeleyChallengeTerm, BerkeleyChallenges> + for ColumnEnvironment<'a, F> +{ + // FIXME: do we change to the MIPS column type? + // We do not want to keep kimchi_msm/generic prover + type Column = Column; + + fn get_column(&self, col: &Self::Column) -> Option<&'a Evals> { + self.witness.get_column(col) + } fn get_domain(&self, d: Domain) -> Radix2EvaluationDomain { match d { diff --git a/o1vm/src/pickles/proof.rs b/o1vm/src/pickles/proof.rs index 1c5526f1cc..b2ebdc5aca 100644 --- a/o1vm/src/pickles/proof.rs +++ b/o1vm/src/pickles/proof.rs @@ -1,4 +1,7 @@ -use kimchi::curve::KimchiCurve; +use kimchi::{ + curve::KimchiCurve, + proof::PointEvaluations, +}; use poly_commitment::{ipa::OpeningProof, PolyComm}; use crate::interpreters::mips::column::N_MIPS_SEL_COLS; @@ -32,6 +35,8 @@ pub struct Proof { pub commitments: WitnessColumns, [PolyComm; N_MIPS_SEL_COLS]>, pub zeta_evaluations: WitnessColumns, pub zeta_omega_evaluations: WitnessColumns, + pub quotient_commitment: PolyComm, + pub quotient_evaluations: PointEvaluations, /// IPA opening proof pub opening_proof: OpeningProof, } diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index b4788dda46..e600e5fd5d 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -12,6 +12,7 @@ use kimchi::{ curve::KimchiCurve, groupmap::GroupMap, plonk_sponge::FrSponge, + proof::PointEvaluations, }; use log::debug; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; @@ -258,9 +259,8 @@ where quotient }; - let t_comm = srs.commit_non_hiding("ient_poly, DEGREE_QUOTIENT_POLYNOMIAL as usize); - - absorb_commitment(&mut fq_sponge, &t_comm); + let quotient_commitment = srs.commit_non_hiding("ient_poly, DEGREE_QUOTIENT_POLYNOMIAL as usize); + absorb_commitment(&mut fq_sponge, "ient_commitment); //////////////////////////////////////////////////////////////////////////// // Round 3: Evaluations at ζ and ζω @@ -300,16 +300,17 @@ where [<::Group as Group>::ScalarField; N_MIPS_SEL_COLS], > = evals(&zeta_omega); + let quotient_evaluations = PointEvaluations { + zeta: quotient_poly.evaluate(&zeta), + zeta_omega: quotient_poly.evaluate(&zeta_omega), + }; + // Absorbing 
evaluations with a sponge for the other field // We initialize the state with the previous state of the fq_sponge let fq_sponge_before_evaluations = fq_sponge.clone(); let mut fr_sponge = EFrSponge::new(G::sponge_params()); fr_sponge.absorb(&fq_sponge.digest()); - // Quotient poly evals - let quotient_zeta_eval = quotient_poly.evaluate(&zeta); - let quotient_zeta_omega_eval = quotient_poly.evaluate(&zeta_omega); - for (zeta_eval, zeta_omega_eval) in zeta_evaluations .scratch .iter() @@ -330,30 +331,32 @@ where fr_sponge.absorb(zeta_eval); fr_sponge.absorb(zeta_omega_eval); } - fr_sponge.absorb("ient_zeta_eval); - fr_sponge.absorb("ient_zeta_omega_eval); + fr_sponge.absorb("ient_evaluations.zeta); + fr_sponge.absorb("ient_evaluations.zeta_omega); //////////////////////////////////////////////////////////////////////////// // Round 4: Opening proof w/o linearization polynomial //////////////////////////////////////////////////////////////////////////// - // Preparing the polynomials for the opening proof let mut polynomials: Vec<_> = polys.scratch.into_iter().collect(); polynomials.push(polys.instruction_counter); polynomials.push(polys.error); polynomials.extend(polys.selector); polynomials.push(quotient_poly); - let polynomials: Vec<_> = polynomials - .iter() - .map(|poly| { - ( - DensePolynomialOrEvaluations::DensePolynomial(poly), - // We do not have any blinder, therefore we set to 0. - PolyComm::new(vec![G::ScalarField::zero()]), - ) - }) - .collect(); + // Preparing the polynomials for the opening proof + let polynomials: Vec<_> = + polynomials + .iter() + .map(|poly| { + ( + DensePolynomialOrEvaluations::DensePolynomial(poly), + // We do not have any blinder, therefore we set to 0. + PolyComm::new(vec![G::ScalarField::zero()]), + ) + }) + .collect(); + // FIXME: Push the quotient polynomial with t_comm (DONE) // poly scale let v_chal = fr_sponge.challenge(); @@ -378,9 +381,11 @@ where ); Ok(Proof { - commitments, - zeta_evaluations, + commitments, /* FIXME: Add t_comm somehow -> Rename to quotient_commitment (DONE) */ + zeta_evaluations, /* FIXME: Add quotient evaluations (DONE) */ zeta_omega_evaluations, + quotient_commitment, + quotient_evaluations, opening_proof, }) } diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index baf180ecb4..41fa029be5 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -1,19 +1,16 @@ #![allow(clippy::type_complexity)] #![allow(clippy::boxed_local)] -use ark_ff::{Field, Zero}; -use ark_poly::{ - univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, - Radix2EvaluationDomain as R2D, -}; +use ark_ec::{AffineRepr, Group}; +use ark_ff::{PrimeField, Zero}; use rand::thread_rng; -use rayon::iter::{IntoParallelIterator, ParallelIterator}; use kimchi::{ circuits::{ berkeley_columns::BerkeleyChallenges, domains::EvaluationDomains, - expr::{Constants, Expr, PolishToken}, + expr::{ColumnEvaluations, Constants, Expr, ExprError, PolishToken}, + gate::CurrOrNext, }, curve::KimchiCurve, groupmap::GroupMap, @@ -25,31 +22,72 @@ use poly_commitment::{ commitment::{ absorb_commitment, combined_inner_product, BatchEvaluationProof, Evaluation, PolyComm, }, - OpenProof, SRS, + ipa::OpeningProof, + OpenProof +}; + +use super::{ + column_env::get_all_columns, + proof::{Proof, WitnessColumns}, }; +use crate::{interpreters::mips::column::N_MIPS_SEL_COLS, E}; +use kimchi_msm::columns::Column; + +type CommitmentColumns = WitnessColumns, [PolyComm; N_MIPS_SEL_COLS]>; +type EvaluationColumns = WitnessColumns< + 
<::Group as Group>::ScalarField, + [<::Group as Group>::ScalarField; N_MIPS_SEL_COLS], +>; -use kimchi_msm::{logup::LookupTableID, witness::Witness}; -use super::proof::Proof; -use crate::E; +// TODO: Move and perhaps derive some traits for these +struct ColumnEval<'a, G: AffineRepr> { + commitment: &'a CommitmentColumns, + zeta_eval: &'a EvaluationColumns, + zeta_omega_eval: &'a EvaluationColumns, +} + +impl ColumnEvaluations<::ScalarField> for ColumnEval<'_, G> { + type Column = Column; + fn evaluate( + &self, + col: Self::Column, + ) -> Result::ScalarField>, ExprError> { + let ColumnEval { + commitment: _, + zeta_eval, + zeta_omega_eval, + } = self; + if let Some(&zeta) = zeta_eval.get_column(&col) { + if let Some(&zeta_omega) = zeta_omega_eval.get_column(&col) { + Ok(PointEvaluations { zeta, zeta_omega }) + } else { + Err(ExprError::MissingEvaluation(col, CurrOrNext::Next)) + } + } else { + Err(ExprError::MissingEvaluation(col, CurrOrNext::Curr)) + } + } +} pub fn verify< G: KimchiCurve, - OpeningProof: Proof, EFqSponge: Clone + FqSponge, EFrSponge: FrSponge, >( domain: EvaluationDomains, - srs: &SRS, + srs: & as OpenProof>::SRS, constraints: &Vec>, proof: &Proof, ) -> bool where - SRS: Sync, + ::BaseField: PrimeField, { let Proof { commitments, zeta_evaluations, zeta_omega_evaluations, + quotient_commitment, + quotient_evaluations, opening_proof, } = proof; @@ -67,7 +105,7 @@ where } absorb_commitment(&mut fq_sponge, &commitments.instruction_counter); absorb_commitment(&mut fq_sponge, &commitments.error); - for comm in commitments.selectors.iter() { + for comm in commitments.selector.iter() { absorb_commitment(&mut fq_sponge, comm) } @@ -78,7 +116,7 @@ where // Quotient polynomial //////////////////////////////////////////////////////////////////////////// - absorb_commitment(&mut fq_sponge, &commitments.t_comm); + absorb_commitment(&mut fq_sponge, quotient_commitment); // -- Preparing for opening proof verification let zeta_chal = ScalarChallenge(fq_sponge.challenge()); @@ -87,21 +125,14 @@ where let omega = domain.d1.group_gen; let zeta_omega = zeta * omega; - let mut coms_and_evaluations: Vec> = vec![]; - - coms_and_evaluations.extend( - (&commitments) - .into_iter() - .zip(&zeta_evaluations) - .zip(zeta_omega_evaluations) - .map(|(commitment, (eval_zeta, eval_zeta_omega))| Evaluation { - commitment: commitment.clone(), - evaluations: vec![vec![eval_zeta], vec![eval_zeta_omega]], - }), - ); - - // -- Absorb all coms_and_evaluations - let fq_sponge_before_coms_and_evaluations = fq_sponge.clone(); + let column_eval = ColumnEval { + commitment: commitments, + zeta_eval: zeta_evaluations, + zeta_omega_eval: zeta_omega_evaluations, + }; + + // -- Absorb all commitments_and_evaluations + let fq_sponge_before_commitments_and_evaluations = fq_sponge.clone(); let mut fr_sponge = EFrSponge::new(G::sponge_params()); fr_sponge.absorb(&fq_sponge.digest()); @@ -117,19 +148,12 @@ where fr_sponge.absorb(&zeta_omega_evaluations.instruction_counter); fr_sponge.absorb(&zeta_evaluations.error); fr_sponge.absorb(&zeta_omega_evaluations.error); + fr_sponge.absorb_multiple(&zeta_evaluations.selector); + fr_sponge.absorb_multiple(&zeta_omega_evaluations.selector); + fr_sponge.absorb("ient_evaluations.zeta); + fr_sponge.absorb("ient_evaluations.zeta_omega); + // FIXME: Add selector evaluations (DONE) and quotient evaluations - // Compute [ft(X)] = \ - // (1 - ζ^n) * - // ([t_0(X)] + ζ^n [t_1(X)] + ... 
+ ζ^{kn} [t_{k}(X)]) - let ft_comm = { - let evaluation_point_to_domain_size = zeta.pow([domain.d1.size]); - let chunked_t_comm = commitments - .t_comm - .chunk_commitment(evaluation_point_to_domain_size); - // (1 - ζ^n) - let minus_vanishing_poly_at_zeta = -domain.d1.vanishing_polynomial().evaluate(&zeta); - chunked_t_comm.scale(minus_vanishing_poly_at_zeta) - }; // FIXME: use a proper Challenge structure let challenges = BerkeleyChallenges { alpha, @@ -149,24 +173,42 @@ where let combined_expr = Expr::combine_constraints(0..(constraints.len() as u32), constraints.clone()); - // Note the minus! ft polynomial at zeta (ft_eval0) is minus evaluation of the expression. - let ft_eval0 = -PolishToken::evaluate( + + // FIXME: Add these to the final check!!!!! + + // FIXME: Fixup absorbs so they match in prover.rs + + let quotient_eval_zeta = PolishToken::evaluate( combined_expr.to_polish().as_slice(), domain.d1, zeta, - evaluations, + &column_eval, &constants, &challenges, ) - .unwrap(); + .unwrap_or_else(|_| panic!("Could not evaluate quotient polynomial at zeta")); - // Fixme add ft eval to the proof - coms_and_evaluations.push(Evaluation { - commitment: ft_comm, - evaluations: vec![vec![ft_eval0], vec![zeta_omega_evaluations.ft]], - }); + let quotient_eval_zeta_omega = PolishToken::evaluate( + combined_expr.to_polish().as_slice(), + domain.d1, + zeta_omega, + &column_eval, + &constants, + &challenges, + ) + .unwrap_or_else(|_| panic!("Could not evaluate quotient polynomial at zeta_omega")); + + // Check the actual quotient works. combined_expr(eval) [ == quotient_eval_*] = quotient(eval) [== Given by prover (new field) -- chunked] * vanishing_poly(eval) [== x^n - 1 == zeta^(d1.size()) - 1] - fr_sponge.absorb(zeta_omega_evaluations.ft_eval1); + // Fixme add ft eval to the proof + /* coms_and_evaluations.push(Evaluation { + commitment: ft_comm, + evaluations: vec![vec![ft_eval0], vec![zeta_omega_evaluations.ft]], + }); + */ + fr_sponge.absorb("ient_eval_zeta); + fr_sponge.absorb("ient_eval_zeta_omega); + // fr_sponge.absorb(zeta_omega_evaluations.ft_eval1); // -- End absorb all coms_and_evaluations let v_chal = fr_sponge.challenge(); @@ -174,8 +216,35 @@ where let u_chal = fr_sponge.challenge(); let u = u_chal.to_field(endo_r); + let evaluations = { + let all_columns = get_all_columns(); + + let mut evaluations = Vec::with_capacity(all_columns.len()); + + all_columns.into_iter() + .for_each( + |column| { + let point_evaluations = column_eval + .evaluate(column) + .unwrap_or_else(|_| panic!("Could not get `evaluations` for `Evaluation`")); // FIXME: Finish message (DONE) + + let commitment = column_eval + .commitment + .get_column(&column) + .unwrap_or_else(|| panic!("Could not get `commitment` for `Evaluation`")) // FIXME: Finish message (DONE) + .clone(); + + evaluations.push(Evaluation { + commitment, + evaluations: vec![vec![point_evaluations.zeta], vec![point_evaluations.zeta_omega]], + }) + }); + + evaluations + }; + let combined_inner_product = { - let es: Vec<_> = coms_and_evaluations + let es: Vec<_> = evaluations .iter() .map(|Evaluation { evaluations, .. 
}| evaluations.clone()) .collect(); @@ -184,8 +253,8 @@ where }; let batch = BatchEvaluationProof { - sponge: fq_sponge_before_coms_and_evaluations, - evaluations: coms_and_evaluations, + sponge: fq_sponge_before_commitments_and_evaluations, + evaluations: evaluations, evaluation_points: vec![zeta, zeta_omega], polyscale: v, evalscale: u, From b817d5abed79bc974cae374d846d54913cdd7d47 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Wed, 16 Oct 2024 17:03:01 -0400 Subject: [PATCH 09/30] o1vm/pickles: Format --- o1vm/src/pickles/proof.rs | 5 +---- o1vm/src/pickles/prover.rs | 21 ++++++++++---------- o1vm/src/pickles/verifier.rs | 37 ++++++++++++++++++------------------ 3 files changed, 30 insertions(+), 33 deletions(-) diff --git a/o1vm/src/pickles/proof.rs b/o1vm/src/pickles/proof.rs index b2ebdc5aca..595e7e846a 100644 --- a/o1vm/src/pickles/proof.rs +++ b/o1vm/src/pickles/proof.rs @@ -1,7 +1,4 @@ -use kimchi::{ - curve::KimchiCurve, - proof::PointEvaluations, -}; +use kimchi::{curve::KimchiCurve, proof::PointEvaluations}; use poly_commitment::{ipa::OpeningProof, PolyComm}; use crate::interpreters::mips::column::N_MIPS_SEL_COLS; diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index e600e5fd5d..2a2cbca086 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -345,17 +345,16 @@ where polynomials.push(quotient_poly); // Preparing the polynomials for the opening proof - let polynomials: Vec<_> = - polynomials - .iter() - .map(|poly| { - ( - DensePolynomialOrEvaluations::DensePolynomial(poly), - // We do not have any blinder, therefore we set to 0. - PolyComm::new(vec![G::ScalarField::zero()]), - ) - }) - .collect(); + let polynomials: Vec<_> = polynomials + .iter() + .map(|poly| { + ( + DensePolynomialOrEvaluations::DensePolynomial(poly), + // We do not have any blinder, therefore we set to 0. 
+ PolyComm::new(vec![G::ScalarField::zero()]), + ) + }) + .collect(); // FIXME: Push the quotient polynomial with t_comm (DONE) // poly scale diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 41fa029be5..26e18cb243 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -23,7 +23,7 @@ use poly_commitment::{ absorb_commitment, combined_inner_product, BatchEvaluationProof, Evaluation, PolyComm, }, ipa::OpeningProof, - OpenProof + OpenProof, }; use super::{ @@ -221,23 +221,24 @@ where let mut evaluations = Vec::with_capacity(all_columns.len()); - all_columns.into_iter() - .for_each( - |column| { - let point_evaluations = column_eval - .evaluate(column) - .unwrap_or_else(|_| panic!("Could not get `evaluations` for `Evaluation`")); // FIXME: Finish message (DONE) - - let commitment = column_eval - .commitment - .get_column(&column) - .unwrap_or_else(|| panic!("Could not get `commitment` for `Evaluation`")) // FIXME: Finish message (DONE) - .clone(); - - evaluations.push(Evaluation { - commitment, - evaluations: vec![vec![point_evaluations.zeta], vec![point_evaluations.zeta_omega]], - }) + all_columns.into_iter().for_each(|column| { + let point_evaluations = column_eval + .evaluate(column) + .unwrap_or_else(|_| panic!("Could not get `evaluations` for `Evaluation`")); // FIXME: Finish message (DONE) + + let commitment = column_eval + .commitment + .get_column(&column) + .unwrap_or_else(|| panic!("Could not get `commitment` for `Evaluation`")) // FIXME: Finish message (DONE) + .clone(); + + evaluations.push(Evaluation { + commitment, + evaluations: vec![ + vec![point_evaluations.zeta], + vec![point_evaluations.zeta_omega], + ], + }) }); evaluations From 5c8184d27332858083af7e7c753b44fc09541263 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Wed, 16 Oct 2024 21:48:26 -0400 Subject: [PATCH 10/30] o1vm/pickles: Fix various FIXMEs from review --- o1vm/src/pickles/column_env.rs | 4 ++-- o1vm/src/pickles/verifier.rs | 32 ++++++++++++++++++++------------ 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/o1vm/src/pickles/column_env.rs b/o1vm/src/pickles/column_env.rs index d91a0bc17d..1055a4dd42 100644 --- a/o1vm/src/pickles/column_env.rs +++ b/o1vm/src/pickles/column_env.rs @@ -14,12 +14,12 @@ use kimchi::circuits::{ type Evals = Evaluations>; -/// The collection f polynomials (all in evaluation form) and constants +/// The collection of polynomials (all in evaluation form) and constants /// required to evaluate an expression as a polynomial. /// /// All are evaluations. pub struct ColumnEnvironment<'a, F: FftField> { - /// The witness coluomn polynomials. Includes relation columns and dynamic + /// The witness column polynomials. Includes relation columns and dynamic /// selector columns. pub witness: &'a WitnessColumns, [Evals; N_MIPS_SEL_COLS]>, /// The value `prod_{j != 1} (1 - ω^j)`, used for efficiently diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 26e18cb243..9bd7d129fa 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -2,7 +2,7 @@ #![allow(clippy::boxed_local)] use ark_ec::{AffineRepr, Group}; -use ark_ff::{PrimeField, Zero}; +use ark_ff::{PrimeField, Zero /* One */}; use rand::thread_rng; use kimchi::{ @@ -174,9 +174,9 @@ where let combined_expr = Expr::combine_constraints(0..(constraints.len() as u32), constraints.clone()); - // FIXME: Add these to the final check!!!!! + // FIXME: Add these to the final check!!!!! 
(DONE) - // FIXME: Fixup absorbs so they match in prover.rs + // FIXME: Fixup absorbs so they match in prover.rs (DONE) let quotient_eval_zeta = PolishToken::evaluate( combined_expr.to_polish().as_slice(), @@ -198,18 +198,18 @@ where ) .unwrap_or_else(|_| panic!("Could not evaluate quotient polynomial at zeta_omega")); - // Check the actual quotient works. combined_expr(eval) [ == quotient_eval_*] = quotient(eval) [== Given by prover (new field) -- chunked] * vanishing_poly(eval) [== x^n - 1 == zeta^(d1.size()) - 1] + // Check the actual quotient works. + // + // combined_expr(eval) [ == quotient_eval_*] + // = + // quotient(eval) [== Given by prover (new field) -- chunked] + // * + // vanishing_poly(eval) [== x^n - 1 == zeta^(d1.size()) - 1] + // FIXME: This should probably use some sort of proof assert, not just panic. + /* assert!(quotient_eval_zeta == quotient_evaluations * (pow(zeta, d1.size()) - G::ScalarField::one()), "The prover lied!"); */ - // Fixme add ft eval to the proof - /* coms_and_evaluations.push(Evaluation { - commitment: ft_comm, - evaluations: vec![vec![ft_eval0], vec![zeta_omega_evaluations.ft]], - }); - */ fr_sponge.absorb("ient_eval_zeta); fr_sponge.absorb("ient_eval_zeta_omega); - // fr_sponge.absorb(zeta_omega_evaluations.ft_eval1); - // -- End absorb all coms_and_evaluations let v_chal = fr_sponge.challenge(); let v = v_chal.to_field(endo_r); @@ -241,6 +241,14 @@ where }) }); + evaluations.push(Evaluation { + commitment: quotient_commitment.clone(), + evaluations: vec![ + vec![quotient_eval_zeta], + vec![quotient_eval_zeta_omega], + ], + }); + evaluations }; From e7c5b477e203a0025cbe7d75346eca57b01f5612 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Thu, 17 Oct 2024 09:59:56 -0400 Subject: [PATCH 11/30] o1vm/pickles: Fix various FIXMEs from review [continued] --- o1vm/src/pickles/verifier.rs | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 9bd7d129fa..b399c77c16 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -2,7 +2,7 @@ #![allow(clippy::boxed_local)] use ark_ec::{AffineRepr, Group}; -use ark_ff::{PrimeField, Zero /* One */}; +use ark_ff::{Field, One, PrimeField, Zero}; use rand::thread_rng; use kimchi::{ @@ -198,16 +198,6 @@ where ) .unwrap_or_else(|_| panic!("Could not evaluate quotient polynomial at zeta_omega")); - // Check the actual quotient works. - // - // combined_expr(eval) [ == quotient_eval_*] - // = - // quotient(eval) [== Given by prover (new field) -- chunked] - // * - // vanishing_poly(eval) [== x^n - 1 == zeta^(d1.size()) - 1] - // FIXME: This should probably use some sort of proof assert, not just panic. 
- /* assert!(quotient_eval_zeta == quotient_evaluations * (pow(zeta, d1.size()) - G::ScalarField::one()), "The prover lied!"); */ - fr_sponge.absorb("ient_eval_zeta); fr_sponge.absorb("ient_eval_zeta_omega); @@ -219,7 +209,7 @@ where let evaluations = { let all_columns = get_all_columns(); - let mut evaluations = Vec::with_capacity(all_columns.len()); + let mut evaluations = Vec::with_capacity(all_columns.len() + 1); // +1 for the quotient all_columns.into_iter().for_each(|column| { let point_evaluations = column_eval @@ -243,10 +233,7 @@ where evaluations.push(Evaluation { commitment: quotient_commitment.clone(), - evaluations: vec![ - vec![quotient_eval_zeta], - vec![quotient_eval_zeta_omega], - ], + evaluations: vec![vec![quotient_eval_zeta], vec![quotient_eval_zeta_omega]], }); evaluations @@ -272,5 +259,9 @@ where }; let group_map = G::Map::setup(); - OpeningProof::verify(srs, &group_map, &mut [batch], &mut thread_rng()) + + // Check the actual quotient works. + (quotient_eval_zeta + == quotient_evaluations.zeta * zeta.pow([domain.d1.size]) - G::ScalarField::one()) + && OpeningProof::verify(srs, &group_map, &mut [batch], &mut thread_rng()) } From c70228be9a5e83e1126967ffd5723620078d5037 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Thu, 17 Oct 2024 10:08:05 -0400 Subject: [PATCH 12/30] Format and remove FIXMEs --- o1vm/src/pickles/prover.rs | 8 ++++---- o1vm/src/pickles/verifier.rs | 10 ++-------- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index 2a2cbca086..9fb7b0881e 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -259,7 +259,8 @@ where quotient }; - let quotient_commitment = srs.commit_non_hiding("ient_poly, DEGREE_QUOTIENT_POLYNOMIAL as usize); + let quotient_commitment = + srs.commit_non_hiding("ient_poly, DEGREE_QUOTIENT_POLYNOMIAL as usize); absorb_commitment(&mut fq_sponge, "ient_commitment); //////////////////////////////////////////////////////////////////////////// @@ -355,7 +356,6 @@ where ) }) .collect(); - // FIXME: Push the quotient polynomial with t_comm (DONE) // poly scale let v_chal = fr_sponge.challenge(); @@ -380,8 +380,8 @@ where ); Ok(Proof { - commitments, /* FIXME: Add t_comm somehow -> Rename to quotient_commitment (DONE) */ - zeta_evaluations, /* FIXME: Add quotient evaluations (DONE) */ + commitments, + zeta_evaluations, zeta_omega_evaluations, quotient_commitment, quotient_evaluations, diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index b399c77c16..f736d53ece 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -39,7 +39,6 @@ type EvaluationColumns = WitnessColumns< [<::Group as Group>::ScalarField; N_MIPS_SEL_COLS], >; -// TODO: Move and perhaps derive some traits for these struct ColumnEval<'a, G: AffineRepr> { commitment: &'a CommitmentColumns, zeta_eval: &'a EvaluationColumns, @@ -152,7 +151,6 @@ where fr_sponge.absorb_multiple(&zeta_omega_evaluations.selector); fr_sponge.absorb("ient_evaluations.zeta); fr_sponge.absorb("ient_evaluations.zeta_omega); - // FIXME: Add selector evaluations (DONE) and quotient evaluations // FIXME: use a proper Challenge structure let challenges = BerkeleyChallenges { @@ -174,10 +172,6 @@ where let combined_expr = Expr::combine_constraints(0..(constraints.len() as u32), constraints.clone()); - // FIXME: Add these to the final check!!!!! 
(DONE) - - // FIXME: Fixup absorbs so they match in prover.rs (DONE) - let quotient_eval_zeta = PolishToken::evaluate( combined_expr.to_polish().as_slice(), domain.d1, @@ -214,12 +208,12 @@ where all_columns.into_iter().for_each(|column| { let point_evaluations = column_eval .evaluate(column) - .unwrap_or_else(|_| panic!("Could not get `evaluations` for `Evaluation`")); // FIXME: Finish message (DONE) + .unwrap_or_else(|_| panic!("Could not get `evaluations` for `Evaluation`")); let commitment = column_eval .commitment .get_column(&column) - .unwrap_or_else(|| panic!("Could not get `commitment` for `Evaluation`")) // FIXME: Finish message (DONE) + .unwrap_or_else(|| panic!("Could not get `commitment` for `Evaluation`")) .clone(); evaluations.push(Evaluation { From 34d3b90cf1d65d6ed215063dc758da6486115bc9 Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Thu, 17 Oct 2024 17:55:00 +0200 Subject: [PATCH 13/30] o1vm/pickles/verif: add a fixme --- o1vm/src/pickles/verifier.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index f736d53ece..8bde1bb79d 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -75,6 +75,7 @@ pub fn verify< >( domain: EvaluationDomains, srs: & as OpenProof>::SRS, + //FIXME: change vec to array constraints: &Vec>, proof: &Proof, ) -> bool From 0ca024e6b1b5c4dbc1da1748a0d85dc8b7bdb588 Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Thu, 17 Oct 2024 17:55:26 +0200 Subject: [PATCH 14/30] o1vm/pickles/prover: improve error message --- o1vm/src/pickles/prover.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index 9fb7b0881e..031b9f97c7 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -241,19 +241,19 @@ where // And we interpolate using the evaluations let expr_evaluation_interpolated = expr_evaluation.interpolate(); - let fail_final_q_division = || { - panic!("Division by vanishing poly must not fail at this point, we checked it before") - }; + let fail_final_q_division = || panic!("Fail division by vanishing poly"); + let fail_remainder_not_zero = + || panic!("The constraints are not satisifed since the remainder is not zero"); // We compute the polynomial t(X) by dividing the constraints polynomial // by the vanishing polynomial, i.e. Z_H(X). - let (quotient, res) = expr_evaluation_interpolated + let (quotient, rem) = expr_evaluation_interpolated .divide_by_vanishing_poly(domain.d1) .unwrap_or_else(fail_final_q_division); // As the constraints must be verified on H, the rest of the division // must be equal to 0 as the constraints polynomial and Z_H(X) are both // equal on H. 
- if !res.is_zero() { - fail_final_q_division(); + if !rem.is_zero() { + fail_remainder_not_zero(); } quotient From 1c570a2527a44490cba4309291328f4e0902b801 Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Thu, 17 Oct 2024 17:55:50 +0200 Subject: [PATCH 15/30] WIP : add test --- o1vm/src/pickles/tests.rs | 97 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 96 insertions(+), 1 deletion(-) diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index b7e1bf1815..4d668dde11 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -1,14 +1,29 @@ +use super::super::interpreters::mips::witness::SCRATCH_SIZE; +use super::proof::{ProofInputs, WitnessColumns}; +use super::prover::prove; +use crate::pickles::verifier::verify; use crate::{ interpreters::mips::{ constraints as mips_constraints, interpreter, interpreter::InterpreterEnv, Instruction, }, pickles::{MAXIMUM_DEGREE_CONSTRAINTS, TOTAL_NUMBER_OF_CONSTRAINTS}, }; +use ark_ff::{One, Zero}; use interpreter::{ITypeInstruction, JTypeInstruction, RTypeInstruction}; +use kimchi::circuits::gate::CurrOrNext; +use kimchi::circuits::{domains::EvaluationDomains, expr::Expr}; +use kimchi_msm::columns::Column; use kimchi_msm::expr::E; +use log::debug; use mina_curves::pasta::Fp; +use mina_curves::pasta::Fq; +use mina_curves::pasta::Pallas; +use mina_curves::pasta::PallasParameters; +use mina_poseidon::constants::PlonkSpongeConstantsKimchi; +use mina_poseidon::sponge::{DefaultFqSponge, DefaultFrSponge}; +use o1_utils::tests::make_test_rng; +use poly_commitment::SRS; use strum::{EnumCount, IntoEnumIterator}; - #[test] fn test_regression_constraints_with_selectors() { let constraints = { @@ -56,3 +71,83 @@ fn test_regression_selectors_for_instructions() { .iter() .for_each(|c| assert!(c.degree(1, 0) == 2 || c.degree(1, 0) == 1)); } + +#[test] +fn test_small_circuit() { + /* domain: EvaluationDomains, + srs: &SRS, + inputs: ProofInputs, + constraints: &[E], + rng: &mut RNG, + */ + debug!("0"); + let domain = EvaluationDomains::::create(8).unwrap(); + let srs = SRS::create(8); + let proof_input = ProofInputs:: { + evaluations: WitnessColumns { + scratch: std::array::from_fn(|_| { + vec![ + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + ] + }), + instruction_counter: vec![ + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + Fq::one(), + ], + error: vec![ + -Fq::from((SCRATCH_SIZE + 1) as u64), + -Fq::from((SCRATCH_SIZE + 1) as u64), + -Fq::from((SCRATCH_SIZE + 1) as u64), + -Fq::from((SCRATCH_SIZE + 1) as u64), + -Fq::from((SCRATCH_SIZE + 1) as u64), + -Fq::from((SCRATCH_SIZE + 1) as u64), + -Fq::from((SCRATCH_SIZE + 1) as u64), + -Fq::from((SCRATCH_SIZE + 1) as u64), + ], + selector: vec![ + Fq::zero(), + Fq::zero(), + Fq::zero(), + Fq::zero(), + Fq::zero(), + Fq::zero(), + Fq::zero(), + Fq::zero(), + ], + }, + }; + debug!("1"); + let mut expr = Expr::literal(Fq::zero()); + for i in 0..SCRATCH_SIZE { + expr += Expr::cell(Column::Relation(i), CurrOrNext::Curr); + } + expr *= Expr::cell(Column::DynamicSelector(0), CurrOrNext::Curr); + let mut rng = make_test_rng(None); + type BaseSponge = DefaultFqSponge; + type ScalarSponge = DefaultFrSponge; + + let proof = prove::( + domain, + &srs, + proof_input, + &[expr.clone()], + &mut rng, + ) + .unwrap(); + let verif = + verify::(domain, &srs, &vec![expr.clone()], &proof); + assert!(verif, "fdsf"); +} From a0f4823fc8dcc54c0a56abf3a406060bb69d1f1c Mon Sep 17 00:00:00 2001 From: Matt 
Walker Date: Thu, 17 Oct 2024 12:45:54 -0400 Subject: [PATCH 16/30] o1vm/pickles: Fix typo for vanishing poly --- o1vm/src/pickles/column_env.rs | 4 ++-- o1vm/src/pickles/tests.rs | 10 +--------- o1vm/src/pickles/verifier.rs | 18 +++++++++++------- 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/o1vm/src/pickles/column_env.rs b/o1vm/src/pickles/column_env.rs index 1055a4dd42..c66f68160f 100644 --- a/o1vm/src/pickles/column_env.rs +++ b/o1vm/src/pickles/column_env.rs @@ -36,8 +36,8 @@ pub struct ColumnEnvironment<'a, F: FftField> { } pub fn get_all_columns() -> Vec { - let mut cols = Vec::::with_capacity(SCRATCH_SIZE + N_MIPS_SEL_COLS); - for i in 0..SCRATCH_SIZE { + let mut cols = Vec::::with_capacity(SCRATCH_SIZE + 2 + N_MIPS_SEL_COLS); + for i in 0..SCRATCH_SIZE + 2 { cols.push(Column::Relation(i)); } for i in 0..N_MIPS_SEL_COLS { diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index 4d668dde11..5752c9ed35 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -74,13 +74,6 @@ fn test_regression_selectors_for_instructions() { #[test] fn test_small_circuit() { - /* domain: EvaluationDomains, - srs: &SRS, - inputs: ProofInputs, - constraints: &[E], - rng: &mut RNG, - */ - debug!("0"); let domain = EvaluationDomains::::create(8).unwrap(); let srs = SRS::create(8); let proof_input = ProofInputs:: { @@ -129,9 +122,8 @@ fn test_small_circuit() { ], }, }; - debug!("1"); let mut expr = Expr::literal(Fq::zero()); - for i in 0..SCRATCH_SIZE { + for i in 0..SCRATCH_SIZE + 2 { expr += Expr::cell(Column::Relation(i), CurrOrNext::Curr); } expr *= Expr::cell(Column::DynamicSelector(0), CurrOrNext::Curr); diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 8bde1bb79d..5585fabf9a 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -4,6 +4,7 @@ use ark_ec::{AffineRepr, Group}; use ark_ff::{Field, One, PrimeField, Zero}; use rand::thread_rng; +use log::debug; use kimchi::{ circuits::{ @@ -148,8 +149,14 @@ where fr_sponge.absorb(&zeta_omega_evaluations.instruction_counter); fr_sponge.absorb(&zeta_evaluations.error); fr_sponge.absorb(&zeta_omega_evaluations.error); - fr_sponge.absorb_multiple(&zeta_evaluations.selector); - fr_sponge.absorb_multiple(&zeta_omega_evaluations.selector); + for (zeta_eval, zeta_omega_eval) in zeta_evaluations + .selector + .iter() + .zip(zeta_omega_evaluations.selector.iter()) + { + fr_sponge.absorb(zeta_eval); + fr_sponge.absorb(zeta_omega_eval); + } fr_sponge.absorb("ient_evaluations.zeta); fr_sponge.absorb("ient_evaluations.zeta_omega); @@ -193,9 +200,6 @@ where ) .unwrap_or_else(|_| panic!("Could not evaluate quotient polynomial at zeta_omega")); - fr_sponge.absorb("ient_eval_zeta); - fr_sponge.absorb("ient_eval_zeta_omega); - let v_chal = fr_sponge.challenge(); let v = v_chal.to_field(endo_r); let u_chal = fr_sponge.challenge(); @@ -245,7 +249,7 @@ where let batch = BatchEvaluationProof { sponge: fq_sponge_before_commitments_and_evaluations, - evaluations: evaluations, + evaluations, evaluation_points: vec![zeta, zeta_omega], polyscale: v, evalscale: u, @@ -257,6 +261,6 @@ where // Check the actual quotient works. 
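// The polynomial identity being enforced by the check that follows: the
// combined constraint expression C(X) must be divisible by the vanishing
// polynomial Z_H(X) = X^n - 1 of the d1 domain (n = domain.d1.size), i.e.
//
//   C(X) = t(X) * Z_H(X)   and hence   C(zeta) = t(zeta) * (zeta^n - 1).
//
// Here `quotient_eval_zeta` (renamed `numerator_zeta` in a later patch) is
// C(zeta) recomputed by the verifier from the absorbed column evaluations,
// while `quotient_evaluations.zeta` is the prover's claimed t(zeta).
// Agreement at the Fiat-Shamir point zeta implies, by Schwartz-Zippel, that
// the identity holds as polynomials with overwhelming probability.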
(quotient_eval_zeta - == quotient_evaluations.zeta * zeta.pow([domain.d1.size]) - G::ScalarField::one()) + == quotient_evaluations.zeta * (zeta.pow([domain.d1.size]) - G::ScalarField::one())) && OpeningProof::verify(srs, &group_map, &mut [batch], &mut thread_rng()) } From 944c7007d21fc02e85202d052d22b6518ba0b489 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Fri, 18 Oct 2024 09:52:55 -0400 Subject: [PATCH 17/30] o1vm/pickles: Format --- o1vm/src/pickles/tests.rs | 31 ++++++++++++++----------------- o1vm/src/pickles/verifier.rs | 1 - 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index 5752c9ed35..87570e8dc4 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -1,26 +1,23 @@ -use super::super::interpreters::mips::witness::SCRATCH_SIZE; -use super::proof::{ProofInputs, WitnessColumns}; -use super::prover::prove; -use crate::pickles::verifier::verify; +use super::{ + super::interpreters::mips::witness::SCRATCH_SIZE, + proof::{ProofInputs, WitnessColumns}, + prover::prove, +}; use crate::{ interpreters::mips::{ constraints as mips_constraints, interpreter, interpreter::InterpreterEnv, Instruction, }, - pickles::{MAXIMUM_DEGREE_CONSTRAINTS, TOTAL_NUMBER_OF_CONSTRAINTS}, + pickles::{verifier::verify, MAXIMUM_DEGREE_CONSTRAINTS, TOTAL_NUMBER_OF_CONSTRAINTS}, }; use ark_ff::{One, Zero}; use interpreter::{ITypeInstruction, JTypeInstruction, RTypeInstruction}; -use kimchi::circuits::gate::CurrOrNext; -use kimchi::circuits::{domains::EvaluationDomains, expr::Expr}; -use kimchi_msm::columns::Column; -use kimchi_msm::expr::E; -use log::debug; -use mina_curves::pasta::Fp; -use mina_curves::pasta::Fq; -use mina_curves::pasta::Pallas; -use mina_curves::pasta::PallasParameters; -use mina_poseidon::constants::PlonkSpongeConstantsKimchi; -use mina_poseidon::sponge::{DefaultFqSponge, DefaultFrSponge}; +use kimchi::circuits::{domains::EvaluationDomains, expr::Expr, gate::CurrOrNext}; +use kimchi_msm::{columns::Column, expr::E}; +use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters}; +use mina_poseidon::{ + constants::PlonkSpongeConstantsKimchi, + sponge::{DefaultFqSponge, DefaultFrSponge}, +}; use o1_utils::tests::make_test_rng; use poly_commitment::SRS; use strum::{EnumCount, IntoEnumIterator}; @@ -141,5 +138,5 @@ fn test_small_circuit() { .unwrap(); let verif = verify::(domain, &srs, &vec![expr.clone()], &proof); - assert!(verif, "fdsf"); + assert!(verif, "Verification fails"); } diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 5585fabf9a..3f0900b7f2 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -4,7 +4,6 @@ use ark_ec::{AffineRepr, Group}; use ark_ff::{Field, One, PrimeField, Zero}; use rand::thread_rng; -use log::debug; use kimchi::{ circuits::{ From 0366845b83ae03af859d3703bb4aecce72ff3afa Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Wed, 23 Oct 2024 15:05:40 -0400 Subject: [PATCH 18/30] o1vm/pickles: Fixup for tests --- o1vm/src/pickles/proof.rs | 4 +++ o1vm/src/pickles/prover.rs | 2 ++ o1vm/src/pickles/tests.rs | 60 +++++++++++++----------------------- o1vm/src/pickles/verifier.rs | 21 ++----------- 4 files changed, 30 insertions(+), 57 deletions(-) diff --git a/o1vm/src/pickles/proof.rs b/o1vm/src/pickles/proof.rs index 595e7e846a..d83cb155d3 100644 --- a/o1vm/src/pickles/proof.rs +++ b/o1vm/src/pickles/proof.rs @@ -3,6 +3,7 @@ use poly_commitment::{ipa::OpeningProof, PolyComm}; use crate::interpreters::mips::column::N_MIPS_SEL_COLS; 
+#[derive(Debug)] pub struct WitnessColumns { pub scratch: [G; crate::interpreters::mips::witness::SCRATCH_SIZE], pub instruction_counter: G, @@ -10,6 +11,7 @@ pub struct WitnessColumns { pub selector: S, } +#[derive(Debug)] pub struct ProofInputs { pub evaluations: WitnessColumns, Vec>, } @@ -27,7 +29,9 @@ impl ProofInputs { } } + // FIXME: should we blind the commitment? +#[derive(Debug)] pub struct Proof { pub commitments: WitnessColumns, [PolyComm; N_MIPS_SEL_COLS]>, pub zeta_evaluations: WitnessColumns, diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index 031b9f97c7..52b993531c 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -247,6 +247,7 @@ where // We compute the polynomial t(X) by dividing the constraints polynomial // by the vanishing polynomial, i.e. Z_H(X). let (quotient, rem) = expr_evaluation_interpolated + // FIXME: Should this be d8? .divide_by_vanishing_poly(domain.d1) .unwrap_or_else(fail_final_q_division); // As the constraints must be verified on H, the rest of the division @@ -295,6 +296,7 @@ where <::Group as Group>::ScalarField, [<::Group as Group>::ScalarField; N_MIPS_SEL_COLS], > = evals(&zeta); + // All evaluations at ζω let zeta_omega_evaluations: WitnessColumns< <::Group as Group>::ScalarField, diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index 87570e8dc4..03aef0e6c9 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -69,6 +69,14 @@ fn test_regression_selectors_for_instructions() { .for_each(|c| assert!(c.degree(1, 0) == 2 || c.degree(1, 0) == 1)); } +fn zero_to_n_minus_one(n: usize) -> Vec { + let mut ret = Vec::with_capacity(n); + for i in 0..n { + ret.push(Fq::from(i as u64)) + } + ret +} + #[test] fn test_small_circuit() { let domain = EvaluationDomains::::create(8).unwrap(); @@ -76,54 +84,27 @@ fn test_small_circuit() { let proof_input = ProofInputs:: { evaluations: WitnessColumns { scratch: std::array::from_fn(|_| { - vec![ - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - ] + zero_to_n_minus_one(8) }), - instruction_counter: vec![ - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - Fq::one(), - ], + instruction_counter: zero_to_n_minus_one(8).into_iter().map(|x| x + Fq::one()).collect(), error: vec![ - -Fq::from((SCRATCH_SIZE + 1) as u64), - -Fq::from((SCRATCH_SIZE + 1) as u64), - -Fq::from((SCRATCH_SIZE + 1) as u64), - -Fq::from((SCRATCH_SIZE + 1) as u64), - -Fq::from((SCRATCH_SIZE + 1) as u64), - -Fq::from((SCRATCH_SIZE + 1) as u64), - -Fq::from((SCRATCH_SIZE + 1) as u64), - -Fq::from((SCRATCH_SIZE + 1) as u64), - ], - selector: vec![ - Fq::zero(), - Fq::zero(), - Fq::zero(), - Fq::zero(), - Fq::zero(), - Fq::zero(), - Fq::zero(), - Fq::zero(), + -Fq::from((0 * SCRATCH_SIZE + 1) as u64), + -Fq::from((1 * SCRATCH_SIZE + 2) as u64), + -Fq::from((2 * SCRATCH_SIZE + 3) as u64), + -Fq::from((3 * SCRATCH_SIZE + 4) as u64), + -Fq::from((4 * SCRATCH_SIZE + 5) as u64), + -Fq::from((5 * SCRATCH_SIZE + 6) as u64), + -Fq::from((6 * SCRATCH_SIZE + 7) as u64), + -Fq::from((7 * SCRATCH_SIZE + 8) as u64), ], + selector: zero_to_n_minus_one(8), }, }; let mut expr = Expr::literal(Fq::zero()); for i in 0..SCRATCH_SIZE + 2 { expr += Expr::cell(Column::Relation(i), CurrOrNext::Curr); } - expr *= Expr::cell(Column::DynamicSelector(0), CurrOrNext::Curr); + /* expr *= Expr::cell(Column::DynamicSelector(0), CurrOrNext::Curr); */ let mut rng = make_test_rng(None); type BaseSponge = 
DefaultFqSponge; type ScalarSponge = DefaultFrSponge; @@ -136,6 +117,7 @@ fn test_small_circuit() { &mut rng, ) .unwrap(); + let verif = verify::(domain, &srs, &vec![expr.clone()], &proof); assert!(verif, "Verification fails"); diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 3f0900b7f2..5ebc6b9aca 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -179,7 +179,7 @@ where let combined_expr = Expr::combine_constraints(0..(constraints.len() as u32), constraints.clone()); - let quotient_eval_zeta = PolishToken::evaluate( + let numerator_zeta = PolishToken::evaluate( combined_expr.to_polish().as_slice(), domain.d1, zeta, @@ -189,16 +189,6 @@ where ) .unwrap_or_else(|_| panic!("Could not evaluate quotient polynomial at zeta")); - let quotient_eval_zeta_omega = PolishToken::evaluate( - combined_expr.to_polish().as_slice(), - domain.d1, - zeta_omega, - &column_eval, - &constants, - &challenges, - ) - .unwrap_or_else(|_| panic!("Could not evaluate quotient polynomial at zeta_omega")); - let v_chal = fr_sponge.challenge(); let v = v_chal.to_field(endo_r); let u_chal = fr_sponge.challenge(); @@ -229,11 +219,6 @@ where }) }); - evaluations.push(Evaluation { - commitment: quotient_commitment.clone(), - evaluations: vec![vec![quotient_eval_zeta], vec![quotient_eval_zeta_omega]], - }); - evaluations }; @@ -259,7 +244,7 @@ where let group_map = G::Map::setup(); // Check the actual quotient works. - (quotient_eval_zeta - == quotient_evaluations.zeta * (zeta.pow([domain.d1.size]) - G::ScalarField::one())) + (quotient_evaluations.zeta + == numerator_zeta / (zeta.pow([domain.d1.size]) - G::ScalarField::one())) && OpeningProof::verify(srs, &group_map, &mut [batch], &mut thread_rng()) } From 897a501827fafc76e790e157339f162e2760952e Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Thu, 24 Oct 2024 10:02:33 +0200 Subject: [PATCH 19/30] o1vm/pickles/test: simplify zero_to_n_minus_1 func --- o1vm/src/pickles/tests.rs | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index 03aef0e6c9..b79ca87328 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -70,23 +70,19 @@ fn test_regression_selectors_for_instructions() { } fn zero_to_n_minus_one(n: usize) -> Vec { - let mut ret = Vec::with_capacity(n); - for i in 0..n { - ret.push(Fq::from(i as u64)) - } - ret + (0..n).map(|i| Fq::from((i) as u32)).collect() } - #[test] fn test_small_circuit() { let domain = EvaluationDomains::::create(8).unwrap(); let srs = SRS::create(8); let proof_input = ProofInputs:: { evaluations: WitnessColumns { - scratch: std::array::from_fn(|_| { - zero_to_n_minus_one(8) - }), - instruction_counter: zero_to_n_minus_one(8).into_iter().map(|x| x + Fq::one()).collect(), + scratch: std::array::from_fn(|_| zero_to_n_minus_one(8)), + instruction_counter: zero_to_n_minus_one(8) + .into_iter() + .map(|x| x + Fq::one()) + .collect(), error: vec![ -Fq::from((0 * SCRATCH_SIZE + 1) as u64), -Fq::from((1 * SCRATCH_SIZE + 2) as u64), @@ -117,7 +113,7 @@ fn test_small_circuit() { &mut rng, ) .unwrap(); - + let verif = verify::(domain, &srs, &vec![expr.clone()], &proof); assert!(verif, "Verification fails"); From d61efb3ca6ee8a512453500708686156c8c986d6 Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Thu, 24 Oct 2024 10:04:52 +0200 Subject: [PATCH 20/30] o1vm/pickles/test: simplify error creation --- o1vm/src/pickles/tests.rs | 13 +++---------- 1 file changed, 3 insertions(+), 10 
deletions(-) diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index b79ca87328..359d603ca8 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -83,16 +83,9 @@ fn test_small_circuit() { .into_iter() .map(|x| x + Fq::one()) .collect(), - error: vec![ - -Fq::from((0 * SCRATCH_SIZE + 1) as u64), - -Fq::from((1 * SCRATCH_SIZE + 2) as u64), - -Fq::from((2 * SCRATCH_SIZE + 3) as u64), - -Fq::from((3 * SCRATCH_SIZE + 4) as u64), - -Fq::from((4 * SCRATCH_SIZE + 5) as u64), - -Fq::from((5 * SCRATCH_SIZE + 6) as u64), - -Fq::from((6 * SCRATCH_SIZE + 7) as u64), - -Fq::from((7 * SCRATCH_SIZE + 8) as u64), - ], + error: (0..8) + .map(|i| Fq::from((i * SCRATCH_SIZE + (i + 1)) as u32)) + .collect(), selector: zero_to_n_minus_one(8), }, }; From 05260c90a3c5fef1f4f75007a88ad6821e976d22 Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Thu, 24 Oct 2024 10:07:02 +0200 Subject: [PATCH 21/30] o1vm/pickles: fmt --- o1vm/src/pickles/proof.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/o1vm/src/pickles/proof.rs b/o1vm/src/pickles/proof.rs index d83cb155d3..55893ab2da 100644 --- a/o1vm/src/pickles/proof.rs +++ b/o1vm/src/pickles/proof.rs @@ -29,7 +29,6 @@ impl ProofInputs { } } - // FIXME: should we blind the commitment? #[derive(Debug)] pub struct Proof { From 1c9014c8a511ea03b34daedf984ca741441298b4 Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Thu, 24 Oct 2024 11:48:51 +0200 Subject: [PATCH 22/30] o1vm/pickles/test : fix it --- o1vm/src/pickles/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index 359d603ca8..00907258d3 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -70,7 +70,7 @@ fn test_regression_selectors_for_instructions() { } fn zero_to_n_minus_one(n: usize) -> Vec { - (0..n).map(|i| Fq::from((i) as u32)).collect() + (0..n).map(|i| Fq::from((i) as u64)).collect() } #[test] fn test_small_circuit() { @@ -84,7 +84,7 @@ fn test_small_circuit() { .map(|x| x + Fq::one()) .collect(), error: (0..8) - .map(|i| Fq::from((i * SCRATCH_SIZE + (i + 1)) as u32)) + .map(|i| -Fq::from((i * SCRATCH_SIZE + (i + 1)) as u64)) .collect(), selector: zero_to_n_minus_one(8), }, From 0e84168f4750fbc99138634c15173c18086fb4fd Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Thu, 24 Oct 2024 12:23:30 +0200 Subject: [PATCH 23/30] o1vm/pickles/ add verifier to the main --- o1vm/src/pickles/main.rs | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/o1vm/src/pickles/main.rs b/o1vm/src/pickles/main.rs index dec89c4d6a..b1ce33a603 100644 --- a/o1vm/src/pickles/main.rs +++ b/o1vm/src/pickles/main.rs @@ -17,10 +17,7 @@ use o1vm::{ witness::{self as mips_witness}, ITypeInstruction, Instruction, RTypeInstruction, }, - pickles::{ - proof::{Proof, ProofInputs}, - prover, - }, + pickles::{proof::ProofInputs, prover, verifier}, preimage_oracle::PreImageOracle, }; use poly_commitment::{ipa::SRS, SRS as _}; @@ -140,19 +137,26 @@ pub fn main() -> ExitCode { // FIXME let start_iteration = Instant::now(); debug!("Limit of {DOMAIN_SIZE} reached. 
We make a proof, verify it (for testing) and start with a new chunk"); - let _proof: Result, prover::ProverError> = - prover::prove::< - Vesta, - DefaultFqSponge, - DefaultFrSponge, - _, - >(domain_fp, &srs, curr_proof_inputs, &constraints, &mut rng); + let proof = prover::prove::< + Vesta, + DefaultFqSponge, + DefaultFrSponge, + _, + >(domain_fp, &srs, curr_proof_inputs, &constraints, &mut rng) + .unwrap(); // FIXME: check that the proof is correct. This is for testing purposes. // Leaving like this for now. debug!( "Proof generated in {elapsed} μs", elapsed = start_iteration.elapsed().as_micros() ); + let verif = verifier::verify::< + Vesta, + DefaultFqSponge, + DefaultFrSponge, + >(domain_fp, &srs, &constraints, &proof); + assert!(verif); + curr_proof_inputs = ProofInputs::new(DOMAIN_SIZE); } } From 940d2b29d60a8074d05006a7b1f01456083dad19 Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Mon, 28 Oct 2024 18:03:27 +0100 Subject: [PATCH 24/30] o1vm/pickles/prover: use commit with fixed blinder --- o1vm/src/pickles/prover.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index 52b993531c..5800babbb8 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -129,7 +129,15 @@ where } = &polys; // Note: we do not blind. We might want in the near future in case we // have a column with only zeroes. - let comm = |poly: &DensePolynomial| srs.commit_non_hiding(poly, num_chunks); + let comm = |poly: &DensePolynomial| { + srs.commit_custom( + poly, + num_chunks, + &PolyComm::new(vec![G::ScalarField::one()]), + ) + .unwrap() + .commitment + }; // Doing in parallel let scratch = scratch.par_iter().map(comm).collect::>(); let selector = selector.par_iter().map(comm).collect::>(); @@ -260,8 +268,14 @@ where quotient }; - let quotient_commitment = - srs.commit_non_hiding("ient_poly, DEGREE_QUOTIENT_POLYNOMIAL as usize); + let quotient_commitment = srs + .commit_custom( + "ient_poly, + DEGREE_QUOTIENT_POLYNOMIAL as usize, + &PolyComm::new(vec![G::ScalarField::one(); 7]), + ) + .unwrap() + .commitment; absorb_commitment(&mut fq_sponge, "ient_commitment); //////////////////////////////////////////////////////////////////////////// From f24ab525a0144db86640f3353ae1e5d82a649eaf Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Mon, 28 Oct 2024 19:06:18 +0100 Subject: [PATCH 25/30] o1vm/pickles: handle t correctly --- o1vm/run-code.sh | 1 + o1vm/src/pickles/prover.rs | 19 +++++++++++++++---- o1vm/src/pickles/verifier.rs | 8 +++++++- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/o1vm/run-code.sh b/o1vm/run-code.sh index 8753acf2c5..fba71d8d0c 100755 --- a/o1vm/run-code.sh +++ b/o1vm/run-code.sh @@ -16,4 +16,5 @@ else ./run-op-program.sh ./run-cannon.sh fi + ./run-vm.sh diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index 5800babbb8..fe5fcf74cf 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -272,7 +272,10 @@ where .commit_custom( "ient_poly, DEGREE_QUOTIENT_POLYNOMIAL as usize, - &PolyComm::new(vec![G::ScalarField::one(); 7]), + &PolyComm::new(vec![ + G::ScalarField::one(); + DEGREE_QUOTIENT_POLYNOMIAL as usize + ]), ) .unwrap() .commitment; @@ -359,19 +362,27 @@ where polynomials.push(polys.instruction_counter); polynomials.push(polys.error); polynomials.extend(polys.selector); - polynomials.push(quotient_poly); // Preparing the polynomials for the opening proof - let polynomials: Vec<_> = polynomials + let mut 
polynomials: Vec<_> = polynomials .iter() .map(|poly| { ( DensePolynomialOrEvaluations::DensePolynomial(poly), // We do not have any blinder, therefore we set to 0. - PolyComm::new(vec![G::ScalarField::zero()]), + PolyComm::new(vec![G::ScalarField::one()]), ) }) .collect(); + // we handle the quotient separately bc of the nb of blinders = num chunks + polynomials.push(( + DensePolynomialOrEvaluations::DensePolynomial("ient_poly), + // We do not have any blinder, therefore we set to 0. + PolyComm::new(vec![ + G::ScalarField::one(); + DEGREE_QUOTIENT_POLYNOMIAL as usize + ]), + )); // poly scale let v_chal = fr_sponge.challenge(); diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 5ebc6b9aca..f7f56c8bfa 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -218,7 +218,13 @@ where ], }) }); - + evaluations.push(Evaluation { + commitment: proof.quotient_commitment.clone(), + evaluations: vec![ + vec![quotient_evaluations.zeta], + vec![quotient_evaluations.zeta_omega], + ], + }); evaluations }; From a494251ae5c34cb37c6a71e13b2b891d5e603339 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Mon, 28 Oct 2024 14:17:45 -0400 Subject: [PATCH 26/30] o1vm/pickles: Actually commit to the quotient_poly --- o1vm/src/pickles/proof.rs | 2 -- o1vm/src/pickles/prover.rs | 23 ++++++++++------------- o1vm/src/pickles/tests.rs | 1 - o1vm/src/pickles/verifier.rs | 1 - 4 files changed, 10 insertions(+), 17 deletions(-) diff --git a/o1vm/src/pickles/proof.rs b/o1vm/src/pickles/proof.rs index 55893ab2da..0a493a364f 100644 --- a/o1vm/src/pickles/proof.rs +++ b/o1vm/src/pickles/proof.rs @@ -11,7 +11,6 @@ pub struct WitnessColumns { pub selector: S, } -#[derive(Debug)] pub struct ProofInputs { pub evaluations: WitnessColumns, Vec>, } @@ -30,7 +29,6 @@ impl ProofInputs { } // FIXME: should we blind the commitment? -#[derive(Debug)] pub struct Proof { pub commitments: WitnessColumns, [PolyComm; N_MIPS_SEL_COLS]>, pub zeta_evaluations: WitnessColumns, diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index fe5fcf74cf..f0d7ddeb55 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -127,8 +127,7 @@ where error, selector, } = &polys; - // Note: we do not blind. We might want in the near future in case we - // have a column with only zeroes. + // Note: We add a constant blinder in case we have a column with only zeroes. let comm = |poly: &DensePolynomial| { srs.commit_custom( poly, @@ -277,9 +276,8 @@ where DEGREE_QUOTIENT_POLYNOMIAL as usize ]), ) - .unwrap() - .commitment; - absorb_commitment(&mut fq_sponge, "ient_commitment); + .unwrap(); + absorb_commitment(&mut fq_sponge, "ient_commitment.commitment); //////////////////////////////////////////////////////////////////////////// // Round 3: Evaluations at ζ and ζω @@ -369,19 +367,18 @@ where .map(|poly| { ( DensePolynomialOrEvaluations::DensePolynomial(poly), - // We do not have any blinder, therefore we set to 0. + // We do not have any blinder, therefore we set to 1, + // since otherwise we might commit to the zero polynomial + // and that would be bad! PolyComm::new(vec![G::ScalarField::one()]), ) }) .collect(); - // we handle the quotient separately bc of the nb of blinders = num chunks + // we handle the quotient separately because the number of blinders = + // number of chunks, which is different for just the quotient polynomial. polynomials.push(( DensePolynomialOrEvaluations::DensePolynomial("ient_poly), - // We do not have any blinder, therefore we set to 0. 
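// Background on the constant blinder appearing in these hunks: the IPA
// commitment is Pedersen-style, roughly com(f, r) = sum_i f_i * G_i + r * H
// (stated here as an assumption about the poly-commitment SRS, not taken
// from this patch). With r = 0, an all-zero column or quotient chunk would
// commit to the identity point -- the "commit to the zero polynomial"
// problem a later comment calls out as bad. Fixing the blinder to the public
// constant one (one scalar per chunk for the quotient) keeps every
// commitment a well-defined, non-identity point, and the same constant
// blinders are passed to the opening proof so the IPA opening still
// verifies.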
- PolyComm::new(vec![ - G::ScalarField::one(); - DEGREE_QUOTIENT_POLYNOMIAL as usize - ]), + quotient_commitment.blinders, )); // poly scale @@ -410,7 +407,7 @@ where commitments, zeta_evaluations, zeta_omega_evaluations, - quotient_commitment, + quotient_commitment: quotient_commitment.commitment, quotient_evaluations, opening_proof, }) diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index 00907258d3..d226dc6162 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -93,7 +93,6 @@ fn test_small_circuit() { for i in 0..SCRATCH_SIZE + 2 { expr += Expr::cell(Column::Relation(i), CurrOrNext::Curr); } - /* expr *= Expr::cell(Column::DynamicSelector(0), CurrOrNext::Curr); */ let mut rng = make_test_rng(None); type BaseSponge = DefaultFqSponge; type ScalarSponge = DefaultFrSponge; diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index f7f56c8bfa..423cb7fbe5 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -1,4 +1,3 @@ -#![allow(clippy::type_complexity)] #![allow(clippy::boxed_local)] use ark_ec::{AffineRepr, Group}; From 772fee6566b9a0792769b3266e540a027c5927fa Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Mon, 28 Oct 2024 14:32:59 -0400 Subject: [PATCH 27/30] o1vm/pickles: Add debugging duration in tests --- o1vm/src/pickles/tests.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index d226dc6162..b9cb9e4293 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -1,3 +1,5 @@ +use std::time::Instant; + use super::{ super::interpreters::mips::witness::SCRATCH_SIZE, proof::{ProofInputs, WitnessColumns}, @@ -13,6 +15,7 @@ use ark_ff::{One, Zero}; use interpreter::{ITypeInstruction, JTypeInstruction, RTypeInstruction}; use kimchi::circuits::{domains::EvaluationDomains, expr::Expr, gate::CurrOrNext}; use kimchi_msm::{columns::Column, expr::E}; +use log::debug; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, @@ -89,11 +92,12 @@ fn test_small_circuit() { selector: zero_to_n_minus_one(8), }, }; - let mut expr = Expr::literal(Fq::zero()); + let mut expr = Expr::zero(); for i in 0..SCRATCH_SIZE + 2 { expr += Expr::cell(Column::Relation(i), CurrOrNext::Curr); } let mut rng = make_test_rng(None); + type BaseSponge = DefaultFqSponge; type ScalarSponge = DefaultFrSponge; @@ -106,7 +110,10 @@ fn test_small_circuit() { ) .unwrap(); + let instant_before_verification = Instant::now(); let verif = verify::(domain, &srs, &vec![expr.clone()], &proof); + let instant_after_verification = Instant::now(); + debug!("Verification took: {}", (instant_after_verification - instant_before_verification).as_millis()); assert!(verif, "Verification fails"); } From 824fd02e6620f4c01e23085f55785e941b79f0d8 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Mon, 28 Oct 2024 14:36:05 -0400 Subject: [PATCH 28/30] o1vm/pickles: Format --- o1vm/src/pickles/prover.rs | 2 +- o1vm/src/pickles/tests.rs | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index f0d7ddeb55..6eee0ca882 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -367,7 +367,7 @@ where .map(|poly| { ( DensePolynomialOrEvaluations::DensePolynomial(poly), - // We do not have any blinder, therefore we set to 1, + // We do not have any blinder, therefore we set to 1, // since otherwise we might commit to the zero 
polynomial // and that would be bad! PolyComm::new(vec![G::ScalarField::one()]), diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index b9cb9e4293..958759815f 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -114,6 +114,9 @@ fn test_small_circuit() { let verif = verify::(domain, &srs, &vec![expr.clone()], &proof); let instant_after_verification = Instant::now(); - debug!("Verification took: {}", (instant_after_verification - instant_before_verification).as_millis()); + debug!( + "Verification took: {}", + (instant_after_verification - instant_before_verification).as_millis() + ); assert!(verif, "Verification fails"); } From 7201003ef48c7450e507077add58ea1cc80754be Mon Sep 17 00:00:00 2001 From: marcbeunardeau88 Date: Tue, 29 Oct 2024 17:09:00 +0100 Subject: [PATCH 29/30] o1vm/pickles: fix handling T's chunk --- o1vm/src/pickles/proof.rs | 2 +- o1vm/src/pickles/prover.rs | 26 +++++++++++++++++++++----- o1vm/src/pickles/verifier.rs | 26 ++++++++++++++++++++------ 3 files changed, 42 insertions(+), 12 deletions(-) diff --git a/o1vm/src/pickles/proof.rs b/o1vm/src/pickles/proof.rs index 0a493a364f..23c02dbb45 100644 --- a/o1vm/src/pickles/proof.rs +++ b/o1vm/src/pickles/proof.rs @@ -34,7 +34,7 @@ pub struct Proof { pub zeta_evaluations: WitnessColumns, pub zeta_omega_evaluations: WitnessColumns, pub quotient_commitment: PolyComm, - pub quotient_evaluations: PointEvaluations, + pub quotient_evaluations: PointEvaluations>, /// IPA opening proof pub opening_proof: OpeningProof, } diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index 6eee0ca882..30b79c48db 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -16,6 +16,7 @@ use kimchi::{ }; use log::debug; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; +use o1_utils::ExtendedDensePolynomial; use poly_commitment::{ commitment::{absorb_commitment, PolyComm}, ipa::{DensePolynomialOrEvaluations, OpeningProof, SRS}, @@ -318,9 +319,19 @@ where [<::Group as Group>::ScalarField; N_MIPS_SEL_COLS], > = evals(&zeta_omega); + let chunked_quotient = quotient_poly + .to_chunked_polynomial(DEGREE_QUOTIENT_POLYNOMIAL as usize, domain.d1.size as usize); let quotient_evaluations = PointEvaluations { - zeta: quotient_poly.evaluate(&zeta), - zeta_omega: quotient_poly.evaluate(&zeta_omega), + zeta: chunked_quotient + .polys + .iter() + .map(|p| p.evaluate(&zeta)) + .collect::>(), + zeta_omega: chunked_quotient + .polys + .iter() + .map(|p| p.evaluate(&zeta_omega)) + .collect(), }; // Absorbing evaluations with a sponge for the other field @@ -349,9 +360,14 @@ where fr_sponge.absorb(zeta_eval); fr_sponge.absorb(zeta_omega_eval); } - fr_sponge.absorb("ient_evaluations.zeta); - fr_sponge.absorb("ient_evaluations.zeta_omega); - + for (quotient_zeta_eval, quotient_zeta_omega_eval) in quotient_evaluations + .zeta + .iter() + .zip(quotient_evaluations.zeta_omega.iter()) + { + fr_sponge.absorb(quotient_zeta_eval); + fr_sponge.absorb(quotient_zeta_omega_eval); + } //////////////////////////////////////////////////////////////////////////// // Round 4: Opening proof w/o linearization polynomial //////////////////////////////////////////////////////////////////////////// diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index 423cb7fbe5..df39810860 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -2,6 +2,7 @@ use ark_ec::{AffineRepr, Group}; use ark_ff::{Field, One, PrimeField, Zero}; +use itertools::Itertools; use rand::thread_rng; 
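// How the verifier recombines the chunked quotient evaluations introduced in
// the patch above: t(X) can have degree up to DEGREE_QUOTIENT_POLYNOMIAL * n,
// so it is committed and evaluated chunk by chunk, t_0, ..., t_{k-1}, each of
// degree < n, with
//
//   t(X) = sum_i X^{n*i} * t_i(X)   =>   t(z) = sum_i (z^n)^i * t_i(z),
//
// which is what the fold over `quotient_evaluations.zeta` computes before
// comparing against numerator_zeta / (zeta^n - 1). A minimal sketch of that
// recombination (the helper name is illustrative, not part of the patch):

use ark_ff::Field;

fn recombine_chunk_evals<F: Field>(chunk_evals: &[F], zeta_to_n: F) -> F {
    let mut acc = F::zero();
    let mut pow = F::one();
    for &eval in chunk_evals {
        // Add t_i(zeta) * (zeta^n)^i for chunk i, then step the power.
        acc += pow * eval;
        pow *= zeta_to_n;
    }
    acc
}

// e.g. recombine_chunk_evals(&quotient_evaluations.zeta, zeta.pow([domain.d1.size]))
// reproduces the `quotient_zeta` value computed by the fold above.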
use kimchi::{ @@ -155,8 +156,14 @@ where fr_sponge.absorb(zeta_eval); fr_sponge.absorb(zeta_omega_eval); } - fr_sponge.absorb("ient_evaluations.zeta); - fr_sponge.absorb("ient_evaluations.zeta_omega); + for (quotient_zeta_eval, quotient_zeta_omega_eval) in quotient_evaluations + .zeta + .iter() + .zip(quotient_evaluations.zeta_omega.iter()) + { + fr_sponge.absorb(quotient_zeta_eval); + fr_sponge.absorb(quotient_zeta_omega_eval); + } // FIXME: use a proper Challenge structure let challenges = BerkeleyChallenges { @@ -220,8 +227,8 @@ where evaluations.push(Evaluation { commitment: proof.quotient_commitment.clone(), evaluations: vec![ - vec![quotient_evaluations.zeta], - vec![quotient_evaluations.zeta_omega], + quotient_evaluations.zeta.clone(), + quotient_evaluations.zeta_omega.clone(), ], }); evaluations @@ -249,7 +256,14 @@ where let group_map = G::Map::setup(); // Check the actual quotient works. - (quotient_evaluations.zeta - == numerator_zeta / (zeta.pow([domain.d1.size]) - G::ScalarField::one())) + let (quotient_zeta, _) = quotient_evaluations.zeta.iter().fold( + (G::ScalarField::zero(), G::ScalarField::one()), + |(res, zeta_i_n), chunk| { + let res = res + zeta_i_n * chunk; + let zeta_i_n = zeta_i_n * zeta.pow([domain.d1.size]); + (res, zeta_i_n) + }, + ); + (quotient_zeta == numerator_zeta / (zeta.pow([domain.d1.size]) - G::ScalarField::one())) && OpeningProof::verify(srs, &group_map, &mut [batch], &mut thread_rng()) } From 0d8e46d5f6e868b422f808f4494712b98b90d610 Mon Sep 17 00:00:00 2001 From: Matt Walker Date: Wed, 30 Oct 2024 00:59:22 -0400 Subject: [PATCH 30/30] o1vm/pickles: Remove wrong comments and cleanup for CI --- o1vm/src/pickles/prover.rs | 6 ++---- o1vm/src/pickles/tests.rs | 2 +- o1vm/src/pickles/verifier.rs | 1 - 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/o1vm/src/pickles/prover.rs b/o1vm/src/pickles/prover.rs index 30b79c48db..82c34896bd 100644 --- a/o1vm/src/pickles/prover.rs +++ b/o1vm/src/pickles/prover.rs @@ -128,7 +128,7 @@ where error, selector, } = &polys; - // Note: We add a constant blinder in case we have a column with only zeroes. + let comm = |poly: &DensePolynomial| { srs.commit_custom( poly, @@ -383,9 +383,7 @@ where .map(|poly| { ( DensePolynomialOrEvaluations::DensePolynomial(poly), - // We do not have any blinder, therefore we set to 1, - // since otherwise we might commit to the zero polynomial - // and that would be bad! + // We do not have any blinder, therefore we set to 1. PolyComm::new(vec![G::ScalarField::one()]), ) }) diff --git a/o1vm/src/pickles/tests.rs b/o1vm/src/pickles/tests.rs index 958759815f..422f10bc13 100644 --- a/o1vm/src/pickles/tests.rs +++ b/o1vm/src/pickles/tests.rs @@ -115,7 +115,7 @@ fn test_small_circuit() { verify::(domain, &srs, &vec![expr.clone()], &proof); let instant_after_verification = Instant::now(); debug!( - "Verification took: {}", + "Verification took: {} ms", (instant_after_verification - instant_before_verification).as_millis() ); assert!(verif, "Verification fails"); diff --git a/o1vm/src/pickles/verifier.rs b/o1vm/src/pickles/verifier.rs index df39810860..0895d0e673 100644 --- a/o1vm/src/pickles/verifier.rs +++ b/o1vm/src/pickles/verifier.rs @@ -2,7 +2,6 @@ use ark_ec::{AffineRepr, Group}; use ark_ff::{Field, One, PrimeField, Zero}; -use itertools::Itertools; use rand::thread_rng; use kimchi::{