453 changes: 318 additions & 135 deletions Cargo.lock

Large diffs are not rendered by default.

7 changes: 4 additions & 3 deletions Cargo.toml
@@ -17,9 +17,10 @@ repository = "https://github.com/scroll-tech/scroll"
version = "4.7.1"

[workspace.dependencies]
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
# with openvm 1.4.2
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2e8e29f" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2e8e29f" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2e8e29f" }

sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
19 changes: 2 additions & 17 deletions crates/libzkp/src/lib.rs
@@ -13,22 +13,8 @@ use serde_json::value::RawValue;
use std::{collections::HashMap, path::Path, sync::OnceLock};
use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};

pub(crate) fn witness_use_legacy_mode(fork_name: &str) -> eyre::Result<bool> {
ADDITIONAL_FEATURES
.get()
.and_then(|features| features.get(fork_name))
.map(|cfg| cfg.legacy_witness_encoding)
.ok_or_else(|| {
eyre::eyre!(
"can not find features setting for unrecognized fork {}",
fork_name
)
})
}

#[derive(Debug, Default, Clone)]
struct FeatureOptions {
legacy_witness_encoding: bool,
for_openvm_13_prover: bool,
}

@@ -41,11 +27,10 @@ impl FeatureOptions {
for feat_s in feats.split(':') {
match feat_s.trim().to_lowercase().as_str() {
"legacy_witness" => {
tracing::info!("set witness encoding for legacy mode");
ret.legacy_witness_encoding = true;
tracing::warn!("legacy witness is no longer supported");
}
"openvm_13" => {
tracing::info!("set prover should use openvm 13");
tracing::warn!("set prover should use openvm 13");
ret.for_openvm_13_prover = true;
}
s => tracing::warn!("unrecognized dynamic feature: {s}"),
11 changes: 2 additions & 9 deletions crates/libzkp/src/tasks/batch.rs
@@ -4,13 +4,11 @@ use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness,
ReferenceHeader, N_BLOB_BYTES,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, ReferenceHeader, N_BLOB_BYTES,
},
chunk::ChunkInfo,
public_inputs::{ForkName, MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
version::{Codec, Domain, STFVersion},
};

@@ -118,12 +116,7 @@ pub struct BatchProvingTask {
impl BatchProvingTask {
pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, BatchInfo, B256)> {
let (witness, metadata, batch_pi_hash) = self.precheck()?;
let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
let legacy_witness = LegacyBatchWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};
let serialized_witness = super::encode_task_to_witness(&witness)?;

let proving_task = ProvingTask {
identifier: self.batch_header.batch_hash().to_string(),
10 changes: 2 additions & 8 deletions crates/libzkp/src/tasks/bundle.rs
@@ -1,10 +1,9 @@
use eyre::Result;
use sbv_primitives::B256;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness, LegacyBundleWitness},
bundle::{BundleInfo, BundleWitness},
public_inputs::{MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};

use crate::proofs::BatchProof;
@@ -27,12 +26,7 @@ pub struct BundleProvingTask {
impl BundleProvingTask {
pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, BundleInfo, B256)> {
let (witness, bundle_info, bundle_pi_hash) = self.precheck()?;
let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
let legacy = LegacyBundleWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};
let serialized_witness = super::encode_task_to_witness(&witness)?;

let proving_task = ProvingTask {
identifier: self.identifier(),
10 changes: 2 additions & 8 deletions crates/libzkp/src/tasks/chunk.rs
@@ -2,10 +2,9 @@ use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{types::consensus::BlockHeader, B256};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs},
chunk::{execute, ChunkInfo, ChunkWitness, ValidiumInputs},
public_inputs::{MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};

use super::chunk_interpreter::*;
@@ -117,12 +116,7 @@ impl ChunkProvingTask {

pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, ChunkInfo, B256)> {
let (witness, chunk_info, chunk_pi_hash) = self.precheck()?;
let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
let legacy_witness = LegacyChunkWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};
let serialized_witness = super::encode_task_to_witness(&witness)?;

let proving_task = ProvingTask {
identifier: self.identifier(),
3 changes: 2 additions & 1 deletion crates/prover-bin/Cargo.toml
@@ -9,7 +9,7 @@ edition.workspace = true
scroll-zkvm-types.workspace = true
scroll-zkvm-prover.workspace = true
libzkp = { path = "../libzkp"}
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "05648db" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "22ad34e" }
serde.workspace = true
serde_json.workspace = true
once_cell.workspace =true
@@ -34,6 +34,7 @@ clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = { version = "2.5.4", features = ["serde",] }
serde_bytes = "0.11.15"
bincode = { version = "2.0", features = ["serde",] }

[features]
default = []
88 changes: 88 additions & 0 deletions crates/prover-bin/src/dumper.rs
@@ -0,0 +1,88 @@
use async_trait::async_trait;
use libzkp::ProvingTaskExt;
use scroll_proving_sdk::prover::{
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
},
ProvingService,
};
use scroll_zkvm_types::ProvingTask;

#[derive(Default)]
pub struct Dumper {
#[allow(dead_code)]
pub target_path: String,
pub json_mode: bool,
}

impl Dumper {
fn dump(&self, input_string: &str) -> eyre::Result<()> {
let task: ProvingTaskExt = serde_json::from_str(input_string)?;
let task = ProvingTask::from(task);

if self.json_mode {
let file = std::fs::File::create("input_task.json")?;
serde_json::to_writer(std::io::BufWriter::new(file), &task)?;
} else {
// stream-encode serialized_witness to input_task.bin using bincode 2.0
let input_file = std::fs::File::create("input_task.bin")?;
let mut input_writer = std::io::BufWriter::new(input_file);
bincode::encode_into_std_write(
&task.serialized_witness,
&mut input_writer,
bincode::config::standard(),
)?;

// stream-encode aggregated_proofs to agg_proofs.bin using bincode 2.0
let agg_file = std::fs::File::create("agg_proofs.bin")?;
let mut agg_writer = std::io::BufWriter::new(agg_file);
for proof in &task.aggregated_proofs {
let sz = bincode::serde::encode_into_std_write(
&proof.proofs,
&mut agg_writer,
bincode::config::standard(),
)?;
println!("written {sz} bytes for proof");
}
}

Ok(())
}
}

#[async_trait]
impl ProvingService for Dumper {
fn is_local(&self) -> bool {
true
}
async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
// fetching the vk has been deprecated in the new prover, which uses a dynamic asset loading scheme
GetVkResponse {
vks: vec![],
error: None,
}
}
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
let error = if let Err(e) = self.dump(&req.input) {
Some(format!("failed to dump: {}", e))
} else {
None
};

ProveResponse {
status: TaskStatus::Failed,
error,
..Default::default()
}
}
Comment on lines +66 to +78
⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain (condensed from the executed scripts): the repository was inspected with cat, rg, and fd to read crates/prover-bin/src/dumper.rs, search for the TaskStatus enum definition and .prove( call sites, find references to Dumper, and review the workspace and crates/prover-bin Cargo.toml manifests. TaskStatus is not defined in this repository; it comes from the external scroll-proving-sdk dependency declared in crates/prover-bin/Cargo.toml.


Status should reflect dump success or failure, not always return Failed.

The prove() method always sets status: TaskStatus::Failed regardless of whether the dump succeeds. When error is None (successful dump), the status should be TaskStatus::Success. This causes all dump operations to be incorrectly reported as failed.

Update the implementation to conditionally set the status:

async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
-   let error = if let Err(e) = self.dump(&req.input) {
-       Some(format!("failed to dump: {}", e))
-   } else {
-       None
-   };
+   let (status, error) = if let Err(e) = self.dump(&req.input) {
+       (TaskStatus::Failed, Some(format!("failed to dump: {}", e)))
+   } else {
+       (TaskStatus::Success, None)
+   };

    ProveResponse {
-       status: TaskStatus::Failed,
+       status,
        error,
        ..Default::default()
    }
}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change

Before:

    async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
        let error = if let Err(e) = self.dump(&req.input) {
            Some(format!("failed to dump: {}", e))
        } else {
            None
        };
        ProveResponse {
            status: TaskStatus::Failed,
            error,
            ..Default::default()
        }
    }

After:

    async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
        let (status, error) = if let Err(e) = self.dump(&req.input) {
            (TaskStatus::Failed, Some(format!("failed to dump: {}", e)))
        } else {
            (TaskStatus::Success, None)
        };
        ProveResponse {
            status,
            error,
            ..Default::default()
        }
    }
🤖 Prompt for AI Agents
In crates/prover-bin/src/dumper.rs around lines 57 to 69, the prove() method
always returns TaskStatus::Failed even when dump succeeds; change the status to
be conditional on the dump result: call self.dump(&req.input) and if it returns
Err set error to Some(...) and status to TaskStatus::Failed, otherwise set error
to None and status to TaskStatus::Success, then construct ProveResponse using
that conditional status and error (keep the rest of the default fields).


async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some("dump file finished but need a fail return to exit".to_string()),
..Default::default()
}
}
Comment on lines +80 to +87
⚠️ Potential issue | 🟠 Major

Misleading task status despite intentional exit behavior.

The query_task() method returns TaskStatus::Failed with message "dump file finished but need a fail return to exit" for all queries. While this appears intentional to force process termination (per PR objectives about ensuring exit), it violates expected API semantics—the status should reflect the actual task outcome, not be used as a process control mechanism.

Consider separating concerns: track actual task status (Success/Failed based on dump result) and use a different mechanism (e.g., explicit process exit in the caller) to ensure termination.
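A minimal sketch of that separation, reusing the PR's Dumper::dump and the scroll-proving-sdk types already imported in this file. The last_error field is hypothetical (not in the PR), TaskStatus::Success is assumed to exist in the SDK (the committable suggestion above relies on the same variant), and process termination is left to the caller (e.g. main() exiting after one_shot returns) instead of being encoded in the task status:

    use async_trait::async_trait;
    use scroll_proving_sdk::prover::{
        proving_service::{
            GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
            QueryTaskResponse, TaskStatus,
        },
        ProvingService,
    };

    #[derive(Default)]
    pub struct Dumper {
        pub json_mode: bool,
        // target_path omitted here for brevity
        /// hypothetical field, not in the PR: outcome of the last dump
        last_error: Option<String>,
    }

    #[async_trait]
    impl ProvingService for Dumper {
        fn is_local(&self) -> bool {
            true
        }

        async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
            GetVkResponse { vks: vec![], error: None }
        }

        async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
            // report the real dump outcome instead of hard-coding Failed;
            // assumes TaskStatus::Success exists in scroll-proving-sdk
            let (status, error) = match self.dump(&req.input) {
                Ok(()) => (TaskStatus::Success, None),
                Err(e) => (TaskStatus::Failed, Some(format!("failed to dump: {}", e))),
            };
            self.last_error = error.clone();
            ProveResponse { status, error, ..Default::default() }
        }

        async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
            // mirror the stored outcome; exiting the process is the caller's job
            let (status, error) = match self.last_error.clone() {
                None => (TaskStatus::Success, None),
                Some(e) => (TaskStatus::Failed, Some(e)),
            };
            QueryTaskResponse { task_id: req.task_id, status, error, ..Default::default() }
        }
    }

With something like this, the Dump flow in main.rs can stop after query_task reports a terminal status, rather than relying on a fabricated failure to break out of the worker loop.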

}
46 changes: 45 additions & 1 deletion crates/prover-bin/src/main.rs
@@ -1,8 +1,9 @@
mod dumper;
mod prover;
mod types;
mod zk_circuits_handler;

use clap::{ArgAction, Parser, Subcommand};
use clap::{ArgAction, Parser, Subcommand, ValueEnum};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
prover::{types::ProofType, ProverBuilder},
@@ -32,12 +33,35 @@ struct Args {
command: Option<Commands>,
}

#[derive(Clone, Debug, PartialEq, Eq, ValueEnum)]
enum TaskType {
Chunk,
Batch,
Bundle,
}

impl From<TaskType> for ProofType {
fn from(value: TaskType) -> Self {
match value {
TaskType::Chunk => ProofType::Chunk,
TaskType::Batch => ProofType::Batch,
TaskType::Bundle => ProofType::Bundle,
}
}
}

#[derive(Subcommand, Debug)]
enum Commands {
Handle {
/// path to save the verifier's asset
task_path: String,
},
Dump {
#[arg(long = "json", default_value = "false")]
json_mode: bool,
task_type: TaskType,
task_id: String,
},
}

#[derive(Debug, serde::Deserialize)]
@@ -63,6 +87,26 @@ async fn main() -> eyre::Result<()> {
let local_prover = LocalProver::new(cfg.clone());

match args.command {
Some(Commands::Dump {
json_mode,
task_type,
task_id,
}) => {
let prover = ProverBuilder::new(
sdk_config,
dumper::Dumper {
json_mode,
..Default::default()
},
)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;

std::sync::Arc::new(prover)
.one_shot(&[task_id], task_type.into())
.await;
}
Some(Commands::Handle { task_path }) => {
let file = File::open(Path::new(&task_path))?;
let reader = BufReader::new(file);
39 changes: 28 additions & 11 deletions crates/prover-bin/src/prover.rs
@@ -30,6 +30,9 @@ pub struct AssetsLocationData {
#[serde(default)]
/// an altered url for the specified vk
pub asset_detours: HashMap<String, url::Url>,
/// when the asset file already exists, skip verifying it against the network; useful for debugging
#[serde(default)]
pub debug_mode: bool,
}

impl AssetsLocationData {
@@ -79,6 +82,13 @@ impl AssetsLocationData {
// Get file metadata to check size
if let Ok(metadata) = std::fs::metadata(&local_file_path) {
// Make a HEAD request to get remote file size
if self.debug_mode {
println!(
"File {} already exists, skipping download under debugmode",
filename
);
continue;
}

if let Ok(head_resp) = client.head(download_url.clone()).send().await {
if let Some(content_length) = head_resp.headers().get("content-length") {
@@ -201,12 +211,20 @@ impl ProvingService for LocalProver {
error: Some(format!("proving task failed: {}", e)),
..Default::default()
},
Err(e) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task panicked: {}", e)),
..Default::default()
},
Err(e) => {
if e.is_panic() {
// re-throw any panic raised in the proving process,
// so the worker loop and the whole prover exit
std::panic::resume_unwind(e.into_panic());
}

QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task failed: {}", e)),
..Default::default()
}
}
};
} else {
return QueryTaskResponse {
@@ -273,7 +291,9 @@ impl LocalProver {
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;

let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
let is_openvm_13 = prover_task.use_openvm_13;
if prover_task.use_openvm_13 {
eyre::bail!("prover do not support snark params base on openvm 13");
}
let prover_task: ProvingTask = prover_task.into();
let vk = hex::encode(&prover_task.vk);
let handler = if let Some(handler) = self.handlers.get(&vk) {
@@ -300,10 +320,7 @@ impl LocalProver {
.location_data
.get_asset(&vk, &url_base, &base_config.workspace_path)
.await?;
let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
&asset_path,
is_openvm_13,
)?));
let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(&asset_path)?));
self.handlers.insert(vk, circuits_handler.clone());
circuits_handler
};