diff --git a/.gitignore b/.gitignore index 7cb5df7d..951f3e1c 100755 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ universalClient/coverage.out # TSS data directory tss-data/ local-native/data/ +e2e-tests/.pchain/ +e2e-tests/genesis_accounts.json diff --git a/Makefile b/Makefile index a5db6e09..84322ffa 100755 --- a/Makefile +++ b/Makefile @@ -155,6 +155,10 @@ draw-deps: clean: rm -rf snapcraft-local.yaml build/ +.PHONY: replace-addresses +replace-addresses: + bash scripts/replace_addresses.sh + distclean: clean rm -rf vendor/ diff --git a/config/testnet-donut/arb_sepolia/chain.json b/config/testnet-donut/arb_sepolia/chain.json index 5930c187..a9ccdff6 100644 --- a/config/testnet-donut/arb_sepolia/chain.json +++ b/config/testnet-donut/arb_sepolia/chain.json @@ -1,5 +1,5 @@ { - "chain": "eip155:421614", + "chain": "eip155:421614", "public_rpc_url": "https://endpoints.omniatech.io/v1/arbitrum/sepolia/public", "vm_type": 1, "gateway_address": "0x2cd870e0166Ba458dEC615168Fd659AacD795f34", @@ -52,4 +52,4 @@ "isInboundEnabled": true, "isOutboundEnabled": true } -} \ No newline at end of file +} diff --git a/config/testnet-donut/arb_sepolia/tokens/eth.json b/config/testnet-donut/arb_sepolia/tokens/eth.json index 7c65caa8..8d54447e 100644 --- a/config/testnet-donut/arb_sepolia/tokens/eth.json +++ b/config/testnet-donut/arb_sepolia/tokens/eth.json @@ -11,4 +11,4 @@ "denom": "", "contract_address": "0xc0a821a1AfEd1322c5e15f1F4586C0B8cE65400e" } -} \ No newline at end of file +} diff --git a/config/testnet-donut/base_sepolia/chain.json b/config/testnet-donut/base_sepolia/chain.json index db1dfda1..3b6d9c1b 100644 --- a/config/testnet-donut/base_sepolia/chain.json +++ b/config/testnet-donut/base_sepolia/chain.json @@ -52,4 +52,4 @@ "isInboundEnabled": true, "isOutboundEnabled": false } -} \ No newline at end of file +} diff --git a/config/testnet-donut/base_sepolia/tokens/eth.json b/config/testnet-donut/base_sepolia/tokens/eth.json index ff46c796..3410d53c 100644 --- 
a/config/testnet-donut/base_sepolia/tokens/eth.json +++ b/config/testnet-donut/base_sepolia/tokens/eth.json @@ -11,4 +11,4 @@ "denom": "", "contract_address": "0xc7007af2B24D4eb963fc9633B0c66e1d2D90Fc21" } -} \ No newline at end of file +} diff --git a/config/testnet-donut/bsc_testnet/chain.json b/config/testnet-donut/bsc_testnet/chain.json index 6e2d4900..045f8d92 100644 --- a/config/testnet-donut/bsc_testnet/chain.json +++ b/config/testnet-donut/bsc_testnet/chain.json @@ -52,4 +52,4 @@ "isInboundEnabled": true, "isOutboundEnabled": true } -} \ No newline at end of file +} diff --git a/config/testnet-donut/bsc_testnet/tokens/bnb.json b/config/testnet-donut/bsc_testnet/tokens/bnb.json index 83858a73..94455057 100644 --- a/config/testnet-donut/bsc_testnet/tokens/bnb.json +++ b/config/testnet-donut/bsc_testnet/tokens/bnb.json @@ -11,4 +11,4 @@ "denom": "", "contract_address": "0x7a9082dA308f3fa005beA7dB0d203b3b86664E36" } -} \ No newline at end of file +} diff --git a/config/testnet-donut/bsc_testnet/tokens/usdt.json b/config/testnet-donut/bsc_testnet/tokens/usdt.json index e5bb32a9..5e40428d 100644 --- a/config/testnet-donut/bsc_testnet/tokens/usdt.json +++ b/config/testnet-donut/bsc_testnet/tokens/usdt.json @@ -11,4 +11,4 @@ "denom": "", "contract_address": "0x2f98B4235FD2BA0173a2B056D722879360B12E7b" } -} \ No newline at end of file +} diff --git a/config/testnet-donut/eth_sepolia/chain.json b/config/testnet-donut/eth_sepolia/chain.json index 90fcc47d..6ec45fcd 100644 --- a/config/testnet-donut/eth_sepolia/chain.json +++ b/config/testnet-donut/eth_sepolia/chain.json @@ -52,4 +52,4 @@ "isInboundEnabled": true, "isOutboundEnabled": true } -} \ No newline at end of file +} diff --git a/config/testnet-donut/solana_devnet/chain.json b/config/testnet-donut/solana_devnet/chain.json index f5aed4e1..428fd5ae 100644 --- a/config/testnet-donut/solana_devnet/chain.json +++ b/config/testnet-donut/solana_devnet/chain.json @@ -15,7 +15,8 @@ "event_identifier": 
"7f1f6cffbb134644", "confirmation_type": 2 }, - {"name": "send_funds", + { + "name": "send_funds", "identifier": "54f7d3283f6a0f3b", "event_identifier": "6c9ad829b5ea1d7c", "confirmation_type": 1 diff --git a/config/testnet-donut/solana_devnet/tokens/sol.json b/config/testnet-donut/solana_devnet/tokens/sol.json index 2ec25016..05f65f7f 100644 --- a/config/testnet-donut/solana_devnet/tokens/sol.json +++ b/config/testnet-donut/solana_devnet/tokens/sol.json @@ -6,7 +6,7 @@ "decimals": 9, "enabled": true, "liquidity_cap": "1000000000000000000000000", - "token_type": 4, + "token_type": 4, "native_representation": { "denom": "", "contract_address": "0x5D525Df2bD99a6e7ec58b76aF2fd95F39874EBed" diff --git a/e2e-tests/.env.example b/e2e-tests/.env.example new file mode 100644 index 00000000..29fcccaa --- /dev/null +++ b/e2e-tests/.env.example @@ -0,0 +1,73 @@ +# Copy this file to e2e-tests/.env and adjust values. + +# Path to push-chain workspace root. +# Keep this empty to use auto-detection (parent of e2e-tests). +# PUSH_CHAIN_DIR= + +# Local Push RPC +PUSH_RPC_URL=http://localhost:8545 + +# Local chain info +CHAIN_ID=localchain_9000-1 +KEYRING_BACKEND=test +# Set to LOCAL to enable anvil/surfpool setup and local RPC rewrites in setup-environment/all +TESTING_ENV= + +# Genesis key recovery/funding +GENESIS_KEY_NAME=genesis-acc-1 +GENESIS_KEY_HOME=./e2e-tests/.pchain +# Optional local fallback file. If missing, setup.sh reads accounts from docker core-validator-1 (/tmp/push-accounts/genesis_accounts.json) +GENESIS_ACCOUNTS_JSON=./e2e-tests/genesis_accounts.json + +# Optional: set to skip interactive mnemonic prompt +# GENESIS_MNEMONIC="word1 word2 ..." 
+ +# Address to fund from genesis account +FUND_TO_ADDRESS=push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60 +FUND_AMOUNT=1000000000000000000upc +POOL_CREATION_TOPUP_AMOUNT=50000000000000000000upc +GAS_PRICES=100000000000upc + +# EVM private key used by forge/hardhat scripts +PRIVATE_KEY=0xYOURPRIVATEKEY + +# External repositories +CORE_CONTRACTS_REPO=https://github.com/pushchain/push-chain-core-contracts.git +CORE_CONTRACTS_BRANCH=node-e2e + +SWAP_AMM_REPO=https://github.com/pushchain/push-chain-swap-internal-amm-contracts.git +SWAP_AMM_BRANCH=e2e-push-node + +GATEWAY_REPO=https://github.com/pushchain/push-chain-gateway-contracts.git +GATEWAY_BRANCH=e2e-push-node + +PUSH_CHAIN_SDK_REPO=https://github.com/pushchain/push-chain-sdk.git +PUSH_CHAIN_SDK_BRANCH=outbound_changes + +# push-chain-sdk core .env target path (relative to PUSH_CHAIN_SDK_DIR) +PUSH_CHAIN_SDK_CORE_ENV_PATH=packages/core/.env + +# Local clone layout (outside push-chain directory) +E2E_PARENT_DIR=../ +CORE_CONTRACTS_DIR=../push-chain-core-contracts +SWAP_AMM_DIR=../push-chain-swap-internal-amm-contracts +GATEWAY_DIR=../push-chain-gateway-contracts +PUSH_CHAIN_SDK_DIR=../push-chain-sdk +PUSH_CHAIN_SDK_E2E_DIR=packages/core/__e2e__/evm/inbound + +# push-chain-sdk required env vars (mirrored into PUSH_CHAIN_SDK_DIR/packages/core/.env by setup-sdk) +# Defaults used by setup-sdk when omitted: +# EVM_PRIVATE_KEY <= PRIVATE_KEY +# EVM_RPC <= PUSH_RPC_URL +# PUSH_PRIVATE_KEY<= PRIVATE_KEY +EVM_PRIVATE_KEY= +EVM_RPC= +SOLANA_RPC_URL=https://api.devnet.solana.com +SOLANA_PRIVATE_KEY= +PUSH_PRIVATE_KEY= + +# Tracking files +DEPLOY_ADDRESSES_FILE=./e2e-tests/deploy_addresses.json +TEST_ADDRESSES_PATH=../push-chain-swap-internal-amm-contracts/test-addresses.json +TOKEN_CONFIG_PATH=./config/testnet-donut/eth_sepolia/tokens/eth.json +CHAIN_CONFIG_PATH=./config/testnet-donut/eth_sepolia/chain.json diff --git a/e2e-tests/.gitignore b/e2e-tests/.gitignore new file mode 100644 index 00000000..4ea7f858 --- /dev/null +++ 
b/e2e-tests/.gitignore @@ -0,0 +1,2 @@ +.env +logs/ \ No newline at end of file diff --git a/e2e-tests/README.md b/e2e-tests/README.md new file mode 100644 index 00000000..e8346d06 --- /dev/null +++ b/e2e-tests/README.md @@ -0,0 +1,454 @@ +# e2e-tests setup + +This folder provides a full, automated local E2E bootstrap for Push Chain. + +It covers: + +1. Local devnet — 4 `pchaind` + 4 `puniversald` processes (no Docker) +2. TSS key generation +3. Genesis key recovery + account funding +4. Core contracts deployment (auto-resume on receipt errors) +5. Swap AMM deployment (WPC + Uniswap V3 core + periphery) +6. WPC liquidity pool creation for all synthetic tokens +7. Core `.env` generation from deployed addresses +8. Token config updates +9. Gateway contracts deployment (auto-resume on receipt errors) +10. `configureUniversalCore` script +11. uregistry chain/token config submission +12. CounterPayable deployment + SDK constant sync +13. `push-chain-sdk` E2E test runners + +--- + +## Quick testing setup + +Three commands from a clean checkout. Make sure the prerequisites below are installed first. + +**1. Set up `.env`** + +```bash +cp e2e-tests/.env.example e2e-tests/.env +``` + +Edit `e2e-tests/.env` and set at minimum: + +- `TESTING_ENV=LOCAL` — enables anvil + surfpool forks +- `PRIVATE_KEY=0x...` — EVM deployer key (used by forge/hardhat and mirrored into SDK `.env`) +- `SOLANA_PRIVATE_KEY=...` — only needed if you plan to run Solana SDK tests + +`FUND_TO_ADDRESS`, `EVM_PRIVATE_KEY`, `EVM_RPC`, and `PUSH_PRIVATE_KEY` are auto-derived from `PRIVATE_KEY` / `PUSH_RPC_URL` if left blank. + +**2. Bootstrap the local Push network** + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh all +``` + +Runs the full pipeline: starts anvil/surfpool forks, boots 4 validators + 2 universal validators, generates the TSS key, deploys core/swap/gateway contracts, submits uregistry configs, and syncs addresses into `deploy_addresses.json`. 
See [One-command full run](#one-command-full-run) for the detailed step list. + +**3. Set up the SDK** + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh setup-sdk +``` + +Clones `push-chain-sdk`, writes `packages/core/.env` from your e2e `.env`, syncs the LOCALNET synthetic token addresses into the SDK's chain constants, resolves `UEA_PROXY_IMPLEMENTATION` from the local chain, and installs dependencies. + +After this you can run SDK E2E tests — see [Running SDK E2E tests](#running-sdk-e2e-tests). + +--- + +## What gets created + +- `local-native/data/` — validator + universal-validator home directories +- `local-native/logs/` — per-process log files +- `e2e-tests/logs/` — logs for each deployment step +- `e2e-tests/deploy_addresses.json` — contract/token address source-of-truth + +External repos are resolved from **sibling directories** (relative to `push-chain/`): + +| Repo | Default path | +|---|---| +| `push-chain-core-contracts` | `../push-chain-core-contracts` | +| `push-chain-swap-internal-amm-contracts` | `../push-chain-swap-internal-amm-contracts` | +| `push-chain-gateway-contracts` | `../push-chain-gateway-contracts` | +| `push-chain-sdk` | `../push-chain-sdk` | + +Override any of these with env vars (`CORE_CONTRACTS_DIR`, `SWAP_AMM_DIR`, `GATEWAY_DIR`, `PUSH_CHAIN_SDK_DIR`). + +--- + +## Prerequisites + +Required tools: + +- `git`, `make`, `curl`, `jq`, `perl`, `python3`, `lsof` +- `node`, `npm`, `npx`, `yarn` +- `forge`, `cast` (Foundry) +- `anvil` + `surfpool` — only for `TESTING_ENV=LOCAL` +- `pchaind` and `puniversald` binaries in `build/` (built by `make build`) + +Build the binaries first: + +```bash +make replace-addresses +make build +``` + +--- + +## Configuration + +Copy env template: + +```bash +cp e2e-tests/.env.example e2e-tests/.env +``` + +Edit `e2e-tests/.env`. 
Key variables: + +| Variable | Default | Description | +|---|---|---| +| `TESTING_ENV` | _(empty)_ | Set to `LOCAL` for local anvil/surfpool mode | +| `PUSH_RPC_URL` | `http://localhost:8545` | Push Chain EVM JSON-RPC | +| `PRIVATE_KEY` | — | EVM deployer private key (forge/hardhat) | +| `EVM_PRIVATE_KEY` | ← `PRIVATE_KEY` | SDK EVM signer key | +| `EVM_RPC` | ← `PUSH_RPC_URL` | SDK EVM RPC endpoint | +| `PUSH_PRIVATE_KEY` | ← `PRIVATE_KEY` | SDK Push Chain signer key | +| `SOLANA_PRIVATE_KEY` | — | SDK Solana signer key (also `SVM_PRIVATE_KEY` / `SOL_PRIVATE_KEY`) | +| `SOLANA_RPC_URL` | `https://api.devnet.solana.com` | SDK Solana RPC | +| `FUND_TO_ADDRESS` | _(auto-derived from `PRIVATE_KEY`)_ | Address to top up from genesis account | +| `GENESIS_MNEMONIC` | _(read from `genesis_accounts.json`)_ | Override genesis mnemonic directly | +| `POOL_CREATION_TOPUP_AMOUNT` | `50000000000000000000upc` | Deployer top-up before pool creation | +| `LOCAL_DEVNET_DIR` | `./local-native` | Path to local devnet management directory | +| `CORE_CONTRACTS_BRANCH` | `e2e-push-node` | | +| `SWAP_AMM_BRANCH` | `e2e-push-node` | | +| `GATEWAY_BRANCH` | `e2e-push-node` | | +| `PUSH_CHAIN_SDK_BRANCH` | `outbound_changes` | | +| `PUSH_CHAIN_SDK_E2E_DIR` | `packages/core/__e2e__/evm/inbound` | Test directory inside SDK | +| `PREFER_SIBLING_REPO_DIRS` | `true` | Prefer sibling dirs for core/gateway repos over cloning fresh | +| `E2E_TARGET_CHAINS` | — | Restrict SDK E2E chains (passed through to SDK `.env`) | +| `CORE_RESUME_MAX_ATTEMPTS` | `0` (unlimited) | Max `--resume` retry count for core forge script | +| `GATEWAY_RESUME_MAX_ATTEMPTS` | `0` (unlimited) | Max `--resume` retry count for gateway forge script | +| `CORE_CONFIGURE_RESUME_MAX_ATTEMPTS` | `0` (unlimited) | Max `--resume` retry count for `configureUniversalCore` | + +### TESTING_ENV=LOCAL + +When set in `.env`, the `setup-environment` step (also called by `all`) does: + +1. 
Starts local fork nodes: + - `anvil` for Ethereum Sepolia, Arbitrum Sepolia, Base Sepolia, BSC Testnet + - `surfpool` for Solana +2. Rewrites `public_rpc_url` in `config/testnet-donut/*/chain.json` to local fork URLs +3. Patches `puniversald` chain RPC config (`local-native/data/universal-N/.puniversal/config/pushuv_config.json`) to use local fork endpoints + +Default local fork URLs (override in `.env`): + +| Variable | Default | Description | +|---|---|---| +| `ANVIL_SEPOLIA_HOST_RPC_URL` | `http://localhost:9545` | Anvil Sepolia host URL (forge/cast + chain config patch) | +| `ANVIL_ARBITRUM_HOST_RPC_URL` | `http://localhost:9546` | Anvil Arbitrum Sepolia host URL | +| `ANVIL_BASE_HOST_RPC_URL` | `http://localhost:9547` | Anvil Base Sepolia host URL | +| `ANVIL_BSC_HOST_RPC_URL` | `http://localhost:9548` | Anvil BSC Testnet host URL | +| `SURFPOOL_SOLANA_HOST_RPC_URL` | `http://localhost:8899` | Surfpool Solana devnet host URL | +| `LOCAL_SEPOLIA_UV_RPC_URL` | ← `ANVIL_SEPOLIA_HOST_RPC_URL` | RPC written into UV `pushuv_config.json` (can differ from host if using Docker networking) | +| `LOCAL_ARBITRUM_UV_RPC_URL` | ← `ANVIL_ARBITRUM_HOST_RPC_URL` | UV-side Arbitrum RPC | +| `LOCAL_BASE_UV_RPC_URL` | ← `ANVIL_BASE_HOST_RPC_URL` | UV-side Base RPC | +| `LOCAL_BSC_UV_RPC_URL` | ← `ANVIL_BSC_HOST_RPC_URL` | UV-side BSC RPC | +| `LOCAL_SOLANA_UV_RPC_URL` | ← `SURFPOOL_SOLANA_HOST_RPC_URL` | UV-side Solana RPC | + +--- + +## One-command full run + +```bash +make replace-addresses +make build +TESTING_ENV=LOCAL bash e2e-tests/setup.sh all +``` + +The `all` pipeline runs in order: + +1. `setup-environment` — start anvil/surfpool + patch chain RPC configs (LOCAL) or sync testnet RPCs +2. Build binaries (`make replace-addresses` + `make build`) +3. Auto-derive `FUND_TO_ADDRESS` from `PRIVATE_KEY` (writes to `.env`) +4. Stop any running nodes cleanly +5. `devnet` — start 4 validators, register 4 universal validators, start 2 (edit `./devnet start-uv N` to start more) +6. 
`tss-keygen` — TSS key generation (via `./local-native/devnet tss-keygen`) +7. `setup-environment` (second run — patches UV `pushuv_config.json` with `event_start_from` after devnet data exists) +8. `recover-genesis-key` — import genesis mnemonic into local keyring +9. `fund` — top up deployer address from genesis account +10. `setup-core` — deploy core contracts (forge, auto-resume) +11. `setup-swap` — deploy WPC + Uniswap V3 (hardhat) +12. `sync-addresses` — copy addresses into swap `test-addresses.json` +13. `create-pool` — create WPC liquidity pools for all tokens +14. `check-addresses` — assert required contract addresses are recorded +15. `write-core-env` — generate core contracts `.env` +16. `configure-core` — run `configureUniversalCore.s.sol` (forge, auto-resume; internally re-generates core `.env`) +17. `update-token-config` — patch token config JSON files +18. `setup-gateway` — deploy gateway contracts (forge, auto-resume) +19. `add-uregistry-configs` — submit chain + token config txs +20. `deploy-counter-sdk` — deploy CounterPayable + sync SDK constants +21. Sync SDK LOCALNET synthetic token constants from `deploy_addresses.json` +22. `sync-vault-tss` — sync vault TSS addresses on all local Anvil EVM chains (LOCAL only) + +> `setup-sdk` is **not** included in `all`. Run it separately before any `sdk-test-*` command (see [Running SDK E2E tests](#running-sdk-e2e-tests)). + +--- + +## Running SDK E2E tests + +The SDK repo is cloned/installed and patched to point at the local deployment only when `setup-sdk` runs. 
After `all` finishes: + +```bash +# Clone push-chain-sdk, generate its .env, install deps, sync LOCALNET constants +TESTING_ENV=LOCAL bash e2e-tests/setup.sh setup-sdk + +# Inbound test suite (TESTNET_DONUT → LOCALNET rewrite applied to spec files) +TESTING_ENV=LOCAL bash e2e-tests/setup.sh sdk-test-all + +# Outbound test suite (requires TESTING_ENV=LOCAL; also funds TSS signer + vault TSS sync) +TESTING_ENV=LOCAL bash e2e-tests/setup.sh sdk-test-outbound-all + +# Single inbound file +TESTING_ENV=LOCAL bash e2e-tests/setup.sh sdk-test-send-to-self +``` + +Route-2 outbound tests (`cea-to-eoa.spec.ts`) additionally require a bootstrapped CEA on the BSC testnet fork: + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh bootstrap-cea-sdk +TESTING_ENV=LOCAL bash e2e-tests/setup.sh sdk-test-cea-to-eoa +``` + +### Quick outbound smoke test + +For the fastest outbound sanity check after a fresh bootstrap, chain `all` with `quick-testing-outbound`: + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh all +TESTING_ENV=LOCAL bash e2e-tests/setup.sh quick-testing-outbound +``` + +`quick-testing-outbound` internally runs `setup-sdk`, then `fund-uea-prc20`, and finally executes just the two most important outbound specs — `cea-to-eoa.spec.ts` and `cea-to-uea.spec.ts` — so you get end-to-end outbound coverage without running the full outbound suite. + +--- + +## Local devnet (`local-native/devnet`) + +The `devnet` script manages 4 `pchaind` validators and 4 `puniversald` universal validators as local OS processes (no Docker). 
+ +``` +local-native/ + devnet # management script + data/ # validator home dirs + PID file (gitignored) + logs/ # per-process log files (gitignored) +``` + +### Devnet commands + +```bash +./local-native/devnet start 4 # Start 4 core validators +./local-native/devnet setup-uvalidators # Register UVs on-chain + create AuthZ grants +./local-native/devnet start-uv 2 # Start 2 universal validators (or 4 for full set) +./local-native/devnet stop # Stop all processes (keep data) +./local-native/devnet down # Stop and remove data +./local-native/devnet status # Show running processes + block heights +./local-native/devnet logs [name] # Tail logs (validator-1, universal-2, all, …) +./local-native/devnet tss-keygen # Initiate TSS key generation +``` + +Port layout: + +| Node | RPC | EVM JSON-RPC | WS | +|---|---|---|---| +| validator-1 | 26657 | 8545 | 8546 | +| validator-2 | 26658 | 8547 | 8548 | +| validator-3 | 26659 | 8549 | 8550 | +| validator-4 | 26660 | 8551 | 8552 | + +| UV | Query | TSS P2P | +|---|---|---| +| universal-validator-1 | 8080 | 39000 | +| universal-validator-2 | 8081 | 39001 | +| universal-validator-3 | 8082 | 39002 | +| universal-validator-4 | 8083 | 39003 | + +### Clean devnet restart + +```bash +./local-native/devnet down +./local-native/devnet start 4 +./local-native/devnet setup-uvalidators +./local-native/devnet start-uv 4 +``` + +--- + +## setup.sh command reference + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh +``` + +| Command | Description | +|---|---| +| `all` | Full setup pipeline | +| `setup-environment` | Start anvil/surfpool + patch chain RPC configs | +| `devnet` | Start local devnet + register universal validators | +| `print-genesis` | Print first genesis account + mnemonic | +| `recover-genesis-key` | Import genesis mnemonic into local keyring | +| `fund` | Fund `FUND_TO_ADDRESS` from genesis account | +| `setup-core` | Build + deploy core contracts (auto-resume) | +| `setup-swap` | Build + deploy WPC + Uniswap V3 | +| 
`sync-addresses` | Copy `deploy_addresses.json` into swap `test-addresses.json` | +| `create-pool` | Create WPC pools for all deployed core tokens | +| `fund-uea-prc20` | Transfer PRC20 tokens from deployer to the test UEA address | +| `configure-core` | Run `configureUniversalCore.s.sol` (auto-resume) | +| `check-addresses` | Assert required contract addresses are recorded | +| `write-core-env` | Generate core contracts `.env` | +| `update-token-config` | Patch token config JSON contract addresses | +| `setup-gateway` | Build + deploy gateway contracts (auto-resume) | +| `sync-vault-tss` | Sync vault `TSS_ADDRESS` to current TSS key on all local Anvil chains (LOCAL only) | +| `add-uregistry-configs` | Submit chain + token configs to uregistry | +| `deploy-counter-sdk` | Deploy CounterPayable + sync SDK `COUNTER_ADDRESS_PAYABLE` | +| `bootstrap-cea-sdk` | Ensure CEA is deployed for SDK signer on BSC testnet fork (Route 2 bootstrap) | +| `setup-sdk` | Clone/install SDK, generate SDK `.env`, sync LOCALNET constants | +| `sdk-test-all` | Run all configured inbound SDK E2E test files | +| `sdk-test-outbound-all` | Run all configured outbound SDK E2E test files (LOCAL only) | +| `quick-testing-outbound` | Run `setup-sdk` + `fund-uea-prc20`, then only `cea-to-eoa.spec.ts` and `cea-to-uea.spec.ts` (fast outbound smoke test) | +| `sdk-test-pctx-last-transaction` | Run `pctx-last-transaction.spec.ts` | +| `sdk-test-send-to-self` | Run `send-to-self.spec.ts` | +| `sdk-test-progress-hook` | Run `progress-hook-per-tx.spec.ts` | +| `sdk-test-bridge-multicall` | Run `bridge-multicall.spec.ts` | +| `sdk-test-pushchain` | Run `pushchain.spec.ts` | +| `sdk-test-bridge-hooks` | Run `bridge-hooks.spec.ts` | +| `sdk-test-cea-to-eoa` | Run `cea-to-eoa.spec.ts` (outbound Route 3; requires `TESTING_ENV=LOCAL`) | +| `record-contract K A` | Manually record contract key + address | +| `record-token N S A` | Manually record token name, symbol, address | +| `help` | Show help | + +--- + +## 
Address tracking model + +`e2e-tests/deploy_addresses.json` is the canonical address registry. + +### Required contracts + +- `contracts.WPC` +- `contracts.Factory` +- `contracts.QuoterV2` +- `contracts.SwapRouter` +- `contracts.UEA_PROXY_IMPLEMENTATION` (resolved from on-chain precompile during `setup-sdk`) +- `contracts.COUNTER_ADDRESS_PAYABLE` + +### Token entries + +`tokens[]` records each synthetic ERC-20 deployed by core contracts (`name`, `symbol`, `address`, `decimals`). + +These addresses are used to: + +- sync swap repo `test-addresses.json` +- generate core contracts `.env` +- update `config/testnet-donut/tokens/*.json` +- submit token config txs to uregistry + +Manual helpers: + +```bash +./e2e-tests/setup.sh record-contract Factory 0x1234... +./e2e-tests/setup.sh record-token "Push ETH" pETH 0x1234... +``` + +--- + +## Adding a new token to the setup + +To register a new synthetic token in the local bootstrap, edit `../push-chain-core-contracts/scripts/localSetup/setup.s.sol` and add the token there. The `all` pipeline will deploy it and automatically create a WPC ↔ token liquidity pool as part of `create-pool`. + +Note: this only handles pools paired with WPC. If you need a pool between two non-WPC tokens, additional adjustments are required (extra pool-creation logic in the swap setup and matching entries in the token/uregistry configs). + +--- + +## Auto-retry and resilience behavior + +### Forge scripts (core, gateway, configureUniversalCore) + +- Stale broadcast cache from previous runs is cleared automatically before each fresh deploy. +- If the initial `forge script --broadcast` fails (e.g., receipt timeout), retries with `--resume` until success. 
+- Caps (all default `0` = unlimited retries): + - `CORE_RESUME_MAX_ATTEMPTS` — core contracts deploy + - `GATEWAY_RESUME_MAX_ATTEMPTS` — gateway contracts deploy + - `CORE_CONFIGURE_RESUME_MAX_ATTEMPTS` — `configureUniversalCore.s.sol` + +### uregistry tx submission + +- Retries automatically on `account sequence mismatch`. +- Validates tx result by checking the returned `code` field. + +--- + +## Generated files of interest + +| File | Description | +|---|---| +| `e2e-tests/deploy_addresses.json` | Contract/token address registry | +| `e2e-tests/logs/` | Per-step deployment logs | +| `local-native/data/` | Validator + UV home directories | +| `local-native/logs/` | Per-process stdout/stderr | +| `/test-addresses.json` | Swap repo address file (synced from deploy_addresses.json) | +| `/.env` | Core contracts env (generated by `write-core-env`) | +| `config/testnet-donut/*/tokens/*.json` | Token config files (updated contract addresses) | + +--- + +## Clean full re-run + +```bash +# Stop + wipe devnet +./local-native/devnet down + +# Reset state +rm -f e2e-tests/deploy_addresses.json + +# Rebuild + run +make replace-addresses +make build +TESTING_ENV=LOCAL bash e2e-tests/setup.sh all +``` + +--- + +## Troubleshooting + +### 1) `pchaind` or `puniversald` won't start + +Check that `make build` completed successfully and `build/pchaind` / `build/puniversald` exist. + +### 2) Validators stuck at height 0 + +P2P peer connections failing. The devnet script sets `allow_duplicate_ip = true` and `addr_book_strict = false` automatically for all-localhost setups. If reusing old data, run `./local-native/devnet down` to wipe and restart clean. + +### 3) TSS keygen not completing + +Check UV logs (`./local-native/devnet logs universal-1`). 
UVs need: +- All 4 validators bonded +- All 4 UVs registered with AuthZ grants +- External chain RPC endpoints configured (set by `setup-environment`) + +### 4) Core/gateway forge script keeps stopping with receipt errors + +Expected intermittently. The script auto-retries with `--resume` until all receipts confirm. + +### 5) `account sequence mismatch` in uregistry tx + +The script retries automatically. + +### 6) Swap AMM deployment fails mid-run + +Re-run the individual step: + +```bash +TESTING_ENV=LOCAL bash e2e-tests/setup.sh setup-swap +``` diff --git a/e2e-tests/deploy_addresses.json b/e2e-tests/deploy_addresses.json new file mode 100644 index 00000000..2964c011 --- /dev/null +++ b/e2e-tests/deploy_addresses.json @@ -0,0 +1,63 @@ +{ + "generatedAt": "2026-04-15T09:32:52Z", + "contracts": { + "WPC": "0xB5B1e1ADc1b8fc1066975aa09f9371a5f67C54F5", + "Factory": "0x140b9f84fCbccB4129AC6F32b1243ea808d18261", + "SwapRouter": "0x95cE5e63366D3A11E9BCCe71917bB37C23Fd0002", + "QuoterV2": "0xE9cb561141553DFa0A576cCd34546BECffb64Af1", + "PositionManager": "0x484aC6ED747090fe8C82c5F10427ccC2F2998930", + "UEA_PROXY_IMPLEMENTATION": "0x2C297101b7d3e0911296b9A64d106684a161b4C9", + "COUNTER_ADDRESS_PAYABLE": "0xDaC125f9350cD25786Cfd5c8eb2b6837c5e7Ce6B" + }, + "tokens": [ + { + "name": "pETH.eth", + "symbol": "pETH", + "address": "0x373D3F1B2b26729A308C5641970247bc9d4ddDa4", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "USDT.eth", + "symbol": "USDT.eth", + "address": "0x6a20557430be6412AF423681e35CC96797506F3a", + "source": "core-contracts", + "decimals": 6 + }, + { + "name": "pETH.base", + "symbol": "pETH.base", + "address": "0xCcd71bc096E2225048cD167447e164E8571BcCA6", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pETH.arb", + "symbol": "pETH.arb", + "address": "0xE74A512688E53d6Ed2cf64a327fABE8ECE27aDD6", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pBNB", + "symbol": "pBNB", + "address": 
"0x2ddB499C3a35a60c809d878eFf5Fa248bb5eAdbd", + "source": "core-contracts", + "decimals": 18 + }, + { + "name": "pSOL", + "symbol": "pSOL", + "address": "0x31F3Dcb417970EBe9AC1e254Ee42b91e49e30EE2", + "source": "core-contracts", + "decimals": 9 + }, + { + "name": "USDT.bsc", + "symbol": "USDT.bsc", + "address": "0xC329d4EbF8814eEFfA2Fd9612655e490b112523F", + "source": "core-contracts", + "decimals": 6 + } + ] +} diff --git a/e2e-tests/setup.sh b/e2e-tests/setup.sh new file mode 100755 index 00000000..efaa32e5 --- /dev/null +++ b/e2e-tests/setup.sh @@ -0,0 +1,3037 @@ +#!/usr/bin/env bash + +set -euo pipefail +IFS=$'\n\t' + +SCRIPT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PUSH_CHAIN_DIR_DEFAULT="$(cd -P "$SCRIPT_DIR/.." && pwd)" +ENV_FILE="$SCRIPT_DIR/.env" + +if [[ -f "$ENV_FILE" ]]; then + set -a + source "$ENV_FILE" + set +a +fi + +: "${PUSH_CHAIN_DIR:=$PUSH_CHAIN_DIR_DEFAULT}" +: "${PUSH_RPC_URL:=http://localhost:8545}" +: "${CHAIN_ID:=localchain_9000-1}" +: "${TESTING_ENV:=}" +: "${KEYRING_BACKEND:=test}" +: "${GENESIS_KEY_NAME:=genesis-acc-1}" +: "${GENESIS_KEY_HOME:=./e2e-tests/.pchain}" +: "${GENESIS_ACCOUNTS_JSON:=./e2e-tests/genesis_accounts.json}" +: "${FUND_AMOUNT:=1000000000000000000upc}" +: "${POOL_CREATION_TOPUP_AMOUNT:=50000000000000000000upc}" +: "${GAS_PRICES:=100000000000upc}" +: "${LOCAL_DEVNET_DIR:=./local-native}" + +: "${CORE_CONTRACTS_REPO:=https://github.com/pushchain/push-chain-core-contracts.git}" +: "${CORE_CONTRACTS_BRANCH:=e2e-push-node}" +: "${SWAP_AMM_REPO:=https://github.com/pushchain/push-chain-swap-internal-amm-contracts.git}" +: "${SWAP_AMM_BRANCH:=e2e-push-node}" +: "${GATEWAY_REPO:=https://github.com/pushchain/push-chain-gateway-contracts.git}" +: "${GATEWAY_BRANCH:=e2e-push-node}" +: "${PUSH_CHAIN_SDK_REPO:=https://github.com/pushchain/push-chain-sdk.git}" +: "${PUSH_CHAIN_SDK_BRANCH:=outbound_changes}" +: "${PREFER_SIBLING_REPO_DIRS:=true}" + +: "${E2E_PARENT_DIR:=../}" +: 
"${CORE_CONTRACTS_DIR:=$E2E_PARENT_DIR/push-chain-core-contracts}" +: "${SWAP_AMM_DIR:=$E2E_PARENT_DIR/push-chain-swap-internal-amm-contracts}" +: "${GATEWAY_DIR:=$E2E_PARENT_DIR/push-chain-gateway-contracts}" +: "${PUSH_CHAIN_SDK_DIR:=$E2E_PARENT_DIR/push-chain-sdk}" +: "${PUSH_CHAIN_SDK_E2E_DIR:=packages/core/__e2e__/evm/inbound}" +: "${PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH:=packages/core/src/lib/constants/chain.ts}" +: "${PUSH_CHAIN_SDK_ACCOUNT_TS_PATH:=packages/core/src/lib/universal/account/account.ts}" +: "${PUSH_CHAIN_SDK_CORE_ENV_PATH:=packages/core/.env}" +: "${DEPLOY_ADDRESSES_FILE:=$SCRIPT_DIR/deploy_addresses.json}" +: "${LOG_DIR:=$SCRIPT_DIR/logs}" +: "${TEST_ADDRESSES_PATH:=$SWAP_AMM_DIR/test-addresses.json}" +: "${TOKENS_CONFIG_DIR:=./config/testnet-donut}" +: "${TOKEN_CONFIG_PATH:=./config/testnet-donut/eth_sepolia/tokens/eth.json}" +: "${CHAIN_CONFIG_PATH:=./config/testnet-donut/eth_sepolia/chain.json}" + +abs_from_root() { + local path="$1" + if [[ "$path" = /* ]]; then + printf "%s" "$path" + else + printf "%s/%s" "$PUSH_CHAIN_DIR" "${path#./}" + fi +} + +GENESIS_KEY_HOME="$(abs_from_root "$GENESIS_KEY_HOME")" +GENESIS_ACCOUNTS_JSON="$(abs_from_root "$GENESIS_ACCOUNTS_JSON")" +LOCAL_DEVNET_DIR="$(abs_from_root "$LOCAL_DEVNET_DIR")" +E2E_PARENT_DIR="$(abs_from_root "$E2E_PARENT_DIR")" +CORE_CONTRACTS_DIR="$(abs_from_root "$CORE_CONTRACTS_DIR")" +SWAP_AMM_DIR="$(abs_from_root "$SWAP_AMM_DIR")" +GATEWAY_DIR="$(abs_from_root "$GATEWAY_DIR")" +PUSH_CHAIN_SDK_DIR="$(abs_from_root "$PUSH_CHAIN_SDK_DIR")" +DEPLOY_ADDRESSES_FILE="$(abs_from_root "$DEPLOY_ADDRESSES_FILE")" +TEST_ADDRESSES_PATH="$(abs_from_root "$TEST_ADDRESSES_PATH")" +LOG_DIR="$(abs_from_root "$LOG_DIR")" +TOKENS_CONFIG_DIR="$(abs_from_root "$TOKENS_CONFIG_DIR")" +TOKEN_CONFIG_PATH="$(abs_from_root "$TOKEN_CONFIG_PATH")" +CHAIN_CONFIG_PATH="$(abs_from_root "$CHAIN_CONFIG_PATH")" + +mkdir -p "$LOG_DIR" + +green='\033[0;32m' +yellow='\033[0;33m' +red='\033[0;31m' +cyan='\033[0;36m' 
+nc='\033[0m' + +log_info() { printf "%b\n" "${cyan}==>${nc} $*"; } +log_ok() { printf "%b\n" "${green}✓${nc} $*"; } +log_warn() { printf "%b\n" "${yellow}!${nc} $*"; } +log_err() { printf "%b\n" "${red}x${nc} $*"; } + +normalize_path() { + local path="$1" + if [[ -d "$path" ]]; then + (cd -P "$path" && pwd) + return + fi + + local parent base + parent="$(dirname "$path")" + base="$(basename "$path")" + + if [[ -d "$parent" ]]; then + printf "%s/%s" "$(cd -P "$parent" && pwd)" "$base" + else + printf "%s" "$path" + fi +} + +prefer_sibling_repo_dirs() { + if [[ "$(echo "$PREFER_SIBLING_REPO_DIRS" | tr '[:upper:]' '[:lower:]')" != "true" ]]; then + CORE_CONTRACTS_DIR="$(normalize_path "$CORE_CONTRACTS_DIR")" + GATEWAY_DIR="$(normalize_path "$GATEWAY_DIR")" + return + fi + + local sibling_core sibling_gateway + sibling_core="$(normalize_path "$PUSH_CHAIN_DIR/../push-chain-core-contracts")" + sibling_gateway="$(normalize_path "$PUSH_CHAIN_DIR/../push-chain-gateway-contracts")" + + CORE_CONTRACTS_DIR="$(normalize_path "$CORE_CONTRACTS_DIR")" + GATEWAY_DIR="$(normalize_path "$GATEWAY_DIR")" + + if [[ -d "$sibling_core" ]]; then + CORE_CONTRACTS_DIR="$sibling_core" + fi + + if [[ -d "$sibling_gateway" ]]; then + GATEWAY_DIR="$sibling_gateway" + fi +} + +prefer_sibling_repo_dirs + +ensure_testing_env_var_in_env_file() { + mkdir -p "$(dirname "$ENV_FILE")" + + if [[ ! -f "$ENV_FILE" ]]; then + printf "TESTING_ENV=\n" >"$ENV_FILE" + return + fi + + if ! 
grep -Eq '^TESTING_ENV=' "$ENV_FILE"; then + printf "\nTESTING_ENV=\n" >>"$ENV_FILE" + fi +} + +is_local_testing_env() { + [[ "${TESTING_ENV:-}" == "LOCAL" ]] +} + +get_genesis_accounts_json() { + if [[ -f "$GENESIS_ACCOUNTS_JSON" ]]; then + cat "$GENESIS_ACCOUNTS_JSON" + return 0 + fi + + if command -v docker >/dev/null 2>&1; then + if docker ps --format '{{.Names}}' | grep -qx 'core-validator-1'; then + if docker exec core-validator-1 test -f /tmp/push-accounts/genesis_accounts.json >/dev/null 2>&1; then + docker exec core-validator-1 cat /tmp/push-accounts/genesis_accounts.json + return 0 + fi + fi + fi + + return 1 +} + +require_cmd() { + local c + for c in "$@"; do + command -v "$c" >/dev/null 2>&1 || { + log_err "Missing command: $c" + exit 1 + } + done +} + +list_remote_branches() { + local repo_url="$1" + git ls-remote --heads "$repo_url" | awk '{print $2}' | sed 's#refs/heads/##' +} + +select_best_matching_branch() { + local requested="$1" + shift + local branches=("$@") + local best="" + local best_score=0 + local branch token score + + # Tokenize requested branch by non-alphanumeric delimiters. 
+ local tokens=() + while IFS= read -r token; do + [[ -n "$token" ]] && tokens+=("$token") + done < <(echo "$requested" | tr -cs '[:alnum:]' '\n' | tr '[:upper:]' '[:lower:]') + + for branch in "${branches[@]}"; do + score=0 + local b_lc + b_lc="$(echo "$branch" | tr '[:upper:]' '[:lower:]')" + for token in "${tokens[@]}"; do + if [[ "$b_lc" == *"$token"* ]]; then + score=$((score + 1)) + fi + done + if (( score > best_score )); then + best_score=$score + best="$branch" + fi + done + + if (( best_score >= 2 )); then + printf "%s" "$best" + fi +} + +resolve_branch() { + local repo_url="$1" + local requested="$2" + local branches=() + local b + + while IFS= read -r b; do + [[ -n "$b" ]] && branches+=("$b") + done < <(list_remote_branches "$repo_url") + + local branch + for branch in "${branches[@]}"; do + if [[ "$branch" == "$requested" ]]; then + printf "%s" "$requested" + return + fi + done + + local best + best="$(select_best_matching_branch "$requested" "${branches[@]}")" + if [[ -n "$best" ]]; then + printf "%b\n" "${yellow}!${nc} Branch '$requested' not found. Auto-selected '$best'." >&2 + printf "%s" "$best" + return + fi + + for branch in main master; do + for b in "${branches[@]}"; do + if [[ "$b" == "$branch" ]]; then + printf "%b\n" "${yellow}!${nc} Branch '$requested' not found. Falling back to '$branch'." >&2 + printf "%s" "$branch" + return + fi + done + done + + if [[ ${#branches[@]} -gt 0 ]]; then + printf "%b\n" "${yellow}!${nc} Branch '$requested' not found. Falling back to '${branches[0]}'." >&2 + printf "%s" "${branches[0]}" + return + fi + + log_err "No remote branches found for $repo_url" + exit 1 +} + +ensure_deploy_file() { + mkdir -p "$(dirname "$DEPLOY_ADDRESSES_FILE")" + + if [[ ! -s "$DEPLOY_ADDRESSES_FILE" ]]; then + cat >"$DEPLOY_ADDRESSES_FILE" <<'JSON' +{ + "generatedAt": "", + "contracts": {}, + "tokens": [] +} +JSON + return + fi + + if ! jq -e . 
"$DEPLOY_ADDRESSES_FILE" >/dev/null 2>&1; then + log_warn "Deploy file is empty/invalid JSON, reinitializing: $DEPLOY_ADDRESSES_FILE" + cat >"$DEPLOY_ADDRESSES_FILE" <<'JSON' +{ + "generatedAt": "", + "contracts": {}, + "tokens": [] +} +JSON + return + fi + + local tmp + tmp="$(mktemp)" + jq ' + .generatedAt = (.generatedAt // "") + | .contracts = (.contracts // {}) + | .tokens = (.tokens // []) + ' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" +} + +set_generated_at() { + local tmp + tmp="$(mktemp)" + jq --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" '.generatedAt = $now' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" +} + +record_contract() { + local key="$1" + local address="$2" + local tmp + tmp="$(mktemp)" + jq --arg key "$key" --arg val "$address" '.contracts[$key] = $val' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" + set_generated_at + log_ok "Recorded contract $key=$address" +} + +record_token() { + local name="$1" + local symbol="$2" + local address="$3" + local source="$4" + local tmp + tmp="$(mktemp)" + jq \ + --arg name "$name" \ + --arg symbol "$symbol" \ + --arg address "$address" \ + --arg source "$source" \ + ' + .tokens = ( + ([.tokens[]? | select((.address | ascii_downcase) != ($address | ascii_downcase))]) + + [{name:$name, symbol:$symbol, address:$address, source:$source}] + ) + ' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" + set_generated_at + log_ok "Recorded token $symbol=$address ($name)" +} + +validate_eth_address() { + [[ "$1" =~ ^0x[a-fA-F0-9]{40}$ ]] +} + +clone_or_update_repo() { + local repo_url="$1" + local branch="$2" + local dest="$3" + local resolved_branch + + resolved_branch="$(resolve_branch "$repo_url" "$branch")" + + if [[ -d "$dest" && ! 
-d "$dest/.git" ]]; then + log_warn "Removing non-git directory at $dest" + rm -rf "$dest" + fi + + if [[ -d "$dest/.git" ]]; then + local current_branch has_changes + current_branch="$(git -C "$dest" rev-parse --abbrev-ref HEAD 2>/dev/null || true)" + has_changes="$(git -C "$dest" status --porcelain 2>/dev/null)" + + if [[ -n "$has_changes" && "$current_branch" == "$resolved_branch" ]]; then + log_warn "Repo $(basename "$dest") has local changes on branch '$current_branch'. Skipping update to preserve local changes." + return 0 + fi + + log_info "Updating repo $(basename "$dest")" + local current_origin + current_origin="$(git -C "$dest" remote get-url origin 2>/dev/null || true)" + if [[ -z "$current_origin" || "$current_origin" != "$repo_url" ]]; then + log_warn "Setting origin for $(basename "$dest") to $repo_url" + if git -C "$dest" remote get-url origin >/dev/null 2>&1; then + git -C "$dest" remote set-url origin "$repo_url" + else + git -C "$dest" remote add origin "$repo_url" + fi + fi + + git -C "$dest" fetch origin + git -C "$dest" checkout -B "$resolved_branch" "origin/$resolved_branch" + git -C "$dest" reset --hard "origin/$resolved_branch" + else + log_info "Cloning $(basename "$dest")" + git clone --branch "$resolved_branch" "$repo_url" "$dest" + fi +} + +sdk_test_files() { + local base_dir="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_E2E_DIR" + local file alt + local requested_files=( + "pctx-last-transaction.spec.ts" + "send-to-self.spec.ts" + "progress-hook-per-tx.spec.ts" + "bridge-multicall.spec.ts" + "pushchain.spec.ts" + "bridge-hooks.spec.ts" + ) + + for file in "${requested_files[@]}"; do + if [[ -f "$base_dir/$file" ]]; then + printf "%s\n" "$base_dir/$file" + continue + fi + + if [[ "$file" == *.tx ]]; then + alt="${file%.tx}.ts" + if [[ -f "$base_dir/$alt" ]]; then + printf "%b\n" "${yellow}!${nc} Test file '$file' not found. Using '$alt'." 
>&2 + printf "%s\n" "$base_dir/$alt" + continue + fi + fi + + log_err "SDK test file not found: $base_dir/$file" + exit 1 + done +} + +sdk_outbound_test_files() { + local outbound_dir="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" + local file + local requested_files=( + "cea-to-eoa.spec.ts" + ) + + for file in "${requested_files[@]}"; do + if [[ -f "$outbound_dir/$file" ]]; then + printf "%s\n" "$outbound_dir/$file" + else + log_err "SDK outbound test file not found: $outbound_dir/$file" + exit 1 + fi + done +} + +sdk_rewrite_chain_endpoints_for_local() { + local chain_constants_file="$1" + + CHAIN_CONSTANTS_FILE="$chain_constants_file" node <<'NODE' +const fs = require('fs'); + +const filePath = process.env.CHAIN_CONSTANTS_FILE; +if (!filePath || !fs.existsSync(filePath)) { + console.error('chain.ts file not found for LOCAL endpoint rewrite'); + process.exit(1); +} + +let source = fs.readFileSync(filePath, 'utf8'); + +const endpointMap = [ + { chain: 'ETHEREUM_SEPOLIA', url: 'http://localhost:9545' }, + { chain: 'ARBITRUM_SEPOLIA', url: 'http://localhost:9546' }, + { chain: 'BASE_SEPOLIA', url: 'http://localhost:9547' }, + { chain: 'BNB_TESTNET', url: 'http://localhost:9548' }, + { chain: 'SOLANA_DEVNET', url: 'http://localhost:8899' }, +]; + +function findChainBlockRange(text, chainName) { + const marker = `[CHAIN.${chainName}]`; + const markerIdx = text.indexOf(marker); + if (markerIdx === -1) { + return null; + } + + const openBraceIdx = text.indexOf('{', markerIdx); + if (openBraceIdx === -1) { + return null; + } + + let depth = 0; + for (let i = openBraceIdx; i < text.length; i += 1) { + const ch = text[i]; + if (ch === '{') { + depth += 1; + } else if (ch === '}') { + depth -= 1; + if (depth === 0) { + return { start: openBraceIdx, end: i }; + } + } + } + + return null; +} + +function detectIndent(blockText) { + const match = blockText.match(/\n(\s+)[A-Za-z_\[]/); + return match ? 
match[1] : ' '; +} + +function findMatchingBracket(text, openIdx) { + let depth = 0; + let quote = ''; + + for (let i = openIdx; i < text.length; i += 1) { + const ch = text[i]; + const prev = i > 0 ? text[i - 1] : ''; + + if (quote) { + if (ch === quote && prev !== '\\') { + quote = ''; + } + continue; + } + + if (ch === '\'' || ch === '"' || ch === '`') { + quote = ch; + continue; + } + + if (ch === '[') { + depth += 1; + continue; + } + + if (ch === ']') { + depth -= 1; + if (depth === 0) { + return i; + } + } + } + + return -1; +} + +function upsertDefaultRpc(blockText, rpcUrl, indent) { + const keyRegex = /\bdefaultRPC\s*:/m; + const keyMatch = keyRegex.exec(blockText); + if (keyMatch) { + const arrayStart = blockText.indexOf('[', keyMatch.index); + if (arrayStart !== -1) { + const arrayEnd = findMatchingBracket(blockText, arrayStart); + if (arrayEnd !== -1) { + return { + text: `${blockText.slice(0, arrayStart)}['${rpcUrl}']${blockText.slice(arrayEnd + 1)}`, + changed: true, + }; + } + } + + return { + text: blockText.replace(/(defaultRPC\s*:\s*)[^\n,]+/, `$1['${rpcUrl}']`), + changed: true, + }; + } + + return { + text: blockText.replace(/\{\s*/, `{\n${indent}defaultRPC: ['${rpcUrl}'],\n`), + changed: true, + }; +} + +function upsertExplorerUrl(blockText, explorerUrl, indent) { + const explorerRegex = /((explorerURL|explorerUrl)\s*:\s*)['"`][^'"`\n]*['"`]/m; + if (explorerRegex.test(blockText)) { + return { + text: blockText.replace(explorerRegex, `$1'${explorerUrl}'`), + changed: true, + }; + } + + const defaultRpcLineRegex = /(defaultRPC\s*:\s*\[[\s\S]*?\]\s*,?)/m; + if (defaultRpcLineRegex.test(blockText)) { + return { + text: blockText.replace(defaultRpcLineRegex, `$1\n${indent}explorerUrl: '${explorerUrl}',`), + changed: true, + }; + } + + return { + text: blockText.replace(/\{\s*/, `{\n${indent}explorerUrl: '${explorerUrl}',\n`), + changed: true, + }; +} + +const edits = []; +for (const entry of endpointMap) { + const range = 
findChainBlockRange(source, entry.chain); + if (!range) { + console.error(`Could not find chain block for CHAIN.${entry.chain} in ${filePath}`); + process.exit(1); + } + + const originalBlock = source.slice(range.start, range.end + 1); + const indent = detectIndent(originalBlock); + + const defaultRpcResult = upsertDefaultRpc(originalBlock, entry.url, indent); + const explorerResult = upsertExplorerUrl(defaultRpcResult.text, entry.url, indent); + + edits.push({ + start: range.start, + end: range.end, + text: explorerResult.text, + }); +} + +edits.sort((a, b) => b.start - a.start); +for (const edit of edits) { + source = source.slice(0, edit.start) + edit.text + source.slice(edit.end + 1); +} + +fs.writeFileSync(filePath, source); +NODE +} + +sdk_sync_localnet_constants() { + require_cmd jq perl node + + local chain_constants_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH" + local sdk_utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" + local orchestrator_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/orchestrator/orchestrator.ts" + + if [[ ! 
-f "$chain_constants_file" ]]; then + log_err "SDK chain constants file not found: $chain_constants_file" + exit 1 + fi + + ensure_deploy_file + + local peth peth_arb peth_base pbnb psol usdt_eth usdt_bnb + peth="$(address_from_deploy_token "pETH")" + peth_arb="$(address_from_deploy_token "pETH.arb")" + peth_base="$(address_from_deploy_token "pETH.base")" + pbnb="$(address_from_deploy_token "pBNB")" + psol="$(address_from_deploy_token "pSOL")" + usdt_eth="$(address_from_deploy_token "USDT.eth")" + usdt_bnb="$(address_from_deploy_token "USDT.bsc")" + + [[ -n "$peth" ]] || peth="0xTBD" + [[ -n "$peth_arb" ]] || peth_arb="0xTBD" + [[ -n "$peth_base" ]] || peth_base="0xTBD" + [[ -n "$pbnb" ]] || pbnb="0xTBD" + [[ -n "$psol" ]] || psol="0xTBD" + [[ -n "$usdt_eth" ]] || usdt_eth="0xTBD" + [[ -n "$usdt_bnb" ]] || usdt_bnb="$usdt_eth" + + PETH_ADDR="$peth" \ + PETH_ARB_ADDR="$peth_arb" \ + PETH_BASE_ADDR="$peth_base" \ + PBNB_ADDR="$pbnb" \ + PSOL_ADDR="$psol" \ + USDT_ETH_ADDR="$usdt_eth" \ + USDT_BNB_ADDR="$usdt_bnb" \ + perl -0pi -e ' + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PETH_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH_ARB:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PETH_ARB_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH_BASE:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PETH_BASE_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pETH_BNB:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PBNB_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?pSOL:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{PSOL_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?USDT_ETH:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{USDT_ETH_ADDR}'\''#s; + s#(\[PUSH_NETWORK\.LOCALNET\]:\s*\{[\s\S]*?USDT_BNB:\s*)'\''[^'\''\n]*'\''#$1'\''$ENV{USDT_BNB_ADDR}'\''#s; + ' "$chain_constants_file" + + if [[ -f "$orchestrator_file" ]]; then + perl -0pi -e "s/return '\\Q0x00000000000000000000000000000000000000C0\\E';/return 
'0x00000000000000000000000000000000000000C1';/g" "$orchestrator_file" + fi + + # For LOCAL testing only, force selected chain endpoints to localhost RPC/explorer URLs. + if is_local_testing_env; then + sdk_rewrite_chain_endpoints_for_local "$chain_constants_file" + log_ok "Patched SDK chain.ts RPC/explorer endpoints for LOCAL testing" + fi + + if [[ -f "$sdk_utils_file" ]]; then + perl -0pi -e "s/\[PUSH_NETWORK\\.LOCALNET\]:\s*\[\s*CHAIN\\.PUSH_TESTNET_DONUT,/\[PUSH_NETWORK.LOCALNET\]: [CHAIN.PUSH_LOCALNET,/g" "$sdk_utils_file" + fi + + log_ok "Synced SDK LOCALNET synthetic token constants from deploy addresses" +} + +sdk_prepare_test_files_for_localnet() { + require_cmd perl + + if [[ ! -d "$PUSH_CHAIN_SDK_DIR/.git" && ! -d "$PUSH_CHAIN_SDK_DIR" ]]; then + log_err "SDK repo not found at $PUSH_CHAIN_SDK_DIR" + log_err "Run: $0 setup-sdk" + exit 1 + fi + + if [[ ! -d "$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_E2E_DIR" ]]; then + log_err "SDK E2E directory not found: $PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_E2E_DIR" + exit 1 + fi + + while IFS= read -r test_file; do + [[ -n "$test_file" ]] || continue + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$test_file" + log_ok "Prepared LOCALNET network replacement in $(basename "$test_file")" + done < <(sdk_test_files) + + while IFS= read -r outbound_file; do + [[ -n "$outbound_file" ]] || continue + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$outbound_file" + log_ok "Prepared LOCALNET network replacement in $(basename "$outbound_file")" + done < <(find "$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" -type f -name '*.spec.ts' | sort) +} + +step_clone_push_chain_sdk() { + require_cmd git + clone_or_update_repo "$PUSH_CHAIN_SDK_REPO" "$PUSH_CHAIN_SDK_BRANCH" 
"$PUSH_CHAIN_SDK_DIR" + log_ok "push-chain-sdk ready at $PUSH_CHAIN_SDK_DIR" +} + +step_setup_push_chain_sdk() { + require_cmd git yarn npm cast jq perl + + local chain_constants_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CHAIN_CONSTANTS_PATH" + local sdk_account_file="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_ACCOUNT_TS_PATH" + local uea_impl_raw uea_impl synced_localnet_uea + + if [[ ! -d "$PUSH_CHAIN_SDK_DIR/.git" ]]; then + log_err "SDK repo not found at $PUSH_CHAIN_SDK_DIR" + log_err "Run: $0 clone-sdk (or 'setup all' which clones it automatically)" + exit 1 + fi + + local sdk_env_path="$PUSH_CHAIN_SDK_DIR/$PUSH_CHAIN_SDK_CORE_ENV_PATH" + local sdk_evm_private_key sdk_evm_rpc sdk_solana_rpc sdk_solana_private_key sdk_push_private_key + + sdk_evm_private_key="${EVM_PRIVATE_KEY:-${PRIVATE_KEY:-}}" + sdk_evm_rpc="${EVM_RPC:-${PUSH_RPC_URL:-}}" + sdk_solana_rpc="${SOLANA_RPC_URL:-https://api.devnet.solana.com}" + sdk_solana_private_key="${SOLANA_PRIVATE_KEY:-${SVM_PRIVATE_KEY:-${SOL_PRIVATE_KEY:-}}}" + sdk_push_private_key="${PUSH_PRIVATE_KEY:-${PRIVATE_KEY:-}}" + + mkdir -p "$(dirname "$sdk_env_path")" + { + echo "# Auto-generated by e2e-tests/setup.sh setup-sdk" + echo "# Source: e2e-tests/.env" + echo "EVM_PRIVATE_KEY=$sdk_evm_private_key" + echo "EVM_RPC=$sdk_evm_rpc" + echo "SOLANA_RPC_URL=$sdk_solana_rpc" + echo "SOLANA_PRIVATE_KEY=$sdk_solana_private_key" + echo "PUSH_PRIVATE_KEY=$sdk_push_private_key" + [[ -n "${E2E_TARGET_CHAINS:-}" ]] && echo "E2E_TARGET_CHAINS=${E2E_TARGET_CHAINS}" + } >"$sdk_env_path" + + [[ -n "$sdk_evm_private_key" ]] || log_warn "SDK env EVM_PRIVATE_KEY is empty (set EVM_PRIVATE_KEY or PRIVATE_KEY in e2e-tests/.env)" + [[ -n "$sdk_evm_rpc" ]] || log_warn "SDK env EVM_RPC is empty (set EVM_RPC or PUSH_RPC_URL in e2e-tests/.env)" + [[ -n "$sdk_solana_private_key" ]] || log_warn "SDK env SOLANA_PRIVATE_KEY is empty (set SOLANA_PRIVATE_KEY in e2e-tests/.env)" + [[ -n "$sdk_push_private_key" ]] || log_warn "SDK env PUSH_PRIVATE_KEY is empty 
(set PUSH_PRIVATE_KEY or PRIVATE_KEY in e2e-tests/.env)" + log_ok "Generated push-chain-sdk env file: $sdk_env_path" + + if [[ ! -f "$chain_constants_file" ]]; then + log_err "SDK chain constants file not found: $chain_constants_file" + exit 1 + fi + + sdk_sync_localnet_constants + + log_info "Fetching UEA_PROXY_IMPLEMENTATION from local chain" + uea_impl_raw="$(cast call 0x00000000000000000000000000000000000000ea 'UEA_PROXY_IMPLEMENTATION()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + uea_impl="$(echo "$uea_impl_raw" | grep -Eo '0x[a-fA-F0-9]{40}' | head -1 || true)" + + if ! validate_eth_address "$uea_impl"; then + log_err "Could not resolve valid UEA_PROXY_IMPLEMENTATION address from cast output: $uea_impl_raw" + exit 1 + fi + + ensure_deploy_file + record_contract "UEA_PROXY_IMPLEMENTATION" "$uea_impl" + + UEA_PROXY_IMPL="$uea_impl" perl -0pi -e 's#(\[PUSH_NETWORK\.LOCALNET\]:\s*)'\''[^'\'']*'\''#$1'\''$ENV{UEA_PROXY_IMPL}'\''#g' "$chain_constants_file" + + synced_localnet_uea="$(grep -E '\[PUSH_NETWORK\.LOCALNET\]:' "$chain_constants_file" | head -1 | sed -E "s/.*'([^']+)'.*/\1/")" + if [[ "$synced_localnet_uea" != "$uea_impl" ]]; then + log_err "Failed to update PUSH_NETWORK.LOCALNET UEA proxy in $chain_constants_file" + exit 1 + fi + + log_ok "Synced PUSH_NETWORK.LOCALNET UEA proxy to $uea_impl" + + if [[ ! 
-f "$sdk_account_file" ]]; then + log_err "SDK account file not found: $sdk_account_file" + exit 1 + fi + + perl -0pi -e ' + s{(function\s+convertExecutorToOriginAccount\b.*?\{)(.*?)(\n\})}{ + my ($head, $body, $tail) = ($1, $2, $3); + $body =~ s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g; + "$head$body$tail"; + }gse; + ' "$sdk_account_file" + log_ok "Replaced CHAIN.PUSH_TESTNET_DONUT with CHAIN.PUSH_LOCALNET only in convertExecutorToOriginAccount() in $sdk_account_file" + + local sdk_e2e_root="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__" + if [[ -d "$sdk_e2e_root" ]]; then + log_info "Replacing TESTNET/TESTNET_DONUT with LOCALNET across all SDK __e2e__ test files" + local patched_count=0 + while IFS= read -r -d '' e2e_file; do + perl -0pi -e ' + s/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; + s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; + s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g; + ' "$e2e_file" + patched_count=$((patched_count + 1)) + done < <(find "$sdk_e2e_root" -type f \( -name '*.ts' -o -name '*.tsx' \) -print0) + log_ok "Applied LOCALNET replacement to $patched_count file(s) under $sdk_e2e_root" + else + log_warn "SDK __e2e__ directory not found at $sdk_e2e_root; skipping TESTNET→LOCALNET replacement" + fi + + log_info "Installing push-chain-sdk dependencies" + ( + cd "$PUSH_CHAIN_SDK_DIR" + yarn install + npm install + npm i --save-dev @types/bs58 + npm i tweetnacl + ) + + log_ok "push-chain-sdk setup complete" +} + +step_run_sdk_test_file() { + local test_basename="$1" + local test_file="" + + # Search inbound test files first + while IFS= read -r candidate; do + [[ -n "$candidate" ]] || continue + if [[ "$(basename "$candidate")" == "$test_basename" ]]; then + test_file="$candidate" + break + fi + done < <(sdk_test_files) + + if [[ -n "$test_file" ]]; then + # Inbound file — use full prepare (TESTNET→LOCALNET for all inbound files) + sdk_prepare_test_files_for_localnet + else + # Search outbound test files + while 
IFS= read -r candidate; do + [[ -n "$candidate" ]] || continue + if [[ "$(basename "$candidate")" == "$test_basename" ]]; then + test_file="$candidate" + break + fi + done < <(sdk_outbound_test_files) + + if [[ -n "$test_file" ]]; then + # Outbound file — sync localnet constants and apply TESTNET→LOCALNET to outbound files only + sdk_sync_localnet_constants + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$test_file" + log_ok "Prepared LOCALNET network replacement in $test_basename" + # Also patch shared evm-client.ts default network + local evm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/evm-client.ts" + if [[ -f "$evm_client_file" ]]; then + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g' "$evm_client_file" + log_ok "Patched evm-client.ts default network to PUSH_NETWORK.LOCALNET" + fi + # Patch utils.ts: fix TESTNET_DONUT default in getPRC20Address + local utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" + if [[ -f "$utils_file" ]]; then + perl -0pi -e 's/(const network = options\?\.network \?\?)\s*PUSH_NETWORK\.TESTNET_DONUT/$1 PUSH_NETWORK.LOCALNET/' "$utils_file" + log_ok "Patched utils.ts getPRC20Address default network to PUSH_NETWORK.LOCALNET" + fi + # Patch tokens.ts: fix TESTNET_DONUT in buildPushChainMoveableTokenAccessor + local tokens_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/constants/tokens.ts" + if [[ -f "$tokens_file" ]]; then + perl -0pi -e 's/(const s = SYNTHETIC_PUSH_ERC20\[)PUSH_NETWORK\.TESTNET_DONUT(\])/$1PUSH_NETWORK.LOCALNET$2/' "$tokens_file" + log_ok "Patched tokens.ts buildPushChainMoveableTokenAccessor default network to PUSH_NETWORK.LOCALNET" + fi + fi + fi + + if [[ -z "$test_file" ]]; then + log_err "Requested SDK test file not in configured list: $test_basename" + exit 1 + fi + + log_info "Running SDK test: $test_basename" + local 
rel_pattern="${test_file##*/packages/core/}" + ( + cd "$PUSH_CHAIN_SDK_DIR" + npx nx test core --runInBand --testPathPattern="$rel_pattern" + ) + + log_ok "Completed SDK test: $test_basename" +} + +step_run_sdk_tests_all() { + local test_file + + sdk_prepare_test_files_for_localnet + + while IFS= read -r test_file; do + [[ -n "$test_file" ]] || continue + log_info "Running SDK test: $(basename "$test_file")" + ( + cd "$PUSH_CHAIN_SDK_DIR" + npx nx test core --runInBand --testPathPattern="$(basename "$test_file")" + ) + done < <(sdk_test_files) + + log_ok "Completed all configured SDK E2E tests" +} + +step_run_sdk_outbound_tests_all() { + local test_file + local evm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/evm-client.ts" + + # Sync localnet constants (rewrites chain.ts defaultRPC for LOCAL mode) and + # apply TESTNET_DONUT → LOCALNET replacement in outbound spec files. + sdk_sync_localnet_constants + + while IFS= read -r outbound_file; do + [[ -n "$outbound_file" ]] || continue + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$outbound_file" + log_ok "Prepared LOCALNET network replacement in $(basename "$outbound_file")" + done < <(find "$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" -type f -name '*.spec.ts' | sort) + + # Also patch shared evm-client.ts default network so PushChain.initialize uses LOCALNET + if [[ -f "$evm_client_file" ]]; then + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g' "$evm_client_file" + log_ok "Patched evm-client.ts default network to PUSH_NETWORK.LOCALNET" + fi + + # Patch utils.ts: fix TESTNET_DONUT default in getPRC20Address (used for PRC20 token lookup) + local utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" + if [[ -f "$utils_file" ]]; then + perl -0pi -e 's/(const network = options\?\.network 
\?\?)\s*PUSH_NETWORK\.TESTNET_DONUT/$1 PUSH_NETWORK.LOCALNET/' "$utils_file" + log_ok "Patched utils.ts getPRC20Address default network to PUSH_NETWORK.LOCALNET" + fi + + # Patch tokens.ts: fix TESTNET_DONUT in buildPushChainMoveableTokenAccessor + local tokens_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/constants/tokens.ts" + if [[ -f "$tokens_file" ]]; then + perl -0pi -e 's/(const s = SYNTHETIC_PUSH_ERC20\[)PUSH_NETWORK\.TESTNET_DONUT(\])/$1PUSH_NETWORK.LOCALNET$2/' "$tokens_file" + log_ok "Patched tokens.ts buildPushChainMoveableTokenAccessor default network to PUSH_NETWORK.LOCALNET" + fi + + while IFS= read -r test_file; do + [[ -n "$test_file" ]] || continue + log_info "Running SDK outbound test: $(basename "$test_file")" + # Strip everything up to and including "packages/core/" to get a relative path + # that Jest can match against canonical absolute paths (avoids ".." in the pattern) + local rel_pattern="${test_file##*/packages/core/}" + ( + cd "$PUSH_CHAIN_SDK_DIR" + npx nx test core --runInBand --testPathPattern="$rel_pattern" + ) + done < <(sdk_outbound_test_files) + + log_ok "Completed all configured SDK outbound E2E tests" +} + +step_run_sdk_quick_testing_outbound() { + local outbound_dir="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/evm/outbound" + local quick_files=( + "cea-to-eoa.spec.ts" + "cea-to-uea.spec.ts" + ) + local evm_client_file="$PUSH_CHAIN_SDK_DIR/packages/core/__e2e__/shared/evm-client.ts" + local utils_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/utils.ts" + local tokens_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/constants/tokens.ts" + local file full_path + + step_setup_push_chain_sdk + step_fund_uea_prc20 + + sdk_sync_localnet_constants + + for file in "${quick_files[@]}"; do + full_path="$outbound_dir/$file" + if [[ ! 
-f "$full_path" ]]; then + log_err "SDK outbound test file not found: $full_path" + exit 1 + fi + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g; s/\bPUSH_NETWORK\.TESTNET\b/PUSH_NETWORK.LOCALNET/g; s/\bCHAIN\.PUSH_TESTNET_DONUT\b/CHAIN.PUSH_LOCALNET/g' "$full_path" + log_ok "Prepared LOCALNET network replacement in $file" + done + + if [[ -f "$evm_client_file" ]]; then + perl -0pi -e 's/\bPUSH_NETWORK\.TESTNET_DONUT\b/PUSH_NETWORK.LOCALNET/g' "$evm_client_file" + log_ok "Patched evm-client.ts default network to PUSH_NETWORK.LOCALNET" + fi + if [[ -f "$utils_file" ]]; then + perl -0pi -e 's/(const network = options\?\.network \?\?)\s*PUSH_NETWORK\.TESTNET_DONUT/$1 PUSH_NETWORK.LOCALNET/' "$utils_file" + log_ok "Patched utils.ts getPRC20Address default network to PUSH_NETWORK.LOCALNET" + fi + if [[ -f "$tokens_file" ]]; then + perl -0pi -e 's/(const s = SYNTHETIC_PUSH_ERC20\[)PUSH_NETWORK\.TESTNET_DONUT(\])/$1PUSH_NETWORK.LOCALNET$2/' "$tokens_file" + log_ok "Patched tokens.ts buildPushChainMoveableTokenAccessor default network to PUSH_NETWORK.LOCALNET" + fi + + for file in "${quick_files[@]}"; do + full_path="$outbound_dir/$file" + log_info "Running SDK outbound test: $file" + local rel_pattern="${full_path##*/packages/core/}" + ( + cd "$PUSH_CHAIN_SDK_DIR" + npx nx test core --runInBand --testPathPattern="$rel_pattern" + ) + done + + log_ok "Completed quick-testing-outbound SDK E2E tests" +} + +step_devnet() { + require_cmd bash jq + + local sepolia_rpc_override arbitrum_rpc_override base_rpc_override bsc_rpc_override solana_rpc_override + + chain_public_rpc_from_config() { + local file_path="$1" + local fallback_rpc="$2" + local label="$3" + local rpc_url + + if [[ ! 
-f "$file_path" ]]; then + log_warn "Chain config file not found for $label while preparing devnet RPC overrides: $file_path; using fallback $fallback_rpc" + printf "%s" "$fallback_rpc" + return + fi + + rpc_url="$(jq -r '.public_rpc_url // empty' "$file_path" 2>/dev/null || true)" + if [[ -z "$rpc_url" || "$rpc_url" == "null" ]]; then + log_warn "public_rpc_url missing in $file_path while preparing devnet RPC overrides; using fallback $fallback_rpc" + printf "%s" "$fallback_rpc" + return + fi + + printf "%s" "$rpc_url" + } + + if is_local_testing_env; then + local local_sepolia_rpc local_arbitrum_rpc local_base_rpc local_bsc_rpc local_solana_rpc + local_sepolia_rpc="${LOCAL_SEPOLIA_UV_RPC_URL:-${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}}" + local_arbitrum_rpc="${LOCAL_ARBITRUM_UV_RPC_URL:-${ANVIL_ARBITRUM_HOST_RPC_URL:-http://localhost:9546}}" + local_base_rpc="${LOCAL_BASE_UV_RPC_URL:-${ANVIL_BASE_HOST_RPC_URL:-http://localhost:9547}}" + local_bsc_rpc="${LOCAL_BSC_UV_RPC_URL:-${ANVIL_BSC_HOST_RPC_URL:-http://localhost:9548}}" + local_solana_rpc="${LOCAL_SOLANA_UV_RPC_URL:-${SURFPOOL_SOLANA_HOST_RPC_URL:-http://localhost:8899}}" + + sepolia_rpc_override="$local_sepolia_rpc" + arbitrum_rpc_override="$local_arbitrum_rpc" + base_rpc_override="$local_base_rpc" + bsc_rpc_override="$local_bsc_rpc" + solana_rpc_override="$local_solana_rpc" + else + sepolia_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/eth_sepolia/chain.json" "https://eth-sepolia.public.blastapi.io" "eth_sepolia")" + arbitrum_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/arb_sepolia/chain.json" "https://arbitrum-sepolia.gateway.tenderly.co" "arb_sepolia")" + base_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/base_sepolia/chain.json" "https://sepolia.base.org" "base_sepolia")" + bsc_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/bsc_testnet/chain.json" "https://bsc-testnet-rpc.publicnode.com" "bsc_testnet")" + 
solana_rpc_override="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/solana_devnet/chain.json" "https://api.devnet.solana.com" "solana_devnet")" + fi + + log_info "Devnet RPC overrides: sepolia=$sepolia_rpc_override arbitrum=$arbitrum_rpc_override base=$base_rpc_override bsc=$bsc_rpc_override solana=$solana_rpc_override" + + local devnet_sepolia_start="" devnet_arbitrum_start="" devnet_base_start="" devnet_bsc_start="" devnet_solana_start="" + + if ! is_local_testing_env; then + require_cmd curl jq + local _fetch_block + _fetch_block() { + local label="$1" rpc_url="$2" + local response hex_block decimal_block + response="$(curl -sS --max-time 15 -X POST "$rpc_url" \ + -H 'Content-Type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' 2>/dev/null || true)" + hex_block="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + if [[ -n "$hex_block" && "$hex_block" != "null" && "$hex_block" =~ ^0x[0-9a-fA-F]+$ ]]; then + decimal_block="$(printf '%d' "$hex_block" 2>/dev/null || true)" + [[ "$decimal_block" =~ ^[0-9]+$ ]] && { printf "%s" "$decimal_block"; return 0; } + fi + log_warn "Could not read block number for $label from $rpc_url; event_start_from will not be set" >&2 + printf "%s" "" + } + _fetch_solana_slot() { + local rpc_url="$1" + local slot response + response="$(curl -sS --max-time 15 -X POST "$rpc_url" -H 'Content-Type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"processed"}]}' 2>/dev/null || true)" + slot="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + slot="$(echo "$slot" | tr -d '[:space:]')" + [[ "$slot" =~ ^[0-9]+$ ]] && { printf "%s" "$slot"; return 0; } + log_warn "Could not read Solana slot from $rpc_url; event_start_from will not be set" >&2 + printf "%s" "" + } + log_info "Fetching latest block/slot numbers from public chain RPCs for devnet startup" + devnet_sepolia_start="$(_fetch_block "sepolia" 
"$sepolia_rpc_override")" + devnet_arbitrum_start="$(_fetch_block "arbitrum" "$arbitrum_rpc_override")" + devnet_base_start="$(_fetch_block "base" "$base_rpc_override")" + devnet_bsc_start="$(_fetch_block "bsc" "$bsc_rpc_override")" + devnet_solana_start="$(_fetch_solana_slot "$solana_rpc_override")" + log_ok "Devnet event_start_from: sepolia=${devnet_sepolia_start:-n/a} arbitrum=${devnet_arbitrum_start:-n/a} base=${devnet_base_start:-n/a} bsc=${devnet_bsc_start:-n/a} solana=${devnet_solana_start:-n/a}" + fi + + log_info "Starting local devnet" + ( + cd "$LOCAL_DEVNET_DIR" + + # Start all 4 core validators + ./devnet start 4 + + # Build UV env array with RPC overrides and event_start_from values + local _uv_env=( + SEPOLIA_RPC_URL_OVERRIDE="$sepolia_rpc_override" + ARBITRUM_RPC_URL_OVERRIDE="$arbitrum_rpc_override" + BASE_RPC_URL_OVERRIDE="$base_rpc_override" + BSC_RPC_URL_OVERRIDE="$bsc_rpc_override" + SOLANA_RPC_URL_OVERRIDE="$solana_rpc_override" + ) + [[ -n "$devnet_sepolia_start" ]] && _uv_env+=(SEPOLIA_EVENT_START_FROM="$devnet_sepolia_start") + [[ -n "$devnet_arbitrum_start" ]] && _uv_env+=(ARBITRUM_EVENT_START_FROM="$devnet_arbitrum_start") + [[ -n "$devnet_base_start" ]] && _uv_env+=(BASE_EVENT_START_FROM="$devnet_base_start") + [[ -n "$devnet_bsc_start" ]] && _uv_env+=(BSC_EVENT_START_FROM="$devnet_bsc_start") + [[ -n "$devnet_solana_start" ]] && _uv_env+=(SOLANA_EVENT_START_FROM="$devnet_solana_start") + + # Register universal validators on-chain and create authz grants + env "${_uv_env[@]}" ./devnet setup-uvalidators + + # Start 4 universal validators with RPC overrides and event_start_from + env "${_uv_env[@]}" ./devnet start-uv 2 + ) + + # Sync freshly generated genesis accounts so step_recover_genesis_key uses the current mnemonic. + # Each fresh devnet run (after `rm -rf data/`) regenerates accounts with new mnemonics. 
+ if [[ -f "$LOCAL_DEVNET_DIR/data/accounts/genesis_accounts.json" ]]; then + cp "$LOCAL_DEVNET_DIR/data/accounts/genesis_accounts.json" "$GENESIS_ACCOUNTS_JSON" + log_ok "Synced genesis_accounts.json from devnet" + fi + + log_ok "Devnet is up" +} + +step_ensure_tss_key_ready() { + require_cmd bash + log_info "Ensuring TSS key is ready" + ( + cd "$LOCAL_DEVNET_DIR" + ./devnet tss-keygen + ) + log_ok "TSS key is ready" +} + +step_setup_environment() { + require_cmd jq curl + + local has_docker="false" + if command -v docker >/dev/null 2>&1; then + has_docker="true" + fi + + if is_local_testing_env; then + require_cmd anvil cast surfpool + fi + + fetch_evm_block_number() { + local label="$1" + local rpc_url="$2" + local response hex_block decimal_block + + response="$(curl -sS --max-time 15 -X POST "$rpc_url" \ + -H 'Content-Type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' 2>/dev/null || true)" + + hex_block="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + if [[ -n "$hex_block" && "$hex_block" != "null" && "$hex_block" =~ ^0x[0-9a-fA-F]+$ ]]; then + decimal_block="$(printf '%d' "$hex_block" 2>/dev/null || true)" + if [[ "$decimal_block" =~ ^[0-9]+$ ]]; then + printf "%s" "$decimal_block" + return 0 + fi + fi + + log_warn "Could not read block number for $label at $rpc_url; defaulting event_start_from to 0" >&2 + printf "%s" "0" + } + + local sepolia_host_rpc="${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}" + local arbitrum_host_rpc="${ANVIL_ARBITRUM_HOST_RPC_URL:-http://localhost:9546}" + local base_host_rpc="${ANVIL_BASE_HOST_RPC_URL:-http://localhost:9547}" + local bsc_host_rpc="${ANVIL_BSC_HOST_RPC_URL:-http://localhost:9548}" + + local solana_host_rpc="${SURFPOOL_SOLANA_HOST_RPC_URL:-http://localhost:8899}" + local uv_sepolia_rpc_url="" + local uv_arbitrum_rpc_url="" + local uv_base_rpc_url="" + local uv_bsc_rpc_url="" + local uv_solana_rpc_url="" + + chain_public_rpc_from_config() { 
+ local file_path="$1" + local fallback_rpc="$2" + local label="$3" + local rpc_url + + if [[ ! -f "$file_path" ]]; then + log_warn "Chain config file not found for $label: $file_path; using fallback $fallback_rpc" + printf "%s" "$fallback_rpc" + return + fi + + rpc_url="$(jq -r '.public_rpc_url // empty' "$file_path" 2>/dev/null || true)" + if [[ -z "$rpc_url" || "$rpc_url" == "null" ]]; then + log_warn "public_rpc_url missing in $file_path; using fallback $fallback_rpc" + printf "%s" "$fallback_rpc" + return + fi + + printf "%s" "$rpc_url" + } + + patch_chain_config_public_rpc() { + local file_path="$1" + local rpc_url="$2" + local label="$3" + local tmp + + if [[ ! -f "$file_path" ]]; then + log_warn "Chain config file not found for $label: $file_path" + return 0 + fi + + tmp="$(mktemp)" + jq --arg rpc "$rpc_url" '.public_rpc_url = $rpc' "$file_path" >"$tmp" + mv "$tmp" "$file_path" + log_ok "Patched $label chain config public_rpc_url => $rpc_url" + } + + patch_local_testnet_donut_chain_configs() { + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/eth_sepolia/chain.json" "$sepolia_host_rpc" "eth_sepolia" + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/arb_sepolia/chain.json" "$arbitrum_host_rpc" "arb_sepolia" + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/base_sepolia/chain.json" "$base_host_rpc" "base_sepolia" + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/bsc_testnet/chain.json" "$bsc_host_rpc" "bsc_testnet" + patch_chain_config_public_rpc "$TOKENS_CONFIG_DIR/solana_devnet/chain.json" "$solana_host_rpc" "solana_devnet" + } + + if is_local_testing_env; then + uv_sepolia_rpc_url="${LOCAL_SEPOLIA_UV_RPC_URL:-$sepolia_host_rpc}" + uv_arbitrum_rpc_url="${LOCAL_ARBITRUM_UV_RPC_URL:-$arbitrum_host_rpc}" + uv_base_rpc_url="${LOCAL_BASE_UV_RPC_URL:-$base_host_rpc}" + uv_bsc_rpc_url="${LOCAL_BSC_UV_RPC_URL:-$bsc_host_rpc}" + uv_solana_rpc_url="${LOCAL_SOLANA_UV_RPC_URL:-$solana_host_rpc}" + else + uv_sepolia_rpc_url="$(chain_public_rpc_from_config 
"$TOKENS_CONFIG_DIR/eth_sepolia/chain.json" "$sepolia_host_rpc" "eth_sepolia")" + uv_arbitrum_rpc_url="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/arb_sepolia/chain.json" "$arbitrum_host_rpc" "arb_sepolia")" + uv_base_rpc_url="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/base_sepolia/chain.json" "$base_host_rpc" "base_sepolia")" + uv_bsc_rpc_url="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/bsc_testnet/chain.json" "$bsc_host_rpc" "bsc_testnet")" + uv_solana_rpc_url="$(chain_public_rpc_from_config "$TOKENS_CONFIG_DIR/solana_devnet/chain.json" "$solana_host_rpc" "solana_devnet")" + + if pgrep -f "${PUSH_CHAIN_DIR}/build/puniversald start" >/dev/null 2>&1; then + log_warn "puniversald processes are already running; RPC URL file changes apply fully after devnet restart" + fi + fi + + local sepolia_latest_block arbitrum_latest_block base_latest_block bsc_latest_block solana_latest_slot + sepolia_latest_block="0" + arbitrum_latest_block="0" + base_latest_block="0" + bsc_latest_block="0" + solana_latest_slot="0" + + start_anvil_fork() { + local label="$1" + local port="$2" + local chain_id="$3" + local fork_url="$4" + + # Kill any process that is currently bound to the target port. + # This avoids stale fork nodes when the command-line pattern changes. + local pid + while IFS= read -r pid; do + [[ -n "$pid" ]] || continue + log_info "Stopping process $pid on port $port before starting anvil $label" + kill "$pid" >/dev/null 2>&1 || true + done < <(lsof -ti tcp:"$port" 2>/dev/null || true) + + # Wait up to 8 seconds for the port to be fully released before binding the new process. 
+ local _w=0 + while lsof -ti tcp:"$port" >/dev/null 2>&1; do + if [[ $_w -ge 8 ]]; then + lsof -ti tcp:"$port" 2>/dev/null | xargs kill -9 2>/dev/null || true + sleep 1 + break + fi + sleep 1 + _w=$(( _w + 1 )) + done + + log_info "Starting anvil $label on port $port (chain-id: $chain_id)" + nohup anvil --host 0.0.0.0 --port "$port" --chain-id "$chain_id" --fork-url "$fork_url" --block-time 1 \ + >"$LOG_DIR/anvil_${label}.log" 2>&1 & + } + + wait_for_block_number() { + local label="$1" + local rpc_url="$2" + local latest="" + local i + for i in {1..30}; do + latest="$(cast block-number --rpc-url "$rpc_url" 2>/dev/null || true)" + latest="$(echo "$latest" | tr -d '[:space:]')" + if [[ "$latest" =~ ^[0-9]+$ ]]; then + printf "%s" "$latest" + return 0 + fi + sleep 1 + done + + log_warn "Could not read block number from $label anvil at $rpc_url after 30s; defaulting event_start_from to 0" >&2 + printf "%s" "0" + } + + start_surfpool() { + local surfpool_pattern="surfpool start --port 8899 --network devnet" + + if pgrep -f "$surfpool_pattern" >/dev/null 2>&1; then + log_info "Stopping existing surfpool on port 8899" + pkill -f "$surfpool_pattern" >/dev/null 2>&1 || true + sleep 1 + fi + + log_info "Starting surfpool for local Solana testing on port 8899" + nohup surfpool start --port 8899 --network devnet >"$LOG_DIR/surfpool.log" 2>&1 & + } + + wait_for_solana_slot() { + local rpc_url="$1" + local slot="" + local response + local i + for i in {1..30}; do + response="$(curl -sS -X POST "$rpc_url" -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"processed"}]}' || true)" + slot="$(echo "$response" | jq -r '.result // empty' 2>/dev/null || true)" + slot="$(echo "$slot" | tr -d '[:space:]')" + if [[ "$slot" =~ ^[0-9]+$ ]]; then + printf "%s" "$slot" + return 0 + fi + sleep 1 + done + + log_warn "Could not read Solana slot from surfpool at $rpc_url after 30s; defaulting event_start_from to 0" >&2 + printf "%s" 
"0" + } + + if is_local_testing_env; then + # Upstream RPCs that the local anvil forks are derived from. + local sepolia_fork_rpc="https://sepolia.drpc.org" + local arbitrum_fork_rpc="https://arbitrum-sepolia.gateway.tenderly.co" + local base_fork_rpc="https://sepolia.base.org" + local bsc_fork_rpc="wss://bsc-testnet-rpc.publicnode.com" + local solana_upstream_rpc="https://api.devnet.solana.com" + + # Fetch event_start_from from the upstream RPCs BEFORE starting local forks. + # This gives us the exact fork point block number reliably, without waiting for + # local anvil startup. UVs configured to use the local anvil fork will start + # scanning from this block number, which covers all locally-deployed contracts. + log_info "Fetching latest block numbers from upstream RPCs for event_start_from" + sepolia_latest_block="$(wait_for_block_number "sepolia" "$sepolia_fork_rpc")" + arbitrum_latest_block="$(wait_for_block_number "arbitrum" "$arbitrum_fork_rpc")" + base_latest_block="$(wait_for_block_number "base" "$base_fork_rpc")" + bsc_latest_block="$(wait_for_block_number "bsc" "$bsc_fork_rpc")" + solana_latest_slot="$(wait_for_solana_slot "$solana_upstream_rpc")" + log_ok "event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot" + + start_anvil_fork "sepolia" "9545" "11155111" "$sepolia_fork_rpc" + start_anvil_fork "arbitrum" "9546" "421614" "$arbitrum_fork_rpc" + start_anvil_fork "base" "9547" "84532" "$base_fork_rpc" + # Use the configured BSC endpoint for anvil forking. + start_anvil_fork "bsc" "9548" "97" "$bsc_fork_rpc" + start_surfpool + patch_local_testnet_donut_chain_configs + + # Wait for local forks to be ready before proceeding. 
+ wait_for_block_number "sepolia" "$sepolia_host_rpc" >/dev/null + wait_for_block_number "arbitrum" "$arbitrum_host_rpc" >/dev/null + wait_for_block_number "base" "$base_host_rpc" >/dev/null + wait_for_block_number "bsc" "$bsc_host_rpc" >/dev/null + wait_for_solana_slot "$solana_host_rpc" >/dev/null + else + log_info "Fetching latest block numbers from public chain RPCs for event_start_from" + sepolia_latest_block="$(wait_for_block_number "sepolia" "$uv_sepolia_rpc_url")" + arbitrum_latest_block="$(wait_for_block_number "arbitrum" "$uv_arbitrum_rpc_url")" + base_latest_block="$(wait_for_block_number "base" "$uv_base_rpc_url")" + bsc_latest_block="$(wait_for_block_number "bsc" "$uv_bsc_rpc_url")" + solana_latest_slot="$(wait_for_solana_slot "$uv_solana_rpc_url")" + log_ok "event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot" + fi + + local patched_count=0 + local uv_idx + for uv_idx in 1 2 3 4; do + # Prefer local file (local-native devnet); fall back to Docker container + local local_cfg="$LOCAL_DEVNET_DIR/data/universal${uv_idx}/.puniversal/config/pushuv_config.json" + local uv_container="universal-validator-${uv_idx}" + + local tmp_in tmp_out + tmp_in="$(mktemp)" + tmp_out="$(mktemp)" + + if [[ -f "$local_cfg" ]]; then + cp "$local_cfg" "$tmp_in" + elif [[ "$has_docker" == "true" ]] && docker ps --format '{{.Names}}' | grep -qx "$uv_container" 2>/dev/null; then + local docker_cfg="/root/.puniversal/config/pushuv_config.json" + if ! 
docker exec "$uv_container" cat "$docker_cfg" >"$tmp_in" 2>/dev/null; then + rm -f "$tmp_in" "$tmp_out" + log_warn "Failed to read config from $uv_container" + continue + fi + else + rm -f "$tmp_in" "$tmp_out" + continue + fi + + jq \ + --arg sepolia_rpc "$uv_sepolia_rpc_url" \ + --arg arbitrum_rpc "$uv_arbitrum_rpc_url" \ + --arg base_rpc "$uv_base_rpc_url" \ + --arg bsc_rpc "$uv_bsc_rpc_url" \ + --arg solana_rpc "$uv_solana_rpc_url" \ + --argjson sepolia_start "$sepolia_latest_block" \ + --argjson arbitrum_start "$arbitrum_latest_block" \ + --argjson base_start "$base_latest_block" \ + --argjson bsc_start "$bsc_latest_block" \ + --argjson solana_start "$solana_latest_slot" \ + ' + .chain_configs["eip155:11155111"].rpc_urls = [$sepolia_rpc] + | .chain_configs["eip155:11155111"].event_start_from = $sepolia_start + | .chain_configs["eip155:421614"].rpc_urls = [$arbitrum_rpc] + | .chain_configs["eip155:421614"].event_start_from = $arbitrum_start + | .chain_configs["eip155:84532"].rpc_urls = [$base_rpc] + | .chain_configs["eip155:84532"].event_start_from = $base_start + | .chain_configs["eip155:97"].rpc_urls = [$bsc_rpc] + | .chain_configs["eip155:97"].event_start_from = $bsc_start + | .chain_configs["solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"].rpc_urls = [$solana_rpc] + | .chain_configs["solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"].event_start_from = $solana_start + ' "$tmp_in" >"$tmp_out" + + if [[ -f "$local_cfg" ]]; then + cp "$tmp_out" "$local_cfg" + if is_local_testing_env; then + log_ok "Updated universal-validator-${uv_idx} local config for Sepolia/Arbitrum/Base/BSC/Solana LOCAL forks (event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot)" + else + log_ok "Updated universal-validator-${uv_idx} local config from testnet-donut chain public RPCs (event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block 
solana=$solana_latest_slot)" + fi + else + local docker_cfg="/root/.puniversal/config/pushuv_config.json" + docker cp "$tmp_out" "$uv_container":"$docker_cfg" + if is_local_testing_env; then + log_ok "Updated $uv_container Docker config for Sepolia/Arbitrum/Base/BSC/Solana LOCAL forks (event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot)" + else + log_ok "Updated $uv_container Docker config from testnet-donut chain public RPCs (event_start_from: sepolia=$sepolia_latest_block arbitrum=$arbitrum_latest_block base=$base_latest_block bsc=$bsc_latest_block solana=$solana_latest_slot)" + fi + fi + rm -f "$tmp_in" "$tmp_out" + patched_count=$((patched_count + 1)) + done + + if [[ "$patched_count" -eq 0 ]]; then + log_warn "No universal validators found (local or Docker); skipped pushuv_config.json patch" + return 0 + fi + + if is_local_testing_env; then + log_ok "Patched $patched_count universal validator config(s) with LOCAL fork RPC/event_start_from (including Solana)" + else + log_ok "Patched $patched_count universal validator config(s) with testnet-donut chain public RPCs and live event_start_from values" + fi +} + +step_stop_running_nodes() { + log_info "Stopping running local nodes/validators" + + if [[ -x "$LOCAL_DEVNET_DIR/devnet" ]]; then + ( + cd "$LOCAL_DEVNET_DIR" + ./devnet down || true + ) + fi + + pkill -f "$PUSH_CHAIN_DIR/build/pchaind start" >/dev/null 2>&1 || true + pkill -f "$PUSH_CHAIN_DIR/build/puniversald" >/dev/null 2>&1 || true + + log_ok "Running nodes stopped" +} + +step_fund_uv_broadcasters_on_anvil() { + if ! is_local_testing_env; then + log_info "step_fund_uv_broadcasters_on_anvil: skipping (non-LOCAL environment)" + return 0 + fi + require_cmd cast + local anvil_rpc="${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}" + # Anvil default account 0 — always seeded with 10,000 ETH in any anvil fork (mnemonic: "test test ... 
junk") + local funder_pk="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + local fund_amount="10ether" + local funded=0 + for addr_file in "$LOCAL_DEVNET_DIR/data"/universal*/.puniversal/keyring-test/*.address; do + [[ -f "$addr_file" ]] || continue + local addr_hex + addr_hex="$(basename "$addr_file" .address)" + local addr="0x${addr_hex}" + local balance + balance="$(cast balance "$addr" --rpc-url "$anvil_rpc" 2>/dev/null || echo "0")" + if [[ "$balance" == "0" ]]; then + log_info "Funding UV broadcaster $addr with $fund_amount on Anvil Sepolia" + if cast send "$addr" --value "$fund_amount" --private-key "$funder_pk" \ + --rpc-url "$anvil_rpc" >/dev/null 2>&1; then + funded=$((funded + 1)) + else + log_warn "Failed to fund UV broadcaster $addr on Anvil Sepolia" + fi + else + log_info "UV broadcaster $addr already has ETH on Anvil Sepolia: $balance wei" + fi + done + log_ok "UV broadcaster funding done (funded $funded new address(es))" +} + +# Sync every EVM vault's TSS_ADDRESS to the current local TSS key so that +# AccessControlUnauthorizedAccount (0xe2517d3f) never blocks outbound txs. +# Also funds the TSS signer on each Anvil chain so it can pay gas. +step_sync_vault_tss_on_anvil() { + if ! is_local_testing_env; then + log_info "step_sync_vault_tss_on_anvil: skipping (non-LOCAL environment)" + return 0 + fi + require_cmd cast jq python3 + + # Derive the TSS EVM address from the on-chain TSS public key. + # 1. Query compressed secp256k1 pubkey from the utss module. + # 2. Decompress it using pure Python3 math (stdlib only, no extra packages). + # 3. keccak256(x || y) via `cast keccak`, last 20 bytes = EVM address. 
+ local tss_pubkey tss_addr + tss_pubkey="$("$PUSH_CHAIN_DIR/build/pchaind" query utss current-key \ + --node tcp://127.0.0.1:26657 --output json 2>/dev/null \ + | jq -r '.key.tss_pubkey // empty' 2>/dev/null || true)" + + if [[ -z "$tss_pubkey" ]]; then + log_warn "step_sync_vault_tss_on_anvil: TSS key not found on chain yet, skipping" + return 0 + fi + + # Decompress pubkey → 64-byte uncompressed (x||y) hex using Python3 stdlib. + local uncompressed_hex + uncompressed_hex="$(python3 -c " +prefix = int('${tss_pubkey:0:2}', 16) +x = int('${tss_pubkey:2}', 16) +p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F +y_sq = (pow(x, 3, p) + 7) % p +y = pow(y_sq, (p + 1) // 4, p) +if (y % 2) != (prefix % 2): + y = p - y +print(format(x, '064x') + format(y, '064x')) +" 2>/dev/null || true)" + + if [[ -z "$uncompressed_hex" ]]; then + log_warn "step_sync_vault_tss_on_anvil: failed to decompress TSS pubkey, skipping" + return 0 + fi + + local keccak_hash + keccak_hash="$(cast keccak "0x$uncompressed_hex" 2>/dev/null || true)" + tss_addr="0x${keccak_hash: -40}" + + if [[ -z "$tss_addr" || ${#tss_addr} -ne 42 ]]; then + log_warn "step_sync_vault_tss_on_anvil: failed to derive TSS EVM address, skipping" + return 0 + fi + + log_info "Syncing vault TSS address to $tss_addr on all local Anvil EVM chains" + + local DEF_ADMIN_ROLE="0x0000000000000000000000000000000000000000000000000000000000000000" + # Anvil default account 0 — always seeded with 10,000 ETH in every fork + local funder_pk="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + # Known deployer addresses for the forge localSetup scripts — these never change + # between runs since it is the same forge wallet that deploys the vault contracts. 
+ local KNOWN_ADMINS=( + "0x35b84d6848d16415177c64d64504663b998a6ab4" + "0xe520d4A985A2356Fa615935a822Ce4eFAcA24aB6" + "0xd854dde7c58ec1b405e6577f48a7cc5b5e6ef317" + ) + + # cfg_name:anvil_rpc pairs — mirrors the Anvil forks started in step_devnet. + local CHAIN_INFO=( + "eth_sepolia:${ANVIL_SEPOLIA_HOST_RPC_URL:-http://localhost:9545}" + "arb_sepolia:${ANVIL_ARBITRUM_HOST_RPC_URL:-http://localhost:9546}" + "base_sepolia:${ANVIL_BASE_HOST_RPC_URL:-http://localhost:9547}" + "bsc_testnet:${ANVIL_BSC_HOST_RPC_URL:-http://localhost:9548}" + ) + + for entry in "${CHAIN_INFO[@]}"; do + local cfg_name="${entry%%:*}" + local rpc="${entry#*:}" + local chain_cfg="$TOKENS_CONFIG_DIR/$cfg_name/chain.json" + + if [[ ! -f "$chain_cfg" ]]; then + log_warn "step_sync_vault_tss_on_anvil: no chain config at $chain_cfg, skipping" + continue + fi + + # Fund the TSS signer so it can pay gas for outbound vault txs. + local tss_bal + tss_bal="$(cast balance "$tss_addr" --rpc-url "$rpc" 2>/dev/null || echo "0")" + if [[ "$tss_bal" == "0" ]]; then + if cast send "$tss_addr" --value "10ether" --private-key "$funder_pk" --rpc-url "$rpc" >/dev/null 2>&1; then + log_ok " $cfg_name: funded TSS signer $tss_addr with 10 ETH" + else + log_warn " $cfg_name: failed to fund TSS signer $tss_addr" + fi + else + log_info " $cfg_name: TSS signer $tss_addr already has ETH (bal=$tss_bal)" + fi + + local gateway + gateway="$(jq -r '.gateway_address // empty' "$chain_cfg" 2>/dev/null || true)" + if [[ -z "$gateway" || "$gateway" == "null" ]]; then + log_warn "step_sync_vault_tss_on_anvil: no gateway_address in $chain_cfg, skipping" + continue + fi + + local vault + vault="$(cast call "$gateway" 'VAULT()(address)' --rpc-url "$rpc" 2>/dev/null || true)" + if [[ -z "$vault" || "$vault" == "0x0000000000000000000000000000000000000000" ]]; then + log_warn "step_sync_vault_tss_on_anvil: VAULT() empty for gateway $gateway ($cfg_name), skipping" + continue + fi + + # Skip only if the vault's stored TSS_ADDRESS 
already matches the current key. + # Checking TSS_ADDRESS (not just hasRole) ensures we update after every re-keying, + # because setTSS atomically revokes the old role and grants the new one. + local vault_tss + vault_tss="$(cast call "$vault" 'TSS_ADDRESS()(address)' --rpc-url "$rpc" 2>/dev/null || true)" + if [[ "$(echo "$vault_tss" | tr '[:upper:]' '[:lower:]')" == "$(echo "$tss_addr" | tr '[:upper:]' '[:lower:]')" ]]; then + log_info " $cfg_name vault $vault TSS_ADDRESS already matches $tss_addr" + continue + fi + + # Find the DEFAULT_ADMIN_ROLE holder among known candidates. + local vault_admin="" + for candidate in "${KNOWN_ADMINS[@]}"; do + local is_admin + is_admin="$(cast call "$vault" 'hasRole(bytes32,address)(bool)' "$DEF_ADMIN_ROLE" "$candidate" \ + --rpc-url "$rpc" 2>/dev/null || echo "false")" + if [[ "$is_admin" == "true" ]]; then + vault_admin="$candidate" + break + fi + done + + if [[ -z "$vault_admin" ]]; then + log_warn "step_sync_vault_tss_on_anvil: no known admin for vault $vault ($cfg_name), skipping" + continue + fi + + # Impersonate the admin on the Anvil fork (no private key needed) and call setTSS. + cast rpc anvil_impersonateAccount "$vault_admin" --rpc-url "$rpc" >/dev/null 2>&1 || true + cast rpc anvil_setBalance "$vault_admin" "0x56BC75E2D63100000" --rpc-url "$rpc" >/dev/null 2>&1 || true + + if cast send "$vault" "setTSS(address)" "$tss_addr" \ + --rpc-url "$rpc" \ + --from "$vault_admin" \ + --unlocked >/dev/null 2>&1; then + log_ok " $cfg_name vault $vault: TSS updated to $tss_addr" + else + log_warn " step_sync_vault_tss_on_anvil: setTSS failed on vault $vault ($cfg_name)" + fi + done + + log_ok "Vault TSS sync complete" +} + +step_print_genesis() { + require_cmd jq + local accounts_json + if ! 
accounts_json="$(get_genesis_accounts_json)"; then + log_err "Could not resolve genesis accounts from $GENESIS_ACCOUNTS_JSON or docker container core-validator-1" + exit 1 + fi + + jq -r '.[0] | "Account: \(.name)\nAddress: \(.address)\nMnemonic: \(.mnemonic)"' <<<"$accounts_json" +} + +step_recover_genesis_key() { + require_cmd "$PUSH_CHAIN_DIR/build/pchaind" jq + + local mnemonic="${GENESIS_MNEMONIC:-}" + if [[ -z "$mnemonic" ]]; then + local accounts_json + accounts_json="$(get_genesis_accounts_json || true)" + if [[ -n "$accounts_json" ]]; then + mnemonic="$(jq -r --arg n "$GENESIS_KEY_NAME" ' + (first(.[] | select(.name == $n) | .mnemonic) // first(.[].mnemonic) // "") + ' <<<"$accounts_json")" + fi + fi + + if [[ -z "$mnemonic" ]]; then + log_err "Could not auto-resolve mnemonic from $GENESIS_ACCOUNTS_JSON or docker container core-validator-1" + log_err "Set GENESIS_MNEMONIC in e2e-tests/.env" + exit 1 + fi + + if "$PUSH_CHAIN_DIR/build/pchaind" keys show "$GENESIS_KEY_NAME" \ + --keyring-backend "$KEYRING_BACKEND" \ + --home "$GENESIS_KEY_HOME" >/dev/null 2>&1; then + log_warn "Key ${GENESIS_KEY_NAME} already exists. Deleting before recover." + "$PUSH_CHAIN_DIR/build/pchaind" keys delete "$GENESIS_KEY_NAME" \ + --keyring-backend "$KEYRING_BACKEND" \ + --home "$GENESIS_KEY_HOME" \ + -y >/dev/null + fi + + log_info "Recovering key ${GENESIS_KEY_NAME}" + printf "%s\n" "$mnemonic" | "$PUSH_CHAIN_DIR/build/pchaind" keys add "$GENESIS_KEY_NAME" \ + --recover \ + --keyring-backend "$KEYRING_BACKEND" \ + --algo eth_secp256k1 \ + --home "$GENESIS_KEY_HOME" >/dev/null + + log_ok "Recovered key ${GENESIS_KEY_NAME}" +} + +step_fund_account() { + require_cmd "$PUSH_CHAIN_DIR/build/pchaind" + + local to_addr="${FUND_TO_ADDRESS:-}" + if [[ -z "$to_addr" ]]; then + log_err "Set FUND_TO_ADDRESS in e2e-tests/.env" + exit 1 + fi + if ! validate_eth_address "$to_addr" && [[ ! 
"$to_addr" =~ ^push1[0-9a-z]+$ ]]; then + log_err "Invalid FUND_TO_ADDRESS: $to_addr" + exit 1 + fi + + log_info "Funding $to_addr with $FUND_AMOUNT" + "$PUSH_CHAIN_DIR/build/pchaind" tx bank send "$GENESIS_KEY_NAME" "$to_addr" "$FUND_AMOUNT" \ + --gas-prices "$GAS_PRICES" \ + --keyring-backend "$KEYRING_BACKEND" \ + --chain-id "$CHAIN_ID" \ + --home "$GENESIS_KEY_HOME" \ + -y + + log_ok "Funding transaction submitted" +} + +step_update_env_fund_to_address() { + require_cmd jq + ENV_FILE="$SCRIPT_DIR/.env" + if [[ ! -f "$ENV_FILE" ]]; then + log_err ".env file not found in e2e-tests folder" + exit 1 + fi + PRIVATE_KEY=$(grep '^PRIVATE_KEY=' "$ENV_FILE" | cut -d= -f2 | tr -d '"' | tr -d "'") + if [[ -z "$PRIVATE_KEY" ]]; then + log_err "PRIVATE_KEY not found in .env" + exit 1 + fi + if ! command -v $PUSH_CHAIN_DIR/build/pchaind >/dev/null 2>&1; then + log_err "pchaind binary not found in build/ (run make build)" + exit 1 + fi + EVM_ADDRESS=$(cast wallet address $PRIVATE_KEY) + COSMOS_ADDRESS=$($PUSH_CHAIN_DIR/build/pchaind debug addr $(echo $EVM_ADDRESS | tr '[:upper:]' '[:lower:]' | sed 's/^0x//') | awk -F': ' '/Bech32 Acc:/ {print $2; exit}') + if [[ -z "$COSMOS_ADDRESS" ]]; then + log_err "Could not derive cosmos address from $EVM_ADDRESS" + exit 1 + fi + if grep -q '^FUND_TO_ADDRESS=' "$ENV_FILE"; then + sed -i.bak "s|^FUND_TO_ADDRESS=.*$|FUND_TO_ADDRESS=$COSMOS_ADDRESS|" "$ENV_FILE" + else + echo "FUND_TO_ADDRESS=$COSMOS_ADDRESS" >> "$ENV_FILE" + fi + # Keep runtime env stable: avoid re-sourcing .env here because that can + # reset already-normalized absolute paths (CORE_CONTRACTS_DIR/GATEWAY_DIR/etc). 
+ FUND_TO_ADDRESS="$COSMOS_ADDRESS" + log_ok "Updated FUND_TO_ADDRESS in .env to $COSMOS_ADDRESS" +} + +parse_core_prc20_logs() { + local log_file="$1" + local current_addr="" + local line + + while IFS= read -r line; do + if [[ "$line" =~ PRC20[[:space:]]deployed[[:space:]]at:[[:space:]](0x[a-fA-F0-9]{40}) ]]; then + current_addr="${BASH_REMATCH[1]}" + continue + fi + + if [[ -n "$current_addr" && "$line" =~ Name:[[:space:]](.+)[[:space:]]Symbol:[[:space:]]([A-Za-z0-9._-]+)$ ]]; then + local token_name="${BASH_REMATCH[1]}" + local token_symbol="${BASH_REMATCH[2]}" + record_token "$token_name" "$token_symbol" "$current_addr" "core-contracts" + current_addr="" + fi + done <"$log_file" +} + +enrich_core_token_decimals() { + require_cmd jq cast + ensure_deploy_file + + local addr decimals tmp + while IFS= read -r addr; do + [[ -n "$addr" ]] || continue + decimals="$(cast call "$addr" "decimals()(uint8)" --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + decimals="$(echo "$decimals" | tr -d '[:space:]')" + + if [[ "$decimals" =~ ^[0-9]+$ ]]; then + tmp="$(mktemp)" + jq --arg addr "$addr" --argjson dec "$decimals" ' + .tokens |= map( + if ((.address | ascii_downcase) == ($addr | ascii_downcase)) + then . + {decimals: $dec} + else . + end + ) + ' "$DEPLOY_ADDRESSES_FILE" >"$tmp" + mv "$tmp" "$DEPLOY_ADDRESSES_FILE" + log_ok "Resolved token decimals: $addr => $decimals" + else + log_warn "Could not resolve decimals() for token $addr" + fi + done < <(jq -r '.tokens[]? 
| select(.decimals == null) | .address' "$DEPLOY_ADDRESSES_FILE") +} + +step_setup_core_contracts() { + require_cmd git forge jq + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + + ensure_deploy_file + log_info "Using core contracts repo dir: $CORE_CONTRACTS_DIR" + clone_or_update_repo "$CORE_CONTRACTS_REPO" "$CORE_CONTRACTS_BRANCH" "$CORE_CONTRACTS_DIR" + + log_info "Running forge build in core contracts" + (cd "$CORE_CONTRACTS_DIR" && forge build) + + local log_file="$LOG_DIR/core_setup_$(date +%Y%m%d_%H%M%S).log" + local failed=0 + local resume_attempt=1 + local resume_max_attempts="${CORE_RESUME_MAX_ATTEMPTS:-0}" # 0 = unlimited + + log_info "Clearing stale forge broadcast cache for fresh deploy" + rm -rf "$CORE_CONTRACTS_DIR/broadcast/setup.s.sol" + + log_info "Running local core setup script" + ( + cd "$CORE_CONTRACTS_DIR" + forge script scripts/localSetup/setup.s.sol \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow + ) 2>&1 | tee "$log_file" || failed=1 + + if [[ "$failed" -ne 0 ]]; then + log_warn "Initial run failed. 
Retrying with --resume until success" + while true; do + log_info "Resume attempt: $resume_attempt" + if ( + cd "$CORE_CONTRACTS_DIR" + forge script scripts/localSetup/setup.s.sol \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow \ + --resume + ) 2>&1 | tee -a "$log_file"; then + break + fi + + if [[ "$resume_max_attempts" != "0" && "$resume_attempt" -ge "$resume_max_attempts" ]]; then + log_err "Reached CORE_RESUME_MAX_ATTEMPTS=$resume_max_attempts without success" + exit 1 + fi + + resume_attempt=$((resume_attempt + 1)) + sleep 2 + done + fi + + parse_core_prc20_logs "$log_file" + enrich_core_token_decimals + log_ok "Core contracts setup complete" +} + +find_first_address_with_keywords() { + local log_file="$1" + shift + local pattern + pattern="$(printf '%s|' "$@")" + pattern="${pattern%|}" + grep -Ei "$pattern" "$log_file" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true +} + +address_from_deploy_contract() { + local key="$1" + jq -r --arg k "$key" '.contracts[$k] // ""' "$DEPLOY_ADDRESSES_FILE" +} + +address_from_deploy_token() { + local sym="$1" + jq -r --arg s "$sym" 'first(.tokens[]? | select((.symbol|ascii_downcase) == ($s|ascii_downcase)) | .address) // ""' "$DEPLOY_ADDRESSES_FILE" +} + +resolve_peth_token_address() { + local addr="" + addr="$(address_from_deploy_token "pETH")" + [[ -n "$addr" ]] || addr="$(address_from_deploy_token "WETH")" + if [[ -z "$addr" ]]; then + addr="$(jq -r 'first(.tokens[]? 
| select((.name|ascii_downcase) | test("eth")) | .address) // ""' "$DEPLOY_ADDRESSES_FILE")" + fi + printf "%s" "$addr" +} + +assert_required_addresses() { + ensure_deploy_file + local required=("WPC" "Factory" "QuoterV2" "SwapRouter") + local missing=0 + local key val + + for key in "${required[@]}"; do + val="$(address_from_deploy_contract "$key")" + if [[ -z "$val" ]]; then + log_warn "Missing address in deploy file: contracts.$key" + missing=1 + else + log_ok "contracts.$key=$val" + fi + done + + if [[ "$missing" -ne 0 ]]; then + log_warn "Some addresses are missing in $DEPLOY_ADDRESSES_FILE; continuing with available values" + fi +} + +step_write_core_env() { + require_cmd jq + ensure_deploy_file + assert_required_addresses + + local core_env="$CORE_CONTRACTS_DIR/.env" + local wpc factory quoter router + wpc="$(address_from_deploy_contract "WPC")" + factory="$(address_from_deploy_contract "Factory")" + quoter="$(address_from_deploy_contract "QuoterV2")" + router="$(address_from_deploy_contract "SwapRouter")" + + log_info "Writing core-contracts .env" + { + echo "PUSH_RPC_URL=$PUSH_RPC_URL" + echo "PRIVATE_KEY=$PRIVATE_KEY" + echo "WPC_ADDRESS=$wpc" + echo "FACTORY_ADDRESS=$factory" + echo "QUOTER_V2_ADDRESS=$quoter" + echo "SWAP_ROUTER_ADDRESS=$router" + echo "WPC=$wpc" + echo "UNISWAP_V3_FACTORY=$factory" + echo "UNISWAP_V3_QUOTER=$quoter" + echo "UNISWAP_V3_ROUTER=$router" + echo "" + echo "# Tokens deployed from core setup" + jq -r '.tokens | to_entries[]? 
| "TOKEN" + ((.key + 1)|tostring) + "=" + .value.address' "$DEPLOY_ADDRESSES_FILE" + } >"$core_env" + + log_ok "Generated $core_env" +} + +step_update_eth_token_config() { + step_update_deployed_token_configs +} + +norm_token_key() { + local s="$1" + s="$(echo "$s" | tr '[:upper:]' '[:lower:]')" + s="$(echo "$s" | sed -E 's/[^a-z0-9]+//g')" + printf "%s" "$s" +} + +norm_token_key_without_leading_p() { + local s + s="$(norm_token_key "$1")" + if [[ "$s" == p* && ${#s} -gt 1 ]]; then + printf "%s" "${s#p}" + else + printf "%s" "$s" + fi +} + +find_matching_token_config_file() { + local deployed_symbol="$1" + local deployed_name="$2" + local best_file="" + local best_score=0 + + local d_sym d_name d_sym_np d_name_np + d_sym="$(norm_token_key "$deployed_symbol")" + d_name="$(norm_token_key "$deployed_name")" + d_sym_np="$(norm_token_key_without_leading_p "$deployed_symbol")" + d_name_np="$(norm_token_key_without_leading_p "$deployed_name")" + + local file f_sym f_name f_base f_sym_np f_name_np score + while IFS= read -r file; do + [[ -f "$file" ]] || continue + f_sym="$(jq -r '.symbol // ""' "$file")" + f_name="$(jq -r '.name // ""' "$file")" + f_base="$(basename "$file" .json)" + + f_sym="$(norm_token_key "$f_sym")" + f_name="$(norm_token_key "$f_name")" + f_base="$(norm_token_key "$f_base")" + f_sym_np="$(norm_token_key_without_leading_p "$f_sym")" + f_name_np="$(norm_token_key_without_leading_p "$f_name")" + + score=0 + [[ -n "$d_sym" && "$d_sym" == "$f_sym" ]] && score=$((score + 100)) + [[ -n "$d_name" && "$d_name" == "$f_name" ]] && score=$((score + 90)) + [[ -n "$d_sym_np" && "$d_sym_np" == "$f_sym" ]] && score=$((score + 80)) + [[ -n "$d_name_np" && "$d_name_np" == "$f_name" ]] && score=$((score + 70)) + [[ -n "$d_sym" && "$d_sym" == "$f_name" ]] && score=$((score + 60)) + [[ -n "$d_name" && "$d_name" == "$f_sym" ]] && score=$((score + 60)) + [[ -n "$d_sym_np" && "$f_base" == *"$d_sym_np"* ]] && score=$((score + 30)) + [[ -n "$d_name_np" && "$f_base" == 
*"$d_name_np"* ]] && score=$((score + 20)) + + if (( score > best_score )); then + best_score=$score + best_file="$file" + fi + done < <(find "$TOKENS_CONFIG_DIR" -type f -path '*/tokens/*.json' | sort) + + if (( best_score >= 60 )); then + printf "%s" "$best_file" + fi +} + +step_update_deployed_token_configs() { + require_cmd jq + ensure_deploy_file + + if [[ ! -d "$TOKENS_CONFIG_DIR" ]]; then + log_err "Tokens config directory missing: $TOKENS_CONFIG_DIR" + exit 1 + fi + + if ! find "$TOKENS_CONFIG_DIR" -type f -path '*/tokens/*.json' | grep -q .; then + log_err "No token config files found under: $TOKENS_CONFIG_DIR" + exit 1 + fi + + local used_files="" + local updated=0 + local token_json token_symbol token_name token_address match_file tmp + + while IFS= read -r token_json; do + token_symbol="$(echo "$token_json" | jq -r '.symbol // ""')" + token_name="$(echo "$token_json" | jq -r '.name // ""')" + token_address="$(echo "$token_json" | jq -r '.address // ""')" + + [[ -n "$token_address" ]] || continue + match_file="$(find_matching_token_config_file "$token_symbol" "$token_name")" + + if [[ -z "$match_file" ]]; then + log_warn "No token config match found for deployed token: $token_symbol ($token_name)" + continue + fi + + if echo "$used_files" | grep -Fxq "$match_file"; then + log_warn "Token config already matched by another token, skipping: $(basename "$match_file")" + continue + fi + + tmp="$(mktemp)" + jq --arg a "$token_address" '.native_representation.contract_address = $a' "$match_file" >"$tmp" + mv "$tmp" "$match_file" + used_files+="$match_file"$'\n' + updated=$((updated + 1)) + log_ok "Updated $(basename "$match_file") contract_address => $token_address" + done < <(jq -c '.tokens[]?' 
"$DEPLOY_ADDRESSES_FILE") + + if [[ "$updated" -eq 0 ]]; then + log_warn "No token config files were updated from deployed tokens" + else + log_ok "Updated $updated token config file(s) from deployed tokens" + fi +} + +step_setup_swap_amm() { + require_cmd git node npm npx jq + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + + ensure_deploy_file + clone_or_update_repo "$SWAP_AMM_REPO" "$SWAP_AMM_BRANCH" "$SWAP_AMM_DIR" + + log_info "Installing swap-amm dependencies" + ( + cd "$SWAP_AMM_DIR" + npm install + (cd v3-core && npm install) + (cd v3-periphery && npm install) + ) + + log_info "Writing swap repo .env from main e2e .env" + cat >"$SWAP_AMM_DIR/.env" <&1 | tee "$wpc_log" + + local wpc_addr + wpc_addr="$(find_first_address_with_keywords "$wpc_log" wpc wpush wrapped)" + if [[ -n "$wpc_addr" ]]; then + record_contract "WPC" "$wpc_addr" + else + log_warn "Could not auto-detect WPC address from logs" + fi + + local core_log="$LOG_DIR/swap_core_$(date +%Y%m%d_%H%M%S).log" + log_info "Deploying v3-core" + ( + cd "$SWAP_AMM_DIR/v3-core" + npx hardhat compile + npx hardhat run scripts/deploy-core.js --network pushchain + ) 2>&1 | tee "$core_log" + + local factory_addr + factory_addr="$(grep -E 'Factory Address|FACTORY_ADDRESS=' "$core_log" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true)" + if [[ -n "$factory_addr" ]]; then + record_contract "Factory" "$factory_addr" + else + log_warn "Could not auto-detect Factory address from logs" + fi + + local periphery_log="$LOG_DIR/swap_periphery_$(date +%Y%m%d_%H%M%S).log" + log_info "Deploying v3-periphery" + ( + cd "$SWAP_AMM_DIR/v3-periphery" + npx hardhat compile + npx hardhat run scripts/deploy-periphery.js --network pushchain + ) 2>&1 | tee "$periphery_log" + + local swap_router quoter_v2 position_manager + swap_router="$(grep -E 'SwapRouter' "$periphery_log" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true)" + quoter_v2="$(grep -E 'QuoterV2' "$periphery_log" | grep -Eo 
'0x[a-fA-F0-9]{40}' | tail -1 || true)" + position_manager="$(grep -E 'PositionManager' "$periphery_log" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true)" + wpc_addr="$(grep -E '^.*WPC:' "$periphery_log" | grep -Eo '0x[a-fA-F0-9]{40}' | tail -1 || true)" + + [[ -n "$swap_router" ]] && record_contract "SwapRouter" "$swap_router" + [[ -n "$quoter_v2" ]] && record_contract "QuoterV2" "$quoter_v2" + [[ -n "$position_manager" ]] && record_contract "PositionManager" "$position_manager" + [[ -n "$wpc_addr" ]] && record_contract "WPC" "$wpc_addr" + + assert_required_addresses + + log_ok "Swap AMM setup complete" +} + +step_setup_gateway() { + require_cmd git forge + [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; } + + local gateway_repo_dir="$GATEWAY_DIR" + local sibling_gateway_dir="$PUSH_CHAIN_DIR/../push-chain-gateway-contracts" + + log_info "Using gateway repo dir: $gateway_repo_dir" + + # Some local setups accidentally resolve GATEWAY_DIR under push-chain/ itself. + # Prefer a repo path that actually contains the localSetup gateway scripts. + if [[ -d "$sibling_gateway_dir/contracts/evm-gateway" ]]; then + if [[ ! -d "$gateway_repo_dir/contracts/evm-gateway" || ( ! -f "$gateway_repo_dir/contracts/evm-gateway/script/localSetup/setup.s.sol" && ! -f "$gateway_repo_dir/contracts/evm-gateway/scripts/localSetup/setup.s.sol" && ! 
-f "$gateway_repo_dir/contracts/evm-gateway/localSetup/setup.s.sol" ) ]]; then + log_warn "Switching gateway repo dir to sibling path: $sibling_gateway_dir" + gateway_repo_dir="$sibling_gateway_dir" + fi + fi + + clone_or_update_repo "$GATEWAY_REPO" "$GATEWAY_BRANCH" "$gateway_repo_dir" + + log_info "Preparing gateway repo submodules" + ( + cd "$gateway_repo_dir" + if [[ -d "contracts/svm-gateway/mock-pyth" ]]; then + git rm --cached contracts/svm-gateway/mock-pyth || true + rm -rf contracts/svm-gateway/mock-pyth + fi + git submodule update --init --recursive + ) + + local gw_dir="$gateway_repo_dir/contracts/evm-gateway" + local gw_setup_script="" + local gw_log="$LOG_DIR/gateway_setup_$(date +%Y%m%d_%H%M%S).log" + local failed=0 + local resume_attempt=1 + local resume_max_attempts="${GATEWAY_RESUME_MAX_ATTEMPTS:-0}" # 0 = unlimited + + if [[ -f "$gw_dir/script/localSetup/setup.s.sol" ]]; then + gw_setup_script="script/localSetup/setup.s.sol" + elif [[ -f "$gw_dir/scripts/localSetup/setup.s.sol" ]]; then + gw_setup_script="scripts/localSetup/setup.s.sol" + elif [[ -f "$gw_dir/localSetup/setup.s.sol" ]]; then + gw_setup_script="localSetup/setup.s.sol" + else + log_err "Gateway setup script not found under $gw_dir/(script|scripts)/localSetup/setup.s.sol or $gw_dir/localSetup/setup.s.sol" + exit 1 + fi + + log_info "Building gateway evm contracts" + (cd "$gw_dir" && forge build) + + log_info "Clearing stale forge broadcast cache for gateway deploy" + rm -rf "$gw_dir/broadcast/$(basename "$gw_setup_script" .s.sol).s.sol" + + log_info "Running gateway local setup script" + ( + cd "$gw_dir" + forge script "$gw_setup_script" \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow + ) 2>&1 | tee "$gw_log" || failed=1 + + if [[ "$failed" -ne 0 ]]; then + log_warn "Gateway script failed. 
Retrying with --resume until success" + while true; do + log_info "Gateway resume attempt: $resume_attempt" + if ( + cd "$gw_dir" + forge script "$gw_setup_script" \ + --broadcast \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" \ + --slow \ + --resume + ) 2>&1 | tee -a "$gw_log"; then + break + fi + + if [[ "$resume_max_attempts" != "0" && "$resume_attempt" -ge "$resume_max_attempts" ]]; then + log_err "Reached GATEWAY_RESUME_MAX_ATTEMPTS=$resume_max_attempts without success" + exit 1 + fi + + resume_attempt=$((resume_attempt + 1)) + sleep 2 + done + fi + + # Ensure canonical local precompile proxy wiring used by SDK tests: + # C1 = UniversalGatewayPC proxy, B0 = VaultPC proxy, C0 = UniversalCore. + # Some gateway repo branches configure B0 only; this post-step self-heals C1. + local C0="0x00000000000000000000000000000000000000C0" + local C1="0x00000000000000000000000000000000000000C1" + local B0="0x00000000000000000000000000000000000000B0" + local C1_PROXY_ADMIN="0xf2000000000000000000000000000000000000c1" + local OWNER_ADDR="0x778D3206374f8AC265728E18E3fE2Ae6b93E4ce4" + + log_info "Verifying C1 UniversalGatewayPC wiring" + if ! cast call "$C1" 'UNIVERSAL_CORE()(address)' --rpc-url "$PUSH_RPC_URL" >/dev/null 2>&1; then + log_warn "C1.UNIVERSAL_CORE() reverted. Repairing C1 proxy implementation + initialize" + + # Reuse implementation currently behind B0 proxy (same UniversalGatewayPC bytecode family). + local impl_slot impl_word impl_addr + impl_slot="0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc" + impl_word="$(cast storage "$B0" "$impl_slot" --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + impl_addr="0x$(echo "$impl_word" | sed -E 's/^0x//; s/^.{24}//' | tr -d '\n')" + if ! 
validate_eth_address "$impl_addr"; then + log_err "Failed to resolve UniversalGatewayPC implementation from B0 proxy slot" + exit 1 + fi + + cast send "$C1_PROXY_ADMIN" 'upgradeAndCall(address,address,bytes)' \ + "$C1" "$impl_addr" "0x" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null + + cast send "$C1" 'initialize(address,address,address,address)' \ + "$OWNER_ADDR" "$OWNER_ADDR" "$C0" "$B0" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + + cast send "$C0" 'setUniversalGatewayPC(address)' "$C1" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + fi + + local c1_uc c0_ug c1_uc_lc c0_ug_lc c0_lc c1_lc + c1_uc="$(cast call "$C1" 'UNIVERSAL_CORE()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + c0_ug="$(cast call "$C0" 'universalGatewayPC()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + + # If C1 is initialized but C0 is not linked yet, repair linkage explicitly. + if [[ -n "$c1_uc" && -n "$c0_ug" ]]; then + local c1_uc_tmp c0_ug_tmp c0_lc_tmp c1_lc_tmp + c1_uc_tmp="$(echo "$c1_uc" | tr '[:upper:]' '[:lower:]')" + c0_ug_tmp="$(echo "$c0_ug" | tr '[:upper:]' '[:lower:]')" + c0_lc_tmp="$(echo "$C0" | tr '[:upper:]' '[:lower:]')" + c1_lc_tmp="$(echo "$C1" | tr '[:upper:]' '[:lower:]')" + + if [[ "$c1_uc_tmp" == "$c0_lc_tmp" && "$c0_ug_tmp" != "$c1_lc_tmp" ]]; then + log_warn "C0.universalGatewayPC is not linked to C1. 
Repairing linkage" + cast send "$C0" 'setUniversalGatewayPC(address)' "$C1" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + + c0_ug="$(cast call "$C0" 'universalGatewayPC()(address)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || true)" + fi + fi + + c1_uc_lc="$(echo "$c1_uc" | tr '[:upper:]' '[:lower:]')" + c0_ug_lc="$(echo "$c0_ug" | tr '[:upper:]' '[:lower:]')" + c0_lc="$(echo "$C0" | tr '[:upper:]' '[:lower:]')" + c1_lc="$(echo "$C1" | tr '[:upper:]' '[:lower:]')" + if [[ "$c1_uc_lc" != "$c0_lc" || "$c0_ug_lc" != "$c1_lc" ]]; then + log_err "Gateway wiring invalid after setup: C1.UNIVERSAL_CORE=$c1_uc, C0.universalGatewayPC=$c0_ug" + exit 1 + fi + + local manager_role has_manager + manager_role="$(cast keccak 'MANAGER_ROLE')" + has_manager="$(cast call "$C0" 'hasRole(bytes32,address)(bool)' "$manager_role" "$OWNER_ADDR" --rpc-url "$PUSH_RPC_URL" 2>/dev/null || echo "false")" + + if [[ "$has_manager" != "true" ]]; then + cast send "$C0" 'grantRole(bytes32,address)' "$manager_role" "$OWNER_ADDR" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + fi + + # Seed gas-token mapping for each deployed gas token PRC20 (p* symbols). + if [[ -s "$DEPLOY_ADDRESSES_FILE" ]]; then + while IFS=$'\t' read -r symbol token_addr; do + [[ -n "$symbol" && -n "$token_addr" ]] || continue + local chain_ns + chain_ns="$(cast call "$token_addr" 'SOURCE_CHAIN_NAMESPACE()(string)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || echo "")" + [[ -n "$chain_ns" ]] || continue + + cast send "$C0" 'setGasTokenPRC20(string,address)' "$chain_ns" "$token_addr" \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + done < <(jq -r '.tokens[]? | select((.symbol // "") | startswith("p")) | [.symbol, .address] | @tsv' "$DEPLOY_ADDRESSES_FILE") + fi + + # Ensure non-zero base gas limits so sendUniversalTxOutbound(req.gasLimit=0) + # can resolve a valid fee quote through UniversalCore. 
+ local base_gas + base_gas="$(cast call "$C0" 'BASE_GAS_LIMIT()(uint256)' --rpc-url "$PUSH_RPC_URL" 2>/dev/null || echo "")" + if [[ -z "$base_gas" || "$base_gas" == "0" ]]; then + log_warn "UniversalCore BASE_GAS_LIMIT is 0. Applying local defaults for outbound chains" + + for ns in "eip155:11155111" "eip155:421614" "eip155:84532" "eip155:97" "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"; do + cast send "$C0" 'setBaseGasLimitByChain(string,uint256)' "$ns" 2000000 \ + --rpc-url "$PUSH_RPC_URL" \ + --private-key "$PRIVATE_KEY" >/dev/null || true + done + fi + + log_ok "Gateway setup complete" +} + +step_add_uregistry_configs() { + require_cmd "$PUSH_CHAIN_DIR/build/pchaind" jq + + [[ -d "$TOKENS_CONFIG_DIR" ]] || { log_err "Missing tokens config directory: $TOKENS_CONFIG_DIR"; exit 1; } + + local token_payload + + run_registry_tx() { + local kind="$1" + local payload="$2" + local max_attempts=10 + local attempt=1 + local out code raw + + while true; do + if [[ "$kind" == "chain" ]]; then + out="$("$PUSH_CHAIN_DIR/build/pchaind" tx uregistry add-chain-config \ + --chain-config "$payload" \ + --from "$GENESIS_KEY_NAME" \ + --keyring-backend "$KEYRING_BACKEND" \ + --home "$GENESIS_KEY_HOME" \ + --node tcp://127.0.0.1:26657 \ + --gas-prices "$GAS_PRICES" \ + -y)" + else + out="$("$PUSH_CHAIN_DIR/build/pchaind" tx uregistry add-token-config \ + --token-config "$payload" \ + --from "$GENESIS_KEY_NAME" \ + --keyring-backend "$KEYRING_BACKEND" \ + --home "$GENESIS_KEY_HOME" \ + --node tcp://127.0.0.1:26657 \ + --gas-prices "$GAS_PRICES" \ + -y)" + fi + echo "$out" + if [[ "$out" =~ ^\{ ]]; then + code="$(echo "$out" | jq -r '.code // 1')" + raw="$(echo "$out" | jq -r '.raw_log // ""')" + else + code="$(echo "$out" | awk -F': ' '/^code:/ {print $2; exit}')" + raw="$(echo "$out" | awk -F': ' '/^raw_log:/ {sub(/^\x27|\x27$/, "", $2); print $2; exit}')" + [[ -n "$code" ]] || code="1" + fi + + if [[ "$code" == "0" ]]; then + return 0 + fi + + if [[ "$raw" == *"account sequence 
mismatch"* && "$attempt" -lt "$max_attempts" ]]; then + log_warn "Sequence mismatch on attempt $attempt/$max_attempts. Retrying..." + attempt=$((attempt + 1)) + sleep 2 + continue + fi + + log_err "Registry tx failed: code=$code raw_log=$raw" + return 1 + done + } + + local chain_config_dir chain_file chain_payload chain_count + chain_config_dir="$TOKENS_CONFIG_DIR" + chain_count=0 + + while IFS= read -r chain_file; do + [[ -f "$chain_file" ]] || continue + chain_payload="$(jq -c . "$chain_file")" + log_info "Adding chain config to uregistry: $(basename "$chain_file")" + run_registry_tx "chain" "$chain_payload" + chain_count=$((chain_count + 1)) + done < <(find "$chain_config_dir" -type f \( -name 'chain.json' -o -name '*_chain_config.json' \) | sort) + + if [[ "$chain_count" -eq 0 ]]; then + log_err "No chain config files found in: $chain_config_dir" + exit 1 + fi + + log_ok "Registered $chain_count chain config(s) from $chain_config_dir" + + local token_json token_file token_addr token_symbol token_name matched_count submitted_files tmp + matched_count=0 + submitted_files="" + + while IFS= read -r token_json; do + token_symbol="$(echo "$token_json" | jq -r '.symbol // ""')" + token_name="$(echo "$token_json" | jq -r '.name // ""')" + token_addr="$(echo "$token_json" | jq -r '.address // ""')" + + [[ -n "$token_addr" ]] || continue + + token_file="$(find_matching_token_config_file "$token_symbol" "$token_name")" + if [[ -z "$token_file" ]]; then + log_warn "No token config match found for deployed token (uregistry): $token_symbol ($token_name)" + continue + fi + + if echo "$submitted_files" | grep -Fxq "$token_file"; then + log_warn "Token config already submitted by another deployed token, skipping: $(basename "$token_file")" + continue + fi + + tmp="$(mktemp)" + jq --arg a "$token_addr" '.native_representation.contract_address = $a' "$token_file" >"$tmp" + mv "$tmp" "$token_file" + + token_payload="$(jq -c . 
"$token_file")" + log_info "Adding token config to uregistry: $(basename "$token_file") (from $token_symbol)" + run_registry_tx "token" "$token_payload" + + submitted_files+="$token_file"$'\n' + matched_count=$((matched_count + 1)) + done < <(jq -c '.tokens[]?' "$DEPLOY_ADDRESSES_FILE") + + if [[ "$matched_count" -eq 0 ]]; then + log_warn "No token configs were registered from deploy_addresses.json tokens" + else + log_ok "Registered $matched_count token config(s) from deploy_addresses.json" + fi + + log_ok "uregistry chain/token configs added" +} + +step_sync_test_addresses() { + require_cmd jq + ensure_deploy_file + + if [[ ! -f "$TEST_ADDRESSES_PATH" ]]; then + log_err "test-addresses.json not found: $TEST_ADDRESSES_PATH" + exit 1 + fi + + log_info "Syncing deploy addresses into test-addresses.json" + local tmp + tmp="$(mktemp)" + + jq \ + --arg today "$(date +%F)" \ + --arg rpc "$PUSH_RPC_URL" \ + --slurpfile dep "$DEPLOY_ADDRESSES_FILE" \ + ' + ($dep[0]) as $d + | def token_addr($sym): first(($d.tokens[]? | select(.symbol == $sym) | .address), empty); + .lastUpdated = $today + | .network.rpcUrl = $rpc + | if ($d.contracts.Factory // "") != "" then .contracts.factory = $d.contracts.Factory else . end + | if ($d.contracts.WPC // "") != "" then .contracts.WPC = $d.contracts.WPC else . end + | if ($d.contracts.SwapRouter // "") != "" then .contracts.swapRouter = $d.contracts.SwapRouter else . end + | if ($d.contracts.PositionManager // "") != "" then .contracts.positionManager = $d.contracts.PositionManager else . end + | if ($d.contracts.QuoterV2 // "") != "" then .contracts.quoterV2 = $d.contracts.QuoterV2 else . end + | .testTokens |= with_entries( + .value.address = (token_addr(.key) // .value.address) + ) + | .testTokens = ( + .testTokens as $existing + | $existing + + ( + reduce ($d.tokens[]?) 
    as $t ({};
            .[$t.symbol] = {
              name: $t.name,
              symbol: $t.symbol,
              address: $t.address,
              decimals: ($t.decimals // ($existing[$t.symbol].decimals // null)),
              totalSupply: ($existing[$t.symbol].totalSupply // "")
            }
          )
        )
      )
    | .pools |= with_entries(
        .value.token0 = (token_addr(.value.token0Symbol) // .value.token0)
        | .value.token1 = (token_addr(.value.token1Symbol) // .value.token1)
      )
    ' "$TEST_ADDRESSES_PATH" >"$tmp"

  mv "$tmp" "$TEST_ADDRESSES_PATH"
  log_ok "Updated $TEST_ADDRESSES_PATH"
}

# Transfer a fixed amount of every deployed PRC20 token from the deployer to
# the UEA derived from EVM_PRIVATE_KEY (fallback: PRIVATE_KEY). All failure
# paths are soft (return 0) so the pipeline continues without funding.
step_fund_uea_prc20() {
  require_cmd cast jq
  ensure_deploy_file

  local sdk_evm_private_key
  sdk_evm_private_key="${EVM_PRIVATE_KEY:-${PRIVATE_KEY:-}}"
  if [[ -z "$sdk_evm_private_key" ]]; then
    log_warn "No EVM_PRIVATE_KEY found; skipping UEA PRC20 funding"
    return 0
  fi

  local evm_addr
  evm_addr="$(cast wallet address "$sdk_evm_private_key" 2>/dev/null || true)"
  if ! validate_eth_address "$evm_addr"; then
    log_warn "Could not derive EVM address from EVM_PRIVATE_KEY; skipping UEA PRC20 funding"
    return 0
  fi

  # Ask the UEA factory precompile (0x…eA) for the deterministic UEA address
  # of this (eip155, 11155111, signer) identity.
  local factory_addr="0x00000000000000000000000000000000000000eA"
  local uea_addr
  uea_addr="$(cast call "$factory_addr" "computeUEA((string,string,bytes))(address)" \
    "(eip155,11155111,$evm_addr)" \
    --rpc-url "$PUSH_RPC_URL" 2>/dev/null | grep -Eo '0x[a-fA-F0-9]{40}' | head -1 || true)"

  if ! validate_eth_address "$uea_addr"; then
    log_warn "Could not compute UEA address for $evm_addr; skipping UEA PRC20 funding"
    return 0
  fi

  log_info "Funding UEA $uea_addr (signer: $evm_addr) with PRC20 tokens from deployer"

  local token_count
  token_count="$(jq -r '.tokens | length' "$DEPLOY_ADDRESSES_FILE")"
  if [[ "$token_count" == "0" ]]; then
    log_warn "No tokens in deploy addresses to fund UEA with"
    return 0
  fi

  local token_symbol token_addr token_decimals fund_amount
  while IFS=$'\t' read -r token_symbol token_addr token_decimals; do
    [[ -n "$token_addr" ]] || continue
    # 1e9 for tokens with <=9 decimals (e.g. USDT×1000, pSOL×1), 1e18 for 18-decimal tokens (e.g. 1 pETH)
    if [[ "${token_decimals:-18}" -le 9 ]]; then
      fund_amount="1000000000"
    else
      fund_amount="1000000000000000000"
    fi
    log_info "  Sending $fund_amount of $token_symbol ($token_addr) to UEA $uea_addr"
    # Best-effort transfer: only the status line is surfaced; failures ignored.
    cast send --private-key "$PRIVATE_KEY" "$token_addr" \
      "transfer(address,uint256)(bool)" "$uea_addr" "$fund_amount" \
      --rpc-url "$PUSH_RPC_URL" 2>&1 | grep -E "^status" || true
  done < <(jq -r '.tokens[]? | [.symbol, .address, (.decimals // 18)] | @tsv' "$DEPLOY_ADDRESSES_FILE")

  log_ok "UEA PRC20 funding complete"
}

# Fund the deployer account from the genesis key, then create a <token>/WPC
# pool with liquidity for every deployed core token (WPC itself excluded).
step_create_all_wpc_pools() {
  require_cmd node cast "$PUSH_CHAIN_DIR/build/pchaind"
  ensure_deploy_file

  [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; }

  if [[ ! -f "$TEST_ADDRESSES_PATH" ]]; then
    log_err "Missing test-addresses.json at $TEST_ADDRESSES_PATH"
    exit 1
  fi

  local wpc_addr token_count token_addr token_symbol
  wpc_addr="$(address_from_deploy_contract "WPC")"
  if [[ -z "$wpc_addr" ]]; then
    log_err "Missing WPC contract address in $DEPLOY_ADDRESSES_FILE"
    exit 1
  fi

  token_count="$(jq -r '.tokens | length' "$DEPLOY_ADDRESSES_FILE")"
  if [[ "$token_count" == "0" ]]; then
    log_warn "No core tokens found in deploy addresses; skipping pool creation"
    return 0
  fi

  local deployer_evm_addr
  deployer_evm_addr="$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null || true)"
  if ! validate_eth_address "$deployer_evm_addr"; then
    log_err "Could not resolve deployer EVM address from PRIVATE_KEY"
    exit 1
  fi

  # Convert the 0x hex address to its bech32 form via `pchaind debug addr`.
  local deployer_hex deployer_push_addr
  deployer_hex="$(echo "$deployer_evm_addr" | tr '[:upper:]' '[:lower:]' | sed 's/^0x//')"
  deployer_push_addr="$("$PUSH_CHAIN_DIR/build/pchaind" debug addr "$deployer_hex" 2>/dev/null | awk -F': ' '/Bech32 Acc:/ {print $2; exit}')"
  if [[ -z "$deployer_push_addr" ]]; then
    log_err "Could not derive bech32 deployer address from $deployer_evm_addr"
    exit 1
  fi

  log_info "Funding deployer $deployer_push_addr ($deployer_evm_addr) for pool creation ($POOL_CREATION_TOPUP_AMOUNT)"
  local fund_attempt=1
  local fund_max_attempts=5
  local fund_out=""
  while true; do
    fund_out="$("$PUSH_CHAIN_DIR/build/pchaind" tx bank send "$GENESIS_KEY_NAME" "$deployer_push_addr" "$POOL_CREATION_TOPUP_AMOUNT" \
      --gas-prices "$GAS_PRICES" \
      --keyring-backend "$KEYRING_BACKEND" \
      --chain-id "$CHAIN_ID" \
      --home "$GENESIS_KEY_HOME" \
      -y 2>&1 || true)"

    # Success is detected by a txhash in either YAML or JSON output form.
    if echo "$fund_out" | grep -q 'txhash:' || echo "$fund_out" | grep -q '"txhash"'; then
      log_ok "Deployer funding transaction submitted"
      break
    fi

    if echo "$fund_out" | grep -qi 'account sequence mismatch' && [[ "$fund_attempt" -lt "$fund_max_attempts" ]]; then
      log_warn "Funding sequence mismatch on attempt $fund_attempt/$fund_max_attempts. Retrying..."
      fund_attempt=$((fund_attempt + 1))
      sleep 2
      continue
    fi

    log_err "Failed to fund deployer for pool creation"
    echo "$fund_out"
    exit 1
  done
  sleep 2

  while IFS=$'\t' read -r token_symbol token_addr; do
    [[ -n "$token_addr" ]] || continue
    # Skip the WPC token itself (case-insensitive address compare).
    if [[ "$(echo "$token_addr" | tr '[:upper:]' '[:lower:]')" == "$(echo "$wpc_addr" | tr '[:upper:]' '[:lower:]')" ]]; then
      continue
    fi

    log_info "Creating ${token_symbol}/WPC pool with liquidity"
    (
      cd "$SWAP_AMM_DIR"
      node scripts/pool-manager.js create-pool "$token_addr" "$wpc_addr" 4 500 true 1 4
    )
  done < <(jq -r '.tokens[]? | [.symbol, .address] | @tsv' "$DEPLOY_ADDRESSES_FILE")

  log_ok "All token/WPC pool creation commands completed"
}

# Run the core repo's configureUniversalCore.s.sol forge script, retrying with
# --resume until success (CORE_CONFIGURE_RESUME_MAX_ATTEMPTS caps retries; 0 =
# unlimited). Missing script is a soft skip.
step_configure_universal_core() {
  require_cmd forge
  [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; }

  # configureUniversalCore depends on values from core .env
  step_write_core_env

  local script_path="scripts/localSetup/configureUniversalCore.s.sol"
  local log_file="$LOG_DIR/core_configure_$(date +%Y%m%d_%H%M%S).log"
  local resume_attempt=1
  local resume_max_attempts="${CORE_CONFIGURE_RESUME_MAX_ATTEMPTS:-0}" # 0 = unlimited

  if [[ ! -f "$CORE_CONTRACTS_DIR/$script_path" ]]; then
    log_warn "configureUniversalCore script not found at $CORE_CONTRACTS_DIR/$script_path; skipping"
    return 0
  fi

  log_info "Clearing stale forge broadcast cache for configureUniversalCore"
  rm -rf "$CORE_CONTRACTS_DIR/broadcast/configureUniversalCore.s.sol"

  log_info "Running configureUniversalCore script"
  if (
    cd "$CORE_CONTRACTS_DIR"
    forge script "$script_path" \
      --broadcast \
      --rpc-url "$PUSH_RPC_URL" \
      --private-key "$PRIVATE_KEY" \
      --slow
  ) 2>&1 | tee "$log_file"; then
    log_ok "configureUniversalCore completed"
    return 0
  fi

  log_warn "configureUniversalCore failed. Retrying with --resume until success"
  while true; do
    log_info "configureUniversalCore resume attempt: $resume_attempt"
    if (
      cd "$CORE_CONTRACTS_DIR"
      forge script "$script_path" \
        --broadcast \
        --rpc-url "$PUSH_RPC_URL" \
        --private-key "$PRIVATE_KEY" \
        --slow \
        --resume
    ) 2>&1 | tee -a "$log_file"; then
      log_ok "configureUniversalCore resumed successfully"
      return 0
    fi

    if [[ "$resume_max_attempts" != "0" && "$resume_attempt" -ge "$resume_max_attempts" ]]; then
      log_err "Reached CORE_CONFIGURE_RESUME_MAX_ATTEMPTS=$resume_max_attempts without success"
      exit 1
    fi

    resume_attempt=$((resume_attempt + 1))
    sleep 2
  done
}

# Deploy the pre-compiled CounterPayable contract (embedded creation bytecode)
# to the Push localnet and write the resulting address into the SDK's
# COUNTER_ADDRESS_PAYABLE constant in addresses.ts (perl in-place edit).
step_deploy_counter_and_sync_sdk() {
  require_cmd cast perl
  [[ -n "${PRIVATE_KEY:-}" ]] || { log_err "Set PRIVATE_KEY in e2e-tests/.env"; exit 1; }

  local sdk_counter_addr_file="$PUSH_CHAIN_SDK_DIR/packages/core/src/lib/push-chain/helpers/addresses.ts"
  local counter_creation_code="0x6080604052348015600e575f5ffd5b506102068061001c5f395ff3fe608060405260043610610042575f3560e01c806312065fe01461004d5780639b0e94af14610077578063d09de08a146100a1578063d826f88f146100ab57610049565b3661004957005b5f5ffd5b348015610058575f5ffd5b506100616100c1565b60405161006e9190610157565b60405180910390f35b348015610082575f5ffd5b5061008b6100c8565b6040516100989190610157565b60405180910390f35b6100a96100cd565b005b3480156100b6575f5ffd5b506100bf610137565b005b5f47905090565b5f5481565b60015f5f8282546100de919061019d565b925050819055503373ffffffffffffffffffffffffffffffffffffffff165f547fb6aa5bfdc1ab753194658fada8fa1725a667cdea7df54bd400f8bced617dfd4c3460405161012d9190610157565b60405180910390a3565b5f5f81905550565b5f819050919050565b6101518161013f565b82525050565b5f60208201905061016a5f830184610148565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f6101a78261013f565b91506101b28361013f565b92508282019050808211156101ca576101c9610170565b5b9291505056fea26469706673582212204acec08331d08192e4797fc12653c602c2ca1574d44468713f91a095fdefe6d564736f6c634300081e0033"

  if [[ ! -f "$sdk_counter_addr_file" ]]; then
    log_err "SDK counter addresses file not found: $sdk_counter_addr_file"
    exit 1
  fi

  log_info "Deploying CounterPayable contract on Push localnet"
  local deploy_out counter_addr
  deploy_out="$(cast send --rpc-url "$PUSH_RPC_URL" --private-key "$PRIVATE_KEY" --create "$counter_creation_code" 2>&1)" || {
    log_err "Counter deployment failed"
    echo "$deploy_out"
    exit 1
  }

  counter_addr="$(echo "$deploy_out" | awk '/contractAddress/ {print $2; exit}')"
  if ! validate_eth_address "$counter_addr"; then
    log_err "Could not parse deployed counter contract address from cast output"
    echo "$deploy_out"
    exit 1
  fi

  ensure_deploy_file
  record_contract "COUNTER_ADDRESS_PAYABLE" "$counter_addr"

  # Slurp the whole file (-0) and replace the first 0x…40-hex address, but only
  # when the file mentions COUNTER_ADDRESS_PAYABLE; the new address is passed
  # via the environment to avoid shell-interpolation inside the perl program.
  COUNTER_ADDR="$counter_addr" perl -0pi -e '
    if (/COUNTER_ADDRESS_PAYABLE/s) {
      s/0x[a-fA-F0-9]{40}/$ENV{COUNTER_ADDR}/;
    }
  ' "$sdk_counter_addr_file"

  if ! grep -q "$counter_addr" "$sdk_counter_addr_file"; then
    log_err "Failed to sync COUNTER_ADDRESS_PAYABLE in $sdk_counter_addr_file"
    exit 1
  fi

  log_ok "Deployed CounterPayable: $counter_addr"
  log_ok "Synced SDK COUNTER_ADDRESS_PAYABLE in $sdk_counter_addr_file"
}

# Ensure a CEA (cross-chain executor account) is deployed on the BSC testnet
# fork for the SDK signer's UEA, by running an inline Node program against the
# checked-out SDK (Route 2 bootstrap).
step_bootstrap_cea_for_sdk_signer() {
  require_cmd node

  local sdk_env_file="$PUSH_CHAIN_SDK_DIR/packages/core/.env"
  if [[ ! -f "$sdk_env_file" ]]; then
    log_warn "SDK env file not found at $sdk_env_file; running setup-sdk first"
    step_setup_push_chain_sdk
  fi

  if [[ ! -d "$PUSH_CHAIN_SDK_DIR" ]]; then
    log_err "SDK repo not found at $PUSH_CHAIN_SDK_DIR"
    exit 1
  fi

  log_info "Bootstrapping CEA deployment for SDK signer on BSC testnet fork"
  if ! (
    cd "$PUSH_CHAIN_SDK_DIR"
    node -r @swc-node/register <<'NODE'
const path = require('path');
require('dotenv').config({ path: path.resolve(process.cwd(), 'packages/core/.env') });

const { PushChain } = require('./packages/core/src');
const { createWalletClient, http, parseEther } = require('viem');
const { privateKeyToAccount } = require('viem/accounts');
const { CHAIN_INFO } = require('./packages/core/src/lib/constants/chain');
const { CHAIN, PUSH_NETWORK } = require('./packages/core/src/lib/constants/enums');
const { getCEAAddress } = require('./packages/core/src/lib/orchestrator/cea-utils');

async function main() {
  const evmPrivateKey = process.env.EVM_PRIVATE_KEY;
  const pushPrivateKey = process.env.PUSH_PRIVATE_KEY;
  if (!evmPrivateKey) {
    throw new Error('EVM_PRIVATE_KEY is missing in packages/core/.env');
  }
  if (!pushPrivateKey) {
    throw new Error('PUSH_PRIVATE_KEY is missing in packages/core/.env');
  }

  // Derive the target UEA account from the EVM key (the same identity used by cea-to-uea tests).
  const evmAccount = privateKeyToAccount(evmPrivateKey);
  const evmWalletClient = createWalletClient({
    account: evmAccount,
    transport: http(CHAIN_INFO[CHAIN.ETHEREUM_SEPOLIA].defaultRPC[0]),
  });

  const evmUniversalSigner = await PushChain.utils.signer.toUniversalFromKeypair(evmWalletClient, {
    chain: CHAIN.ETHEREUM_SEPOLIA,
    library: PushChain.CONSTANTS.LIBRARY.ETHEREUM_VIEM,
  });
  const evmClient = await PushChain.initialize(evmUniversalSigner, {
    network: PUSH_NETWORK.LOCALNET,
    printTraces: false,
  });
  const targetUea = evmClient.universal.account;

  // Use a native Push signer to bootstrap the CEA deployment/funding for that target UEA.
+ const pushAccount = privateKeyToAccount(pushPrivateKey); + const pushWalletClient = createWalletClient({ + account: pushAccount, + transport: http(CHAIN_INFO[CHAIN.PUSH_LOCALNET].defaultRPC[0]), + }); + + const pushUniversalSigner = await PushChain.utils.signer.toUniversalFromKeypair(pushWalletClient, { + chain: CHAIN.PUSH_LOCALNET, + library: PushChain.CONSTANTS.LIBRARY.ETHEREUM_VIEM, + }); + const pushClient = await PushChain.initialize(pushUniversalSigner, { + network: PUSH_NETWORK.LOCALNET, + printTraces: false, + }); + + let ceaResult = await getCEAAddress(targetUea, CHAIN.BNB_TESTNET); + console.log(`CEA bootstrap pre-check: targetUEA=${targetUea} cea=${ceaResult.cea} deployed=${ceaResult.isDeployed}`); + + if (!ceaResult.isDeployed) { + const tx = await pushClient.universal.sendTransaction({ + to: { address: ceaResult.cea, chain: CHAIN.BNB_TESTNET }, + value: parseEther('0.00005'), + }); + const receipt = await tx.wait(); + console.log(`CEA bootstrap tx: hash=${tx.hash} status=${receipt.status} external=${receipt.externalTxHash || 'n/a'}`); + + ceaResult = await getCEAAddress(targetUea, CHAIN.BNB_TESTNET); + console.log(`CEA bootstrap post-check: deployed=${ceaResult.isDeployed}`); + } + + if (!ceaResult.isDeployed) { + throw new Error('CEA is still not deployed after bootstrap transaction'); + } +} + +main().catch((err) => { + const msg = err && err.message ? 
err.message : String(err); + console.error(msg); + process.exit(1); +}); +NODE + ); then + log_err "CEA bootstrap step failed" + + if docker ps --format '{{.Names}}' | grep -qx 'universal-validator-1'; then + log_warn "Dumping recent universal-validator-1 logs for diagnosis" + docker logs --tail 200 universal-validator-1 2>&1 || true + fi + exit 1 + fi + + log_ok "CEA bootstrap complete" +} + +cmd_all() { + step_setup_environment + (cd "$PUSH_CHAIN_DIR" && make replace-addresses) + (cd "$PUSH_CHAIN_DIR" && make build) + step_update_env_fund_to_address + step_stop_running_nodes + step_devnet + step_ensure_tss_key_ready + step_setup_environment + step_recover_genesis_key + step_fund_account + step_setup_core_contracts + step_setup_swap_amm + step_sync_test_addresses + step_create_all_wpc_pools + assert_required_addresses + step_write_core_env + step_configure_universal_core + step_update_eth_token_config + step_setup_gateway + step_add_uregistry_configs + step_clone_push_chain_sdk + step_deploy_counter_and_sync_sdk + sdk_sync_localnet_constants + step_sync_vault_tss_on_anvil +} + +cmd_show_help() { + cat < + +Commands: + setup-environment Sync universal-validator RPC URLs (LOCAL => anvil localhost RPCs; non-LOCAL => testnet-donut chain public_rpc_url) + devnet Build/start local-multi-validator devnet + uvalidators + print-genesis Print first genesis account + mnemonic + recover-genesis-key Recover genesis key into local keyring + fund Fund FUND_TO_ADDRESS from genesis key + setup-core Clone/build/setup core contracts (auto resume on failure) + setup-swap Clone/install/deploy swap AMM contracts + sync-addresses Apply deploy_addresses.json into test-addresses.json + create-pool Create WPC pools for all deployed core tokens + fund-uea-prc20 Transfer PRC20 tokens (pETH/pUSDT/pSOL etc.) 
from deployer to test UEA + configure-core Run configureUniversalCore.s.sol (auto --resume retries) + check-addresses Check/report deploy addresses (WPC/Factory/QuoterV2/SwapRouter) + write-core-env Create core-contracts .env from deploy_addresses.json + update-token-config Update eth_sepolia_eth.json contract_address using deployed token + setup-gateway Clone/setup gateway repo and run forge localSetup (with --resume retry) + sync-vault-tss Grant TSS_ROLE on each Anvil EVM vault to the current local TSS key (LOCAL only) + bootstrap-cea-sdk Ensure CEA is deployed for SDK signer on BSC testnet fork (Route 2 bootstrap) + deploy-counter-sdk Deploy CounterPayable on Push localnet and sync SDK COUNTER_ADDRESS_PAYABLE + clone-sdk Clone/update push-chain-sdk repo only (no env/deps setup) + setup-sdk Setup push-chain-sdk (requires clone-sdk first): generate .env, replace TESTNET→LOCALNET in __e2e__ files, install deps + sdk-test-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK E2E tests + sdk-test-outbound-all Replace PUSH_NETWORK TESTNET variants with LOCALNET and run all configured SDK outbound E2E tests (TESTING_ENV=LOCAL) + quick-testing-outbound Run setup-sdk + fund-uea-prc20, then execute cea-to-eoa.spec.ts and cea-to-uea.spec.ts only + sdk-test-pctx-last-transaction Run pctx-last-transaction.spec.ts + sdk-test-send-to-self Run send-to-self.spec.ts + sdk-test-progress-hook Run progress-hook-per-tx.spec.ts + sdk-test-bridge-multicall Run bridge-multicall.spec.ts + sdk-test-pushchain Run pushchain.spec.ts + sdk-test-bridge-hooks Run bridge-hooks.spec.ts + sdk-test-cea-to-eoa Run cea-to-eoa.spec.ts (outbound Route 3; requires TESTING_ENV=LOCAL) + add-uregistry-configs Submit chain + token config txs via local-multi-validator validator1 + record-contract K A Manually record contract key/address + record-token N S A Manually record token name/symbol/address + all Run full setup pipeline + help Show this help + +Primary files: + Env: 
$ENV_FILE + Address: $DEPLOY_ADDRESSES_FILE + +Important env: + TESTING_ENV=LOCAL Enables local anvil/surfpool startup and localhost RPC rewrites; when not LOCAL, setup-environment uses testnet-donut chain public_rpc_url values for universal validator RPCs + ANVIL_SEPOLIA_HOST_RPC_URL=http://localhost:9545 + ANVIL_ARBITRUM_HOST_RPC_URL=http://localhost:9546 + ANVIL_BASE_HOST_RPC_URL=http://localhost:9547 + ANVIL_BSC_HOST_RPC_URL=http://localhost:9548 + LOCAL_SEPOLIA_UV_RPC_URL=http://localhost:9545 + LOCAL_ARBITRUM_UV_RPC_URL=http://localhost:9546 + LOCAL_BASE_UV_RPC_URL=http://localhost:9547 + LOCAL_BSC_UV_RPC_URL=http://localhost:9548 + SURFPOOL_SOLANA_HOST_RPC_URL=http://localhost:8899 + LOCAL_SOLANA_UV_RPC_URL=http://localhost:8899 +EOF +} + +main() { + ensure_testing_env_var_in_env_file + + local cmd="${1:-help}" + case "$cmd" in + setup-environment) step_setup_environment ;; + devnet) step_devnet ;; + print-genesis) step_print_genesis ;; + recover-genesis-key) step_recover_genesis_key ;; + fund) step_fund_account ;; + setup-core) step_setup_core_contracts ;; + setup-swap) step_setup_swap_amm ;; + sync-addresses) step_sync_test_addresses ;; + create-pool) step_create_all_wpc_pools ;; + fund-uea-prc20) step_fund_uea_prc20 ;; + configure-core) step_configure_universal_core ;; + check-addresses) assert_required_addresses ;; + write-core-env) step_write_core_env ;; + update-token-config) step_update_deployed_token_configs ;; + setup-gateway) step_setup_gateway ;; + sync-vault-tss) step_sync_vault_tss_on_anvil ;; + bootstrap-cea-sdk) step_bootstrap_cea_for_sdk_signer ;; + deploy-counter-sdk) step_deploy_counter_and_sync_sdk ;; + clone-sdk) step_clone_push_chain_sdk ;; + setup-sdk) step_setup_push_chain_sdk ;; + sdk-test-all) step_run_sdk_tests_all ;; + sdk-test-outbound-all) step_run_sdk_outbound_tests_all ;; + quick-testing-outbound) step_run_sdk_quick_testing_outbound ;; + sdk-test-pctx-last-transaction) step_run_sdk_test_file "pctx-last-transaction.spec.ts" ;; + 
sdk-test-send-to-self) step_run_sdk_test_file "send-to-self.spec.ts" ;; + sdk-test-progress-hook) step_run_sdk_test_file "progress-hook-per-tx.spec.ts" ;; + sdk-test-bridge-multicall) step_run_sdk_test_file "bridge-multicall.spec.ts" ;; + sdk-test-pushchain) step_run_sdk_test_file "pushchain.spec.ts" ;; + sdk-test-bridge-hooks) step_run_sdk_test_file "bridge-hooks.spec.ts" ;; + sdk-test-cea-to-eoa) step_run_sdk_test_file "cea-to-eoa.spec.ts" ;; + add-uregistry-configs) step_add_uregistry_configs ;; + record-contract) + ensure_deploy_file + [[ $# -eq 3 ]] || { log_err "Usage: $0 record-contract <key> <address>
"; exit 1; } + validate_eth_address "$3" || { log_err "Invalid address: $3"; exit 1; } + record_contract "$2" "$3" + ;; + record-token) + ensure_deploy_file + [[ $# -eq 4 ]] || { log_err "Usage: $0 record-token <name> <symbol> <address>
"; exit 1; } + validate_eth_address "$4" || { log_err "Invalid address: $4"; exit 1; } + record_token "$2" "$3" "$4" "manual" + ;; + all) cmd_all ;; + help|--help|-h) cmd_show_help ;; + *) log_err "Unknown command: $cmd"; cmd_show_help; exit 1 ;; + esac +} + +main "$@" \ No newline at end of file diff --git a/local-multi-validator/README.md b/local-multi-validator/README.md index 818b72f6..bb8d6be2 100644 --- a/local-multi-validator/README.md +++ b/local-multi-validator/README.md @@ -37,6 +37,28 @@ docker compose up --build - Auto-builds base image if missing (~15-20 min first time) - Pulls core/universal from cache or builds locally - Starts all 8 validators +- Auto-sets `event_start_from` to latest height/slot for Sepolia, Base Sepolia, Arbitrum Sepolia, BSC testnet, and Solana devnet + +### Event Start Heights/Slots + +On `./devnet start`, the script fetches latest chain heights/slots and injects them into each universal validator config: + +- `chain_configs["eip155:11155111"].event_start_from = ` +- `chain_configs["eip155:84532"].event_start_from = ` +- `chain_configs["eip155:421614"].event_start_from = ` +- `chain_configs["eip155:97"].event_start_from = ` +- `chain_configs["solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1"].event_start_from = ` + +You can override any of them manually at startup: + +```bash +SEPOLIA_EVENT_START_FROM=12345678 \ +BASE_EVENT_START_FROM=23456789 \ +ARBITRUM_EVENT_START_FROM=34567890 \ +BSC_EVENT_START_FROM=45678901 \ +SOLANA_EVENT_START_FROM=56789012 \ +./devnet start +``` ### I Changed Core Validator Code **Files:** `cmd/pchaind/`, `app/`, `x/` modules @@ -133,6 +155,16 @@ docker compose up -d # Start containers directly | `./devnet push-cache` | Push local images to GCR | | `./devnet refresh-cache` | Force rebuild and push to GCR | +The `start` command also supports: + +| Environment Variable | Description | +|----------------------|-------------| +| `SEPOLIA_EVENT_START_FROM` | Force universal validators to start monitoring Sepolia from 
a specific block | +| `BASE_EVENT_START_FROM` | Force universal validators to start monitoring Base Sepolia from a specific block | +| `ARBITRUM_EVENT_START_FROM` | Force universal validators to start monitoring Arbitrum Sepolia from a specific block | +| `BSC_EVENT_START_FROM` | Force universal validators to start monitoring BSC testnet from a specific block | +| `SOLANA_EVENT_START_FROM` | Force universal validators to start monitoring Solana devnet from a specific slot | + ## Endpoints | Service | Port | Description | diff --git a/local-multi-validator/devnet b/local-multi-validator/devnet index fd92da91..c295ac4b 100755 --- a/local-multi-validator/devnet +++ b/local-multi-validator/devnet @@ -15,6 +15,27 @@ cd "$SCRIPT_DIR" # ═══════════════════════════════════════════════════════════════════════════════ GCR_REGISTRY="${GCR_REGISTRY:-gcr.io/push-chain-testnet}" CACHE_TAG="${CACHE_TAG:-latest}" +SEPOLIA_CHAIN_ID="eip155:11155111" +ARBITRUM_SEPOLIA_CHAIN_ID="eip155:421614" +BASE_SEPOLIA_CHAIN_ID="eip155:84532" +BSC_TESTNET_CHAIN_ID="eip155:97" +SOLANA_DEVNET_CHAIN_ID="solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" + +SEPOLIA_DEFAULT_RPC_URL="${SEPOLIA_DEFAULT_RPC_URL:-https://sepolia.drpc.org}" +ARBITRUM_SEPOLIA_DEFAULT_RPC_URL="${ARBITRUM_SEPOLIA_DEFAULT_RPC_URL:-https://arbitrum-sepolia.gateway.tenderly.co}" +BASE_SEPOLIA_DEFAULT_RPC_URL="${BASE_SEPOLIA_DEFAULT_RPC_URL:-https://sepolia.base.org}" +BSC_TESTNET_DEFAULT_RPC_URL="${BSC_TESTNET_DEFAULT_RPC_URL:-https://bsc-testnet-rpc.publicnode.com}" +SOLANA_DEVNET_DEFAULT_RPC_URL="${SOLANA_DEVNET_DEFAULT_RPC_URL:-https://api.devnet.solana.com}" +LOCAL_SEPOLIA_SOURCE_RPC_URL="${LOCAL_SEPOLIA_SOURCE_RPC_URL:-http://localhost:9545}" +LOCAL_SEPOLIA_UV_RPC_URL="${LOCAL_SEPOLIA_UV_RPC_URL:-http://host.docker.internal:9545}" +LOCAL_ARBITRUM_SOURCE_RPC_URL="${LOCAL_ARBITRUM_SOURCE_RPC_URL:-http://localhost:9546}" +LOCAL_ARBITRUM_UV_RPC_URL="${LOCAL_ARBITRUM_UV_RPC_URL:-http://host.docker.internal:9546}" 
+LOCAL_BASE_SOURCE_RPC_URL="${LOCAL_BASE_SOURCE_RPC_URL:-http://localhost:9547}" +LOCAL_BASE_UV_RPC_URL="${LOCAL_BASE_UV_RPC_URL:-http://host.docker.internal:9547}" +LOCAL_BSC_SOURCE_RPC_URL="${LOCAL_BSC_SOURCE_RPC_URL:-http://localhost:9548}" +LOCAL_BSC_UV_RPC_URL="${LOCAL_BSC_UV_RPC_URL:-http://host.docker.internal:9548}" +LOCAL_SOLANA_SOURCE_RPC_URL="${LOCAL_SOLANA_SOURCE_RPC_URL:-http://localhost:8899}" +LOCAL_SOLANA_UV_RPC_URL="${LOCAL_SOLANA_UV_RPC_URL:-http://host.docker.internal:8899}" # ═══════════════════════════════════════════════════════════════════════════════ # COLORS @@ -68,6 +89,40 @@ has_buildx() { docker buildx version >/dev/null 2>&1 } +fetch_evm_height() { + local rpc_url="$1" + local response + response=$(curl -sS -X POST "$rpc_url" \ + -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}') + + local hex_height + hex_height=$(echo "$response" | jq -r '.result // empty') + + if [ -z "$hex_height" ] || [ "$hex_height" = "null" ] || [[ ! "$hex_height" =~ ^0x[0-9a-fA-F]+$ ]]; then + return 1 + fi + + echo "$((16#${hex_height#0x}))" +} + +fetch_solana_slot() { + local rpc_url="$1" + local response + response=$(curl -sS -X POST "$rpc_url" \ + -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"processed"}]}') + + local slot + slot=$(echo "$response" | jq -r '.result // empty') + + if [ -z "$slot" ] || [ "$slot" = "null" ] || [[ ! 
"$slot" =~ ^[0-9]+$ ]]; then + return 1 + fi + + echo "$slot" +} + # ═══════════════════════════════════════════════════════════════════════════════ # STATUS HELPERS # ═══════════════════════════════════════════════════════════════════════════════ @@ -128,17 +183,112 @@ check_grants() { local granter_addr=$(docker exec "core-validator-$i" pchaind keys show "validator-$i" -a --keyring-backend test 2>/dev/null || echo "") if [ -n "$hotkey_addr" ] && [ -n "$granter_addr" ]; then - local count=$(docker exec core-validator-1 pchaind query authz grants "$granter_addr" "$hotkey_addr" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '.grants | length' 2>/dev/null || echo "0") + local count=$(docker exec core-validator-1 pchaind query authz grants "$granter_addr" "$hotkey_addr" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound" or . 
== "/utss.v1.MsgVoteTssKeyProcess")] | length' 2>/dev/null || echo "0") if [ "$count" = "0" ] || [ -z "$count" ]; then echo "no|[no]" + elif [ "$count" -lt "4" ] 2>/dev/null; then + echo "no|[no] ${count}/4" else - echo "yes|[ok] ${count}" + echo "yes|[ok] ${count}/4" fi else echo "no|[no]" fi } +wait_for_chain_tx_success() { + local tx_hash="$1" + local max_wait="${2:-30}" + local waited=0 + local start_height + start_height=$(curl -sf "http://localhost:26657/status" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // ""' 2>/dev/null || echo "") + + while [ "$waited" -lt "$max_wait" ]; do + local tx_json + tx_json=$(docker exec core-validator-1 curl -sf "http://localhost:1317/cosmos/tx/v1beta1/txs/$tx_hash" 2>/dev/null || true) + + local has_tx_response + has_tx_response=$(echo "$tx_json" | jq -r 'has("tx_response")' 2>/dev/null || echo "false") + + if [ "$has_tx_response" = "true" ]; then + local tx_code + tx_code=$(echo "$tx_json" | jq -r '.tx_response.code // "0"' 2>/dev/null || echo "0") + + if [ "$tx_code" = "0" ]; then + return 0 + fi + + local raw_log + raw_log=$(echo "$tx_json" | jq -r '.tx_response.raw_log // .message // ""' 2>/dev/null || true) + print_error "TX $tx_hash failed with code $tx_code: ${raw_log:-unknown error}" + return 1 + fi + + local query_code + query_code=$(echo "$tx_json" | jq -r '.code // empty' 2>/dev/null || true) + if [ -n "$query_code" ] && [ "$query_code" != "5" ]; then + local query_msg + query_msg=$(echo "$tx_json" | jq -r '.message // ""' 2>/dev/null || true) + print_warning "TX query for $tx_hash returned code $query_code: ${query_msg:-unknown response}" + fi + + sleep 1 + waited=$((waited + 1)) + done + + local end_height + end_height=$(curl -sf "http://localhost:26657/status" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // ""' 2>/dev/null || echo "") + if [ -n "$start_height" ] && [ -n "$end_height" ] && [ "$start_height" = "$end_height" ]; then + print_error "Timed out waiting for tx $tx_hash (chain 
height is stalled at $end_height)" + else + print_error "Timed out waiting for tx $tx_hash" + fi + return 1 +} + +wait_for_block_progress() { + local max_wait="${1:-10}" + local waited=0 + local start_height + start_height=$(curl -sf "http://localhost:26657/status" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // ""' 2>/dev/null || echo "") + + while [ "$waited" -lt "$max_wait" ]; do + local current_height + current_height=$(curl -sf "http://localhost:26657/status" 2>/dev/null | jq -r '.result.sync_info.latest_block_height // ""' 2>/dev/null || echo "") + if [ -n "$start_height" ] && [ -n "$current_height" ] && [ "$current_height" != "$start_height" ]; then + return 0 + fi + + sleep 1 + waited=$((waited + 1)) + done + + return 1 +} + +get_current_tss_key_id() { + docker exec core-validator-1 pchaind query utss current-key --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '.key.key_id // .current_key.key_id // empty' 2>/dev/null || true +} + +get_utss_admin_address() { + docker exec core-validator-1 pchaind query utss params --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '.params.admin // empty' 2>/dev/null || true +} + +get_key_name_for_address() { + local address="$1" + docker exec core-validator-1 pchaind keys list --keyring-backend test --output json 2>/dev/null | jq -r --arg addr "$address" '.[] | select(.address == $addr) | .name' 2>/dev/null | head -n1 +} + +resolve_utss_admin_signer() { + local admin_addr + admin_addr=$(get_utss_admin_address) + if [ -z "$admin_addr" ]; then + return 1 + fi + + get_key_name_for_address "$admin_addr" +} + # ═══════════════════════════════════════════════════════════════════════════════ # STATUS DISPLAY # ═══════════════════════════════════════════════════════════════════════════════ @@ -413,6 +563,123 @@ cmd_up() { fi fi + local sepolia_start_height="${SEPOLIA_EVENT_START_FROM:-}" + local base_start_height="${BASE_EVENT_START_FROM:-}" + local 
arbitrum_start_height="${ARBITRUM_EVENT_START_FROM:-}" + local bsc_start_height="${BSC_EVENT_START_FROM:-}" + local solana_start_height="${SOLANA_EVENT_START_FROM:-}" + + if [ -z "$sepolia_start_height" ]; then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if sepolia_start_height=$(fetch_evm_height "$LOCAL_SEPOLIA_SOURCE_RPC_URL"); then + print_status "Using LOCAL Sepolia event_start_from from $LOCAL_SEPOLIA_SOURCE_RPC_URL: $sepolia_start_height" + else + print_warning "Could not fetch LOCAL Sepolia latest block from $LOCAL_SEPOLIA_SOURCE_RPC_URL" + print_warning "Sepolia will use default event_start_from from pushuv config" + sepolia_start_height="" + fi + elif sepolia_start_height=$(fetch_evm_height "$SEPOLIA_DEFAULT_RPC_URL"); then + print_status "Using Sepolia event_start_from: $sepolia_start_height" + else + print_warning "Could not fetch Sepolia latest block from $SEPOLIA_DEFAULT_RPC_URL" + print_warning "Sepolia will use default event_start_from from pushuv config" + sepolia_start_height="" + fi + else + print_status "Using provided SEPOLIA_EVENT_START_FROM: $sepolia_start_height" + fi + + if [ -z "$base_start_height" ]; then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if base_start_height=$(fetch_evm_height "$LOCAL_BASE_SOURCE_RPC_URL"); then + print_status "Using LOCAL Base Sepolia event_start_from from $LOCAL_BASE_SOURCE_RPC_URL: $base_start_height" + else + print_warning "Could not fetch LOCAL Base Sepolia latest block from $LOCAL_BASE_SOURCE_RPC_URL" + print_warning "Base Sepolia will use default event_start_from from pushuv config" + base_start_height="" + fi + elif base_start_height=$(fetch_evm_height "$BASE_SEPOLIA_DEFAULT_RPC_URL"); then + print_status "Using Base Sepolia event_start_from: $base_start_height" + else + print_warning "Could not fetch Base Sepolia latest block from $BASE_SEPOLIA_DEFAULT_RPC_URL" + print_warning "Base Sepolia will use default event_start_from from pushuv config" + base_start_height="" + fi + else + print_status "Using 
provided BASE_EVENT_START_FROM: $base_start_height" + fi + + if [ -z "$arbitrum_start_height" ]; then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if arbitrum_start_height=$(fetch_evm_height "$LOCAL_ARBITRUM_SOURCE_RPC_URL"); then + print_status "Using LOCAL Arbitrum Sepolia event_start_from from $LOCAL_ARBITRUM_SOURCE_RPC_URL: $arbitrum_start_height" + else + print_warning "Could not fetch LOCAL Arbitrum Sepolia latest block from $LOCAL_ARBITRUM_SOURCE_RPC_URL" + print_warning "Arbitrum Sepolia will use default event_start_from from pushuv config" + arbitrum_start_height="" + fi + elif arbitrum_start_height=$(fetch_evm_height "$ARBITRUM_SEPOLIA_DEFAULT_RPC_URL"); then + print_status "Using Arbitrum Sepolia event_start_from: $arbitrum_start_height" + else + print_warning "Could not fetch Arbitrum Sepolia latest block from $ARBITRUM_SEPOLIA_DEFAULT_RPC_URL" + print_warning "Arbitrum Sepolia will use default event_start_from from pushuv config" + arbitrum_start_height="" + fi + else + print_status "Using provided ARBITRUM_EVENT_START_FROM: $arbitrum_start_height" + fi + + if [ -z "$bsc_start_height" ]; then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if bsc_start_height=$(fetch_evm_height "$LOCAL_BSC_SOURCE_RPC_URL"); then + print_status "Using LOCAL BSC testnet event_start_from from $LOCAL_BSC_SOURCE_RPC_URL: $bsc_start_height" + else + print_warning "Could not fetch LOCAL BSC testnet latest block from $LOCAL_BSC_SOURCE_RPC_URL" + print_warning "BSC testnet will use default event_start_from from pushuv config" + bsc_start_height="" + fi + elif bsc_start_height=$(fetch_evm_height "$BSC_TESTNET_DEFAULT_RPC_URL"); then + print_status "Using BSC testnet event_start_from: $bsc_start_height" + else + print_warning "Could not fetch BSC testnet latest block from $BSC_TESTNET_DEFAULT_RPC_URL" + print_warning "BSC testnet will use default event_start_from from pushuv config" + bsc_start_height="" + fi + else + print_status "Using provided BSC_EVENT_START_FROM: 
$bsc_start_height" + fi + + if [ -z "$solana_start_height" ]; then + if [ "${TESTING_ENV:-}" = "LOCAL" ]; then + if solana_start_height=$(fetch_solana_slot "$LOCAL_SOLANA_SOURCE_RPC_URL"); then + print_status "Using LOCAL Solana event_start_from from $LOCAL_SOLANA_SOURCE_RPC_URL: $solana_start_height" + else + print_warning "Could not fetch LOCAL Solana latest slot from $LOCAL_SOLANA_SOURCE_RPC_URL" + print_warning "Solana will use default event_start_from from pushuv config" + solana_start_height="" + fi + elif solana_start_height=$(fetch_solana_slot "$SOLANA_DEVNET_DEFAULT_RPC_URL"); then + print_status "Using Solana devnet event_start_from: $solana_start_height" + else + print_warning "Could not fetch Solana devnet latest slot from $SOLANA_DEVNET_DEFAULT_RPC_URL" + print_warning "Solana devnet will use default event_start_from from pushuv config" + solana_start_height="" + fi + else + print_status "Using provided SOLANA_EVENT_START_FROM: $solana_start_height" + fi + + TESTING_ENV="${TESTING_ENV:-}" \ + SEPOLIA_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_SEPOLIA_UV_RPC_URL" || echo "${SEPOLIA_RPC_URL_OVERRIDE:-}")" \ + ARBITRUM_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_ARBITRUM_UV_RPC_URL" || echo "${ARBITRUM_RPC_URL_OVERRIDE:-}")" \ + BASE_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_BASE_UV_RPC_URL" || echo "${BASE_RPC_URL_OVERRIDE:-}")" \ + BSC_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_BSC_UV_RPC_URL" || echo "${BSC_RPC_URL_OVERRIDE:-}")" \ + SOLANA_RPC_URL_OVERRIDE="$([ "${TESTING_ENV:-}" = "LOCAL" ] && echo "$LOCAL_SOLANA_UV_RPC_URL" || echo "${SOLANA_RPC_URL_OVERRIDE:-}")" \ + SEPOLIA_EVENT_START_FROM="$sepolia_start_height" \ + BASE_EVENT_START_FROM="$base_start_height" \ + ARBITRUM_EVENT_START_FROM="$arbitrum_start_height" \ + BSC_EVENT_START_FROM="$bsc_start_height" \ + SOLANA_EVENT_START_FROM="$solana_start_height" \ docker compose up -d # Auto-push to 
cache if we built locally (populate cache for team) @@ -500,26 +767,169 @@ cmd_rebuild() { # ═══════════════════════════════════════════════════════════════════════════════ cmd_tss_keygen() { print_header "TSS Key Generation" - print_status "Initiating TSS keygen process..." + print_status "Validating UV topology before keygen..." + + local uv_json + uv_json=$(docker exec core-validator-1 pchaind query uvalidator all-universal-validators --node tcp://localhost:26657 --output json 2>/dev/null || echo "{}") + local uv_count + uv_count=$(echo "$uv_json" | jq -r '.universal_validator | length // 0' 2>/dev/null || echo "0") + if [ "$uv_count" -lt 2 ]; then + print_error "Need at least 2 registered universal validators for DKLS keygen (found: $uv_count)" + return 1 + fi - docker exec core-validator-1 pchaind tx utss initiate-tss-key-process \ - --process-type tss-process-keygen \ - --from genesis-acc-1 \ - --chain-id localchain_9000-1 \ - --keyring-backend test \ - --fees 1000000000000000upc \ - --yes + local invalid_peer_count + invalid_peer_count=$(echo "$uv_json" | jq -r '[.universal_validator[]? | select((.network_info.peer_id // "") | startswith("12D3") | not)] | length' 2>/dev/null || echo "0") + if [ "$invalid_peer_count" -gt 0 ]; then + print_error "Found $invalid_peer_count universal validators with non-libp2p peer IDs" + echo "$uv_json" | jq -r '.universal_validator[]? 
| " - " + (.identify_info.core_validator_address // "unknown") + " => " + (.network_info.peer_id // "")' 2>/dev/null || true + print_error "Run './devnet setup-uvalidators' to repair peer IDs before keygen" + return 1 + fi + + local existing_key + existing_key=$(get_current_tss_key_id) + if [ -n "$existing_key" ]; then + print_success "TSS key already present: $existing_key" + return 0 + fi + + local max_attempts=5 + local max_wait_per_attempt=180 + local attempt + local admin_addr + admin_addr=$(get_utss_admin_address) + if [ -z "$admin_addr" ]; then + print_error "Unable to read UTSS params.admin from chain" + return 1 + fi + + local tx_signer + tx_signer=$(resolve_utss_admin_signer) + if [ -z "$tx_signer" ]; then + print_error "No local key matches UTSS admin address: $admin_addr" + print_status "Available local keys in core-validator-1:" + docker exec core-validator-1 pchaind keys list --keyring-backend test --output json 2>/dev/null | jq -r '.[] | " - " + .name + " => " + .address' 2>/dev/null || true + return 1 + fi + + print_status "UTSS admin signer resolved: $tx_signer ($admin_addr)" + + for ((attempt = 1; attempt <= max_attempts; attempt++)); do + if ! wait_for_block_progress 8; then + print_error "Chain is not producing blocks; cannot run TSS keygen" + return 1 + fi + + print_status "Initiating TSS keygen process (attempt ${attempt}/${max_attempts}, signer: ${tx_signer})..." 
+ + local keygen_result + keygen_result=$(docker exec core-validator-1 pchaind tx utss initiate-tss-key-process \ + --process-type tss-process-keygen \ + --from "$tx_signer" \ + --chain-id localchain_9000-1 \ + --keyring-backend test \ + --fees 1000000000000000upc \ + --yes \ + --output json 2>&1 || true) + + local submit_code + submit_code=$(echo "$keygen_result" | jq -r '.code // "0"' 2>/dev/null || echo "0") + local submit_log + submit_log=$(echo "$keygen_result" | jq -r '.raw_log // .message // ""' 2>/dev/null || true) + local tx_hash + tx_hash=$(echo "$keygen_result" | jq -r '.txhash // ""' 2>/dev/null) + + if [ "$submit_code" != "0" ]; then + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qi "account sequence mismatch"; then + print_warning "Keygen tx had sequence mismatch for signer $tx_signer on attempt ${attempt}/${max_attempts}; waiting for block progress and retrying" + wait_for_block_progress 8 || sleep 2 + continue + fi + + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qi "invalid authority"; then + print_error "UTSS rejected signer authority: ${submit_log}" + print_error "Resolved signer: $tx_signer ($admin_addr)" + return 1 + fi + + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qiE "insufficient funds|insufficient fee"; then + print_error "Keygen signer $tx_signer cannot pay fees: ${submit_log}" + return 1 + fi + + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qiE "unauthorized|not found"; then + print_warning "Keygen submission returned authorization/keyring error: ${submit_log}" + continue + fi + + if [ -n "$submit_log" ] && echo "$submit_log" | grep -qiE "already|in progress|pending"; then + print_status "Keygen process appears active already: $submit_log" + else + print_warning "Keygen submission returned code $submit_code on attempt ${attempt}/${max_attempts}: ${submit_log:-unknown error}" + sleep 2 + continue + fi + else + if [ -n "$tx_hash" ]; then + if ! wait_for_chain_tx_success "$tx_hash" 45; then + if ! 
wait_for_block_progress 8; then + print_error "Chain stalled while waiting for keygen tx confirmation" + return 1 + fi + print_warning "Keygen tx did not confirm successfully on attempt ${attempt}/${max_attempts}, checking on-chain key state" + fi + else + print_warning "Keygen submission returned no tx hash on attempt ${attempt}/${max_attempts}" + fi + fi - print_success "TSS keygen initiated!" + print_status "Waiting for TSS key to materialize on-chain..." + local waited=0 + while [ "$waited" -lt "$max_wait_per_attempt" ]; do + local key_id + key_id=$(get_current_tss_key_id) + if [ -n "$key_id" ]; then + print_success "TSS key is ready: $key_id" + return 0 + fi + sleep 2 + waited=$((waited + 2)) + done + + if [ "$attempt" -lt "$max_attempts" ]; then + print_warning "TSS key not available after ${max_wait_per_attempt}s on attempt ${attempt}/${max_attempts}" + sleep 5 + fi + done + + print_error "TSS key not available after ${max_attempts} attempts" + print_status "Recent universal-validator logs for diagnosis:" + for i in 1 2 3 4; do + if docker ps --format '{{.Names}}' | grep -qx "universal-validator-$i"; then + echo "---- universal-validator-$i ----" + docker logs --tail 40 "universal-validator-$i" 2>&1 || true + fi + done + return 1 } cmd_tss_refresh() { print_header "TSS Key Refresh" print_status "Initiating TSS key refresh process..." + local admin_addr + admin_addr=$(get_utss_admin_address) + local tx_signer + tx_signer=$(resolve_utss_admin_signer) + if [ -z "$tx_signer" ] || [ -z "$admin_addr" ]; then + print_error "Unable to resolve UTSS admin signer for refresh" + return 1 + fi + docker exec core-validator-1 pchaind tx utss initiate-tss-key-process \ --process-type tss-process-refresh \ - --from genesis-acc-1 \ + --from "$tx_signer" \ --chain-id localchain_9000-1 \ --keyring-backend test \ --fees 1000000000000000upc \ @@ -532,9 +942,18 @@ cmd_tss_quorum() { print_header "TSS Quorum Change" print_status "Initiating TSS quorum change process..." 
+ local admin_addr + admin_addr=$(get_utss_admin_address) + local tx_signer + tx_signer=$(resolve_utss_admin_signer) + if [ -z "$tx_signer" ] || [ -z "$admin_addr" ]; then + print_error "Unable to resolve UTSS admin signer for quorum change" + return 1 + fi + docker exec core-validator-1 pchaind tx utss initiate-tss-key-process \ --process-type tss-process-quorum-change \ - --from genesis-acc-1 \ + --from "$tx_signer" \ --chain-id localchain_9000-1 \ --keyring-backend test \ --fees 1000000000000000upc \ @@ -665,6 +1084,8 @@ cmd_setup_uvalidators() { echo -e "${YELLOW}Registering universal validators and granting AuthZ permissions...${NC}" echo + local setup_failures=0 + for i in 1 2 3 4; do echo -e "${BLUE}─────────────────────────────────────────────────────────────────────${NC}" echo -e "${BOLD}Setting up Universal Validator $i${NC}" @@ -682,80 +1103,168 @@ cmd_setup_uvalidators() { echo -e " Account: ${CYAN}$VALIDATOR_ADDR${NC}" echo -e " Valoper: ${CYAN}$VALOPER_ADDR${NC}" - # Get network info from universal validator's TSS port + # Get TSS libp2p network info (not CometBFT node ID). echo -ne " ${YELLOW}Getting network info:${NC} " - local rpc_port=$(get_rpc_port $i) - local NODE_ID=$(curl -s "http://localhost:${rpc_port}/status" | jq -r '.result.node_info.id' 2>/dev/null) + local PEER_ID + case "$i" in + 1) PEER_ID="12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5" ;; + 2) PEER_ID="12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq" ;; + 3) PEER_ID="12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba" ;; + 4) PEER_ID="12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw" ;; + *) PEER_ID="" ;; + esac + local TSS_PORT + TSS_PORT=$(get_tss_port "$i") + local MULTI_ADDR="/dns4/universal-validator-$i/tcp/$TSS_PORT" - if [ -z "$NODE_ID" ] || [ "$NODE_ID" = "null" ]; then - echo -e "${RED}Failed to get node ID${NC}" + if [ -z "$PEER_ID" ] || [[ ! 
"$PEER_ID" =~ ^12D3 ]]; then + echo -e "${RED}Failed to compute TSS peer ID${NC}" + setup_failures=$((setup_failures + 1)) continue fi - local MULTI_ADDR="/ip4/core-validator-$i/tcp/26656" echo -e "${GREEN}Done${NC}" - echo -e " Node ID: ${CYAN}$NODE_ID${NC}" + echo -e " Peer ID: ${CYAN}$PEER_ID${NC}" + echo -e " MultiAddr: ${CYAN}$MULTI_ADDR${NC}" + + # Ensure core validator exists in staking before UV registration. + local bond_status="NOT_FOUND" + local bond_wait=0 + local bond_wait_max=90 + while [ "$bond_wait" -lt "$bond_wait_max" ]; do + bond_status=$(docker exec core-validator-1 pchaind query staking validator "$VALOPER_ADDR" --node tcp://localhost:26657 --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' 2>/dev/null || echo "NOT_FOUND") + if [ "$bond_status" = "BOND_STATUS_BONDED" ]; then + break + fi + sleep 2 + bond_wait=$((bond_wait + 2)) + done + if [ "$bond_status" != "BOND_STATUS_BONDED" ]; then + echo -e "${RED}Core validator is not bonded after ${bond_wait_max}s (status: $bond_status)${NC}" + setup_failures=$((setup_failures + 1)) + continue + fi + + local network_json + network_json="{\"peer_id\": \"$PEER_ID\", \"multi_addrs\": [\"$MULTI_ADDR\"]}" # Register as universal validator echo -ne " ${YELLOW}Registering as universal validator:${NC} " local register_result=$(docker exec core-validator-1 pchaind tx uvalidator add-universal-validator \ --core-validator-address "$VALOPER_ADDR" \ - --network "{\"peer_id\": \"$NODE_ID\", \"multi_addrs\": [\"$MULTI_ADDR\"]}" \ + --network "$network_json" \ --from genesis-acc-1 \ --chain-id localchain_9000-1 \ + --node tcp://localhost:26657 \ --keyring-backend test \ --fees 1000000000000000upc \ --yes \ - --output json 2>&1) + --output json 2>&1 || true) - if echo "$register_result" | grep -q '"txhash"'; then + local register_tx + register_tx=$(echo "$register_result" | jq -r '.txhash // ""' 2>/dev/null) + local registered=false + if [ -n "$register_tx" ] && wait_for_chain_tx_success 
"$register_tx" 30; then echo -e "${GREEN}Done${NC}" - else - echo -e "${YELLOW}May already be registered${NC}" + registered=true + fi + + # If add failed (often already-exists), update network info from validator account. + if [ "$registered" = "false" ]; then + local update_result + update_result=$(docker exec core-validator-$i pchaind tx uvalidator update-universal-validator \ + --network "$network_json" \ + --from validator-$i \ + --chain-id localchain_9000-1 \ + --node tcp://core-validator-1:26657 \ + --keyring-backend test \ + --fees 1000000000000000upc \ + --yes \ + --output json 2>&1 || true) + + local update_tx + update_tx=$(echo "$update_result" | jq -r '.txhash // ""' 2>/dev/null) + if [ -n "$update_tx" ] && wait_for_chain_tx_success "$update_tx" 30; then + echo -e "${GREEN}Updated existing registration${NC}" + registered=true + fi + fi + + if [ "$registered" = "false" ]; then + echo -e "${RED}Registration/update failed for validator-$i${NC}" + setup_failures=$((setup_failures + 1)) + continue fi sleep 3 - # Setup hotkey (use pre-generated hotkey-$i from setup-universal.sh) + # Resolve hotkey. Prefer container keyring, fallback to shared generated accounts. 
echo -ne " ${YELLOW}Setting up hotkey:${NC} " local hotkey_name="hotkey-$i" - local HOTKEY_ADDR=$(docker exec universal-validator-$i puniversald keys show "$hotkey_name" -a --keyring-backend test --home /root/.puniversal 2>/dev/null || echo "") + local HOTKEY_ADDR + HOTKEY_ADDR=$(docker exec universal-validator-$i puniversald keys show "$hotkey_name" -a --keyring-backend test --home /root/.puniversal 2>/dev/null || echo "") + + if [ -z "$HOTKEY_ADDR" ] && docker exec core-validator-1 test -f /tmp/push-accounts/hotkeys.json >/dev/null 2>&1; then + HOTKEY_ADDR=$(docker exec core-validator-1 sh -lc "jq -r '.[${i}-1].address' /tmp/push-accounts/hotkeys.json" 2>/dev/null || echo "") + if [ -n "$HOTKEY_ADDR" ] && [ "$HOTKEY_ADDR" != "null" ]; then + echo -e "${YELLOW}Using fallback shared hotkey-$i${NC}" + fi + fi - if [ -z "$HOTKEY_ADDR" ]; then - echo -e "${RED}Hotkey not found - validator may not be initialized${NC}" + if [ -z "$HOTKEY_ADDR" ] || [ "$HOTKEY_ADDR" = "null" ]; then + echo -e "${RED}Hotkey not found for validator-$i${NC}" continue + fi + + if [ -z "${HOTKEY_ADDR##push1*}" ]; then + : else - echo -e "${GREEN}Using hotkey-$i${NC}" + echo -e "${RED}Invalid hotkey address for validator-$i: $HOTKEY_ADDR${NC}" + continue fi - echo -e " Hotkey: ${CYAN}$HOTKEY_ADDR${NC}" - # Grant MsgVoteInbound - echo -ne " ${YELLOW}Granting MsgVoteInbound:${NC} " - docker exec core-validator-$i pchaind tx authz grant "$HOTKEY_ADDR" generic \ - --msg-type=/uexecutor.v1.MsgVoteInbound \ - --from validator-$i \ - --chain-id localchain_9000-1 \ - --keyring-backend test \ - --fees 200000000000000upc \ - --yes >/dev/null 2>&1 && echo -e "${GREEN}Done${NC}" || echo -e "${YELLOW}May already exist${NC}" + echo -e "${GREEN}Using hotkey-$i${NC}" + echo -e " Hotkey: ${CYAN}$HOTKEY_ADDR${NC}" - sleep 2 + # Grant required AuthZ messages (must match PushSigner expectations) + for msg_type in \ + /uexecutor.v1.MsgVoteInbound \ + /uexecutor.v1.MsgVoteChainMeta \ + /uexecutor.v1.MsgVoteOutbound 
\ + /utss.v1.MsgVoteTssKeyProcess + do + local msg_name=$(basename "$msg_type") + echo -ne " ${YELLOW}Granting ${msg_name}:${NC} " + docker exec core-validator-$i pchaind tx authz grant "$HOTKEY_ADDR" generic \ + --msg-type="$msg_type" \ + --from validator-$i \ + --chain-id localchain_9000-1 \ + --node tcp://core-validator-1:26657 \ + --keyring-backend test \ + --fees 200000000000000upc \ + --yes >/dev/null 2>&1 && echo -e "${GREEN}Done${NC}" || echo -e "${YELLOW}May already exist${NC}" - # Grant MsgVoteGasPrice - echo -ne " ${YELLOW}Granting MsgVoteGasPrice:${NC} " - docker exec core-validator-$i pchaind tx authz grant "$HOTKEY_ADDR" generic \ - --msg-type=/uexecutor.v1.MsgVoteGasPrice \ - --from validator-$i \ - --chain-id localchain_9000-1 \ - --keyring-backend test \ - --fees 200000000000000upc \ - --yes >/dev/null 2>&1 && echo -e "${GREEN}Done${NC}" || echo -e "${YELLOW}May already exist${NC}" + sleep 1 + done - sleep 2 + # Verify all required grants are visible on the canonical RPC endpoint. + local grant_count + grant_count=$(docker exec core-validator-1 pchaind query authz grants "$VALIDATOR_ADDR" "$HOTKEY_ADDR" --node tcp://core-validator-1:26657 --output json 2>/dev/null | jq -r '[.grants[]? | .authorization.value.msg | select(. == "/uexecutor.v1.MsgVoteInbound" or . == "/uexecutor.v1.MsgVoteChainMeta" or . == "/uexecutor.v1.MsgVoteOutbound" or . == "/utss.v1.MsgVoteTssKeyProcess")] | length' 2>/dev/null || echo "0") + if [ "$grant_count" -lt 4 ]; then + echo -e " ${RED}Only ${grant_count}/4 required grants visible for validator-$i${NC}" + setup_failures=$((setup_failures + 1)) + else + echo -e " ${GREEN}Verified 4/4 required grants${NC}" + fi echo -e " ${GREEN}Validator $i setup complete${NC}" echo done + if [ "$setup_failures" -gt 0 ]; then + print_error "Universal validator setup completed with $setup_failures failure(s)" + return 1 + fi + print_success "Universal validators setup complete!" 
echo echo -e "${BOLD}Verify with:${NC}" diff --git a/local-multi-validator/docker-compose.yml b/local-multi-validator/docker-compose.yml index a9374086..815ae85f 100644 --- a/local-multi-validator/docker-compose.yml +++ b/local-multi-validator/docker-compose.yml @@ -221,6 +221,8 @@ services: ports: - "8080:8080" # Query API - "39000:39000" # TSS P2P + ulimits: + stack: -1 volumes: - universal1-data:/root/.puniversal - ./scripts:/opt/scripts @@ -231,6 +233,17 @@ services: - CORE_VALIDATOR_GRPC=core-validator-1:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - TESTING_ENV=${TESTING_ENV:-} + - SEPOLIA_RPC_URL_OVERRIDE=${SEPOLIA_RPC_URL_OVERRIDE:-} + - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} + - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} + - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} + - SOLANA_RPC_URL_OVERRIDE=${SOLANA_RPC_URL_OVERRIDE:-} + - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} + - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} + - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} + - BSC_EVENT_START_FROM=${BSC_EVENT_START_FROM:-} + - SOLANA_EVENT_START_FROM=${SOLANA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-1: @@ -254,6 +267,8 @@ services: ports: - "8081:8080" # Query API - "39001:39001" # TSS P2P + ulimits: + stack: -1 volumes: - universal2-data:/root/.puniversal - ./scripts:/opt/scripts @@ -264,6 +279,17 @@ services: - CORE_VALIDATOR_GRPC=core-validator-2:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - TESTING_ENV=${TESTING_ENV:-} + - SEPOLIA_RPC_URL_OVERRIDE=${SEPOLIA_RPC_URL_OVERRIDE:-} + - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} + - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} + - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} + - SOLANA_RPC_URL_OVERRIDE=${SOLANA_RPC_URL_OVERRIDE:-} + - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} + - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} + - 
ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} + - BSC_EVENT_START_FROM=${BSC_EVENT_START_FROM:-} + - SOLANA_EVENT_START_FROM=${SOLANA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-2: @@ -287,6 +313,8 @@ services: ports: - "8082:8080" # Query API - "39002:39002" # TSS P2P + ulimits: + stack: -1 volumes: - universal3-data:/root/.puniversal - ./scripts:/opt/scripts @@ -297,6 +325,17 @@ services: - CORE_VALIDATOR_GRPC=core-validator-3:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - TESTING_ENV=${TESTING_ENV:-} + - SEPOLIA_RPC_URL_OVERRIDE=${SEPOLIA_RPC_URL_OVERRIDE:-} + - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} + - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} + - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} + - SOLANA_RPC_URL_OVERRIDE=${SOLANA_RPC_URL_OVERRIDE:-} + - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} + - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} + - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} + - BSC_EVENT_START_FROM=${BSC_EVENT_START_FROM:-} + - SOLANA_EVENT_START_FROM=${SOLANA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-3: @@ -320,6 +359,8 @@ services: ports: - "8083:8080" # Query API - "39003:39003" # TSS P2P + ulimits: + stack: -1 volumes: - universal4-data:/root/.puniversal - ./scripts:/opt/scripts @@ -330,6 +371,17 @@ services: - CORE_VALIDATOR_GRPC=core-validator-4:9090 - QUERY_PORT=8080 - TSS_ENABLED=true + - TESTING_ENV=${TESTING_ENV:-} + - SEPOLIA_RPC_URL_OVERRIDE=${SEPOLIA_RPC_URL_OVERRIDE:-} + - ARBITRUM_RPC_URL_OVERRIDE=${ARBITRUM_RPC_URL_OVERRIDE:-} + - BASE_RPC_URL_OVERRIDE=${BASE_RPC_URL_OVERRIDE:-} + - BSC_RPC_URL_OVERRIDE=${BSC_RPC_URL_OVERRIDE:-} + - SOLANA_RPC_URL_OVERRIDE=${SOLANA_RPC_URL_OVERRIDE:-} + - SEPOLIA_EVENT_START_FROM=${SEPOLIA_EVENT_START_FROM:-} + - BASE_EVENT_START_FROM=${BASE_EVENT_START_FROM:-} + - ARBITRUM_EVENT_START_FROM=${ARBITRUM_EVENT_START_FROM:-} + - 
BSC_EVENT_START_FROM=${BSC_EVENT_START_FROM:-} + - SOLANA_EVENT_START_FROM=${SOLANA_EVENT_START_FROM:-} command: ["/opt/scripts/setup-universal.sh"] depends_on: core-validator-4: diff --git a/local-multi-validator/scripts/setup-genesis-auto.sh b/local-multi-validator/scripts/setup-genesis-auto.sh index baadd4cb..9b89c34c 100755 --- a/local-multi-validator/scripts/setup-genesis-auto.sh +++ b/local-multi-validator/scripts/setup-genesis-auto.sh @@ -444,7 +444,8 @@ if [ -f "$HOTKEYS_FILE" ]; then echo " Granter: $VALIDATOR_ADDR" echo " Grantee: $HOTKEY_ADDR" - # Grant all 4 message types for this validator + # Grant all required message types for this validator. + # Keep this list aligned with universalClient/constant/constant.go (RequiredMsgGrants). for MSG_TYPE in \ "/uexecutor.v1.MsgVoteInbound" \ "/uexecutor.v1.MsgVoteChainMeta" \ diff --git a/local-multi-validator/scripts/setup-universal.sh b/local-multi-validator/scripts/setup-universal.sh index 19de8fe4..adb957bd 100755 --- a/local-multi-validator/scripts/setup-universal.sh +++ b/local-multi-validator/scripts/setup-universal.sh @@ -9,6 +9,14 @@ UNIVERSAL_ID=${UNIVERSAL_ID:-"1"} CORE_VALIDATOR_GRPC=${CORE_VALIDATOR_GRPC:-"core-validator-1:9090"} QUERY_PORT=${QUERY_PORT:-8080} +# In LOCAL devnet, use a single canonical gRPC endpoint for startup validation +# to avoid transient per-node state skew during UV boot. +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -n "${LOCAL_CANONICAL_CORE_GRPC:-}" ]; then + CORE_VALIDATOR_GRPC="$LOCAL_CANONICAL_CORE_GRPC" +elif [ "${TESTING_ENV:-}" = "LOCAL" ]; then + CORE_VALIDATOR_GRPC="core-validator-1:9090" +fi + # Paths BINARY="/usr/bin/puniversald" HOME_DIR="/root/.puniversal" @@ -80,7 +88,12 @@ fi # === INITIALIZATION === # --------------------------- -# Clean start +# Clean start — preserve keyshares across restarts if they exist +if [ -d "$HOME_DIR/keyshares" ] && [ "$(ls -A "$HOME_DIR/keyshares" 2>/dev/null)" ]; then + _KEYSHARES_TMP=$(mktemp -d) + cp -r "$HOME_DIR/keyshares/." 
"$_KEYSHARES_TMP/" + echo "🔑 Preserved $(ls "$_KEYSHARES_TMP" | wc -l | tr -d ' ') keyshare(s) before clean" +fi rm -rf "$HOME_DIR"/* "$HOME_DIR"/.[!.]* "$HOME_DIR"/..?* 2>/dev/null || true echo "🔧 Initializing universal validator..." @@ -88,6 +101,14 @@ echo "🔧 Initializing universal validator..." # Initialize puniversald (creates config directory and default config) $BINARY init +# Restore keyshares if they were preserved +if [ -n "${_KEYSHARES_TMP:-}" ] && [ -d "$_KEYSHARES_TMP" ]; then + mkdir -p "$HOME_DIR/keyshares" + cp -r "$_KEYSHARES_TMP/." "$HOME_DIR/keyshares/" + rm -rf "$_KEYSHARES_TMP" + echo "🔑 Restored $(ls "$HOME_DIR/keyshares" | wc -l | tr -d ' ') keyshare(s)" +fi + # Update the gRPC URL and keyring backend in the config # The CORE_VALIDATOR_GRPC env var is already set correctly in docker-compose.yml: # - universal-validator-1 uses core-validator-1:9090 @@ -136,6 +157,119 @@ if [ "$QUERY_PORT" != "8080" ]; then mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" fi +# After initialization and before event start from overrides +# Force Arbitrum Sepolia RPC URL to tenderly endpoint +ARBITRUM_CHAIN_ID="eip155:421614" +ARBITRUM_TENDERLY_URL="https://arbitrum-sepolia.gateway.tenderly.co" +BASE_CHAIN_ID="eip155:84532" +BSC_CHAIN_ID="eip155:97" +SOLANA_CHAIN_ID="solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" +BSC_TESTNET_CHAIN_ID="eip155:97" +BSC_TESTNET_RPC_URL="${BSC_TESTNET_RPC_URL:-https://bsc-testnet-rpc.publicnode.com}" +SEPOLIA_CHAIN_ID="eip155:11155111" + +# In LOCAL testing, universal-validator containers must not use localhost for host services. 
+if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${SEPOLIA_RPC_URL_OVERRIDE:-}" ]; then + SEPOLIA_RPC_URL_OVERRIDE="http://host.docker.internal:9545" +fi +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${ARBITRUM_RPC_URL_OVERRIDE:-}" ]; then + ARBITRUM_RPC_URL_OVERRIDE="http://host.docker.internal:9546" +fi +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${BASE_RPC_URL_OVERRIDE:-}" ]; then + BASE_RPC_URL_OVERRIDE="http://host.docker.internal:9547" +fi +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${BSC_RPC_URL_OVERRIDE:-}" ]; then + BSC_RPC_URL_OVERRIDE="http://host.docker.internal:9548" +fi +if [ "${TESTING_ENV:-}" = "LOCAL" ] && [ -z "${SOLANA_RPC_URL_OVERRIDE:-}" ]; then + SOLANA_RPC_URL_OVERRIDE="http://host.docker.internal:8899" +fi + +jq --arg chain "$ARBITRUM_CHAIN_ID" --arg url "$ARBITRUM_TENDERLY_URL" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" + +jq --arg chain "$BSC_TESTNET_CHAIN_ID" --arg url "$BSC_TESTNET_RPC_URL" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" + +if [ -n "${SEPOLIA_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding Sepolia rpc_urls to: $SEPOLIA_RPC_URL_OVERRIDE" + jq --arg chain "$SEPOLIA_CHAIN_ID" --arg url "$SEPOLIA_RPC_URL_OVERRIDE" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + +if [ -n "${ARBITRUM_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding Arbitrum Sepolia rpc_urls to: $ARBITRUM_RPC_URL_OVERRIDE" + jq --arg chain "$ARBITRUM_CHAIN_ID" --arg url "$ARBITRUM_RPC_URL_OVERRIDE" \ + 
'.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + +if [ -n "${BASE_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding Base Sepolia rpc_urls to: $BASE_RPC_URL_OVERRIDE" + jq --arg chain "$BASE_CHAIN_ID" --arg url "$BASE_RPC_URL_OVERRIDE" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + +if [ -n "${BSC_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding BSC testnet rpc_urls to: $BSC_RPC_URL_OVERRIDE" + jq --arg chain "$BSC_CHAIN_ID" --arg url "$BSC_RPC_URL_OVERRIDE" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + +if [ -n "${SOLANA_RPC_URL_OVERRIDE:-}" ]; then + echo "🌐 Overriding Solana rpc_urls to: $SOLANA_RPC_URL_OVERRIDE" + jq --arg chain "$SOLANA_CHAIN_ID" --arg url "$SOLANA_RPC_URL_OVERRIDE" \ + '.chain_configs[$chain].rpc_urls = [$url]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + +# Optionally override chain event start heights (set by ./devnet start) +set_chain_event_start_from() { + local chain_id="$1" + local chain_label="$2" + local start_height="$3" + + [ -n "$start_height" ] || return 0 + + echo "📍 Setting ${chain_label} event_start_from: $start_height" + jq --arg chain "$chain_id" --argjson height "$start_height" \ + '.chain_configs[$chain].event_start_from = $height' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv 
"$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +} + +set_chain_event_start_from "eip155:11155111" "Sepolia" "${SEPOLIA_EVENT_START_FROM:-}" +set_chain_event_start_from "eip155:84532" "Base Sepolia" "${BASE_EVENT_START_FROM:-}" +set_chain_event_start_from "eip155:421614" "Arbitrum Sepolia" "${ARBITRUM_EVENT_START_FROM:-}" +set_chain_event_start_from "eip155:97" "BSC testnet" "${BSC_EVENT_START_FROM:-}" +set_chain_event_start_from "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" "Solana devnet" "${SOLANA_EVENT_START_FROM:-}" + +# Always align Push localchain scanning start with current height in local environments. +# Default config uses a high static start block for public networks, which would skip +# all events on fresh local devnets if left unchanged. +LOCALCHAIN_CHAIN_ID="localchain_9000-1" +LOCALCHAIN_START_FROM=$BLOCK_HEIGHT +if [ "$LOCALCHAIN_START_FROM" -gt 20 ]; then + LOCALCHAIN_START_FROM=$((LOCALCHAIN_START_FROM - 20)) +fi +echo "📍 Setting Push localchain event_start_from: $LOCALCHAIN_START_FROM" +jq --arg chain "$LOCALCHAIN_CHAIN_ID" --argjson height "$LOCALCHAIN_START_FROM" \ + '.chain_configs[$chain].event_start_from = $height' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" + # --------------------------- # === SET CORE VALOPER ADDRESS === # --------------------------- @@ -222,9 +356,13 @@ echo "📋 Required grants: MsgVoteInbound, MsgVoteChainMeta, MsgVoteOutbound, M # Get the hotkey address HOTKEY_ADDR=$($BINARY keys show "$HOTKEY_NAME" --address --keyring-backend test --home "$HOME_DIR" 2>/dev/null || echo "") -# Required number of grants (4 message types) +# Required message types (must match PushSigner validation requirements) +REQUIRED_MSG_TYPES='["/uexecutor.v1.MsgVoteInbound","/uexecutor.v1.MsgVoteChainMeta","/uexecutor.v1.MsgVoteOutbound","/utss.v1.MsgVoteTssKeyProcess"]' REQUIRED_GRANTS=4 +# 
Allow additional time for grant propagation during startup races. +AUTHZ_GRANTS_WAIT_SECONDS=${AUTHZ_GRANTS_WAIT_SECONDS:-120} + # Query core-validator-1 for grants (genesis validator creates ALL grants immediately) GRANTS_QUERY_HOST="core-validator-1" @@ -232,33 +370,35 @@ if [ -n "$HOTKEY_ADDR" ]; then echo "🔍 Checking for AuthZ grants for hotkey: $HOTKEY_ADDR" echo "📡 Querying grants from: $GRANTS_QUERY_HOST:1317" - # Wait for all 4 AuthZ grants (should be fast - genesis validator creates all grants) - max_wait=20 + # Wait for all required AuthZ grants (genesis validator creates all grants, but propagation can lag) + max_wait=$AUTHZ_GRANTS_WAIT_SECONDS wait_time=0 - GRANTS_COUNT=0 + MATCHED_GRANTS=0 while [ $wait_time -lt $max_wait ]; do - # Query grants from genesis validator - GRANTS_COUNT=$(curl -s "http://$GRANTS_QUERY_HOST:1317/cosmos/authz/v1beta1/grants/grantee/$HOTKEY_ADDR" 2>/dev/null | jq -r '.grants | length' 2>/dev/null || echo "0") + # Query grants and count only required message types + MATCHED_GRANTS=$(curl -s "http://$GRANTS_QUERY_HOST:1317/cosmos/authz/v1beta1/grants/grantee/$HOTKEY_ADDR" 2>/dev/null | \ + jq -r --argjson required "$REQUIRED_MSG_TYPES" '[.grants[]? | (.authorization.msg // .authorization.value.msg // "") as $m | select($required | index($m))] | length' 2>/dev/null || echo "0") - if [ "$GRANTS_COUNT" -ge "$REQUIRED_GRANTS" ] 2>/dev/null; then - echo "✅ Found all $GRANTS_COUNT/$REQUIRED_GRANTS required AuthZ grants!" + if [ "$MATCHED_GRANTS" -ge "$REQUIRED_GRANTS" ] 2>/dev/null; then + echo "✅ Found all $MATCHED_GRANTS/$REQUIRED_GRANTS required AuthZ grants!" break fi # Show progress every 5 seconds if [ $((wait_time % 5)) -eq 0 ]; then - echo "⏳ Waiting for AuthZ grants... ($GRANTS_COUNT/$REQUIRED_GRANTS) (${wait_time}s / ${max_wait}s)" + echo "⏳ Waiting for AuthZ grants... 
($MATCHED_GRANTS/$REQUIRED_GRANTS) (${wait_time}s / ${max_wait}s)" fi sleep 1 wait_time=$((wait_time + 1)) done - if [ "$GRANTS_COUNT" -lt "$REQUIRED_GRANTS" ] 2>/dev/null; then - echo "⚠️ Only found $GRANTS_COUNT/$REQUIRED_GRANTS grants after ${max_wait}s" - echo " The universal validator may fail startup validation if grants are missing." + if [ "$MATCHED_GRANTS" -lt "$REQUIRED_GRANTS" ] 2>/dev/null; then + echo "⚠️ Only found $MATCHED_GRANTS/$REQUIRED_GRANTS required grants after ${max_wait}s" + echo " Continuing startup; grants may still arrive shortly." fi else - echo "⚠️ Could not get hotkey address, skipping AuthZ check" + echo "❌ Could not get hotkey address, cannot verify AuthZ grants" + exit 1 fi # --------------------------- @@ -284,7 +424,7 @@ if [ -n "$EXPECTED_PEER_ID" ]; then reg_wait=0 while [ $reg_wait -lt $max_reg_wait ]; do # Query all universal validators via REST API and look for our peer_id - FOUND=$(curl -s "http://core-validator-1:1317/push/uvalidator/v1/all_universal_validators" 2>/dev/null | \ + FOUND=$(curl -s "http://core-validator-1:1317/uvalidator/v1/universal_validators" 2>/dev/null | \ jq -r --arg pid "$EXPECTED_PEER_ID" \ '.universal_validator[]? | select(.network_info.peer_id == $pid) | .network_info.peer_id' 2>/dev/null || echo "") @@ -301,10 +441,13 @@ if [ -n "$EXPECTED_PEER_ID" ]; then done if [ -z "$FOUND" ]; then - echo "⚠️ Validator not found on-chain after ${max_reg_wait}s, continuing anyway..." + echo "❌ Validator not found on-chain after ${max_reg_wait}s" + echo " Failing startup so container restarts until registration is correct." + exit 1 fi else - echo "⚠️ Unknown UNIVERSAL_ID, skipping registration check" + echo "❌ Unknown UNIVERSAL_ID, cannot validate on-chain registration" + exit 1 fi # --------------------------- @@ -314,4 +457,8 @@ fi echo "🚀 Starting universal validator $UNIVERSAL_ID..." 
echo "🔗 Connecting to core validator: $CORE_VALIDATOR_GRPC" +# Increase OS thread stack size to unlimited so the Rust DKLS sign library +# (called via CGo) does not SIGSEGV from native stack overflow during sign sessions. +ulimit -s unlimited + exec $BINARY start \ No newline at end of file diff --git a/local-multi-validator/scripts/setup-validator-auto.sh b/local-multi-validator/scripts/setup-validator-auto.sh index 533319b5..fbc24b6f 100755 --- a/local-multi-validator/scripts/setup-validator-auto.sh +++ b/local-multi-validator/scripts/setup-validator-auto.sh @@ -63,6 +63,64 @@ FUNDING_MNEMONIC=$(jq -r ".[$FUNDING_INDEX].mnemonic" "$GENESIS_ACCOUNTS_FILE") FUNDING_KEY="genesis-acc-$VALIDATOR_ID" FUNDING_AMOUNT="200000000000000000000000" # 200k * 10^18 (enough for staking + fees) +wait_for_deliver_tx_success() { + local tx_hash="$1" + local node="$2" + local max_wait="${3:-30}" + local waited=0 + + while [ "$waited" -lt "$max_wait" ]; do + local tx_json + tx_json=$($BINARY query tx "$tx_hash" --node="$node" --output json 2>/dev/null || true) + local tx_code + tx_code=$(echo "$tx_json" | jq -r '.code // empty' 2>/dev/null || true) + + if [ -n "$tx_code" ]; then + if [ "$tx_code" = "0" ]; then + return 0 + fi + + local raw_log + raw_log=$(echo "$tx_json" | jq -r '.raw_log // ""' 2>/dev/null || true) + echo "❌ TX $tx_hash failed with code $tx_code: ${raw_log:-unknown error}" + return 1 + fi + + sleep 1 + waited=$((waited + 1)) + done + + echo "❌ Timed out waiting for TX $tx_hash to be included" + return 1 +} + +is_universal_validator_registered() { + local valoper_addr="$1" + local peer_id="$2" + local node="$3" + local uv_json + + uv_json=$($BINARY query uvalidator all-universal-validators --node="$node" --output json 2>/dev/null || echo "{}") + + if [ -n "$valoper_addr" ] && echo "$uv_json" | jq -e --arg addr "$valoper_addr" '.universal_validator[]? 
| select(.identify_info.core_validator_address == $addr)' >/dev/null 2>&1; then + return 0 + fi + + if [ -n "$peer_id" ] && echo "$uv_json" | jq -e --arg pid "$peer_id" '.universal_validator[]? | select(.network_info.peer_id == $pid)' >/dev/null 2>&1; then + return 0 + fi + + return 1 +} + +is_validator_bonded() { + local valoper_addr="$1" + local node="$2" + local status + status=$($BINARY query staking validator "$valoper_addr" --node="$node" --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' 2>/dev/null || echo "NOT_FOUND") + [ "$status" = "BOND_STATUS_BONDED" ] +} + # --------------------------- # === WAIT FOR GENESIS VALIDATOR === # --------------------------- @@ -97,125 +155,179 @@ if [ -f "$HOME_DIR/data/priv_validator_state.json" ]; then # Check if the state file has valid content (not just initial state) HEIGHT=$(cat "$HOME_DIR/data/priv_validator_state.json" | jq -r '.height // "0"' 2>/dev/null || echo "0") if [ "$HEIGHT" != "0" ] && [ "$HEIGHT" != "\"0\"" ]; then - echo "✅ Node already initialized with block height $HEIGHT, starting node..." - - # Start node in background so we can check UV registration - $BINARY start \ - --home "$HOME_DIR" \ - --pruning=nothing \ - --minimum-gas-prices="1000000000${DENOM}" \ - --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \ - --json-rpc.address="0.0.0.0:8545" \ - --json-rpc.ws-address="0.0.0.0:8546" \ - --json-rpc.api=eth,txpool,personal,net,debug,web3 \ - --chain-id="$CHAIN_ID" & - - NODE_PID=$! - - # Wait for node to be ready - echo "⏳ Waiting for node to be ready..." - sleep 10 - - # Check if UV registration is needed (for validators 2, 3, and 4) - if [ "$VALIDATOR_ID" = "2" ] || [ "$VALIDATOR_ID" = "3" ] || [ "$VALIDATOR_ID" = "4" ]; then - echo "🔍 Checking universal validator registration status..." 
- - GENESIS_RPC="http://core-validator-1:26657" - - # Pre-computed peer_ids - case $VALIDATOR_ID in - 2) - PEER_ID="12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq" - TSS_PORT=39001 - ;; - 3) - PEER_ID="12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba" - TSS_PORT=39002 - ;; - 4) - PEER_ID="12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw" - TSS_PORT=39003 - ;; - esac - - # Check if already registered by querying for our peer_id - UV_CHECK=$($BINARY query uvalidator all-universal-validators --node="$GENESIS_RPC" --output json 2>/dev/null || echo "{}") - - if echo "$UV_CHECK" | grep -q "$PEER_ID"; then - echo "✅ Universal-validator-$VALIDATOR_ID already registered" - else - echo "📝 Universal-validator-$VALIDATOR_ID not registered, registering now..." + REUSE_EXISTING_STATE=true + + # Always refresh persistent peer so stale node IDs from previous runs don't isolate the validator. + if [ -f "$HOME_DIR/config/config.toml" ]; then + GENESIS_NODE_ID=$(curl -s "$GENESIS_RPC/status" | jq -r '.result.node_info.id // ""') + if [ -n "$GENESIS_NODE_ID" ] && [ "$GENESIS_NODE_ID" != "null" ]; then + PERSISTENT_PEER="$GENESIS_NODE_ID@$GENESIS_PEER" + echo "🔗 Refreshing persistent peer to: $PERSISTENT_PEER" + sed -i -e "s/^persistent_peers *=.*/persistent_peers = \"$PERSISTENT_PEER\"/" "$HOME_DIR/config/config.toml" + fi + else + REUSE_EXISTING_STATE=false + echo "⚠️ Missing config.toml in existing state; forcing re-init" + fi - # Get valoper address - VALOPER_ADDR=$($BINARY keys show validator-$VALIDATOR_ID --bech val -a --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null) + # Reuse existing data only if validator is actually bonded on-chain. 
+ if [ "$REUSE_EXISTING_STATE" = "true" ]; then + VALOPER_ADDR=$($BINARY keys show validator-$VALIDATOR_ID --bech val -a --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null || echo "") + if [ -z "$VALOPER_ADDR" ]; then + REUSE_EXISTING_STATE=false + echo "⚠️ Could not read valoper address from existing keyring; forcing re-init" + else + VALIDATOR_STATUS=$($BINARY query staking validator "$VALOPER_ADDR" --node="$GENESIS_RPC" --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' || echo "NOT_FOUND") + if [ "$VALIDATOR_STATUS" != "BOND_STATUS_BONDED" ]; then + REUSE_EXISTING_STATE=false + echo "⚠️ Existing validator status is $VALIDATOR_STATUS; forcing re-init" + fi + fi + fi - if [ -n "$VALOPER_ADDR" ]; then - MULTI_ADDR="/dns4/universal-validator-$VALIDATOR_ID/tcp/$TSS_PORT" - NETWORK_JSON="{\"peer_id\": \"$PEER_ID\", \"multi_addrs\": [\"$MULTI_ADDR\"]}" + if [ "$REUSE_EXISTING_STATE" = "true" ]; then + echo "✅ Node already initialized with block height $HEIGHT, starting node..." - # Import genesis account for signing - GENESIS_ACCOUNTS_FILE="/tmp/push-accounts/genesis_accounts.json" - if [ -f "$GENESIS_ACCOUNTS_FILE" ]; then - GENESIS_ACC_MNEMONIC=$(jq -r '.[0].mnemonic' "$GENESIS_ACCOUNTS_FILE") - echo "$GENESIS_ACC_MNEMONIC" | $BINARY keys add genesis-acc-1 --recover --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null || true + # Start node in background so we can verify sync and UV registration. + $BINARY start \ + --home "$HOME_DIR" \ + --pruning=nothing \ + --minimum-gas-prices="1000000000${DENOM}" \ + --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \ + --json-rpc.address="0.0.0.0:8545" \ + --json-rpc.ws-address="0.0.0.0:8546" \ + --json-rpc.api=eth,txpool,personal,net,debug,web3 \ + --chain-id="$CHAIN_ID" & + + NODE_PID=$! + + echo "⏳ Waiting for node to sync..." 
+ max_sync_attempts=90 + sync_attempt=0 + while [ $sync_attempt -lt $max_sync_attempts ]; do + if curl -s "http://localhost:${RPC_PORT}/status" > /dev/null 2>&1; then + CATCHING_UP=$(curl -s "http://localhost:${RPC_PORT}/status" | jq -r '.result.sync_info.catching_up' 2>/dev/null || echo "true") + if [ "$CATCHING_UP" = "false" ]; then + echo "✅ Existing node state is synced" + break fi + fi + sleep 2 + sync_attempt=$((sync_attempt + 1)) + done - # Retry loop for registration (handles sequence mismatch race condition) - MAX_RETRIES=5 - RETRY_COUNT=0 - REGISTERED=false + if [ $sync_attempt -eq $max_sync_attempts ]; then + echo "❌ Existing node did not sync in time; forcing restart on clean init" + kill $NODE_PID + exit 1 + fi - while [ "$RETRY_COUNT" -lt "$MAX_RETRIES" ] && [ "$REGISTERED" = "false" ]; do - RETRY_COUNT=$((RETRY_COUNT + 1)) + # Check if UV registration is needed (for validators 2, 3, and 4) + if [ "$VALIDATOR_ID" = "2" ] || [ "$VALIDATOR_ID" = "3" ] || [ "$VALIDATOR_ID" = "4" ]; then + echo "🔍 Checking universal validator registration status..." + + GENESIS_RPC="http://core-validator-1:26657" + + # Pre-computed peer_ids + case $VALIDATOR_ID in + 2) + PEER_ID="12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq" + TSS_PORT=39001 + ;; + 3) + PEER_ID="12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba" + TSS_PORT=39002 + ;; + 4) + PEER_ID="12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw" + TSS_PORT=39003 + ;; + esac - # Stagger validators to reduce race conditions (validator 2 waits 2s, validator 3 waits 4s) - if [ "$RETRY_COUNT" -eq 1 ]; then - STAGGER_DELAY=$((VALIDATOR_ID * 2)) - echo "⏳ Waiting ${STAGGER_DELAY}s to stagger registration..." - sleep $STAGGER_DELAY - fi + # Get valoper address + VALOPER_ADDR=$($BINARY keys show validator-$VALIDATOR_ID --bech val -a --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null || true) - echo "📤 Registering universal-validator-$VALIDATOR_ID (attempt $RETRY_COUNT/$MAX_RETRIES)..." 
- RESULT=$($BINARY tx uvalidator add-universal-validator \ - --core-validator-address "$VALOPER_ADDR" \ - --network "$NETWORK_JSON" \ - --from genesis-acc-1 \ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING" \ - --home "$HOME_DIR" \ - --node="$GENESIS_RPC" \ - --fees 1000000000000000upc \ - --yes \ - --output json 2>&1 || echo "{}") - - if echo "$RESULT" | grep -q '"txhash"'; then - TX_HASH=$(echo "$RESULT" | jq -r '.txhash' 2>/dev/null) - echo "✅ Universal-validator-$VALIDATOR_ID registered! TX: $TX_HASH" - REGISTERED=true - elif echo "$RESULT" | grep -q "sequence mismatch"; then - echo "⚠️ Sequence mismatch, retrying in 3s..." - sleep 3 - elif echo "$RESULT" | grep -q "already registered\|already exists"; then - echo "✅ Universal-validator-$VALIDATOR_ID already registered" - REGISTERED=true - else - echo "⚠️ Registration attempt failed: $(echo "$RESULT" | head -1)" - sleep 2 + if [ -z "$VALOPER_ADDR" ]; then + echo "⚠️ Could not get valoper address for validator-$VALIDATOR_ID; skipping UV registration check" + elif is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID already registered" + else + echo "📝 Universal-validator-$VALIDATOR_ID not registered, registering now..." 
+ + MULTI_ADDR="/dns4/universal-validator-$VALIDATOR_ID/tcp/$TSS_PORT" + NETWORK_JSON="{\"peer_id\": \"$PEER_ID\", \"multi_addrs\": [\"$MULTI_ADDR\"]}" + + # Import genesis account for signing + GENESIS_ACCOUNTS_FILE="/tmp/push-accounts/genesis_accounts.json" + if [ -f "$GENESIS_ACCOUNTS_FILE" ]; then + GENESIS_ACC_MNEMONIC=$(jq -r '.[0].mnemonic' "$GENESIS_ACCOUNTS_FILE") + echo "$GENESIS_ACC_MNEMONIC" | $BINARY keys add genesis-acc-1 --recover --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null || true fi - done - if [ "$REGISTERED" = "false" ]; then - echo "❌ Registration TX failed after $MAX_RETRIES attempts" - fi - else - echo "⚠️ Could not get valoper address" + # Retry loop for registration (handles sequence mismatch race condition) + MAX_RETRIES=5 + RETRY_COUNT=0 + REGISTERED=false + + while [ "$RETRY_COUNT" -lt "$MAX_RETRIES" ] && [ "$REGISTERED" = "false" ]; do + RETRY_COUNT=$((RETRY_COUNT + 1)) + + # Stagger validators to reduce race conditions (validator 2 waits 2s, validator 3 waits 4s) + if [ "$RETRY_COUNT" -eq 1 ]; then + STAGGER_DELAY=$((VALIDATOR_ID * 2)) + echo "⏳ Waiting ${STAGGER_DELAY}s to stagger registration..." + sleep $STAGGER_DELAY + fi + + echo "📤 Registering universal-validator-$VALIDATOR_ID (attempt $RETRY_COUNT/$MAX_RETRIES)..." + RESULT=$($BINARY tx uvalidator add-universal-validator \ + --core-validator-address "$VALOPER_ADDR" \ + --network "$NETWORK_JSON" \ + --from genesis-acc-1 \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING" \ + --home "$HOME_DIR" \ + --node="$GENESIS_RPC" \ + --fees 1000000000000000upc \ + --yes \ + --output json 2>&1 || echo "{}") + + TX_HASH=$(echo "$RESULT" | jq -r '.txhash // ""' 2>/dev/null) + + if [ -n "$TX_HASH" ] && wait_for_deliver_tx_success "$TX_HASH" "$GENESIS_RPC" 30; then + echo "✅ Universal-validator-$VALIDATOR_ID registered! 
TX: $TX_HASH" + REGISTERED=true + elif is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID confirmed registered on-chain" + REGISTERED=true + elif echo "$RESULT" | grep -q "sequence mismatch"; then + echo "⚠️ Sequence mismatch, retrying in 3s..." + sleep 3 + elif echo "$RESULT" | grep -q "already registered\|already exists"; then + echo "✅ Universal-validator-$VALIDATOR_ID already registered" + REGISTERED=true + else + echo "⚠️ Registration attempt failed: $(echo "$RESULT" | head -1)" + sleep 2 + fi + done + + if [ "$REGISTERED" = "false" ]; then + if is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID registered (post-retry check)" + else + echo "⚠️ Registration TX failed after $MAX_RETRIES attempts; continuing so validator stays in consensus" + fi + fi fi fi + + echo "🔄 Node running as validator..." + wait $NODE_PID + exit 0 fi - echo "🔄 Node running as validator..." - wait $NODE_PID - exit 0 + echo "⚠️ Existing state is not reusable; reinitializing validator-$VALIDATOR_ID" fi fi @@ -391,13 +503,13 @@ echo "Validator operator address: $VALOPER_ADDR" # Check if already bonded VALIDATOR_STATUS=$($BINARY query staking validator "$VALOPER_ADDR" \ --node="$GENESIS_RPC" \ - --output json 2>/dev/null | jq -r '.status' || echo "NOT_FOUND") + --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' || echo "NOT_FOUND") if [ "$VALIDATOR_STATUS" = "BOND_STATUS_BONDED" ]; then echo "✅ Validator-$VALIDATOR_ID is already bonded!" VALIDATOR_TOKENS=$($BINARY query staking validator "$VALOPER_ADDR" \ --node="$GENESIS_RPC" \ - --output json 2>/dev/null | jq -r '.tokens' || echo "0") + --output json 2>/dev/null | jq -r '.validator.tokens // "0"' || echo "0") echo " Bonded tokens: $VALIDATOR_TOKENS" else echo "📤 Submitting create-validator transaction..." 
@@ -444,6 +556,14 @@ EOF while [ "$CREATE_RETRY" -lt "$MAX_CREATE_RETRIES" ] && [ "$CREATED" = "false" ]; do CREATE_RETRY=$((CREATE_RETRY + 1)) + + # If the validator is already bonded from a previous attempt, stop retrying. + if is_validator_bonded "$VALOPER_ADDR" "$GENESIS_RPC"; then + echo "✅ Validator-$VALIDATOR_ID is already bonded" + CREATED=true + break + fi + echo "📤 Creating validator (attempt $CREATE_RETRY/$MAX_CREATE_RETRIES)..." CREATE_RESULT=$($BINARY tx staking create-validator "$VALIDATOR_JSON" \ @@ -458,12 +578,25 @@ EOF --yes \ --output json 2>&1) - # Check if it looks like a successful TX (has txhash and no Usage message) + # Treat create-validator as success only after deliver tx succeeds. if echo "$CREATE_RESULT" | grep -q '"txhash"' && ! echo "$CREATE_RESULT" | grep -q "Usage:"; then TX_HASH=$(echo "$CREATE_RESULT" | jq -r '.txhash // ""' 2>/dev/null) - echo "✅ Create-validator TX submitted: $TX_HASH" - CREATED=true + if [ -n "$TX_HASH" ] && wait_for_deliver_tx_success "$TX_HASH" "$GENESIS_RPC" 30; then + echo "✅ Create-validator TX confirmed: $TX_HASH" + CREATED=true + elif is_validator_bonded "$VALOPER_ADDR" "$GENESIS_RPC"; then + echo "✅ Validator-$VALIDATOR_ID became bonded after tx submission" + CREATED=true + else + echo "⚠️ Create-validator TX was not successful in deliver phase" + sleep 3 + fi else + if is_validator_bonded "$VALOPER_ADDR" "$GENESIS_RPC"; then + echo "✅ Validator-$VALIDATOR_ID is bonded despite non-standard CLI output" + CREATED=true + continue + fi echo "⚠️ Create-validator attempt failed, retrying in 3s..." 
echo " Result: $(echo "$CREATE_RESULT" | head -c 200)" sleep 3 @@ -472,6 +605,8 @@ EOF if [ "$CREATED" = "false" ]; then echo "❌ Create-validator failed after $MAX_CREATE_RETRIES attempts" + kill $NODE_PID + exit 1 fi # Re-enable exit-on-error @@ -483,20 +618,26 @@ EOF # Verify bonding VALIDATOR_STATUS=$($BINARY query staking validator "$VALOPER_ADDR" \ --node="$GENESIS_RPC" \ - --output json 2>/dev/null | jq -r '.status' || echo "NOT_FOUND") + --output json 2>/dev/null | jq -r '.validator.status // "NOT_FOUND"' || echo "NOT_FOUND") if [ "$VALIDATOR_STATUS" = "BOND_STATUS_BONDED" ]; then echo "✅ Validator-$VALIDATOR_ID is now bonded!" VALIDATOR_TOKENS=$($BINARY query staking validator "$VALOPER_ADDR" \ --node="$GENESIS_RPC" \ - --output json 2>/dev/null | jq -r '.tokens' || echo "0") + --output json 2>/dev/null | jq -r '.validator.tokens // "0"' || echo "0") echo " Bonded tokens: $VALIDATOR_TOKENS" elif [ "$VALIDATOR_STATUS" = "BOND_STATUS_UNBONDING" ]; then echo "⚠️ Validator-$VALIDATOR_ID is unbonding" + kill $NODE_PID + exit 1 elif [ "$VALIDATOR_STATUS" = "BOND_STATUS_UNBONDED" ]; then echo "⚠️ Validator-$VALIDATOR_ID is unbonded" + kill $NODE_PID + exit 1 else echo "⚠️ Validator status: $VALIDATOR_STATUS" + kill $NODE_PID + exit 1 fi fi @@ -580,13 +721,14 @@ if [ -n "$VALOPER_ADDR" ]; then --yes \ --output json 2>&1 || echo "{}") - # Check TX result - TX_CODE=$(echo "$RESULT" | jq -r '.code // "null"' 2>/dev/null) TX_HASH=$(echo "$RESULT" | jq -r '.txhash // ""' 2>/dev/null) - if [ "$TX_CODE" = "0" ] && [ -n "$TX_HASH" ]; then + if [ -n "$TX_HASH" ] && wait_for_deliver_tx_success "$TX_HASH" "$GENESIS_RPC" 30; then echo "✅ Universal-validator-$VALIDATOR_ID registered! 
TX: $TX_HASH" REGISTERED=true + elif is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID confirmed registered on-chain" + REGISTERED=true elif echo "$RESULT" | grep -q "sequence mismatch"; then echo "⚠️ Sequence mismatch, retrying in 3s..." sleep 3 @@ -595,13 +737,17 @@ if [ -n "$VALOPER_ADDR" ]; then REGISTERED=true else RAW_LOG=$(echo "$RESULT" | jq -r '.raw_log // ""' 2>/dev/null) - echo "⚠️ Registration attempt failed (code: $TX_CODE): ${RAW_LOG:-$(echo "$RESULT" | head -1)}" + echo "⚠️ Registration attempt failed: ${RAW_LOG:-$(echo "$RESULT" | head -1)}" sleep 2 fi done if [ "$REGISTERED" = "false" ]; then - echo "❌ Registration failed after $MAX_RETRIES attempts" + if is_universal_validator_registered "$VALOPER_ADDR" "$PEER_ID" "$GENESIS_RPC"; then + echo "✅ Universal-validator-$VALIDATOR_ID registered (post-retry check)" + else + echo "⚠️ Registration failed after $MAX_RETRIES attempts; continuing so validator stays in consensus" + fi fi fi else diff --git a/local-native/README.md b/local-native/README.md index 50f3cc4d..b911c8f5 100644 --- a/local-native/README.md +++ b/local-native/README.md @@ -49,7 +49,8 @@ cd local-native |---------|-------------| | `./devnet start [n]` | Start n core validators (default: 1) | | `./devnet setup-uvalidators` | Register UVs on-chain + create AuthZ grants | -| `./devnet start-uv [n]` | Start n universal validators (default: 4) | +| `./devnet start-uv [n]` | Start n universal validators (default: 4) and auto-set Sepolia `event_start_from` | +| `./devnet configure` | Manually refresh Sepolia `event_start_from` in existing UV configs | | `./devnet down` | Stop all validators | | `./devnet status` | Show network status | | `./devnet logs [service]` | View logs | diff --git a/local-native/devnet b/local-native/devnet index 081590fa..c4e31d71 100755 --- a/local-native/devnet +++ b/local-native/devnet @@ -92,6 +92,31 @@ get_block_height() { echo "$height" } 
+wait_chain_tx() { + local txhash="$1" node="$2" max="${3:-30}" i=0 + while (( i < max )); do + local code + code=$("$PCHAIND_BIN" query tx "$txhash" --node="$node" --output json 2>/dev/null \ + | jq -r '.code // empty' 2>/dev/null || true) + [[ "$code" == "0" ]] && return 0 + [[ -n "$code" && "$code" != "0" ]] && return 1 + sleep 1; (( i++ )) + done + return 1 +} + +get_current_tss_key_id() { + local genesis_rpc="tcp://127.0.0.1:26657" + "$PCHAIND_BIN" query utss current-key --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.key.key_id // .current_key.key_id // empty' 2>/dev/null || echo "" +} + +get_utss_admin() { + local genesis_rpc="tcp://127.0.0.1:26657" + "$PCHAIND_BIN" query utss params --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.params.admin // ""' 2>/dev/null || echo "" +} + # ═══════════════════════════════════════════════════════════════════════════════ # STATUS DISPLAY # ═══════════════════════════════════════════════════════════════════════════════ @@ -236,6 +261,7 @@ start_validator() { start_universal() { local id=$1 + local sepolia_start_height=${2:-} local pid_file="$DATA_DIR/universal$id.pid" # Check if already running @@ -251,7 +277,7 @@ start_universal() { mkdir -p "$DATA_DIR/universal$id" print_status "Starting universal validator $id..." - UNIVERSAL_ID=$id "$SCRIPT_DIR/scripts/setup-universal.sh" > "$DATA_DIR/universal$id/universal.log" 2>&1 & + UNIVERSAL_ID=$id SEPOLIA_EVENT_START_FROM="$sepolia_start_height" "$SCRIPT_DIR/scripts/setup-universal.sh" > "$DATA_DIR/universal$id/universal.log" 2>&1 & echo $! > "$pid_file" print_success "Universal validator $id started (PID: $(cat $pid_file))" @@ -297,12 +323,23 @@ cmd_up() { cmd_start_uv() { require_binaries print_header "Starting Universal Validators..." + + # Use SEPOLIA_EVENT_START_FROM from environment if already set (e.g. passed by e2e setup + # with a pre-fetched local anvil block number). Otherwise fetch from live Sepolia RPC. 
+ local sepolia_start_height="${SEPOLIA_EVENT_START_FROM:-}" + if [[ -z "$sepolia_start_height" ]]; then + if ! sepolia_start_height=$(bash "$SCRIPT_DIR/scripts/configure-pushuv.sh" --get-height); then + print_error "Failed to fetch latest Sepolia height" + exit 1 + fi + fi + print_status "Using Sepolia event_start_from: $sepolia_start_height" local num_uv=${1:-4} for i in $(seq 1 $num_uv); do if [ $i -le 4 ]; then - start_universal $i + start_universal $i "$sepolia_start_height" sleep 3 fi done @@ -313,6 +350,14 @@ cmd_start_uv() { cmd_status } +# ═══════════════════════════════════════════════════════════════════════════════ +# CONFIGURE COMMANDS +# ═══════════════════════════════════════════════════════════════════════════════ +cmd_configure() { + print_header "Configuring local-native universal relayer configs..." + bash "$SCRIPT_DIR/scripts/configure-pushuv.sh" +} + # ═══════════════════════════════════════════════════════════════════════════════ # STOP/DOWN COMMANDS # ═══════════════════════════════════════════════════════════════════════════════ @@ -415,18 +460,88 @@ cmd_clean() { cmd_tss_keygen() { require_binaries print_header "TSS Key Generation" - print_status "Initiating TSS keygen process..." 
- "$PCHAIND_BIN" tx utss initiate-tss-key-process \ - --process-type tss-process-keygen \ - --from genesis-acc-1 \ - --chain-id "$CHAIN_ID" \ - --keyring-backend test \ - --home "$DATA_DIR/validator1/.pchain" \ - --fees 1000000000000000upc \ - --yes + # Check for existing TSS key — return early if already present + local existing + existing=$(get_current_tss_key_id) + if [[ -n "$existing" ]]; then + print_success "TSS key already present: $existing" + return 0 + fi + + # Validate that at least 2 universal validators are registered + local genesis_rpc="tcp://127.0.0.1:26657" + local uv_count + uv_count=$("$PCHAIND_BIN" query uvalidator all-universal-validators \ + --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.universal_validator | length // 0' 2>/dev/null || echo "0") + if (( uv_count < 2 )); then + print_error "Need at least 2 registered universal validators (found: $uv_count)" + return 1 + fi + + # Find the key whose address matches the UTSS admin + local admin_addr + admin_addr=$(get_utss_admin) + local val1_home="$DATA_DIR/validator1/.pchain" + local signer="" + while IFS= read -r key_name; do + local addr + addr=$("$PCHAIND_BIN" --home="$val1_home" keys show "$key_name" -a \ + --keyring-backend "$KEYRING" 2>/dev/null || true) + if [[ "$addr" == "$admin_addr" ]]; then signer="$key_name"; break; fi + done < <("$PCHAIND_BIN" --home="$val1_home" keys list \ + --keyring-backend "$KEYRING" --output json 2>/dev/null \ + | jq -r '.[] | .name' 2>/dev/null || true) + + if [[ -z "$signer" ]]; then + print_error "No local key matches UTSS admin address: $admin_addr" + return 1 + fi + + local attempt max_attempts=5 + for (( attempt=1; attempt<=max_attempts; attempt++ )); do + print_status "Initiating TSS keygen (attempt $attempt/$max_attempts, signer=$signer)..." 
+ local result tx_hash + result=$("$PCHAIND_BIN" --home="$val1_home" tx utss initiate-tss-key-process \ + --process-type tss-process-keygen \ + --from "$signer" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING" \ + --node="$genesis_rpc" \ + --fees 1000000000000000upc \ + --yes --output json 2>&1 || true) + + local code + code=$(echo "$result" | jq -r '.code // "0"' 2>/dev/null || echo "0") + tx_hash=$(echo "$result" | jq -r '.txhash // ""' 2>/dev/null || true) + + if [[ "$code" != "0" ]]; then + print_warning "Keygen tx code=$code; retrying..." + sleep 5; continue + fi + + if [[ -n "$tx_hash" ]]; then + wait_chain_tx "$tx_hash" "$genesis_rpc" 30 || true + fi - print_success "TSS keygen initiated!" + # Wait up to 300s for the TSS key to materialize on-chain + print_status "Waiting for TSS key to materialize on-chain..." + local waited=0 + while (( waited < 300 )); do + local kid + kid=$(get_current_tss_key_id) + if [[ -n "$kid" ]]; then + print_success "TSS key ready: $kid" + return 0 + fi + sleep 3; (( waited += 3 )) + done + print_warning "TSS key not ready after 300s on attempt $attempt" + done + + print_error "TSS keygen failed after $max_attempts attempts" + return 1 } cmd_tss_refresh() { @@ -487,6 +602,7 @@ cmd_help() { echo -e "${BOLD}${CYAN}UNIVERSAL VALIDATORS${NC}" printf " ${BOLD}%-20s${NC}%s\n" "setup-uvalidators" "Register UVs and create AuthZ grants" printf " ${BOLD}%-20s${NC}%s\n" "start-uv [n]" "Start n universal validators (default: 4)" + printf " ${BOLD}%-20s${NC}%s\n" "configure" "Set Sepolia event_start_from to latest block" echo echo -e "${BOLD}${CYAN}TSS COMMANDS${NC}" printf " ${BOLD}%-20s${NC}%s\n" "tss-keygen" "Initiate TSS key generation" @@ -536,6 +652,7 @@ case "${1:-help}" in # Universal validators setup-uvalidators) "$SCRIPT_DIR/scripts/setup-uvalidators.sh" ;; start-uv) shift; cmd_start_uv "$@" ;; + configure) cmd_configure ;; # Maintenance clean) cmd_clean ;; diff --git a/local-native/scripts/configure-pushuv.sh 
b/local-native/scripts/configure-pushuv.sh new file mode 100644 index 00000000..8eea0098 --- /dev/null +++ b/local-native/scripts/configure-pushuv.sh @@ -0,0 +1,112 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +LOCAL_NATIVE_DIR="$(cd -P "$SCRIPT_DIR/.." && pwd)" +DATA_DIR="$LOCAL_NATIVE_DIR/data" + +require_bin() { + local bin="$1" + if ! command -v "$bin" >/dev/null 2>&1; then + echo "❌ Required binary not found: $bin" + exit 1 + fi +} + +require_bin curl +require_bin jq + +SEPOLIA_CHAIN_ID="eip155:11155111" +DEFAULT_RPC_URL="https://sepolia.drpc.org" + +# Prefer RPC URL from existing config, fallback to default. +detect_rpc_url() { + local cfg="$1" + jq -r --arg chain "$SEPOLIA_CHAIN_ID" '.chain_configs[$chain].rpc_url[0] // empty' "$cfg" 2>/dev/null || true +} + +fetch_sepolia_height() { + local rpc_url="$1" + local response + response=$(curl -sS -X POST "$rpc_url" \ + -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}') + + local hex_height + hex_height=$(echo "$response" | jq -r '.result // empty') + + if [[ -z "$hex_height" || "$hex_height" == "null" || ! "$hex_height" =~ ^0x[0-9a-fA-F]+$ ]]; then + return 1 + fi + + echo "$((16#${hex_height#0x}))" +} + +find_pushuv_configs() { + find "$DATA_DIR" -type f -path '*/.puniversal/config/pushuv_config.json' | sort +} + +print_only_height() { + local rpc_url="$DEFAULT_RPC_URL" + local height="" + + if ! 
height=$(fetch_sepolia_height "$rpc_url"); then + echo "❌ Failed to fetch Sepolia block height from $rpc_url" >&2 + exit 1 + fi + + echo "$height" +} + +main() { + if [ "${1:-}" = "--get-height" ]; then + print_only_height + return 0 + fi + + local configs=() + while IFS= read -r cfg; do + configs+=("$cfg") + done < <(find_pushuv_configs) + + if [ "${#configs[@]}" -eq 0 ]; then + echo "❌ No pushuv_config.json files found under $DATA_DIR" + echo " Start universal validators first with: ./devnet start-uv 4" + exit 1 + fi + + local rpc_url="" + rpc_url=$(detect_rpc_url "${configs[0]}") + if [ -z "$rpc_url" ]; then + rpc_url="$DEFAULT_RPC_URL" + fi + + local height="" + if ! height=$(fetch_sepolia_height "$rpc_url"); then + echo "⚠️ Failed using configured RPC ($rpc_url), retrying default RPC ($DEFAULT_RPC_URL)..." + if ! height=$(fetch_sepolia_height "$DEFAULT_RPC_URL"); then + echo "❌ Failed to fetch Sepolia block height from both RPC endpoints" + exit 1 + fi + rpc_url="$DEFAULT_RPC_URL" + fi + + echo "ℹ️ Sepolia latest block height: $height" + echo "ℹ️ RPC used: $rpc_url" + + local updated=0 + for cfg in "${configs[@]}"; do + local tmp + tmp=$(mktemp) + jq --arg chain "$SEPOLIA_CHAIN_ID" --argjson height "$height" \ + '.chain_configs[$chain].event_start_from = $height' \ + "$cfg" > "$tmp" + mv "$tmp" "$cfg" + updated=$((updated + 1)) + echo "✅ Updated: $cfg" + done + + echo "🎉 Updated event_start_from for $updated config file(s)." 
+} + +main "$@" diff --git a/local-native/scripts/setup-genesis-auto.sh b/local-native/scripts/setup-genesis-auto.sh index 1ab5e6d6..f4ed4c6d 100755 --- a/local-native/scripts/setup-genesis-auto.sh +++ b/local-native/scripts/setup-genesis-auto.sh @@ -109,6 +109,7 @@ update_genesis '.app_state["gov"]["params"]["max_deposit_period"]="300s"' update_genesis '.app_state["gov"]["params"]["voting_period"]="300s"' update_genesis ".app_state[\"evm\"][\"params\"][\"evm_denom\"]=\"$DENOM\"" update_genesis ".app_state[\"evm\"][\"params\"][\"chain_config\"][\"chain_id\"]=$EVM_CHAIN_ID" +update_genesis '.app_state["evm"]["params"]["active_static_precompiles"]=["0x00000000000000000000000000000000000000CB","0x00000000000000000000000000000000000000ca","0x0000000000000000000000000000000000000100","0x0000000000000000000000000000000000000400","0x0000000000000000000000000000000000000800","0x0000000000000000000000000000000000000801","0x0000000000000000000000000000000000000802","0x0000000000000000000000000000000000000803","0x0000000000000000000000000000000000000804","0x0000000000000000000000000000000000000805"]' update_genesis ".app_state[\"staking\"][\"params\"][\"bond_denom\"]=\"$DENOM\"" update_genesis ".app_state[\"mint\"][\"params\"][\"mint_denom\"]=\"$DENOM\"" update_genesis ".app_state[\"uregistry\"][\"params\"][\"admin\"]=\"$GENESIS_ADDR1\"" diff --git a/local-native/scripts/setup-universal.sh b/local-native/scripts/setup-universal.sh index e8023cde..7c557266 100755 --- a/local-native/scripts/setup-universal.sh +++ b/local-native/scripts/setup-universal.sh @@ -86,6 +86,48 @@ jq --argjson port "$QUERY_PORT" '.query_server_port = $port' \ "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +# Optionally override Sepolia event start height (set by ./devnet start-uv) +if [ -n "${SEPOLIA_EVENT_START_FROM:-}" ]; then + jq --argjson height "$SEPOLIA_EVENT_START_FROM" \ + 
'.chain_configs["eip155:11155111"].event_start_from = $height' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +fi + +# Apply chain RPC URL overrides if set (e.g. for LOCAL anvil forks) +apply_rpc_override() { + local chain_id="$1" rpc_url="$2" + [ -n "$rpc_url" ] || return 0 + jq --arg c "$chain_id" --arg u "$rpc_url" \ + '.chain_configs[$c].rpc_urls = [$u]' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +} + +apply_event_start_override() { + local chain_id="$1" height="$2" + [ -n "$height" ] && [[ "$height" =~ ^[0-9]+$ ]] || return 0 + jq --arg c "$chain_id" --argjson h "$height" \ + '.chain_configs[$c].event_start_from = $h' \ + "$HOME_DIR/config/pushuv_config.json" > "$HOME_DIR/config/pushuv_config.json.tmp" && \ + mv "$HOME_DIR/config/pushuv_config.json.tmp" "$HOME_DIR/config/pushuv_config.json" +} + +apply_rpc_override "eip155:11155111" "${SEPOLIA_RPC_URL_OVERRIDE:-}" +apply_rpc_override "eip155:421614" "${ARBITRUM_RPC_URL_OVERRIDE:-}" +apply_rpc_override "eip155:84532" "${BASE_RPC_URL_OVERRIDE:-}" +apply_rpc_override "eip155:97" "${BSC_RPC_URL_OVERRIDE:-}" +apply_rpc_override "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" "${SOLANA_RPC_URL_OVERRIDE:-}" + +apply_event_start_override "eip155:421614" "${ARBITRUM_EVENT_START_FROM:-}" +apply_event_start_override "eip155:84532" "${BASE_EVENT_START_FROM:-}" +apply_event_start_override "eip155:97" "${BSC_EVENT_START_FROM:-}" +apply_event_start_override "solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" "${SOLANA_EVENT_START_FROM:-}" + +# Always start from block 1 for the local devnet chain so UVs see TSS key processes immediately +apply_event_start_override "localchain_9000-1" "1" +apply_event_start_override "push_42101-1" "1" + # Enable TSS TSS_PRIVATE_KEY=$(printf '%02x' 
$UNIVERSAL_ID | head -c 2) TSS_PRIVATE_KEY=$(yes $TSS_PRIVATE_KEY | head -32 | tr -d '\n') diff --git a/local-native/scripts/setup-uvalidators.sh b/local-native/scripts/setup-uvalidators.sh index 6355a256..b80b5eee 100755 --- a/local-native/scripts/setup-uvalidators.sh +++ b/local-native/scripts/setup-uvalidators.sh @@ -29,6 +29,28 @@ get_tss_port() { echo $((39000 + $1 - 1)) } +# Helper: wait for a TX to be included in a block, check its result code +wait_for_tx() { + local txhash="$1" max_attempts="${2:-30}" i=0 + while [ $i -lt $max_attempts ]; do + sleep 2 + local code + code=$(curl -s "http://127.0.0.1:26657/tx?hash=0x${txhash}" 2>/dev/null \ + | jq -r '.result.tx_result.code // empty' 2>/dev/null) + [ "$code" = "0" ] && return 0 + if [ -n "$code" ] && [ "$code" != "null" ]; then + local log + log=$(curl -s "http://127.0.0.1:26657/tx?hash=0x${txhash}" 2>/dev/null \ + | jq -r '.result.tx_result.log // ""' 2>/dev/null) + echo " ❌ TX failed (code=$code): $log" >&2 + return 1 + fi + i=$((i + 1)) + done + echo " ⚠️ TX not confirmed after $((max_attempts * 2))s" >&2 + return 1 +} + echo "🔧 Setting up Universal Validators..." echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" @@ -63,7 +85,9 @@ echo "" echo "📝 Registering Universal Validators..." echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -for i in 1 2 3 4; do +# Only register 2 UVs for local devnet — UV1↔UV3 libp2p noise handshake is incompatible +NUM_UV=${NUM_UV:-2} +for i in $(seq 1 $NUM_UV); do echo "" echo "📋 Registering universal-validator-$i" @@ -103,16 +127,18 @@ for i in 1 2 3 4; do if echo "$RESULT" | grep -q '"txhash"'; then TX_HASH=$(echo "$RESULT" | jq -r '.txhash' 2>/dev/null) - echo " ✅ Registered! TX: $TX_HASH" + if wait_for_tx "$TX_HASH"; then + echo " ✅ Registered! 
TX: $TX_HASH" + else + echo " ⚠️ Registration TX failed on-chain" + fi else echo " ⚠️ Registration may have failed" fi - - sleep 2 # Wait between registrations done # ═══════════════════════════════════════════════════════════════════════════════ -# CREATE AUTHZ GRANTS (batched - 4 grants per transaction) +# CREATE AUTHZ GRANTS (batched - 4 grants per transaction, with confirmation) # ═══════════════════════════════════════════════════════════════════════════════ echo "" @@ -128,12 +154,12 @@ TEMP_DIR=$(mktemp -d) MSG_TYPES=( "/uexecutor.v1.MsgVoteInbound" - "/uexecutor.v1.MsgVoteGasPrice" + "/uexecutor.v1.MsgVoteChainMeta" "/uexecutor.v1.MsgVoteOutbound" "/utss.v1.MsgVoteTssKeyProcess" ) -for i in 1 2 3 4; do +for i in $(seq 1 ${NUM_UV:-2}); do HOTKEY_ADDR=$(jq -r ".[$((i-1))].address" "$HOTKEYS_FILE") VALIDATOR_ADDR=$("$PCHAIND_BIN" keys show "validator-$i" -a --keyring-backend "$KEYRING" --home "$HOME_DIR" 2>/dev/null) @@ -143,16 +169,16 @@ for i in 1 2 3 4; do fi echo "" - echo "📋 validator-$i → hotkey-$i (4 grants in 1 tx)" + echo "📋 validator-$i → hotkey-$i" echo " Granter: $VALIDATOR_ADDR" echo " Grantee: $HOTKEY_ADDR" - # Generate unsigned txs for all 4 message types + BATCH_OK=false + + # Attempt batch: all 4 grants in one TX MESSAGES="[]" for j in "${!MSG_TYPES[@]}"; do MSG_TYPE="${MSG_TYPES[$j]}" - - # Generate unsigned tx UNSIGNED_TX=$("$PCHAIND_BIN" tx authz grant "$HOTKEY_ADDR" generic \ --msg-type="$MSG_TYPE" \ --from "validator-$i" \ @@ -163,16 +189,16 @@ for i in 1 2 3 4; do --gas=50000 \ --gas-prices="1000000000upc" \ --generate-only 2>/dev/null) - - # Extract the message and add to array MSG=$(echo "$UNSIGNED_TX" | jq -c '.body.messages[0]' 2>/dev/null) if [ -n "$MSG" ] && [ "$MSG" != "null" ]; then MESSAGES=$(echo "$MESSAGES" | jq --argjson msg "$MSG" '. 
+ [$msg]') fi done - # Create combined transaction with all 4 messages - COMBINED_TX=$(cat </dev/null || echo "0") + + if [ "${MSG_COUNT}" = "4" ]; then + COMBINED_TX=$(cat < "$TEMP_DIR/combined_tx_$i.json" - - # Sign the combined transaction - SIGNED_TX=$("$PCHAIND_BIN" tx sign "$TEMP_DIR/combined_tx_$i.json" \ - --from "validator-$i" \ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING" \ - --home "$HOME_DIR" \ - --node="$RPC_NODE" \ - --output-document="$TEMP_DIR/signed_tx_$i.json" 2>&1) - - # Broadcast the signed transaction - BROADCAST_RESULT=$("$PCHAIND_BIN" tx broadcast "$TEMP_DIR/signed_tx_$i.json" \ - --node="$RPC_NODE" \ - --broadcast-mode sync 2>&1) - - # Check result - if echo "$BROADCAST_RESULT" | grep -q "txhash"; then - TX_HASH=$(echo "$BROADCAST_RESULT" | grep -o 'txhash: [A-F0-9]*' | cut -d' ' -f2 || echo "$BROADCAST_RESULT" | jq -r '.txhash' 2>/dev/null) - echo " ✅ 4 grants created! TX: ${TX_HASH:0:16}..." - TOTAL_GRANTS=$((TOTAL_GRANTS + 4)) + echo "$COMBINED_TX" > "$TEMP_DIR/combined_tx_$i.json" + + "$PCHAIND_BIN" tx sign "$TEMP_DIR/combined_tx_$i.json" \ + --from "validator-$i" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING" \ + --home "$HOME_DIR" \ + --node="$RPC_NODE" \ + --output-document="$TEMP_DIR/signed_tx_$i.json" 2>/dev/null + + BROADCAST_RESULT=$("$PCHAIND_BIN" tx broadcast "$TEMP_DIR/signed_tx_$i.json" \ + --node="$RPC_NODE" \ + --broadcast-mode sync 2>&1) + + TX_HASH=$(echo "$BROADCAST_RESULT" | jq -r '.txhash // empty' 2>/dev/null) + [ -z "$TX_HASH" ] && TX_HASH=$(echo "$BROADCAST_RESULT" | grep -o 'txhash: [A-F0-9]*' | awk '{print $2}') + + if [ -n "$TX_HASH" ] && wait_for_tx "$TX_HASH" 30; then + echo " ✅ Batch grant confirmed (TX: ${TX_HASH:0:16}...)" + TOTAL_GRANTS=$((TOTAL_GRANTS + 4)) + BATCH_OK=true + else + echo " ⚠️ Batch TX failed or unconfirmed, trying individual grants..." + fi else - echo " ⚠️ Batch may have failed, trying individual grants..." 
- # Fallback to individual grants + echo " ⚠️ Could not build batch TX (got $MSG_COUNT messages), trying individual grants..." + fi + + # Fallback: individual grants with per-TX confirmation + if [ "$BATCH_OK" = "false" ]; then for MSG_TYPE in "${MSG_TYPES[@]}"; do MSG_NAME=$(basename "$MSG_TYPE") GRANT_RESULT=$("$PCHAIND_BIN" tx authz grant "$HOTKEY_ADDR" generic \ @@ -230,19 +260,19 @@ EOF --keyring-backend "$KEYRING" \ --home "$HOME_DIR" \ --node="$RPC_NODE" \ - --gas=auto \ - --gas-adjustment=1.5 \ - --gas-prices="1000000000upc" \ - --yes 2>&1) + --gas 300000 \ + --gas-prices "1000000000upc" \ + --yes --output json 2>&1) - if echo "$GRANT_RESULT" | grep -q "txhash"; then + GRANT_TX_HASH=$(echo "$GRANT_RESULT" | jq -r '.txhash // empty' 2>/dev/null) + if [ -n "$GRANT_TX_HASH" ] && wait_for_tx "$GRANT_TX_HASH" 15; then + echo " ✅ Granted $MSG_NAME" TOTAL_GRANTS=$((TOTAL_GRANTS + 1)) + else + echo " ⚠️ Failed to grant $MSG_NAME" fi - sleep 2 done fi - - sleep 2 # Wait between validators done # Cleanup @@ -257,7 +287,7 @@ echo "📊 Total AuthZ grants created: $TOTAL_GRANTS/16" if [ "$TOTAL_GRANTS" -ge 16 ]; then echo "✅ All grants created successfully!" 
else - echo "⚠️ Some grants may be missing" + echo "⚠️ Some grants may be missing ($TOTAL_GRANTS/16)" fi echo "" diff --git a/local-native/scripts/setup-validator-auto.sh b/local-native/scripts/setup-validator-auto.sh index 8b1eb8f1..8da05780 100755 --- a/local-native/scripts/setup-validator-auto.sh +++ b/local-native/scripts/setup-validator-auto.sh @@ -83,6 +83,9 @@ sed -i.bak "s/address = \"localhost:9090\"/address = \"0.0.0.0:${GRPC_PORT}\"/g" sed -i.bak "s/laddr = \"tcp:\/\/0.0.0.0:26656\"/laddr = \"tcp:\/\/0.0.0.0:${P2P_PORT}\"/g" "$HOME_DIR/config/config.toml" sed -i.bak 's/timeout_commit = "5s"/timeout_commit = "1s"/g' "$HOME_DIR/config/config.toml" +# Pre-create WAL directory to prevent CometBFT panic when transitioning to active validator +mkdir -p "$HOME_DIR/data/cs.wal" + # Start node echo "🚀 Starting validator $VALIDATOR_ID..." "$PCHAIND_BIN" start \ diff --git a/local-setup-e2e/devnet b/local-setup-e2e/devnet new file mode 100755 index 00000000..21e83e29 --- /dev/null +++ b/local-setup-e2e/devnet @@ -0,0 +1,1119 @@ +#!/usr/bin/env bash +# devnet - Push Chain Local Network Manager (local processes, no Docker) +# Drop-in replacement for local-multi-validator/devnet using native binaries. + +set -euo pipefail +IFS=$'\n\t' + +SCRIPT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PUSH_CHAIN_DIR="$(cd -P "$SCRIPT_DIR/.." 
&& pwd)" +DATA_DIR="$SCRIPT_DIR/data" +LOG_DIR="$SCRIPT_DIR/logs" +PID_DIR="$DATA_DIR/pids" +TMP_DIR="/tmp/push-accounts" + +# Prefer freshly built binaries; fall back to PATH +PCHAIND="${PCHAIND:-$PUSH_CHAIN_DIR/build/pchaind}" +PUNIVERSALD="${PUNIVERSALD:-$PUSH_CHAIN_DIR/build/puniversald}" +[[ -x "$PCHAIND" ]] || PCHAIND=pchaind +[[ -x "$PUNIVERSALD" ]] || PUNIVERSALD=puniversald + +CHAIN_ID="${CHAIN_ID:-localchain_9000-1}" +EVM_CHAIN_ID="9000" +DENOM="upc" +KEYRING="test" +KEYALGO="eth_secp256k1" +BLOCK_TIME="1s" + +# Chain IDs for UV config +SEPOLIA_CHAIN_ID="eip155:11155111" +ARBITRUM_CHAIN_ID="eip155:421614" +BASE_CHAIN_ID="eip155:84532" +BSC_CHAIN_ID="eip155:97" +SOLANA_CHAIN_ID="solana:EtWTRABZaYq6iMfeYKouRu166VU2xqa1" +LOCALCHAIN_CHAIN_ID="localchain_9000-1" + +# Per-validator ports (matching docker-compose.yml host port mapping) +val_rpc() { case $1 in 1) echo 26657;; 2) echo 26658;; 3) echo 26659;; 4) echo 26660;; esac; } +val_rest() { case $1 in 1) echo 1317;; 2) echo 1318;; 3) echo 1319;; 4) echo 1320;; esac; } +val_grpc() { case $1 in 1) echo 9090;; 2) echo 9093;; 3) echo 9095;; 4) echo 9097;; esac; } +val_grpcweb() { case $1 in 1) echo 9091;; 2) echo 9094;; 3) echo 9096;; 4) echo 9098;; esac; } +val_p2p() { case $1 in 1) echo 26656;; 2) echo 26666;; 3) echo 26676;; 4) echo 26686;; esac; } +val_evm() { case $1 in 1) echo 8545;; 2) echo 8547;; 3) echo 8549;; 4) echo 8551;; esac; } +val_evmws() { case $1 in 1) echo 8546;; 2) echo 8548;; 3) echo 8550;; 4) echo 8552;; esac; } +val_pprof() { echo $((6060 + $1)); } + +uv_query() { case $1 in 1) echo 8080;; 2) echo 8081;; 3) echo 8082;; 4) echo 8083;; esac; } +uv_tss() { echo $((39000 + $1 - 1)); } +uv_grpc() { echo "localhost:$(val_grpc $1)"; } + +# TSS peer IDs (deterministic from private keys 01..01 02..02 etc.) 
# Deterministic libp2p peer IDs for the four universal validators (1-4).
uv_peer_id() {
  case $1 in
    1) echo "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5";;
    2) echo "12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq";;
    3) echo "12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba";;
    4) echo "12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw";;
  esac
}

# TSS private keys
# Deterministic 32-byte hex key: the validator id byte repeated 32 times.
uv_tss_key() {
  local byte; byte=$(printf '%02x' "$1")
  local result=""
  for _ in {1..32}; do result+="$byte"; done
  echo "$result"
}

GREEN='\033[0;32m'; RED='\033[0;31m'; YELLOW='\033[0;33m'
CYAN='\033[0;36m'; BLUE='\033[1;94m'; NC='\033[0m'; BOLD='\033[1m'

log()    { printf "%b\n" "${CYAN}==>${NC} $*"; }
ok()     { printf "%b\n" "${GREEN}✓${NC} $*"; }
warn()   { printf "%b\n" "${YELLOW}!${NC} $*"; }
err()    { printf "%b\n" "${RED}✗${NC} $*"; }
header() { printf "\n%b\n" "${BOLD}${BLUE}═══ $* ═══${NC}"; }

# Abort early if any required executable is missing from PATH.
require_cmd() { for c in "$@"; do command -v "$c" >/dev/null 2>&1 || { err "Required command not found: $c"; exit 1; }; done; }

# ─── PID management ───────────────────────────────────────────────────────────

write_pid() { local name="$1" pid="$2"; mkdir -p "$PID_DIR"; echo "$pid" > "$PID_DIR/$name.pid"; }
read_pid()  { local f="$PID_DIR/$1.pid"; [[ -f "$f" ]] && cat "$f" || echo ""; }

is_alive() {
  local pid="${1:-}"
  [[ -n "$pid" ]] && kill -0 "$pid" 2>/dev/null
}

# ─── Health checks ────────────────────────────────────────────────────────────

# Poll a CometBFT RPC /status endpoint until it answers (1s interval).
wait_rpc() {
  local port="$1" label="$2" max="${3:-120}" i=0
  while (( i < max )); do
    if curl -sf "http://127.0.0.1:$port/status" >/dev/null 2>&1; then return 0; fi
    # i=$((i+1)) rather than (( i++ )): the post-increment returns status 1
    # when i==0 and would abort the script under `set -e`.
    sleep 1; i=$(( i + 1 ))
  done
  err "$label RPC not ready after ${max}s"; return 1
}

# Poll until the node reports a non-zero latest_block_height.
wait_block() {
  local port="$1" label="$2" max="${3:-120}" i=0
  while (( i < max )); do
    local h
    h=$(curl -sf "http://127.0.0.1:$port/status" 2>/dev/null \
      | jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null || echo "0")
    if [[ "$h" != "0" && "$h" != "null" && -n "$h" ]]; then return 0; fi
    sleep 2; (( i += 2 ))
  done
  err "$label not producing blocks after ${max}s"; return 1
}

# Poll a universal validator's /health endpoint until it answers.
wait_uv_health() {
  local port="$1" label="$2" max="${3:-180}" i=0
  while (( i < max )); do
    if curl -sf "http://127.0.0.1:$port/health" >/dev/null 2>&1; then return 0; fi
    sleep 2; (( i += 2 ))
  done
  err "$label not healthy after ${max}s"; return 1
}

# Poll the genesis node until the given valoper reaches BOND_STATUS_BONDED.
wait_validator_bonded() {
  local valoper="$1" max="${2:-120}" i=0 genesis_rpc="http://127.0.0.1:$(val_rpc 1)"
  while (( i < max )); do
    local status
    status=$("$PCHAIND" query staking validator "$valoper" --node="$genesis_rpc" --output json 2>/dev/null \
      | jq -r '.validator.status // "NOT_FOUND"' 2>/dev/null || echo "NOT_FOUND")
    [[ "$status" == "BOND_STATUS_BONDED" ]] && return 0
    sleep 3; (( i += 3 ))
  done
  return 1
}

# Wait until a broadcast tx is committed; 0 only when its code is 0.
wait_chain_tx() {
  local txhash="$1" node="$2" max="${3:-30}" i=0
  while (( i < max )); do
    local code
    code=$("$PCHAIND" query tx "$txhash" --node="$node" --output json 2>/dev/null \
      | jq -r '.code // empty' 2>/dev/null || true)
    [[ "$code" == "0" ]] && return 0
    [[ -n "$code" && "$code" != "0" ]] && return 1
    sleep 1; i=$(( i + 1 ))   # guard against set -e (see wait_rpc)
  done
  return 1
}

# ─── Account generation ───────────────────────────────────────────────────────

# Generate genesis/validator/hotkey accounts into $TMP_DIR JSON files.
# Idempotent: skipped when all three files already exist.
generate_accounts() {
  if [[ -f "$TMP_DIR/genesis_accounts.json" && -f "$TMP_DIR/validators.json" && -f "$TMP_DIR/hotkeys.json" ]]; then
    ok "Account files already exist in $TMP_DIR — skipping generation"
    return 0
  fi

  require_cmd jq "$PCHAIND"
  mkdir -p "$TMP_DIR"

  log "Generating genesis, validator, and hotkey accounts..."

  # genesis-acc-1: hardcoded admin mnemonic
  local ADMIN_MNEMONIC="surface task term spring horse impact tortoise often session cable off catch harvest rain able jealous coral cargo portion surge spring genre mix avoid"
  local GENESIS_ACCOUNTS_FILE="$TMP_DIR/genesis_accounts.json"
  echo "[]" > "$GENESIS_ACCOUNTS_FILE"

  # Use a temp home to avoid conflicts with any pre-existing global keyring entries
  # NOTE: use a global-scoped variable so traps work correctly under set -u
  _KEYGEN_HOME=$(mktemp -d)

  for (( i=1; i<=5; i++ )); do
    local key="genesis-acc-$i" output mnemonic address
    if (( i == 1 )); then
      output=$(echo "$ADMIN_MNEMONIC" | "$PCHAIND" keys add "$key" \
        --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING --algo=$KEYALGO --recover --output=json 2>&1)
      mnemonic="$ADMIN_MNEMONIC"
    else
      output=$("$PCHAIND" keys add "$key" \
        --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING --algo=$KEYALGO --output=json 2>&1)
      mnemonic=$(echo "$output" | jq -r '.mnemonic // empty' 2>/dev/null)
      if [[ -z "$mnemonic" ]]; then
        # Fallback: some pchaind builds print the mnemonic as plain text
        # after an "Important" banner instead of in the JSON payload.
        mnemonic=$(echo "$output" | grep -A1 "Important" | tail -1 | tr -d '\n' || true)
      fi
    fi
    address=$("$PCHAIND" keys show "$key" -a --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING 2>/dev/null)
    jq --arg n "$key" --arg a "$address" --arg m "$mnemonic" \
      '. += [{name:$n,address:$a,mnemonic:$m}]' "$GENESIS_ACCOUNTS_FILE" > "$TMP_DIR/tmp.json" \
      && mv "$TMP_DIR/tmp.json" "$GENESIS_ACCOUNTS_FILE"
    ok "genesis-acc-$i: $address"
  done

  local VALIDATORS_FILE="$TMP_DIR/validators.json"
  echo "[]" > "$VALIDATORS_FILE"
  for (( i=1; i<=4; i++ )); do
    local key="validator-$i" output mnemonic address valoper
    output=$("$PCHAIND" keys add "$key" \
      --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING --algo=$KEYALGO --output=json 2>&1)
    mnemonic=$(echo "$output" | jq -r '.mnemonic // empty' 2>/dev/null)
    if [[ -z "$mnemonic" ]]; then
      mnemonic=$(echo "$output" | grep -A1 "Important" | tail -1 | tr -d '\n' || true)
    fi
    address=$("$PCHAIND" keys show "$key" -a --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING 2>/dev/null)
    valoper=$("$PCHAIND" keys show "$key" --bech val -a --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING 2>/dev/null)
    jq --argjson id "$i" --arg n "$key" --arg a "$address" --arg v "$valoper" --arg m "$mnemonic" \
      '. += [{id:$id,name:$n,address:$a,valoper_address:$v,mnemonic:$m}]' \
      "$VALIDATORS_FILE" > "$TMP_DIR/tmp.json" && mv "$TMP_DIR/tmp.json" "$VALIDATORS_FILE"
    ok "validator-$i: $address (valoper: $valoper)"
  done

  local HOTKEYS_FILE="$TMP_DIR/hotkeys.json"
  echo "[]" > "$HOTKEYS_FILE"
  for (( i=1; i<=4; i++ )); do
    local key="hotkey-$i" output mnemonic address
    output=$("$PCHAIND" keys add "$key" \
      --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING --algo=$KEYALGO --output=json 2>&1)
    mnemonic=$(echo "$output" | jq -r '.mnemonic // empty' 2>/dev/null)
    if [[ -z "$mnemonic" ]]; then
      mnemonic=$(echo "$output" | grep -A1 "Important" | tail -1 | tr -d '\n' || true)
    fi
    address=$("$PCHAIND" keys show "$key" -a --home="$_KEYGEN_HOME" --keyring-backend=$KEYRING 2>/dev/null)
    jq --argjson id "$i" --arg n "$key" --arg a "$address" --arg m "$mnemonic" \
      '. += [{id:$id,name:$n,address:$a,mnemonic:$m}]' \
      "$HOTKEYS_FILE" > "$TMP_DIR/tmp.json" && mv "$TMP_DIR/tmp.json" "$HOTKEYS_FILE"
    ok "hotkey-$i: $address"
  done

  ok "Account generation complete"
  rm -rf "$_KEYGEN_HOME" 2>/dev/null || true
}

# ─── Core validator setup ─────────────────────────────────────────────────────

# Initialize validator-1, build + patch genesis, start the node, register
# universal-validator-1 and create AuthZ grants for all four validators.
setup_genesis_validator() {
  local HOME_DIR="$DATA_DIR/validator-1"
  local RPC_PORT=$(val_rpc 1) REST_PORT=$(val_rest 1) GRPC_PORT=$(val_grpc 1)
  local GRPC_WEB_PORT=$(val_grpcweb 1) P2P_PORT=$(val_p2p 1)
  local EVM_PORT=$(val_evm 1) EVM_WS_PORT=$(val_evmws 1)
  local PPROF_PORT=$(val_pprof 1)
  local LOG_FILE="$LOG_DIR/validator-1.log"

  local TWO_BILLION="2000000000000000000000000000"
  local ONE_MILLION="1000000000000000000000000"
  local VALIDATOR_STAKE="100000000000000000000000"
  local HOTKEY_FUNDING="10000000000000000000000"

  mkdir -p "$HOME_DIR" "$LOG_DIR"
  rm -rf "$HOME_DIR"/* "$HOME_DIR"/.[!.]* 2>/dev/null || true

  log "Initializing genesis validator (validator-1)..."

  # Load mnemonics
  local G1_MN G2_MN G3_MN G4_MN G5_MN V1_MN V2_MN V3_MN V4_MN
  G1_MN=$(jq -r '.[0].mnemonic' "$TMP_DIR/genesis_accounts.json")
  G2_MN=$(jq -r '.[1].mnemonic' "$TMP_DIR/genesis_accounts.json")
  G3_MN=$(jq -r '.[2].mnemonic' "$TMP_DIR/genesis_accounts.json")
  G4_MN=$(jq -r '.[3].mnemonic' "$TMP_DIR/genesis_accounts.json")
  G5_MN=$(jq -r '.[4].mnemonic' "$TMP_DIR/genesis_accounts.json")
  V1_MN=$(jq -r '.[] | select(.id==1) | .mnemonic' "$TMP_DIR/validators.json")
  V2_MN=$(jq -r '.[] | select(.id==2) | .mnemonic' "$TMP_DIR/validators.json")
  V3_MN=$(jq -r '.[] | select(.id==3) | .mnemonic' "$TMP_DIR/validators.json")
  V4_MN=$(jq -r '.[] | select(.id==4) | .mnemonic' "$TMP_DIR/validators.json")

  "$PCHAIND" --home="$HOME_DIR" init genesis-validator --chain-id "$CHAIN_ID" --default-denom "$DENOM"

  # Add keys
  echo "$G1_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-1 --keyring-backend=$KEYRING --algo=$KEYALGO --recover
  echo "$G2_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-2 --keyring-backend=$KEYRING --algo=$KEYALGO --recover
  echo "$G3_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-3 --keyring-backend=$KEYRING --algo=$KEYALGO --recover
  echo "$G4_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-4 --keyring-backend=$KEYRING --algo=$KEYALGO --recover
  echo "$G5_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-5 --keyring-backend=$KEYRING --algo=$KEYALGO --recover
  echo "$V1_MN" | "$PCHAIND" --home="$HOME_DIR" keys add validator-1 --keyring-backend=$KEYRING --algo=$KEYALGO --recover
  echo "$V2_MN" | "$PCHAIND" --home="$HOME_DIR" keys add validator-2 --keyring-backend=$KEYRING --algo=$KEYALGO --recover
  echo "$V3_MN" | "$PCHAIND" --home="$HOME_DIR" keys add validator-3 --keyring-backend=$KEYRING --algo=$KEYALGO --recover
  echo "$V4_MN" | "$PCHAIND" --home="$HOME_DIR" keys add validator-4 --keyring-backend=$KEYRING --algo=$KEYALGO --recover

  local GA1 GA2 GA3 GA4 GA5 VA1 VA2 VA3 VA4
  GA1=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-1 -a --keyring-backend=$KEYRING)
  GA2=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-2 -a --keyring-backend=$KEYRING)
  GA3=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-3 -a --keyring-backend=$KEYRING)
  GA4=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-4 -a --keyring-backend=$KEYRING)
  GA5=$("$PCHAIND" --home="$HOME_DIR" keys show genesis-acc-5 -a --keyring-backend=$KEYRING)
  VA1=$("$PCHAIND" --home="$HOME_DIR" keys show validator-1 -a --keyring-backend=$KEYRING)
  VA2=$("$PCHAIND" --home="$HOME_DIR" keys show validator-2 -a --keyring-backend=$KEYRING)
  VA3=$("$PCHAIND" --home="$HOME_DIR" keys show validator-3 -a --keyring-backend=$KEYRING)
  VA4=$("$PCHAIND" --home="$HOME_DIR" keys show validator-4 -a --keyring-backend=$KEYRING)

  local HK1 HK2 HK3 HK4
  HK1=$(jq -r '.[0].address' "$TMP_DIR/hotkeys.json")
  HK2=$(jq -r '.[1].address' "$TMP_DIR/hotkeys.json")
  HK3=$(jq -r '.[2].address' "$TMP_DIR/hotkeys.json")
  HK4=$(jq -r '.[3].address' "$TMP_DIR/hotkeys.json")

  # Fund genesis
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA1" "${TWO_BILLION}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA2" "${TWO_BILLION}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA3" "${TWO_BILLION}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA4" "${TWO_BILLION}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$GA5" "${TWO_BILLION}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$VA1" "${ONE_MILLION}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$VA2" "${ONE_MILLION}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$VA3" "${ONE_MILLION}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$VA4" "${ONE_MILLION}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$HK1" "${HOTKEY_FUNDING}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$HK2" "${HOTKEY_FUNDING}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$HK3" "${HOTKEY_FUNDING}${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis add-genesis-account "$HK4" "${HOTKEY_FUNDING}${DENOM}"

  "$PCHAIND" --home="$HOME_DIR" genesis gentx validator-1 "${VALIDATOR_STAKE}${DENOM}" \
    --keyring-backend=$KEYRING --chain-id "$CHAIN_ID" --gas-prices "1000000000${DENOM}"
  "$PCHAIND" --home="$HOME_DIR" genesis collect-gentxs
  "$PCHAIND" --home="$HOME_DIR" genesis validate-genesis

  # Genesis parameters
  # Apply a jq filter to genesis.json in place (via temp file).
  upd() { jq "$1" "$HOME_DIR/config/genesis.json" > "$HOME_DIR/config/tmp_genesis.json" && mv "$HOME_DIR/config/tmp_genesis.json" "$HOME_DIR/config/genesis.json"; }
  upd '.consensus["params"]["block"]["time_iota_ms"]="1000"'
  upd ".app_state[\"gov\"][\"params\"][\"min_deposit\"]=[{\"denom\":\"$DENOM\",\"amount\":\"1000000\"}]"
  upd '.app_state["gov"]["params"]["max_deposit_period"]="300s"'
  upd '.app_state["gov"]["params"]["voting_period"]="300s"'
  upd '.app_state["gov"]["params"]["expedited_voting_period"]="150s"'
  upd ".app_state[\"evm\"][\"params\"][\"evm_denom\"]=\"$DENOM\""
  upd '.app_state["evm"]["params"]["active_static_precompiles"]=["0x00000000000000000000000000000000000000CB","0x00000000000000000000000000000000000000ca","0x0000000000000000000000000000000000000100","0x0000000000000000000000000000000000000400","0x0000000000000000000000000000000000000800","0x0000000000000000000000000000000000000801","0x0000000000000000000000000000000000000802","0x0000000000000000000000000000000000000803","0x0000000000000000000000000000000000000804","0x0000000000000000000000000000000000000805"]'
  upd ".app_state[\"evm\"][\"params\"][\"chain_config\"][\"chain_id\"]=$EVM_CHAIN_ID"
  upd ".app_state[\"evm\"][\"params\"][\"chain_config\"][\"denom\"]=\"$DENOM\""
  upd '.app_state["evm"]["params"]["chain_config"]["decimals"]="18"'
  upd '.app_state["erc20"]["params"]["native_precompiles"]=["0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE"]'
  upd ".app_state[\"erc20\"][\"token_pairs\"]=[{contract_owner:1,erc20_address:\"0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE\",denom:\"$DENOM\",enabled:true}]"
  upd '.app_state["feemarket"]["params"]["no_base_fee"]=false'
  upd '.app_state["feemarket"]["params"]["base_fee"]="1000000.000000000000000000"'
  upd '.app_state["feemarket"]["params"]["min_gas_price"]="1000000.000000000000000000"'
  upd ".app_state[\"staking\"][\"params\"][\"bond_denom\"]=\"$DENOM\""
  upd '.app_state["staking"]["params"]["min_commission_rate"]="0.050000000000000000"'
  upd ".app_state[\"mint\"][\"params\"][\"mint_denom\"]=\"$DENOM\""
  upd ".app_state[\"crisis\"][\"constant_fee\"]={\"denom\":\"$DENOM\",\"amount\":\"1000\"}"
  upd '.app_state["distribution"]["params"]["community_tax"]="0.000000000000000000"'
  upd '.consensus["params"]["abci"]["vote_extensions_enable_height"]="2"'
  upd '.app_state["tokenfactory"]["params"]["denom_creation_fee"]=[]'
  upd '.app_state["tokenfactory"]["params"]["denom_creation_gas_consume"]=100000'
  upd ".app_state[\"uregistry\"][\"params\"][\"admin\"]=\"$GA1\""
  upd ".app_state[\"utss\"][\"params\"][\"admin\"]=\"$GA1\""

  # Config patching (BSD sed -i '' — this script targets macOS)
  local cfg="$HOME_DIR/config/config.toml" app="$HOME_DIR/config/app.toml"
  sed -i '' -e "s|laddr = \"tcp://127.0.0.1:26657\"|laddr = \"tcp://0.0.0.0:${RPC_PORT}\"|g" "$cfg"
  sed -i '' -e 's|cors_allowed_origins = \[\]|cors_allowed_origins = ["*"]|g' "$cfg"
  sed -i '' -e "s|laddr = \"tcp://0.0.0.0:26656\"|laddr = \"tcp://0.0.0.0:${P2P_PORT}\"|g" "$cfg"
  sed -i '' -e "s|timeout_commit = \"5s\"|timeout_commit = \"${BLOCK_TIME}\"|g" "$cfg"
  sed -i '' -e "s|pprof_laddr = \"localhost:6060\"|pprof_laddr = \"localhost:${PPROF_PORT}\"|g" "$cfg"
  sed -i '' -e 's|allow_duplicate_ip = false|allow_duplicate_ip = true|g' "$cfg"
  sed -i '' -e 's|addr_book_strict = true|addr_book_strict = false|g' "$cfg"
  sed -i '' -e "s|address = \"tcp://localhost:1317\"|address = \"tcp://0.0.0.0:${REST_PORT}\"|g" "$app"
  sed -i '' -e 's|enable = false|enable = true|g' "$app"
  sed -i '' -e 's|enabled-unsafe-cors = false|enabled-unsafe-cors = true|g' "$app"
  sed -i '' -e "s|address = \"localhost:9090\"|address = \"0.0.0.0:${GRPC_PORT}\"|g" "$app"
  sed -i '' -e "s|address = \"localhost:9091\"|address = \"0.0.0.0:${GRPC_WEB_PORT}\"|g" "$app"

  # Copy genesis for other validators
  cp "$HOME_DIR/config/genesis.json" "$TMP_DIR/genesis.json"
  ok "Genesis prepared and shared"

  # Start validator-1
  log "Starting validator-1 (genesis) on RPC=$RPC_PORT EVM=$EVM_PORT..."
  "$PCHAIND" start \
    --home="$HOME_DIR" \
    --pruning=nothing \
    --minimum-gas-prices="1000000000${DENOM}" \
    --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \
    --json-rpc.address="0.0.0.0:${EVM_PORT}" \
    --json-rpc.ws-address="0.0.0.0:${EVM_WS_PORT}" \
    --json-rpc.api=eth,txpool,personal,net,debug,web3 \
    --chain-id="$CHAIN_ID" \
    >> "$LOG_FILE" 2>&1 &

  local pid=$!
  disown $pid 2>/dev/null || true
  write_pid "validator-1" "$pid"
  ok "validator-1 started (pid=$pid)"
  log "Waiting for validator-1 to produce blocks..."
  wait_block "$RPC_PORT" "validator-1" 120

  # Register UV1 + setup all AuthZ grants
  local genesis_rpc="tcp://127.0.0.1:${RPC_PORT}"
  sleep 5

  # Register UV1 (peer_id matches ip4 multi-addr locally)
  local PEER_UV1 MULTI_UV1 VALOPER_V1
  PEER_UV1=$(uv_peer_id 1)
  MULTI_UV1="/ip4/127.0.0.1/tcp/$(uv_tss 1)"
  VALOPER_V1=$("$PCHAIND" --home="$HOME_DIR" keys show validator-1 --bech val -a --keyring-backend=$KEYRING)

  log "Registering universal-validator-1..."
  local reg_result
  reg_result=$("$PCHAIND" --home="$HOME_DIR" tx uvalidator add-universal-validator \
    --core-validator-address "$VALOPER_V1" \
    --network "{\"peer_id\": \"$PEER_UV1\", \"multi_addrs\": [\"$MULTI_UV1\"]}" \
    --from genesis-acc-1 --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \
    --node="$genesis_rpc" --fees 1000000000000000upc --yes --output json 2>&1 || true)
  local reg_tx; reg_tx=$(echo "$reg_result" | jq -r '.txhash // ""' 2>/dev/null || true)
  [[ -n "$reg_tx" ]] && wait_chain_tx "$reg_tx" "$genesis_rpc" 30 || true
  ok "UV1 registered"

  # Setup AuthZ grants for all 4 validators
  sleep 5
  log "Creating AuthZ grants for all 4 validators..."
  set +e
  local total_grants=0
  for v in 1 2 3 4; do
    local hk_addr
    hk_addr=$(jq -r ".[$(( v - 1 ))].address" "$TMP_DIR/hotkeys.json")
    for msg in /uexecutor.v1.MsgVoteInbound /uexecutor.v1.MsgVoteChainMeta /uexecutor.v1.MsgVoteOutbound /utss.v1.MsgVoteTssKeyProcess; do
      "$PCHAIND" --home="$HOME_DIR" tx authz grant "$hk_addr" generic \
        --msg-type="$msg" \
        --from "validator-$v" \
        --chain-id "$CHAIN_ID" \
        --keyring-backend=$KEYRING \
        --node="$genesis_rpc" \
        --gas=auto --gas-adjustment=1.5 --gas-prices="1000000000${DENOM}" \
        --yes --broadcast-mode sync --output json >/dev/null 2>&1
      # NOTE: counts attempted grants; tx failures are intentionally ignored here.
      total_grants=$(( total_grants + 1 ))
      sleep 1
    done
  done
  set -e
  ok "Created $total_grants AuthZ grants"

  # Fund test address
  "$PCHAIND" --home="$HOME_DIR" tx bank send genesis-acc-1 \
    push1w7xnyp3hf79vyetj3cvw8l32u6unun8yr6zn60 \
    "1000000000000000000${DENOM}" \
    --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \
    --node="$genesis_rpc" --gas-prices="100000000000${DENOM}" --yes >/dev/null 2>&1 || true
}

# Initialize validator N (2-4), sync it from the genesis node, stake it,
# and register the matching universal validator.
setup_regular_validator() {
  local ID="$1"
  local HOME_DIR="$DATA_DIR/validator-$ID"
  local RPC_PORT=$(val_rpc "$ID") REST_PORT=$(val_rest "$ID") GRPC_PORT=$(val_grpc "$ID")
  local GRPC_WEB_PORT=$(val_grpcweb "$ID") P2P_PORT=$(val_p2p "$ID")
  local EVM_PORT=$(val_evm "$ID") EVM_WS_PORT=$(val_evmws "$ID")
  local PPROF_PORT=$(val_pprof "$ID")
  local LOG_FILE="$LOG_DIR/validator-$ID.log"
  local GENESIS_RPC="http://127.0.0.1:$(val_rpc 1)"

  mkdir -p "$HOME_DIR" "$LOG_DIR"
  rm -rf "$HOME_DIR"/* "$HOME_DIR"/.[!.]* 2>/dev/null || true

  log "Initializing validator-$ID..."

  local VALIDATOR_STAKE="100000000000000000000000"
  local VAL_MN
  VAL_MN=$(jq -r ".[] | select(.id==$ID) | .mnemonic" "$TMP_DIR/validators.json")

  "$PCHAIND" --home="$HOME_DIR" init "validator-$ID" --chain-id "$CHAIN_ID" --default-denom "$DENOM"

  # Wait for genesis file
  local i=0
  while [[ ! -f "$TMP_DIR/genesis.json" ]] && (( i < 60 )); do sleep 2; i=$(( i + 1 )); done
  [[ -f "$TMP_DIR/genesis.json" ]] || { err "Genesis file not found"; return 1; }
  cp "$TMP_DIR/genesis.json" "$HOME_DIR/config/genesis.json"

  echo "$VAL_MN" | "$PCHAIND" --home="$HOME_DIR" keys add "validator-$ID" \
    --keyring-backend=$KEYRING --algo=$KEYALGO --recover

  # Get genesis node_id and set peer
  local genesis_node_id
  genesis_node_id=$(curl -sf "$GENESIS_RPC/status" | jq -r '.result.node_info.id')
  local persistent_peer="$genesis_node_id@127.0.0.1:$(val_p2p 1)"

  # Config patching
  local cfg="$HOME_DIR/config/config.toml" app="$HOME_DIR/config/app.toml"
  sed -i '' -e "s|laddr = \"tcp://127.0.0.1:26657\"|laddr = \"tcp://0.0.0.0:${RPC_PORT}\"|g" "$cfg"
  sed -i '' -e 's|cors_allowed_origins = \[\]|cors_allowed_origins = ["*"]|g' "$cfg"
  sed -i '' -e "s|laddr = \"tcp://0.0.0.0:26656\"|laddr = \"tcp://0.0.0.0:${P2P_PORT}\"|g" "$cfg"
  sed -i '' -e "s|^persistent_peers *=.*|persistent_peers = \"$persistent_peer\"|" "$cfg"
  sed -i '' -e 's|timeout_commit = "5s"|timeout_commit = "1s"|g' "$cfg"
  sed -i '' -e "s|pprof_laddr = \"localhost:6060\"|pprof_laddr = \"localhost:${PPROF_PORT}\"|g" "$cfg"
  sed -i '' -e 's|allow_duplicate_ip = false|allow_duplicate_ip = true|g' "$cfg"
  sed -i '' -e 's|addr_book_strict = true|addr_book_strict = false|g' "$cfg"
  sed -i '' -e "s|address = \"tcp://localhost:1317\"|address = \"tcp://0.0.0.0:${REST_PORT}\"|g" "$app"
  sed -i '' -e 's|enable = false|enable = true|g' "$app"
  sed -i '' -e 's|enabled-unsafe-cors = false|enabled-unsafe-cors = true|g' "$app"
  sed -i '' -e "s|address = \"localhost:9090\"|address = \"0.0.0.0:${GRPC_PORT}\"|g" "$app"
  sed -i '' -e "s|address = \"localhost:9091\"|address = \"0.0.0.0:${GRPC_WEB_PORT}\"|g" "$app"

  # Start
  log "Starting validator-$ID on RPC=$RPC_PORT EVM=$EVM_PORT..."
  "$PCHAIND" start \
    --home="$HOME_DIR" \
    --pruning=nothing \
    --minimum-gas-prices="1000000000${DENOM}" \
    --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \
    --json-rpc.address="0.0.0.0:${EVM_PORT}" \
    --json-rpc.ws-address="0.0.0.0:${EVM_WS_PORT}" \
    --json-rpc.api=eth,txpool,personal,net,debug,web3 \
    --chain-id="$CHAIN_ID" \
    >> "$LOG_FILE" 2>&1 &

  local pid=$!
  disown $pid 2>/dev/null || true
  write_pid "validator-$ID" "$pid"
  ok "validator-$ID started (pid=$pid)"

  # Wait for sync
  wait_rpc "$RPC_PORT" "validator-$ID" 180 || { warn "validator-$ID RPC slow — continuing anyway"; }
  log "Waiting for validator-$ID to sync..."
  local j=0
  while (( j < 120 )); do
    local catching_up
    catching_up=$(curl -sf "http://127.0.0.1:$RPC_PORT/status" | jq -r '.result.sync_info.catching_up' 2>/dev/null || echo "true")
    [[ "$catching_up" == "false" ]] && break
    sleep 5; (( j += 5 ))
  done
  ok "validator-$ID synced"

  # Create validator stake
  sleep 10
  local VALOPER
  VALOPER=$("$PCHAIND" --home="$HOME_DIR" keys show "validator-$ID" --bech val -a --keyring-backend=$KEYRING)
  local PUBKEY
  PUBKEY=$("$PCHAIND" tendermint show-validator --home="$HOME_DIR")

  # NOTE(review): this heredoc and the create-validator tx were garbled in the
  # source (angle brackets were stripped by extraction). Reconstructed from the
  # surrounding code ($PUBKEY/$VALIDATOR_STAKE above, $create_result consumed
  # below, trailing `set -e`) and the standard Cosmos SDK staking CLI — verify
  # field names/values against the original script.
  cat > "$HOME_DIR/validator.json" <<EOF
{
  "pubkey": $PUBKEY,
  "amount": "${VALIDATOR_STAKE}${DENOM}",
  "moniker": "validator-$ID",
  "commission-rate": "0.10",
  "commission-max-rate": "0.20",
  "commission-max-change-rate": "0.01",
  "min-self-delegation": "1"
}
EOF

  set +e
  local create_result
  create_result=$("$PCHAIND" --home="$HOME_DIR" tx staking create-validator "$HOME_DIR/validator.json" \
    --from "validator-$ID" --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \
    --node="tcp://127.0.0.1:$(val_rpc 1)" --fees 1000000000000000upc --yes --output json 2>&1)
  local create_tx; create_tx=$(echo "$create_result" | jq -r '.txhash // ""' 2>/dev/null || true)
  if [[ -n "$create_tx" ]]; then
    wait_chain_tx "$create_tx" "tcp://127.0.0.1:$(val_rpc 1)" 30 || true
  fi
  set -e

  # Wait for bonded
  if wait_validator_bonded "$VALOPER" 120; then
    ok "validator-$ID is bonded (valoper: $VALOPER)"
  else
    warn "validator-$ID may not be bonded yet; continuing"
  fi

  # Register UV for this validator
  local PEER MULTI genesis_rpc_tcp="tcp://127.0.0.1:$(val_rpc 1)"
  local G1_MN
  G1_MN=$(jq -r '.[0].mnemonic' "$TMP_DIR/genesis_accounts.json")
  echo "$G1_MN" | "$PCHAIND" --home="$HOME_DIR" keys add genesis-acc-1 \
    --keyring-backend=$KEYRING --algo=$KEYALGO --recover >/dev/null 2>&1 || true

  PEER=$(uv_peer_id "$ID")
  MULTI="/ip4/127.0.0.1/tcp/$(uv_tss "$ID")"
  # Stagger registrations so concurrent setup_regular_validator jobs don't race
  sleep $(( ID * 2 ))
  set +e
  local reg
  reg=$("$PCHAIND" --home="$HOME_DIR" tx uvalidator add-universal-validator \
    --core-validator-address "$VALOPER" \
    --network "{\"peer_id\": \"$PEER\", \"multi_addrs\": [\"$MULTI\"]}" \
    --from genesis-acc-1 --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \
    --node="$genesis_rpc_tcp" --fees 1000000000000000upc --yes --output json 2>&1 || true)
  local reg_tx; reg_tx=$(echo "$reg" | jq -r '.txhash // ""' 2>/dev/null || true)
  [[ -n "$reg_tx" ]] && wait_chain_tx "$reg_tx" "$genesis_rpc_tcp" 30 || true
  set -e
  ok "UV$ID registered"
}
Universal validator setup ───────────────────────────────────────────────── + +setup_universal_validator() { + local ID="$1" + local HOME_DIR="$DATA_DIR/universal-$ID" + local UV_HOME="$HOME_DIR/.puniversal" + local QUERY_PORT=$(uv_query "$ID") + local TSS_PORT=$(uv_tss "$ID") + local CORE_GRPC=$(uv_grpc "$ID") + local GENESIS_RPC_HTTP="http://127.0.0.1:$(val_rpc 1)" + local LOG_FILE="$LOG_DIR/universal-$ID.log" + + mkdir -p "$HOME_DIR" "$LOG_DIR" + + # Preserve keyshares (puniversald stores them at $UV_HOME/keyshares) + local _ks_tmp="" + if [[ -d "$UV_HOME/keyshares" ]] && ls "$UV_HOME/keyshares"/* >/dev/null 2>&1; then + _ks_tmp=$(mktemp -d) + cp -r "$UV_HOME/keyshares/." "$_ks_tmp/" + ok "Preserved $(ls "$_ks_tmp" | wc -l | tr -d ' ') keyshare(s) for UV$ID" + fi + + rm -rf "$UV_HOME" 2>/dev/null || true + HOME="$HOME_DIR" "$PUNIVERSALD" init + + # Restore keyshares + if [[ -n "$_ks_tmp" ]] && [[ -d "$_ks_tmp" ]]; then + mkdir -p "$UV_HOME/keyshares" + cp -r "$_ks_tmp/." "$UV_HOME/keyshares/" + rm -rf "$_ks_tmp" + ok "Restored $(ls "$UV_HOME/keyshares" | wc -l | tr -d ' ') keyshare(s) for UV$ID" + fi + + # Wait for first block + local BLOCK_HEIGHT=0 + local i=0 + while (( i < 120 )); do + BLOCK_HEIGHT=$(curl -sf "$GENESIS_RPC_HTTP/status" 2>/dev/null \ + | jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null || echo "0") + [[ "$BLOCK_HEIGHT" != "0" && "$BLOCK_HEIGHT" != "null" ]] && break + sleep 2; (( i += 2 )) + done + + local cfg="$UV_HOME/config/pushuv_config.json" + + # Set grpc + keyring + jq --arg grpc "$CORE_GRPC" \ + '.push_chain_grpc_urls = [$grpc] | .keyring_backend = "test"' \ + "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # Debug logging + jq '.log_level = 0' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # TSS + local TSS_PRIVATE_KEY + TSS_PRIVATE_KEY=$(uv_tss_key "$ID") + local TSS_P2P_LISTEN="/ip4/0.0.0.0/tcp/$TSS_PORT" + local TSS_HOME_DIR="$UV_HOME/tss" + + jq --arg pk "$TSS_PRIVATE_KEY" \ + --arg pw "testpassword" \ + 
--arg listen "$TSS_P2P_LISTEN" \ + --arg home "$TSS_HOME_DIR" \ + '.tss_enabled = true | .tss_p2p_private_key_hex = $pk | .tss_password = $pw | .tss_p2p_listen = $listen | .tss_home_dir = $home' \ + "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # Query port + if [[ "$QUERY_PORT" != "8080" ]]; then + jq --argjson p "$QUERY_PORT" '.query_server_port = $p' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + fi + + # RPC overrides: use localhost ports (not host.docker.internal) + local SEPOLIA_RPC="${SEPOLIA_RPC_URL_OVERRIDE:-http://localhost:9545}" + local ARBITRUM_RPC="${ARBITRUM_RPC_URL_OVERRIDE:-http://localhost:9546}" + local BASE_RPC="${BASE_RPC_URL_OVERRIDE:-http://localhost:9547}" + local BSC_RPC="${BSC_RPC_URL_OVERRIDE:-http://localhost:9548}" + local SOLANA_RPC="${SOLANA_RPC_URL_OVERRIDE:-http://localhost:8899}" + local ARBITRUM_TENDERLY="https://arbitrum-sepolia.gateway.tenderly.co" + local BSC_DEFAULT="https://bsc-testnet-rpc.publicnode.com" + + jq --arg c "$ARBITRUM_CHAIN_ID" --arg u "$ARBITRUM_TENDERLY" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$BSC_CHAIN_ID" --arg u "$BSC_DEFAULT" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$SEPOLIA_CHAIN_ID" --arg u "$SEPOLIA_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$ARBITRUM_CHAIN_ID" --arg u "$ARBITRUM_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$BASE_CHAIN_ID" --arg u "$BASE_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$BSC_CHAIN_ID" --arg u "$BSC_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + jq --arg c "$SOLANA_CHAIN_ID" --arg u "$SOLANA_RPC" \ + '.chain_configs[$c].rpc_urls = [$u]' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # Set localchain 
start height + local start_from="$BLOCK_HEIGHT" + (( start_from > 20 )) && start_from=$(( start_from - 20 )) + jq --arg c "$LOCALCHAIN_CHAIN_ID" --argjson h "$start_from" \ + '.chain_configs[$c].event_start_from = $h' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + + # Set event_start_from for external chains from env + set_chain_start_from() { + local cid="$1" val="${2:-}" + [[ -z "$val" ]] && return 0 + jq --arg c "$cid" --argjson h "$val" \ + '.chain_configs[$c].event_start_from = $h' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + } + set_chain_start_from "$SEPOLIA_CHAIN_ID" "${SEPOLIA_EVENT_START_FROM:-}" + set_chain_start_from "$BASE_CHAIN_ID" "${BASE_EVENT_START_FROM:-}" + set_chain_start_from "$ARBITRUM_CHAIN_ID" "${ARBITRUM_EVENT_START_FROM:-}" + set_chain_start_from "$BSC_CHAIN_ID" "${BSC_EVENT_START_FROM:-}" + set_chain_start_from "$SOLANA_CHAIN_ID" "${SOLANA_EVENT_START_FROM:-}" + + # Valoper address + local VALOPER + VALOPER=$(jq -r ".[$(( ID - 1 ))].valoper_address" "$TMP_DIR/validators.json") + if [[ -n "$VALOPER" && "$VALOPER" != "null" ]]; then + jq --arg v "$VALOPER" '.push_valoper_address = $v' "$cfg" > "${cfg}.tmp" && mv "${cfg}.tmp" "$cfg" + fi + + # Import hotkey + local HOTKEY_MN HOTKEY_ADDR + HOTKEY_MN=$(jq -r ".[$(( ID - 1 ))].mnemonic" "$TMP_DIR/hotkeys.json") + HOTKEY_ADDR=$(jq -r ".[$(( ID - 1 ))].address" "$TMP_DIR/hotkeys.json") + mkdir -p "$UV_HOME/keyring-test" + echo "$HOTKEY_MN" | HOME="$HOME_DIR" "$PUNIVERSALD" keys add "hotkey-$ID" \ + --recover --keyring-backend=test >/dev/null 2>&1 || true + ok "UV$ID hotkey imported: $HOTKEY_ADDR" + + # Wait for AuthZ grants + log "UV$ID: waiting for AuthZ grants for $HOTKEY_ADDR..." 
+ local required_msgs='["/uexecutor.v1.MsgVoteInbound","/uexecutor.v1.MsgVoteChainMeta","/uexecutor.v1.MsgVoteOutbound","/utss.v1.MsgVoteTssKeyProcess"]' + local wait_max=120 waited=0 matched=0 + while (( waited < wait_max )); do + matched=$(curl -sf "http://127.0.0.1:$(val_rest 1)/cosmos/authz/v1beta1/grants/grantee/$HOTKEY_ADDR" 2>/dev/null \ + | jq -r --argjson req "$required_msgs" \ + '[.grants[]? | (.authorization.msg // .authorization.value.msg // "") as $m | select($req | index($m))] | length' \ + 2>/dev/null || echo "0") + (( matched >= 4 )) && break + sleep 2; (( waited += 2 )) + done + if (( matched >= 4 )); then + ok "UV$ID: found $matched/4 AuthZ grants" + else + warn "UV$ID: only $matched/4 grants found after ${wait_max}s — continuing" + fi + + # Wait for on-chain registration + local EXPECTED_PEER + EXPECTED_PEER=$(uv_peer_id "$ID") + log "UV$ID: waiting for on-chain registration (peer_id=$EXPECTED_PEER)..." + local reg_wait=0 found="" + while (( reg_wait < 120 )); do + found=$(curl -sf "http://127.0.0.1:$(val_rest 1)/uvalidator/v1/universal_validators" 2>/dev/null \ + | jq -r --arg pid "$EXPECTED_PEER" \ + '.universal_validator[]? | select(.network_info.peer_id == $pid) | .network_info.peer_id' \ + 2>/dev/null || echo "") + [[ -n "$found" ]] && break + sleep 2; (( reg_wait += 2 )) + done + if [[ -z "$found" ]]; then + err "UV$ID not found on-chain after 120s"; return 1 + fi + ok "UV$ID confirmed on-chain" + + # Start puniversald (set stack to max hard limit on macOS; ignore if already at limit) + log "Starting universal-validator-$ID (query=$QUERY_PORT tss=$TSS_PORT)..." + ( + ulimit -s 65520 2>/dev/null || true + export HOME="$HOME_DIR" + exec "$PUNIVERSALD" start + ) >> "$LOG_FILE" 2>&1 & + + local pid=$! 
+ disown $pid 2>/dev/null || true + write_pid "universal-$ID" "$pid" + ok "universal-validator-$ID started (pid=$pid)" +} + +# ─── TSS keygen ──────────────────────────────────────────────────────────────── + +get_current_tss_key_id() { + local genesis_rpc="tcp://127.0.0.1:$(val_rpc 1)" + "$PCHAIND" query utss current-key --node="$genesis_rpc" 2>/dev/null \ + | grep -E "^\s*key_id:" | awk '{print $2}' 2>/dev/null || echo "" +} + +get_utss_admin() { + local genesis_rpc="tcp://127.0.0.1:$(val_rpc 1)" + "$PCHAIND" query utss params --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.params.admin // ""' 2>/dev/null || echo "" +} + +cmd_tss_keygen() { + header "TSS Key Generation" + + # Check existing key + local existing + existing=$(get_current_tss_key_id) + if [[ -n "$existing" ]]; then + ok "TSS key already present: $existing" + return 0 + fi + + # Validate UVs registered + local genesis_rpc="tcp://127.0.0.1:$(val_rpc 1)" + local uv_count + uv_count=$("$PCHAIND" query uvalidator all-universal-validators \ + --node="$genesis_rpc" --output json 2>/dev/null \ + | jq -r '.universal_validator | length // 0' 2>/dev/null || echo "0") + if (( uv_count < 2 )); then + err "Need at least 2 registered universal validators (found: $uv_count)"; return 1 + fi + + # Find signer key + local admin_addr + admin_addr=$(get_utss_admin) + local val1_home="$DATA_DIR/validator-1" + local signer="" + while IFS= read -r key_name; do + local addr + addr=$("$PCHAIND" --home="$val1_home" keys show "$key_name" -a --keyring-backend=$KEYRING 2>/dev/null || true) + if [[ "$addr" == "$admin_addr" ]]; then signer="$key_name"; break; fi + done < <("$PCHAIND" --home="$val1_home" keys list --keyring-backend=$KEYRING --output json 2>/dev/null \ + | jq -r '.[] | .name' 2>/dev/null || true) + + if [[ -z "$signer" ]]; then + err "No local key matches UTSS admin: $admin_addr"; return 1 + fi + + local attempt max_attempts=5 + for (( attempt=1; attempt<=max_attempts; attempt++ )); do + log 
"Initiating TSS keygen (attempt $attempt/$max_attempts, signer=$signer)..." + local result tx_hash + result=$("$PCHAIND" --home="$val1_home" tx utss initiate-tss-key-process \ + --process-type tss-process-keygen \ + --from "$signer" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend=$KEYRING \ + --node="$genesis_rpc" \ + --fees 1000000000000000upc \ + --yes --output json 2>&1 || true) + + local code; code=$(echo "$result" | jq -r '.code // "0"' 2>/dev/null || echo "0") + tx_hash=$(echo "$result" | jq -r '.txhash // ""' 2>/dev/null || true) + + if [[ "$code" != "0" ]]; then + warn "Keygen tx code=$code; retrying..." + sleep 5; continue + fi + + if [[ -n "$tx_hash" ]]; then + wait_chain_tx "$tx_hash" "$genesis_rpc" 30 || true + fi + + # Wait for key + log "Waiting for TSS key to materialize on-chain..." + local waited=0 + while (( waited < 300 )); do + local kid; kid=$(get_current_tss_key_id) + if [[ -n "$kid" ]]; then ok "TSS key ready: $kid"; return 0; fi + sleep 3; (( waited += 3 )) + done + warn "Key not ready after 300s on attempt $attempt" + done + + err "TSS keygen failed after $max_attempts attempts" + return 1 +} + +# ─── Setup uvalidators (re-registration + authz) ─────────────────────────────── + +cmd_setup_uvalidators() { + header "Setting up Universal Validators" + + local genesis_rpc_tcp="tcp://127.0.0.1:$(val_rpc 1)" + local genesis_rpc_http="http://127.0.0.1:$(val_rpc 1)" + local failures=0 + + for i in 1 2 3 4; do + log "Processing UV$i..." + local val_home="$DATA_DIR/validator-$i" + + local VALOPER PEER MULTI + VALOPER=$("$PCHAIND" --home="$val_home" keys show "validator-$i" --bech val -a --keyring-backend=$KEYRING 2>/dev/null || true) + PEER=$(uv_peer_id "$i") + MULTI="/ip4/127.0.0.1/tcp/$(uv_tss "$i")" + + if [[ -z "$VALOPER" ]]; then + warn "Could not get valoper for validator-$i; skipping" + (( failures++ )); continue + fi + + # Wait bonded + if ! 
wait_validator_bonded "$VALOPER" 90; then + warn "validator-$i not bonded; skipping UV$i"; (( failures++ )); continue + fi + + # Register / update UV + set +e + local reg_result reg_tx + reg_result=$("$PCHAIND" --home="$val_home" tx uvalidator add-universal-validator \ + --core-validator-address "$VALOPER" \ + --network "{\"peer_id\": \"$PEER\", \"multi_addrs\": [\"$MULTI\"]}" \ + --from genesis-acc-1 --chain-id "$CHAIN_ID" --keyring-backend=$KEYRING \ + --node="$genesis_rpc_tcp" --fees 1000000000000000upc --yes --output json 2>&1 || true) + reg_tx=$(echo "$reg_result" | jq -r '.txhash // ""' 2>/dev/null || true) + [[ -n "$reg_tx" ]] && wait_chain_tx "$reg_tx" "$genesis_rpc_tcp" 30 || true + set -e + ok "UV$i registered" + + # AuthZ grants + local HOTKEY_ADDR + HOTKEY_ADDR=$(jq -r ".[$(( i - 1 ))].address" "$TMP_DIR/hotkeys.json") + + set +e + for msg in /uexecutor.v1.MsgVoteInbound /uexecutor.v1.MsgVoteChainMeta /uexecutor.v1.MsgVoteOutbound /utss.v1.MsgVoteTssKeyProcess; do + "$PCHAIND" --home="$val_home" tx authz grant "$HOTKEY_ADDR" generic \ + --msg-type="$msg" \ + --from "validator-$i" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend=$KEYRING \ + --node="$genesis_rpc_tcp" \ + --fees 200000000000000upc \ + --yes >/dev/null 2>&1 || true + sleep 1 + done + set -e + ok "UV$i AuthZ grants created for $HOTKEY_ADDR" + done + + if (( failures > 0 )); then + warn "Setup completed with $failures failure(s)" + else + ok "All UV setup complete" + fi +} + +# ─── Start command ───────────────────────────────────────────────────────────── + +cmd_start() { + local clean=false + for arg in "$@"; do [[ "$arg" == "--build" || "$arg" == "--clean" ]] && clean=true; done + + require_cmd "$PCHAIND" "$PUNIVERSALD" jq curl nc + + header "Starting Push Chain Local Network" + mkdir -p "$DATA_DIR" "$LOG_DIR" "$PID_DIR" + + if [[ "$clean" == "true" ]]; then + log "Clean start: removing existing validator data..." 
+ rm -rf "$DATA_DIR"/validator-* "$DATA_DIR"/universal-* + log "Clearing account files for fresh generation..." + rm -f "$TMP_DIR/genesis_accounts.json" "$TMP_DIR/validators.json" "$TMP_DIR/hotkeys.json" "$TMP_DIR/genesis.json" + fi + + # Kill any stale processes + for i in 1 2 3 4; do + local pid + pid=$(read_pid "validator-$i") + [[ -n "$pid" ]] && is_alive "$pid" && kill "$pid" 2>/dev/null || true + pid=$(read_pid "universal-$i") + [[ -n "$pid" ]] && is_alive "$pid" && kill "$pid" 2>/dev/null || true + done + sleep 2 + + generate_accounts + + # Genesis validator (foreground setup, background run) + if [[ "$clean" == "true" ]] || [[ ! -d "$DATA_DIR/validator-1" ]]; then + setup_genesis_validator + else + log "Reusing existing validator-1 data..." + local RPC_PORT=$(val_rpc 1) EVM_PORT=$(val_evm 1) EVM_WS_PORT=$(val_evmws 1) + local LOG_FILE="$LOG_DIR/validator-1.log" + "$PCHAIND" start \ + --home="$DATA_DIR/validator-1" \ + --pruning=nothing \ + --minimum-gas-prices="1000000000${DENOM}" \ + --rpc.laddr="tcp://0.0.0.0:${RPC_PORT}" \ + --json-rpc.address="0.0.0.0:${EVM_PORT}" \ + --json-rpc.ws-address="0.0.0.0:${EVM_WS_PORT}" \ + --json-rpc.api=eth,txpool,personal,net,debug,web3 \ + --chain-id="$CHAIN_ID" \ + >> "$LOG_FILE" 2>&1 & + local _v1pid=$! + disown $_v1pid 2>/dev/null || true + write_pid "validator-1" "$_v1pid" + wait_block "$RPC_PORT" "validator-1" 120 + fi + + # Regular validators in parallel + log "Starting validators 2, 3, 4..." + for i in 2 3 4; do + if [[ "$clean" == "true" ]] || [[ ! -d "$DATA_DIR/validator-$i" ]]; then + setup_regular_validator "$i" & + else + log "Reusing existing validator-$i data..." 
+ local RPC=$(val_rpc "$i") EVM=$(val_evm "$i") WS=$(val_evmws "$i") + "$PCHAIND" start \ + --home="$DATA_DIR/validator-$i" \ + --pruning=nothing \ + --minimum-gas-prices="1000000000${DENOM}" \ + --rpc.laddr="tcp://0.0.0.0:${RPC}" \ + --json-rpc.address="0.0.0.0:${EVM}" \ + --json-rpc.ws-address="0.0.0.0:${WS}" \ + --json-rpc.api=eth,txpool,personal,net,debug,web3 \ + --chain-id="$CHAIN_ID" \ + >> "$LOG_DIR/validator-$i.log" 2>&1 & + local _vpid=$! + disown $_vpid 2>/dev/null || true + write_pid "validator-$i" "$_vpid" + fi + done + wait || true + ok "All core validators running" + + # Universal validators in parallel + log "Starting universal validators..." + for i in 1 2 3 4; do + setup_universal_validator "$i" & + done + wait || true + ok "All universal validators started" + + # Health checks + log "Waiting for universal validators to become healthy..." + local all_healthy=true + for i in 1 2 3 4; do + if ! wait_uv_health "$(uv_query "$i")" "universal-validator-$i" 180; then + all_healthy=false + fi + done + + if [[ "$all_healthy" == "true" ]]; then + ok "All validators healthy!" + else + warn "Some validators may not be healthy — check logs in $LOG_DIR" + fi + + cmd_status +} + +# ─── Stop / Down ─────────────────────────────────────────────────────────────── + +cmd_stop() { + log "Stopping all local validators..." + for i in 1 2 3 4; do + local pid + pid=$(read_pid "validator-$i") + if [[ -n "$pid" ]] && is_alive "$pid"; then + kill "$pid" 2>/dev/null || true + ok "Stopped validator-$i (pid=$pid)" + fi + pid=$(read_pid "universal-$i") + if [[ -n "$pid" ]] && is_alive "$pid"; then + kill "$pid" 2>/dev/null || true + ok "Stopped universal-validator-$i (pid=$pid)" + fi + done + # Catch any stragglers by binary name + pkill -f "pchaind start" 2>/dev/null || true + pkill -f "puniversald start" 2>/dev/null || true + sleep 2 + ok "All validators stopped" +} + +cmd_down() { + cmd_stop + log "Removing validator data..." 
+ rm -rf "$DATA_DIR"/validator-* "$DATA_DIR"/universal-* + rm -f "$TMP_DIR/genesis.json" + ok "Data removed (accounts in /tmp/push-accounts preserved)" +} + +# ─── Status ──────────────────────────────────────────────────────────────────── + +cmd_status() { + header "Local Devnet Status" + for i in 1 2 3 4; do + local pid rpc_port + pid=$(read_pid "validator-$i") + rpc_port=$(val_rpc "$i") + if [[ -n "$pid" ]] && is_alive "$pid"; then + local height + height=$(curl -sf "http://127.0.0.1:$rpc_port/status" 2>/dev/null \ + | jq -r '.result.sync_info.latest_block_height // "?"' 2>/dev/null || echo "?") + ok "validator-$i (pid=$pid) — rpc=:$rpc_port evm=:$(val_evm "$i") height=$height" + else + err "validator-$i — NOT RUNNING" + fi + done + for i in 1 2 3 4; do + local pid qport + pid=$(read_pid "universal-$i") + qport=$(uv_query "$i") + if [[ -n "$pid" ]] && is_alive "$pid"; then + local health + health=$(curl -sf "http://127.0.0.1:$qport/health" 2>/dev/null && echo "healthy" || echo "starting") + ok "universal-validator-$i (pid=$pid) — query=:$qport tss=:$(uv_tss "$i") [$health]" + else + err "universal-validator-$i — NOT RUNNING" + fi + done +} + +# ─── Logs ────────────────────────────────────────────────────────────────────── + +cmd_logs() { + local name="${1:-all}" + mkdir -p "$LOG_DIR" + if [[ "$name" == "all" ]]; then + tail -f "$LOG_DIR"/*.log 2>/dev/null || echo "No logs found in $LOG_DIR" + else + local f="$LOG_DIR/${name}.log" + [[ -f "$f" ]] && tail -f "$f" || { err "Log not found: $f"; exit 1; } + fi +} + +# ─── Main ────────────────────────────────────────────────────────────────────── + +case "${1:-help}" in + start) shift; cmd_start "$@" ;; + stop) cmd_stop ;; + down) cmd_down ;; + status) cmd_status ;; + logs) shift; cmd_logs "${1:-all}" ;; + tss-keygen) cmd_tss_keygen ;; + setup-uvalidators) cmd_setup_uvalidators ;; + *) + echo "Usage: $(basename "$0") " + echo "" + echo "Commands:" + echo " start [--build] Start all validators (--build for clean 
start)" + echo " stop Stop all validators (keep data)" + echo " down Stop and remove validator data" + echo " status Show status of all validators" + echo " logs [name] Tail logs (e.g. validator-1, universal-2, all)" + echo " tss-keygen Initiate TSS key generation" + echo " setup-uvalidators Register universal validators and setup AuthZ" + ;; +esac diff --git a/scripts/replace_addresses.sh b/scripts/replace_addresses.sh new file mode 100644 index 00000000..63ecc00e --- /dev/null +++ b/scripts/replace_addresses.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ROOT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT_DIR" + +ENV_FILE="e2e-tests/.env" +if [[ ! -f "$ENV_FILE" ]]; then + echo "e2e-tests/.env not found" >&2 + exit 1 +fi + +PRIVATE_KEY=$(grep '^PRIVATE_KEY=' "$ENV_FILE" | cut -d= -f2 | tr -d '"' | tr -d "'") +if [[ -z "$PRIVATE_KEY" ]]; then + echo "PRIVATE_KEY not found in $ENV_FILE" >&2 + exit 1 +fi + +# Derive EVM address +if ! command -v cast >/dev/null 2>&1; then + echo "cast command not found (install foundry/cast)" >&2 + exit 1 +fi +EVM_ADDRESS=$(cast wallet address $PRIVATE_KEY) + +# Derive push (cosmos) address +if ! 
command -v $PWD/build/pchaind >/dev/null 2>&1; then + echo "pchaind binary not found in build/ (run make build)" >&2 + exit 1 +fi +PUSH_ADDRESS=push1gjaw568e35hjc8udhat0xnsxxmkm2snrexxz20 + +echo "Replacing with PUSH_ADDRESS: $PUSH_ADDRESS" +echo "Replacing with EVM_ADDRESS: $EVM_ADDRESS" + +# Replace Admin in params.go files +for f in x/utss/types/params.go x/uregistry/types/params.go x/uvalidator/types/params.go; do + if [[ -f "$f" ]]; then + perl -pi -e "s/Admin: \"push1[0-9a-z]+\"/Admin: \"$PUSH_ADDRESS\"/g" "$f" + echo "Updated Admin in $f" + fi +done + +# Replace PROXY_ADMIN_OWNER_ADDRESS in constants.go files +for f in x/uexecutor/types/constants.go x/uregistry/types/constants.go; do + if [[ -f "$f" ]]; then + perl -pi -e "s/PROXY_ADMIN_OWNER_ADDRESS_HEX = \"0x[a-fA-F0-9]{40}\"/PROXY_ADMIN_OWNER_ADDRESS_HEX = \"$EVM_ADDRESS\"/g" "$f" + perl -pi -e "s/PROXY_ADMIN_OWNER_ADDRESS = \"0x[a-fA-F0-9]{40}\"/PROXY_ADMIN_OWNER_ADDRESS = \"$EVM_ADDRESS\"/g" "$f" + echo "Updated PROXY_ADMIN_OWNER_ADDRESS in $f" + fi +done + +echo "Address replacement completed."