diff --git a/.github/workflows/container-release.yml b/.github/workflows/container-release.yml new file mode 100644 index 0000000..dadd0b6 --- /dev/null +++ b/.github/workflows/container-release.yml @@ -0,0 +1,79 @@ +name: Container Release + +on: + workflow_dispatch: + inputs: + version: + description: 'Image tag (e.g. v1.0.0, latest)' + required: true + type: string + default: 'latest' + push_latest: + description: 'Also tag as latest' + required: false + type: boolean + default: true + +permissions: + contents: read + packages: write + +env: + REGISTRY: ghcr.io + IMAGE_NAME: squid-socks + +jobs: + build-and-push: + name: Build & Push Container Image + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Prepare tags + id: tags + run: | + OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') + FULL_IMAGE="${{ env.REGISTRY }}/${OWNER}/${{ env.IMAGE_NAME }}" + TAGS="${FULL_IMAGE}:${{ inputs.version }}" + if [ "${{ inputs.push_latest }}" = "true" ] && [ "${{ inputs.version }}" != "latest" ]; then + TAGS="${TAGS},${FULL_IMAGE}:latest" + fi + echo "tags=${TAGS}" >> "$GITHUB_OUTPUT" + echo "image=${FULL_IMAGE}" >> "$GITHUB_OUTPUT" + echo "Tags to push: ${TAGS}" + + - name: Build and push + uses: docker/build-push-action@v6 + with: + context: ./squid_patch + file: ./squid_patch/Dockerfile + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.tags.outputs.tags }} + cache-from: type=gha + cache-to: type=gha,mode=max + labels: | + org.opencontainers.image.title=squid-socks + org.opencontainers.image.description=Squid 6.10 with native SOCKS4/SOCKS5 cache_peer support + 
org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ inputs.version }} + + - name: Verify pushed image + run: | + docker pull ${{ steps.tags.outputs.image }}:${{ inputs.version }} + docker run --rm ${{ steps.tags.outputs.image }}:${{ inputs.version }} squid -v + echo "--- Image verification OK ---" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..2cc9021 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,72 @@ +name: Release + +on: + workflow_dispatch: + inputs: + version: + description: 'Release version (e.g. v1.0.0)' + required: true + type: string + prerelease: + description: 'Mark as pre-release' + required: false + type: boolean + default: false + +permissions: + contents: write + +jobs: + release: + name: Create GitHub Release + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Validate version format + run: | + if ! 
echo "${{ inputs.version }}" | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$'; then + echo "ERROR: Version must match vX.Y.Z or vX.Y.Z-suffix format" + exit 1 + fi + + - name: Check tag does not already exist + run: | + if git rev-parse "refs/tags/${{ inputs.version }}" >/dev/null 2>&1; then + echo "ERROR: Tag ${{ inputs.version }} already exists" + exit 1 + fi + + - name: Generate release notes + id: notes + run: | + PREVIOUS_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "") + if [ -n "${PREVIOUS_TAG}" ]; then + echo "Previous tag: ${PREVIOUS_TAG}" + CHANGELOG=$(git log "${PREVIOUS_TAG}..HEAD" --pretty=format:"- %s (%h)" --no-merges) + else + echo "No previous tag found, using full history" + CHANGELOG=$(git log --pretty=format:"- %s (%h)" --no-merges -50) + fi + + { + echo "notes<<EOF" + echo "${CHANGELOG}" + echo "EOF" + } >> "$GITHUB_OUTPUT" + + - name: Create GitHub Release + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ inputs.version }} + name: ${{ inputs.version }} + body: ${{ steps.notes.outputs.notes }} + prerelease: ${{ inputs.prerelease }} + generate_release_notes: false diff --git a/.github/workflows/squid-build-test.yml b/.github/workflows/squid-build-test.yml new file mode 100644 index 0000000..c196331 --- /dev/null +++ b/.github/workflows/squid-build-test.yml @@ -0,0 +1,519 @@ +name: Squid SOCKS Patch Build & Test + +on: + push: + paths: + - 'squid_patch/**' + - 'setup/**' + - 'template/**' + - '.github/workflows/squid-build-test.yml' + pull_request: + paths: + - 'squid_patch/**' + - 'setup/**' + - 'template/**' + - '.github/workflows/squid-build-test.yml' + workflow_dispatch: + +env: + SQUID_IMAGE: squid-socks:6.10 + +jobs: + # ------------------------------------------------------------------ + # 1. 
Build the custom Squid image with SOCKS patch + # ------------------------------------------------------------------ + build: + name: Build Squid 6.10 + SOCKS Patch + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Squid image + uses: docker/build-push-action@v6 + with: + context: ./squid_patch + file: ./squid_patch/Dockerfile + tags: ${{ env.SQUID_IMAGE }} + load: true + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Verify Squid binary + run: | + docker run --rm ${{ env.SQUID_IMAGE }} squid -v + echo "--- Squid binary OK ---" + + - name: Save image for test jobs + run: docker save ${{ env.SQUID_IMAGE }} -o /tmp/squid-image.tar + + - name: Upload image artifact + uses: actions/upload-artifact@v4 + with: + name: squid-image + path: /tmp/squid-image.tar + retention-days: 1 + + # ------------------------------------------------------------------ + # 2. 
Test: Squid config parsing (socks4/socks5 options) + # ------------------------------------------------------------------ + test-config: + name: Test Squid Config Parsing + needs: build + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@v4 + + - name: Download image artifact + uses: actions/download-artifact@v4 + with: + name: squid-image + path: /tmp + + - name: Load image + run: docker load -i /tmp/squid-image.tar + + - name: Test SOCKS5 cache_peer config parsing + run: | + cat > /tmp/squid-socks5.conf <<'CONF' + http_port 3128 + http_access allow all + never_direct allow all + cache_peer 127.0.0.1 parent 1080 0 no-query no-digest round-robin proxy-only originserver name=test_socks5 socks5 socks-user=testuser socks-pass=testpass + CONF + + docker run --rm \ + -v /tmp/squid-socks5.conf:/etc/squid/conf.d/squid.conf:ro \ + ${{ env.SQUID_IMAGE }} \ + squid -k parse -f /etc/squid/conf.d/squid.conf 2>&1 | tee /tmp/parse-output.txt + + echo "--- SOCKS5 config parse OK ---" + + - name: Test SOCKS4 cache_peer config parsing + run: | + cat > /tmp/squid-socks4.conf <<'CONF' + http_port 3128 + http_access allow all + never_direct allow all + cache_peer 127.0.0.1 parent 1080 0 no-query no-digest round-robin proxy-only originserver name=test_socks4 socks4 + CONF + + docker run --rm \ + -v /tmp/squid-socks4.conf:/etc/squid/conf.d/squid.conf:ro \ + ${{ env.SQUID_IMAGE }} \ + squid -k parse -f /etc/squid/conf.d/squid.conf 2>&1 | tee /tmp/parse-output.txt + + echo "--- SOCKS4 config parse OK ---" + + - name: Test multiple SOCKS peers config + run: | + cat > /tmp/squid-multi.conf <<'CONF' + http_port 3128 + http_access allow all + never_direct allow all + cache_peer 10.0.0.1 parent 1080 0 no-query no-digest round-robin proxy-only originserver name=socks1 socks5 socks-user=user1 socks-pass=pass1 + cache_peer 10.0.0.2 parent 1080 0 no-query no-digest round-robin proxy-only originserver name=socks2 socks5 socks-user=user2 socks-pass=pass2 + cache_peer 
10.0.0.3 parent 1081 0 no-query no-digest round-robin proxy-only originserver name=socks3 socks4 + CONF + + docker run --rm \ + -v /tmp/squid-multi.conf:/etc/squid/conf.d/squid.conf:ro \ + ${{ env.SQUID_IMAGE }} \ + squid -k parse -f /etc/squid/conf.d/squid.conf 2>&1 + + echo "--- Multiple SOCKS peers config OK ---" + + # ------------------------------------------------------------------ + # 3. Test: SOCKS5 proxy end-to-end via Squid + # ------------------------------------------------------------------ + test-socks5-e2e: + name: Test SOCKS5 End-to-End + needs: build + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + + - name: Download image artifact + uses: actions/download-artifact@v4 + with: + name: squid-image + path: /tmp + + - name: Load image + run: docker load -i /tmp/squid-image.tar + + - name: Start local HTTP test server + run: | + mkdir -p /tmp/www + echo '{"origin":"127.0.0.1","test":"ok"}' > /tmp/www/ip + python3 -m http.server 18080 --directory /tmp/www & + sleep 1 + curl -sf http://127.0.0.1:18080/ip + echo "--- Local HTTP server ready ---" + + - name: Build and start SOCKS5 test server (microsocks) + run: | + git clone --depth 1 https://github.com/rofl0r/microsocks.git /tmp/microsocks + cd /tmp/microsocks && make -j"$(nproc)" + /tmp/microsocks/microsocks -p 11080 & + sleep 1 + echo "--- Verify SOCKS5 server directly ---" + curl -sf --socks5-hostname 127.0.0.1:11080 http://127.0.0.1:18080/ip || { + echo "ERROR: SOCKS5 server not working" + exit 1 + } + echo "--- SOCKS5 server OK ---" + + - name: Create Squid config for SOCKS5 peer + run: | + mkdir -p /tmp/squid-conf + cat > /tmp/squid-conf/squid.conf <&1 || true + exit 1 + fi + echo "=== Early Squid logs ===" + docker logs squid-test 2>&1 || true + + - name: Wait for Squid to listen + run: | + echo "Waiting for Squid to listen on port 3128..." 
+ for i in $(seq 1 30); do + if bash -c 'echo > /dev/tcp/127.0.0.1/3128' 2>/dev/null; then + echo "Squid is listening after ${i}s" + exit 0 + fi + sleep 1 + done + echo "ERROR: Squid never started listening" + docker logs squid-test 2>&1 || true + exit 1 + + - name: Verify HTTP server is still up + run: curl -sf http://127.0.0.1:18080/ip + + - name: Test HTTP request through SOCKS5 peer + run: | + echo "--- Attempting proxy request ---" + HTTP_CODE=$(curl -s -o /tmp/proxy-response.txt -w '%{http_code}' --max-time 15 -x http://127.0.0.1:3128 http://127.0.0.1:18080/ip 2>/tmp/proxy-stderr.txt || true) + echo "HTTP status: ${HTTP_CODE}" + echo "Response body:" + cat /tmp/proxy-response.txt || true + echo "" + echo "Curl stderr:" + cat /tmp/proxy-stderr.txt || true + echo "" + echo "=== Squid logs after request ===" + docker logs squid-test 2>&1 | tail -30 || true + echo "" + # Now assert + [ "${HTTP_CODE}" = "200" ] || { echo "FAIL: expected 200, got ${HTTP_CODE}"; exit 1; } + grep -q "test" /tmp/proxy-response.txt || { echo "FAIL: unexpected response body"; exit 1; } + echo "--- HTTP via SOCKS5 OK ---" + + - name: Post Squid logs to PR on failure + if: failure() + uses: actions/github-script@v7 + with: + script: | + const { execSync } = require('child_process'); + let logs = ''; + try { logs = execSync('docker logs squid-test 2>&1', {encoding: 'utf8', maxBuffer: 50*1024}); } catch(e) { logs = e.stdout || e.message; } + const body = `### E2E Test Squid Logs\n\`\`\`\n${logs.slice(-3000)}\n\`\`\``; + const issueNumber = context.issue.number; + if (!issueNumber) { + console.log('No PR context; skipping comment.'); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + body: body + }); + } + + - name: Collect logs + if: always() + run: | + mkdir -p /tmp/test-logs + docker logs squid-test > /tmp/test-logs/squid.log 2>&1 || true + cp /tmp/squid-conf/squid.conf /tmp/test-logs/ 2>/dev/null || 
true + cp /tmp/proxy-response.txt /tmp/test-logs/ 2>/dev/null || true + cp /tmp/proxy-stderr.txt /tmp/test-logs/ 2>/dev/null || true + + - name: Upload test logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: e2e-test-logs + path: /tmp/test-logs/ + retention-days: 3 + + - name: Cleanup + if: always() + run: | + docker rm -f squid-test 2>/dev/null || true + pkill microsocks 2>/dev/null || true + pkill -f 'python3 -m http.server' 2>/dev/null || true + + # ------------------------------------------------------------------ + # 4. Test: SOCKS5 with authentication + # ------------------------------------------------------------------ + test-socks5-auth: + name: Test SOCKS5 with Auth + needs: build + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + + - name: Download image artifact + uses: actions/download-artifact@v4 + with: + name: squid-image + path: /tmp + + - name: Load image + run: docker load -i /tmp/squid-image.tar + + - name: Start local HTTP test server + run: | + mkdir -p /tmp/www + echo '{"origin":"127.0.0.1","test":"ok"}' > /tmp/www/ip + python3 -m http.server 18081 --directory /tmp/www & + sleep 1 + curl -sf http://127.0.0.1:18081/ip + + - name: Build and start SOCKS5 server with auth (microsocks) + run: | + if [ ! 
-f /tmp/microsocks/microsocks ]; then + git clone --depth 1 https://github.com/rofl0r/microsocks.git /tmp/microsocks + cd /tmp/microsocks && make -j"$(nproc)" + fi + /tmp/microsocks/microsocks -u testuser -P testpass -p 11081 & + sleep 1 + echo "--- Verify SOCKS5 auth server directly ---" + curl -sf --socks5-hostname testuser:testpass@127.0.0.1:11081 http://127.0.0.1:18081/ip || { + echo "ERROR: SOCKS5 auth server not working" + exit 1 + } + echo "--- SOCKS5 auth server OK ---" + + - name: Create Squid config with SOCKS5 auth + run: | + mkdir -p /tmp/squid-conf-auth + cat > /tmp/squid-conf-auth/squid.conf <&1 || true + exit 1 + fi + echo "=== Early Squid logs ===" + docker logs squid-auth 2>&1 || true + + - name: Wait for Squid to listen + run: | + for i in $(seq 1 30); do + if bash -c 'echo > /dev/tcp/127.0.0.1/3128' 2>/dev/null; then + echo "Squid is listening after ${i}s" + exit 0 + fi + sleep 1 + done + echo "ERROR: Squid never started listening" + docker logs squid-auth 2>&1 || true + exit 1 + + - name: Verify HTTP server is still up + run: curl -sf http://127.0.0.1:18081/ip + + - name: Test HTTP through authenticated SOCKS5 + run: | + echo "--- Attempting proxy request ---" + HTTP_CODE=$(curl -s -o /tmp/proxy-response.txt -w '%{http_code}' --max-time 15 -x http://127.0.0.1:3128 http://127.0.0.1:18081/ip 2>/tmp/proxy-stderr.txt || true) + echo "HTTP status: ${HTTP_CODE}" + echo "Response body:" + cat /tmp/proxy-response.txt || true + echo "" + echo "Curl stderr:" + cat /tmp/proxy-stderr.txt || true + echo "" + echo "=== Squid logs after request ===" + docker logs squid-auth 2>&1 | tail -30 || true + echo "" + [ "${HTTP_CODE}" = "200" ] || { echo "FAIL: expected 200, got ${HTTP_CODE}"; exit 1; } + grep -q "test" /tmp/proxy-response.txt || { echo "FAIL: unexpected body"; exit 1; } + echo "--- HTTP via SOCKS5 auth OK ---" + + - name: Post Squid logs to PR on failure + if: failure() + uses: actions/github-script@v7 + with: + script: | + const { execSync } = 
require('child_process'); + let logs = ''; + try { logs = execSync('docker logs squid-auth 2>&1', {encoding: 'utf8', maxBuffer: 50*1024}); } catch(e) { logs = e.stdout || e.message; } + const body = `### Auth Test Squid Logs\n\`\`\`\n${logs.slice(-3000)}\n\`\`\``; + const issueNumber = context.issue.number; + if (!issueNumber) { + console.log('No PR context; skipping comment.'); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + body: body + }); + } + + - name: Collect logs + if: always() + run: | + mkdir -p /tmp/test-logs-auth + docker logs squid-auth > /tmp/test-logs-auth/squid.log 2>&1 || true + cp /tmp/squid-conf-auth/squid.conf /tmp/test-logs-auth/ 2>/dev/null || true + cp /tmp/proxy-response.txt /tmp/test-logs-auth/ 2>/dev/null || true + cp /tmp/proxy-stderr.txt /tmp/test-logs-auth/ 2>/dev/null || true + + - name: Upload test logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: auth-test-logs + path: /tmp/test-logs-auth/ + retention-days: 3 + + - name: Cleanup + if: always() + run: | + docker rm -f squid-auth 2>/dev/null || true + pkill microsocks 2>/dev/null || true + pkill -f 'python3 -m http.server' 2>/dev/null || true + + # ------------------------------------------------------------------ + # 5. 
Test: generate.php produces correct config + # ------------------------------------------------------------------ + test-generate: + name: Test Config Generator + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@v4 + + - name: Install PHP dependencies + run: | + docker run --rm -v "$(pwd)/setup:/app" -w /app composer:2 install --no-interaction --quiet + + - name: Run generate.php with SOCKS proxy list + run: | + cat > proxyList.txt <<'LIST' + 10.0.0.1:1080:socks5:user1:pass1 + 10.0.0.2:1080:socks5:user2:pass2 + 10.0.0.3:1081:socks4:: + 192.168.1.1:8080 + 10.0.0.4:3128:httpsquid:admin:secret + 10.0.0.5:8080:http:user:pass + LIST + + docker run --rm -v "$(pwd):/app" php:8.2-cli php /app/setup/generate.php + + - name: Verify generated squid.conf has SOCKS peers + run: | + echo "=== Generated squid.conf ===" + cat config/squid.conf + echo "" + + # Check socks5 peers use native SOCKS options + grep -q 'socks5' config/squid.conf || { echo "FAIL: socks5 option not found"; exit 1; } + grep -q 'socks-user=user1' config/squid.conf || { echo "FAIL: socks-user not found"; exit 1; } + grep -q 'socks-pass=pass1' config/squid.conf || { echo "FAIL: socks-pass not found"; exit 1; } + grep -q 'originserver' config/squid.conf || { echo "FAIL: originserver not found"; exit 1; } + + # Check socks4 peer + grep -q 'socks4' config/squid.conf || { echo "FAIL: socks4 option not found"; exit 1; } + + # Check open proxy (no socks, no gost) + grep -q 'name=public' config/squid.conf || { echo "FAIL: open proxy not found"; exit 1; } + + # Check httpsquid peer + grep -q 'name=private' config/squid.conf || { echo "FAIL: httpsquid peer not found"; exit 1; } + + echo "--- squid.conf generation OK ---" + + - name: Verify generated docker-compose.yml + run: | + echo "=== Generated docker-compose.yml ===" + cat docker-compose.yml + echo "" + + # Gost container should only exist for http type (not for socks4/socks5) + # We have 1 http proxy -> 1 gost container + 
GOST_COUNT=$(grep -c 'ginuerzh/gost' docker-compose.yml || true) + echo "Gost containers: ${GOST_COUNT}" + [ "${GOST_COUNT}" -eq 1 ] || { echo "FAIL: expected 1 gost container, got ${GOST_COUNT}"; exit 1; } + + echo "--- docker-compose.yml generation OK ---" + + - name: Verify no Gost for SOCKS proxies + run: | + # socks5/socks4 should NOT create gost containers + # Only 'http' type should use gost + if grep -q 'dockergost_1\|dockergost_2\|dockergost_3' docker-compose.yml; then + echo "FAIL: SOCKS proxies should not create Gost containers" + exit 1 + fi + echo "--- No Gost for SOCKS proxies OK ---" diff --git a/setup/generate.php b/setup/generate.php index 95a9885..1af1a9b 100644 --- a/setup/generate.php +++ b/setup/generate.php @@ -15,6 +15,10 @@ $keys = ['host', 'port', 'scheme', 'user', 'pass']; $squid_default = 'cache_peer %s parent %d 0 no-digest no-netdb-exchange connect-fail-limit=2 connect-timeout=8 round-robin no-query allow-miss proxy-only name=%s'; +// SOCKS cache_peer template: uses originserver because after SOCKS tunnel +// the connection is direct to the target (not an HTTP proxy). +$squid_socks = 'cache_peer %s parent %d 0 no-digest no-netdb-exchange connect-fail-limit=2 connect-timeout=8 round-robin no-query allow-miss proxy-only originserver name=%s %s'; + while ($line = fgets($proxies)){ $line = trim($line); $proxyInfo = array_combine($keys, array_pad((explode(":", $line, 5)), 5, '')); @@ -36,8 +40,26 @@ //Username:Password Auth $squid_conf[] = vsprintf('login=%s:%s', array_map('urlencode', [$proxyInfo['user'], $proxyInfo['pass']])); } - }else{ - //other proxy type ex:socks + } + elseif(in_array($proxyInfo['scheme'], ['socks4', 'socks5'], true)){ + // Native SOCKS support via Squid cache_peer patch (no Gost needed) + $socksOpt = $proxyInfo['scheme']; // "socks4" or "socks5" + if ($proxyInfo['user'] && $proxyInfo['pass']) { + // SOCKS5 RFC1929 uses raw username/password; do not URL-encode. 
+ $socksOpt .= sprintf(' socks-user=%s socks-pass=%s', + $proxyInfo['user'], + $proxyInfo['pass'] + ); + } + $squid_conf[] = sprintf($squid_socks, + $proxyInfo['host'], + $proxyInfo['port'], + 'socks'.$i, + $socksOpt + ); + } + else{ + // Other proxy types (http, https, etc.) – use Gost as HTTP proxy bridge if ($proxyInfo['user'] && $proxyInfo['pass']) { $cred = vsprintf('%s:%s@', array_map('urlencode', [$proxyInfo['user'], $proxyInfo['pass']])); } @@ -147,4 +169,4 @@ function isArm64() { } return false; -} \ No newline at end of file +} diff --git a/squid_patch/.dockerignore b/squid_patch/.dockerignore new file mode 100644 index 0000000..2866966 --- /dev/null +++ b/squid_patch/.dockerignore @@ -0,0 +1,2 @@ +.dockerignore +*.md diff --git a/squid_patch/Dockerfile b/squid_patch/Dockerfile new file mode 100644 index 0000000..d14cb4e --- /dev/null +++ b/squid_patch/Dockerfile @@ -0,0 +1,100 @@ +# ========================================================================== +# Custom Squid build with SOCKS4/SOCKS5 cache_peer support +# +# Pinned version: Squid 6.10 +# Reference: https://wiki.squid-cache.org/Features/Socks +# ========================================================================== + +ARG SQUID_VERSION=6.10 + +# ---------- stage 1: build ------------------------------------------------ +FROM debian:bookworm-slim AS builder + +ARG SQUID_VERSION +ENV SQUID_VERSION=${SQUID_VERSION} + +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + wget \ + ca-certificates \ + pkg-config \ + autoconf \ + automake \ + libtool \ + libssl-dev \ + libcap-dev \ + libexpat1-dev \ + libltdl-dev \ + python3 \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /build + +# Download and extract Squid source (pinned version + integrity check) +ARG SQUID_TAR_SHA256=0b07b187e723f04770dd25beb89aec12030a158696aa8892d87c8b26853408a7 +RUN wget -q "https://www.squid-cache.org/Versions/v6/squid-${SQUID_VERSION}.tar.xz" \ + -O squid.tar.xz \ + && echo 
"${SQUID_TAR_SHA256} squid.tar.xz" | sha256sum -c - \ + && tar xf squid.tar.xz \ + && rm squid.tar.xz + +# Copy patch sources and apply +COPY src/ /patches/src/ +COPY patch_apply.sh /patches/ + +RUN chmod +x /patches/patch_apply.sh \ + && bash /patches/patch_apply.sh /patches/src "/build/squid-${SQUID_VERSION}" + +# Configure and build +RUN cd "squid-${SQUID_VERSION}" \ + && ./configure \ + --prefix=/usr \ + --sysconfdir=/etc/squid \ + --localstatedir=/var \ + --libexecdir=/usr/lib/squid \ + --datadir=/usr/share/squid \ + --with-openssl \ + --enable-ssl-crtd \ + --enable-delay-pools \ + --enable-removal-policies="lru,heap" \ + --enable-cache-digests \ + --enable-follow-x-forwarded-for \ + --disable-arch-native \ + --with-large-files \ + --with-default-user=squid \ + --disable-strict-error-checking \ + && make -j"$(nproc)" \ + && make install DESTDIR=/install \ + && rm -rf /install/var/run + +# ---------- stage 2: runtime ---------------------------------------------- +FROM debian:bookworm-slim + +ARG SQUID_VERSION=6.10 +LABEL maintainer="docker-rotating-proxy" +LABEL description="Squid ${SQUID_VERSION} with SOCKS4/SOCKS5 cache_peer support" + +RUN apt-get update && apt-get install -y --no-install-recommends \ + libssl3 \ + libcap2 \ + libexpat1 \ + libltdl7 \ + ca-certificates \ + curl \ + gosu \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /install/ / + +RUN useradd -r -M -d /var/cache/squid -s /sbin/nologin squid \ + && mkdir -p /var/cache/squid /var/log/squid /var/run/squid /etc/squid/conf.d \ + && chown -R squid:squid /var/cache/squid /var/log/squid /var/run/squid /etc/squid/conf.d + +COPY docker-entrypoint.sh /usr/local/bin/ +RUN chmod +x /usr/local/bin/docker-entrypoint.sh + +EXPOSE 3128 + +# Starts as root so docker-entrypoint.sh can run squid -z (cache init) and chown; +# privileges are dropped to squid user via gosu before starting the daemon. 
+ENTRYPOINT ["docker-entrypoint.sh"] diff --git a/squid_patch/docker-entrypoint.sh b/squid_patch/docker-entrypoint.sh new file mode 100755 index 0000000..82e1e35 --- /dev/null +++ b/squid_patch/docker-entrypoint.sh @@ -0,0 +1,31 @@ +#!/bin/sh +set -e + +SQUID_CONFIG_FILE="${SQUID_CONFIG_FILE:-/etc/squid/squid.conf}" + +# Initialize cache directory if needed +if [ ! -d /var/cache/squid/00 ]; then + echo "Initializing Squid cache..." + squid -z -N -f "${SQUID_CONFIG_FILE}" 2>&1 || echo "Warning: squid -z failed (may be OK if cache is unused)" +fi + +# Ensure proper ownership +chown -R squid:squid /var/cache/squid /var/log/squid /var/run/squid 2>/dev/null || true + +# Remove stale PID file left by squid -z (created as root) +rm -f /var/run/squid.pid /var/run/squid/squid.pid 2>/dev/null || true + +# Support both direct squid invocation and arbitrary commands. +# If the first argument is "squid", drop it to avoid "squid squid ..." duplication. +if [ "$#" -gt 0 ] && [ "$1" = "squid" ]; then + shift +fi + +# If there are no arguments, or the first arg starts with "-", treat them as squid options. +if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then + echo "Starting Squid with config: ${SQUID_CONFIG_FILE}" + exec gosu squid squid -N -f "${SQUID_CONFIG_FILE}" "$@" +fi + +# Otherwise, run the provided command as-is (e.g., a shell or another tool). +exec "$@" diff --git a/squid_patch/patch_apply.sh b/squid_patch/patch_apply.sh new file mode 100755 index 0000000..aecaf95 --- /dev/null +++ b/squid_patch/patch_apply.sh @@ -0,0 +1,349 @@ +#!/bin/bash +# +# patch_apply.sh - Apply SOCKS cache_peer support patches to Squid source +# +# Usage: patch_apply.sh +# +# Modifies the Squid 6.x source tree to add SOCKS4/SOCKS5 support for +# cache_peer directives. Uses pattern-based modifications (sed + Python) +# so that the script is tolerant of minor changes across 6.x point releases. 
+# +set -euo pipefail + +PATCH_SRC="${1:?Usage: $0 }" +SQUID_SRC="${2:?Usage: $0 }" + +die() { echo "PATCH ERROR: $*" >&2; exit 1; } + +echo "==> Copying SocksPeerConnector.h into ${SQUID_SRC}/src/" +cp "${PATCH_SRC}/SocksPeerConnector.h" "${SQUID_SRC}/src/SocksPeerConnector.h" \ + || die "Failed to copy SocksPeerConnector.h" + +# --------------------------------------------------------------------------- +# 1. CachePeer.h – add socks_type / socks_user / socks_pass fields +# --------------------------------------------------------------------------- +CACHE_PEER_H="${SQUID_SRC}/src/CachePeer.h" +echo "==> Patching ${CACHE_PEER_H}" +[ -f "${CACHE_PEER_H}" ] || die "CachePeer.h not found" + +grep -q 'class CachePeer' "${CACHE_PEER_H}" || die "CachePeer class not found" + +# Do NOT include SocksPeerConnector.h here – it pulls in POSIX headers +# that must come after squid.h. Use plain int/char* for the fields. +if ! grep -q 'socks_type' "${CACHE_PEER_H}"; then + if grep -q '} options;' "${CACHE_PEER_H}" 2>/dev/null; then + sed -i '/} options;/a\ +\ + /* SOCKS proxy support for cache_peer (0=none, 4=SOCKS4, 5=SOCKS5) */\ + int socks_type = 0;\ + char *socks_user = nullptr;\ + char *socks_pass = nullptr;' "${CACHE_PEER_H}" + else + sed -i '/^};/i\ +\ + /* SOCKS proxy support for cache_peer (0=none, 4=SOCKS4, 5=SOCKS5) */\ + int socks_type = 0;\ + char *socks_user = nullptr;\ + char *socks_pass = nullptr;\ +' "${CACHE_PEER_H}" + fi +fi + +echo " CachePeer.h patched OK" + +# --------------------------------------------------------------------------- +# 1b. CachePeer.cc – free socks_user / socks_pass in destructor +# --------------------------------------------------------------------------- +CACHE_PEER_CC="${SQUID_SRC}/src/CachePeer.cc" +echo "==> Patching ${CACHE_PEER_CC}" +[ -f "${CACHE_PEER_CC}" ] || die "CachePeer.cc not found" + +if ! 
grep -q 'socks_user' "${CACHE_PEER_CC}"; then + # Insert xfree calls next to existing xfree(login) in the destructor + sed -i '/xfree(login);/a\ +\ + xfree(socks_user);\ + xfree(socks_pass);' "${CACHE_PEER_CC}" + grep -q 'socks_user' "${CACHE_PEER_CC}" || die "Failed to patch CachePeer.cc destructor" +fi + +echo " CachePeer.cc patched OK" + +# --------------------------------------------------------------------------- +# 2. cache_cf.cc – parse socks4 / socks5 / socks-user= / socks-pass= +# --------------------------------------------------------------------------- +CACHE_CF="${SQUID_SRC}/src/cache_cf.cc" +echo "==> Patching ${CACHE_CF}" +[ -f "${CACHE_CF}" ] || die "cache_cf.cc not found" + +if ! grep -q 'socks_type' "${CACHE_CF}"; then + ANCHOR="" + for pattern in 'proxy-only' 'no-digest' 'no-query' 'round-robin' 'originserver'; do + if grep -q "\"${pattern}\"" "${CACHE_CF}"; then + ANCHOR="${pattern}" + break + fi + done + + [ -n "${ANCHOR}" ] || die "Could not find peer option parsing anchor in cache_cf.cc" + echo " Using anchor: '${ANCHOR}'" + + python3 - "${CACHE_CF}" "${ANCHOR}" << 'PYEOF' +import sys, re + +filepath = sys.argv[1] +anchor = sys.argv[2] + +with open(filepath, 'r') as f: + content = f.read() + +# The code to insert. Starts with " else if" (no leading "}") and closes +# the final branch with "}". The insertion point is right after the "}" +# that closes the anchor's if-block, so " else if" continues the chain. 
+socks_code = ''' else if (!strcmp(token, "socks4")) { + p->socks_type = 4; + } else if (!strcmp(token, "socks5")) { + p->socks_type = 5; + } else if (!strncmp(token, "socks-user=", 11)) { + safe_free(p->socks_user); + p->socks_user = xstrdup(token + 11); + } else if (!strncmp(token, "socks-pass=", 11)) { + safe_free(p->socks_pass); + p->socks_pass = xstrdup(token + 11); + }''' + +# Find the anchor in a strcmp/strncmp context +# Try matching "else if" variant first (most options), then plain "if" (first option) +for pat_template in [ + r'else\s+if\s*\(!(?:strcmp|strncmp)\(token,\s*"' + re.escape(anchor) + r'"', + r'if\s*\(!(?:strcmp|strncmp)\(token,\s*"' + re.escape(anchor) + r'"', +]: + pat = re.compile(pat_template) + match = pat.search(content) + if match: + break + +if not match: + print(f"ERROR: Could not find '{anchor}' in cache_cf.cc", file=sys.stderr) + sys.exit(1) + +# From the match position, find the opening brace and count to the closing brace +idx = match.start() +brace_start = content.find('{', idx) +if brace_start < 0: + print("ERROR: Could not find opening brace", file=sys.stderr) + sys.exit(1) +depth = 1 +pos = brace_start + 1 +while pos < len(content) and depth > 0: + if content[pos] == '{': depth += 1 + elif content[pos] == '}': depth -= 1 + pos += 1 +# pos is now right after the closing "}" of the anchor block +content = content[:pos] + socks_code + content[pos:] + +# Also add a post-parse validation: socks4/socks5 requires originserver. +# Options can appear in any order, so we validate after the while loop ends. +# findCachePeerByName is the first check after the option-parsing loop. 
+validation = ''' + /* Validate: SOCKS peers must use originserver */ + if (p->socks_type && !p->options.originserver) + throw TextException(ToSBuf("cache_peer ", *p, ": socks4/socks5 requires the originserver option"), Here()); + + /* Validate: socks-user/socks-pass only valid with socks5 and must be set together */ + if (p->socks_type != 5 && (p->socks_user || p->socks_pass)) + throw TextException(ToSBuf("cache_peer ", *p, ": socks-user/socks-pass options require socks5"), Here()); + if (p->socks_type == 5 && ((!p->socks_user) != (!p->socks_pass))) + throw TextException(ToSBuf("cache_peer ", *p, ": socks-user and socks-pass must both be set or both omitted"), Here()); + +''' +marker = 'findCachePeerByName' +marker_idx = content.find(marker, pos) +if marker_idx > pos: + line_start = content.rfind('\n', 0, marker_idx) + if line_start > 0: + content = content[:line_start] + validation + content[line_start:] + print(" Inserted SOCKS+originserver validation after option parsing loop") +else: + print("ERROR: Could not insert originserver validation", file=sys.stderr) + sys.exit(1) + +with open(filepath, 'w') as f: + f.write(content) +print(f" Inserted SOCKS parsing after '{anchor}' block") +PYEOF +fi + +echo " cache_cf.cc patched OK" + +# --------------------------------------------------------------------------- +# 3. FwdState.cc – SOCKS negotiation at the top of dispatch() +# --------------------------------------------------------------------------- +FWD_STATE="${SQUID_SRC}/src/FwdState.cc" +echo "==> Patching ${FWD_STATE}" +[ -f "${FWD_STATE}" ] || die "FwdState.cc not found" + +# Add include AFTER squid.h (squid.h MUST be the first include in every .cc) +if ! grep -q 'SocksPeerConnector.h' "${FWD_STATE}"; then + sed -i '/#include "squid.h"/a\ +#include "SocksPeerConnector.h"' "${FWD_STATE}" + grep -q 'SocksPeerConnector.h' "${FWD_STATE}" || die "Failed to add include to FwdState.cc" +fi + +if ! 
grep -q 'socks_type' "${FWD_STATE}"; then
+    python3 - "${FWD_STATE}" << 'PYEOF'
+import sys, re
+
+filepath = sys.argv[1]
+
+with open(filepath, 'r') as f:
+    content = f.read()
+
+# Squid 6.10 API:
+# serverConnection() returns Comm::ConnectionPointer const &
+# ->getPeer() returns CachePeer*
+# ->fd is int (public member of Comm::Connection)
+# request->url.host() returns const char*
+# request->url.port() returns unsigned short
+# retryOrBail() is a private method of FwdState
+socks_hook = r'''
+    /* SOCKS peer negotiation: after TCP connect, before HTTP dispatch */
+    if (const auto sp = serverConnection()->getPeer()) {
+        if (sp->socks_type) {
+            const auto targetPort = static_cast<uint16_t>(request->url.port());
+            debugs(17, 3, "SOCKS" << sp->socks_type
+                << " negotiation with peer " << sp->host
+                << " for " << request->url.host() << ":" << targetPort);
+            if (!SocksPeerConnector::negotiate(
+                    serverConnection()->fd,
+                    static_cast<SocksPeerType>(sp->socks_type),
+                    std::string(request->url.host()),
+                    targetPort,
+                    sp->socks_user ? std::string(sp->socks_user) : std::string(),
+                    sp->socks_pass ?
std::string(sp->socks_pass) : std::string())) { + debugs(17, 2, "SOCKS negotiation FAILED for peer " << sp->host); + retryOrBail(); + return; + } + debugs(17, 3, "SOCKS negotiation OK for peer " << sp->host); + } + } + +''' + +inserted = False + +for pat in [ + r'(void\s+FwdState::dispatch\s*\(\s*\)\s*\{)', + r'(FwdState::dispatch\s*\(\s*\)\s*\n?\s*\{)', +]: + match = re.search(pat, content) + if match: + insert_pos = match.end() + content = content[:insert_pos] + socks_hook + content[insert_pos:] + inserted = True + print(" Inserted SOCKS hook at top of FwdState::dispatch()") + break + +if not inserted: + print("ERROR: Could not find dispatch() insertion point in FwdState.cc", file=sys.stderr) + print(" SOCKS support for HTTP requests will not work", file=sys.stderr) + sys.exit(1) + +with open(filepath, 'w') as f: + f.write(content) +PYEOF +fi + +echo " FwdState.cc patched OK" + +# --------------------------------------------------------------------------- +# 4. tunnel.cc – SOCKS negotiation in connectDone() for CONNECT/HTTPS +# --------------------------------------------------------------------------- +TUNNEL_CC="${SQUID_SRC}/src/tunnel.cc" +echo "==> Patching ${TUNNEL_CC}" +[ -f "${TUNNEL_CC}" ] || die "tunnel.cc not found" + +# Add include AFTER squid.h +if ! grep -q 'SocksPeerConnector.h' "${TUNNEL_CC}"; then + sed -i '/#include "squid.h"/a\ +#include "SocksPeerConnector.h"' "${TUNNEL_CC}" + grep -q 'SocksPeerConnector.h' "${TUNNEL_CC}" || die "Failed to add include to tunnel.cc" +fi + +if ! grep -q 'socks_type' "${TUNNEL_CC}"; then + python3 - "${TUNNEL_CC}" << 'PYEOF' +import sys, re + +filepath = sys.argv[1] + +with open(filepath, 'r') as f: + content = f.read() + +# tunnel.cc API (Squid 6.10): +# TunnelStateData has: server.conn, request (HttpRequestPointer) +# connectDone(const Comm::ConnectionPointer &conn, ...) 
- after TCP connect
+# conn->getPeer() returns CachePeer*
+# conn->fd is int
+# request->url.host() returns const char*
+socks_tunnel_hook = r'''
+    /* SOCKS peer: negotiate tunnel right after TCP connect */
+    if (conn->getPeer() && conn->getPeer()->socks_type) {
+        const auto sp = conn->getPeer();
+        const auto targetPort = static_cast<uint16_t>(request->url.port());
+        debugs(26, 3, "SOCKS" << sp->socks_type
+            << " tunnel negotiation with peer " << sp->host
+            << " for " << request->url.host() << ":" << targetPort);
+        if (!SocksPeerConnector::negotiate(
+                conn->fd,
+                static_cast<SocksPeerType>(sp->socks_type),
+                std::string(request->url.host()),
+                targetPort,
+                sp->socks_user ? std::string(sp->socks_user) : std::string(),
+                sp->socks_pass ? std::string(sp->socks_pass) : std::string())) {
+            debugs(26, 2, "SOCKS tunnel negotiation FAILED for " << sp->host);
+            saveError(new ErrorState(ERR_CONNECT_FAIL, Http::scBadGateway, request.getRaw(), al));
+            retryOrBail("SOCKS negotiation failed");
+            return;
+        }
+        debugs(26, 3, "SOCKS tunnel negotiation OK for " << sp->host);
+    }
+
+'''
+
+inserted = False
+
+for pat in [
+    r'(void\s+TunnelStateData::connectDone\s*\([^)]*\)\s*\{)',
+    r'(TunnelStateData::connectDone\s*\([^)]*\)\s*\n?\s*\{)',
+    r'(void\s+tunnelConnectDone\s*\([^)]*\)\s*\{)',
+]:
+    match = re.search(pat, content)
+    if match:
+        insert_pos = match.end()
+        content = content[:insert_pos] + socks_tunnel_hook + content[insert_pos:]
+        inserted = True
+        print(f" Inserted SOCKS tunnel hook in {match.group(0).strip()[:70]}...")
+        break
+
+if not inserted:
+    print("ERROR: Could not patch tunnel.cc - HTTPS tunneling through SOCKS peers will not work", file=sys.stderr)
+    sys.exit(1)
+
+with open(filepath, 'w') as f:
+    f.write(content)
+PYEOF
+fi
+
+echo " tunnel.cc patched OK"
+
+echo ""
+echo "==> All patches applied successfully"
+echo ""
+echo "Modified files:"
+echo " - src/CachePeer.h (added socks_type/user/pass fields)"
+echo " - src/CachePeer.cc (added socks_user/pass cleanup in destructor)"
+echo 
" - src/cache_cf.cc (added socks4/socks5 option parsing)"
+echo " - src/FwdState.cc (SOCKS negotiation in dispatch())"
+echo " - src/tunnel.cc (SOCKS negotiation in connectDone())"
+echo " - src/SocksPeerConnector.h (new: SOCKS4/5 protocol implementation)"
diff --git a/squid_patch/src/SocksPeerConnector.h b/squid_patch/src/SocksPeerConnector.h
new file mode 100644
index 0000000..581e097
--- /dev/null
+++ b/squid_patch/src/SocksPeerConnector.h
@@ -0,0 +1,324 @@
+/*
+ * SocksPeerConnector.h - SOCKS4/SOCKS5 negotiation for Squid cache_peer
+ *
+ * Performs synchronous SOCKS handshake on an established TCP connection.
+ * After negotiation, the connection acts as a direct tunnel to the target.
+ *
+ * Reference: https://wiki.squid-cache.org/Features/Socks
+ * SOCKS4: RFC 1928 predecessor (de facto standard)
+ * SOCKS4a: Extension for hostname resolution by proxy
+ * SOCKS5: RFC 1928 + RFC 1929 (username/password auth)
+ */
+
+#ifndef SQUID_SRC_SOCKS_PEER_CONNECTOR_H
+#define SQUID_SRC_SOCKS_PEER_CONNECTOR_H
+
+#include <string>
+#include <cstring>
+#include <cstdint>
+#include <cerrno>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+enum SocksPeerType {
+    SOCKS_NONE = 0,
+    SOCKS_V4 = 4,
+    SOCKS_V5 = 5
+};
+
+namespace SocksPeerConnector {
+
+/* ---- low-level helpers ------------------------------------------------ */
+
+static inline bool syncSend(int fd, const void *buf, size_t len)
+{
+    const char *p = static_cast<const char *>(buf);
+    size_t sent = 0;
+    while (sent < len) {
+        ssize_t n = ::send(fd, p + sent, len - sent, MSG_NOSIGNAL);
+        if (n < 0) {
+            if (errno == EINTR) continue;
+            return false;
+        }
+        if (n == 0) return false;
+        sent += static_cast<size_t>(n);
+    }
+    return true;
+}
+
+static inline bool syncRecv(int fd, void *buf, size_t len)
+{
+    char *p = static_cast<char *>(buf);
+    size_t got = 0;
+    while (got < len) {
+        ssize_t n = ::recv(fd, p + got, len - got, 0);
+        if (n < 0) {
+            if (errno == EINTR) continue;
+            return false;
+        }
+        if (n == 0) return false;
+        got += static_cast<size_t>(n);
+    }
+    return true;
+}
+
+/* ---- SOCKS4 / 
SOCKS4a ------------------------------------------------ */
+
+static inline bool socks4Connect(int fd,
+                                 const std::string &host, uint16_t port,
+                                 const std::string &user)
+{
+    struct in_addr addr;
+    bool useSocks4a = (inet_pton(AF_INET, host.c_str(), &addr) != 1);
+
+    if (useSocks4a) {
+        /* SOCKS4a: set IP to 0.0.0.x (x != 0) and append hostname */
+        addr.s_addr = htonl(0x00000001);
+    }
+
+    /* Bounds check: 8 (header) + userid + 1 (null) + hostname + 1 (null) */
+    const size_t needed = 8 + user.size() + 1 + (useSocks4a ? host.size() + 1 : 0);
+    uint8_t req[600];
+    if (needed > sizeof(req))
+        return false;
+
+    size_t pos = 0;
+
+    req[pos++] = 0x04; /* VN = 4 */
+    req[pos++] = 0x01; /* CD = CONNECT */
+    req[pos++] = static_cast<uint8_t>((port >> 8) & 0xFF);
+    req[pos++] = static_cast<uint8_t>(port & 0xFF);
+    std::memcpy(req + pos, &addr.s_addr, 4); /* DSTIP */
+    pos += 4;
+
+    /* USERID */
+    if (!user.empty()) {
+        std::memcpy(req + pos, user.c_str(), user.length());
+        pos += user.length();
+    }
+    req[pos++] = 0x00; /* NULL terminator */
+
+    /* SOCKS4a hostname */
+    if (useSocks4a) {
+        std::memcpy(req + pos, host.c_str(), host.length());
+        pos += host.length();
+        req[pos++] = 0x00;
+    }
+
+    if (!syncSend(fd, req, pos))
+        return false;
+
+    uint8_t resp[8];
+    if (!syncRecv(fd, resp, 8))
+        return false;
+
+    return (resp[1] == 0x5A); /* 0x5A = granted */
+}
+
+/* ---- SOCKS5 (RFC 1928 + RFC 1929) ----------------------------------- */
+
+static inline bool socks5Connect(int fd,
+                                 const std::string &host, uint16_t port,
+                                 const std::string &user,
+                                 const std::string &pass)
+{
+    const bool hasAuth = (!user.empty() && !pass.empty());
+
+    /* --- greeting ---------------------------------------------------- */
+    uint8_t greeting[4];
+    size_t gLen;
+    if (hasAuth) {
+        greeting[0] = 0x05; /* VER */
+        greeting[1] = 0x02; /* NMETHODS */
+        greeting[2] = 0x00; /* NO AUTHENTICATION */
+        greeting[3] = 0x02; /* USERNAME / PASSWORD */
+        gLen = 4;
+    } else {
+        greeting[0] = 0x05;
+        greeting[1] = 0x01;
+        
greeting[2] = 0x00;
+        gLen = 3;
+    }
+
+    if (!syncSend(fd, greeting, gLen))
+        return false;
+
+    uint8_t gResp[2];
+    if (!syncRecv(fd, gResp, 2))
+        return false;
+
+    if (gResp[0] != 0x05)
+        return false;
+
+    /* --- authentication (RFC 1929) ----------------------------------- */
+    if (gResp[1] == 0x02) {
+        if (!hasAuth)
+            return false;
+
+        /* RFC 1929: username and password are each max 255 bytes */
+        if (user.length() > 255 || pass.length() > 255)
+            return false;
+
+        uint8_t auth[515];
+        size_t aPos = 0;
+        auth[aPos++] = 0x01; /* sub-negotiation VER */
+        auth[aPos++] = static_cast<uint8_t>(user.length());
+        std::memcpy(auth + aPos, user.c_str(), user.length());
+        aPos += user.length();
+        auth[aPos++] = static_cast<uint8_t>(pass.length());
+        std::memcpy(auth + aPos, pass.c_str(), pass.length());
+        aPos += pass.length();
+
+        if (!syncSend(fd, auth, aPos))
+            return false;
+
+        uint8_t aResp[2];
+        if (!syncRecv(fd, aResp, 2))
+            return false;
+
+        if (aResp[0] != 0x01 || aResp[1] != 0x00)
+            return false; /* auth failed or wrong sub-negotiation version */
+
+    } else if (gResp[1] == 0x00) {
+        /* no auth required */
+    } else {
+        return false; /* unsupported or unacceptable method (includes 0xFF) */
+    }
+
+    /* --- connect request --------------------------------------------- */
+    uint8_t connReq[263];
+    size_t cPos = 0;
+
+    connReq[cPos++] = 0x05; /* VER */
+    connReq[cPos++] = 0x01; /* CMD = CONNECT */
+    connReq[cPos++] = 0x00; /* RSV */
+
+    /* Detect address type: IPv4, IPv6, or domain name */
+    struct in_addr ipv4;
+    struct in6_addr ipv6;
+    if (inet_pton(AF_INET, host.c_str(), &ipv4) == 1) {
+        connReq[cPos++] = 0x01; /* ATYP = IPv4 */
+        std::memcpy(connReq + cPos, &ipv4, sizeof(ipv4));
+        cPos += sizeof(ipv4);
+    } else if (inet_pton(AF_INET6, host.c_str(), &ipv6) == 1) {
+        connReq[cPos++] = 0x04; /* ATYP = IPv6 */
+        std::memcpy(connReq + cPos, &ipv6, sizeof(ipv6));
+        cPos += sizeof(ipv6);
+    } else {
+        if (host.length() > 255)
+            return false;
+        connReq[cPos++] = 0x03; /* ATYP = DOMAINNAME */
+        
connReq[cPos++] = static_cast<uint8_t>(host.length());
+        std::memcpy(connReq + cPos, host.c_str(), host.length());
+        cPos += host.length();
+    }
+
+    connReq[cPos++] = static_cast<uint8_t>((port >> 8) & 0xFF);
+    connReq[cPos++] = static_cast<uint8_t>(port & 0xFF);
+
+    if (!syncSend(fd, connReq, cPos))
+        return false;
+
+    /* --- connect response -------------------------------------------- */
+    uint8_t cResp[4];
+    if (!syncRecv(fd, cResp, 4))
+        return false;
+
+    if (cResp[0] != 0x05 || cResp[1] != 0x00)
+        return false; /* connection failed */
+
+    /* drain the BND.ADDR + BND.PORT */
+    switch (cResp[3]) {
+    case 0x01: { /* IPv4 */
+        uint8_t skip[6]; /* 4 addr + 2 port */
+        if (!syncRecv(fd, skip, 6)) return false;
+        break;
+    }
+    case 0x03: { /* DOMAINNAME */
+        uint8_t dLen;
+        if (!syncRecv(fd, &dLen, 1)) return false;
+        uint8_t skip[258];
+        if (!syncRecv(fd, skip, dLen + 2)) return false;
+        break;
+    }
+    case 0x04: { /* IPv6 */
+        uint8_t skip[18]; /* 16 addr + 2 port */
+        if (!syncRecv(fd, skip, 18)) return false;
+        break;
+    }
+    default:
+        return false;
+    }
+
+    return true;
+}
+
+/* ---- public entry point ---------------------------------------------- */
+
+/**
+ * Perform SOCKS negotiation on an established TCP connection.
+ *
+ * Temporarily switches the socket to blocking mode, performs the
+ * SOCKS handshake (with a 10-second timeout), and restores the
+ * original socket flags.
+ * + * @return true on success; the fd is then a tunnel to targetHost:targetPort + */ +static inline bool negotiate(int fd, SocksPeerType type, + const std::string &targetHost, + uint16_t targetPort, + const std::string &user = "", + const std::string &pass = "") +{ + if (type == SOCKS_NONE) + return true; + + /* save original flags */ + int flags = fcntl(fd, F_GETFL); + if (flags < 0) + return false; + + /* switch to blocking for the handshake */ + if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) < 0) + return false; + + /* save original timeouts and set a 10 s limit for the handshake */ + struct timeval origRecvTv = {0, 0}, origSendTv = {0, 0}; + socklen_t tvLen = sizeof(struct timeval); + if (getsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &origRecvTv, &tvLen) < 0 || + getsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &origSendTv, &tvLen) < 0) { + fcntl(fd, F_SETFL, flags); + return false; + } + + struct timeval tv; + tv.tv_sec = 10; + tv.tv_usec = 0; + if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0 || + setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) < 0) { + fcntl(fd, F_SETFL, flags); + return false; + } + + bool ok = false; + if (type == SOCKS_V4) + ok = socks4Connect(fd, targetHost, targetPort, user); + else if (type == SOCKS_V5) + ok = socks5Connect(fd, targetHost, targetPort, user, pass); + + /* restore original flags and timeouts (best-effort, log-worthy but not fatal) */ + int restoreOk = 0; + restoreOk |= fcntl(fd, F_SETFL, flags); + restoreOk |= setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &origRecvTv, sizeof(origRecvTv)); + restoreOk |= setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &origSendTv, sizeof(origSendTv)); + if (restoreOk < 0 && ok) + return false; /* negotiation succeeded but socket is in bad state */ + + return ok; +} + +} /* namespace SocksPeerConnector */ + +#endif /* SQUID_SRC_SOCKS_PEER_CONNECTOR_H */ diff --git a/template/docker-compose.yml b/template/docker-compose.yml index 5ac2e8e..00f538e 100644 --- a/template/docker-compose.yml +++ 
b/template/docker-compose.yml @@ -3,7 +3,10 @@ services: squid: ports: - 3128:3128 - image: b4tman/squid:5.8 + build: + context: ./squid_patch + dockerfile: Dockerfile + image: squid-socks:6.10 volumes: - './config:/etc/squid/conf.d:ro' container_name: dockersquid_rotate @@ -12,7 +15,7 @@ services: extra_hosts: - "host.docker.internal:host-gateway" healthcheck: - test: [ "CMD-SHELL", "export https_proxy=127.0.0.1:3128 && export http_proxy=127.0.0.1:3128 && wget -q -Y on -O - http://httpbin.org/ip || exit 1" ] + test: [ "CMD-SHELL", "export https_proxy=127.0.0.1:3128 && export http_proxy=127.0.0.1:3128 && curl -sf -o /dev/null http://httpbin.org/ip || exit 1" ] retries: 5 timeout: "10s" start_period: "60s"