From 67698a9291d0e3e49f724aeba12430bc2b7b3861 Mon Sep 17 00:00:00 2001
From: onionjake <113088+onionjake@users.noreply.github.com>
Date: Fri, 29 Aug 2025 11:35:23 -0600
Subject: [PATCH 1/8] Start moving towards Diataxis Framework
This is going to take multiple commits: the large sweeping updates are done almost entirely with AI first, then refined by hand later.
---
CLAUDE.md | 264 +++++++
app/(docs)/dcs/getting-started/page.md | 90 ++-
app/(docs)/dcs/how-to/_meta.json | 17 +
app/(docs)/dcs/how-to/configure-cors.md | 183 +++++
app/(docs)/dcs/how-to/create-buckets.md | 125 ++++
app/(docs)/dcs/how-to/use-rclone.md | 134 ++++
app/(docs)/dcs/reference/_meta.json | 6 +
app/(docs)/dcs/reference/cli-commands.md | 276 ++++++++
app/(docs)/dcs/reference/error-codes.md | 146 ++++
app/(docs)/dcs/reference/limits.md | 165 +++++
app/(docs)/dcs/reference/s3-api.md | 208 ++++++
app/(docs)/dcs/third-party-tools/page.md | 69 +-
app/(docs)/node/how-to/_meta.json | 17 +
.../node/how-to/change-payout-address.md | 230 +++++++
app/(docs)/node/how-to/migrate-node.md | 344 ++++++++++
.../node/how-to/troubleshoot-offline-node.md | 364 ++++++++++
app/(docs)/node/reference/_meta.json | 5 +
app/(docs)/node/reference/configuration.md | 310 +++++++++
.../node/reference/dashboard-metrics.md | 275 ++++++++
.../node/reference/system-requirements.md | 288 ++++++++
app/(docs)/node/tutorials/_meta.json | 9 +
app/(docs)/node/tutorials/setup-first-node.md | 649 ++++++++++++++++++
.../concepts/object-mount-vs-filesystems.md | 175 +++++
.../object-mount/linux/user-guides/page.md | 130 +++-
app/(docs)/object-mount/reference/_meta.json | 5 +
.../object-mount/reference/cli-reference.md | 266 +++++++
.../object-mount/reference/compatibility.md | 318 +++++++++
.../object-mount/reference/configuration.md | 289 ++++++++
app/(docs)/object-mount/tutorials/_meta.json | 9 +
.../tutorials/your-first-mount.md | 299 ++++++++
30 files changed, 5622 insertions(+), 43 deletions(-)
create mode 100644 CLAUDE.md
create mode 100644 app/(docs)/dcs/how-to/_meta.json
create mode 100644 app/(docs)/dcs/how-to/configure-cors.md
create mode 100644 app/(docs)/dcs/how-to/create-buckets.md
create mode 100644 app/(docs)/dcs/how-to/use-rclone.md
create mode 100644 app/(docs)/dcs/reference/_meta.json
create mode 100644 app/(docs)/dcs/reference/cli-commands.md
create mode 100644 app/(docs)/dcs/reference/error-codes.md
create mode 100644 app/(docs)/dcs/reference/limits.md
create mode 100644 app/(docs)/dcs/reference/s3-api.md
create mode 100644 app/(docs)/node/how-to/_meta.json
create mode 100644 app/(docs)/node/how-to/change-payout-address.md
create mode 100644 app/(docs)/node/how-to/migrate-node.md
create mode 100644 app/(docs)/node/how-to/troubleshoot-offline-node.md
create mode 100644 app/(docs)/node/reference/_meta.json
create mode 100644 app/(docs)/node/reference/configuration.md
create mode 100644 app/(docs)/node/reference/dashboard-metrics.md
create mode 100644 app/(docs)/node/reference/system-requirements.md
create mode 100644 app/(docs)/node/tutorials/_meta.json
create mode 100644 app/(docs)/node/tutorials/setup-first-node.md
create mode 100644 app/(docs)/object-mount/concepts/object-mount-vs-filesystems.md
create mode 100644 app/(docs)/object-mount/reference/_meta.json
create mode 100644 app/(docs)/object-mount/reference/cli-reference.md
create mode 100644 app/(docs)/object-mount/reference/compatibility.md
create mode 100644 app/(docs)/object-mount/reference/configuration.md
create mode 100644 app/(docs)/object-mount/tutorials/_meta.json
create mode 100644 app/(docs)/object-mount/tutorials/your-first-mount.md
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 000000000..7c753c00f
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,264 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Development Commands
+
+### Essential Commands
+- `npm install` - Install dependencies
+- `npm run dev` - Start development server on localhost:3000
+- `npm run build` - Build the site for production (outputs to `/dist`)
+- `npm start` - Start production server
+- `npm run lint` - Run ESLint to check for code issues
+- `npm run prettier` - Format code files (JS/TS/CSS/HTML)
+- `npm run mdprettier` - Format Markdown files
+
+### Build Process
+The build process includes a pre-build step that fetches image sizes (`node scripts/fetch-image-sizes.mjs`) before running the Next.js build.
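+
+For reference, the combined build is roughly equivalent to this shell sequence (a sketch assuming the usual package.json prebuild wiring):
+
+```bash
+# Pre-build image-size fetch, then the Next.js production build
+node scripts/fetch-image-sizes.mjs && next build
+```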
+
+## Architecture Overview
+
+This is a **Next.js documentation site** built with:
+
+- **Next.js 15** with App Router (app directory structure)
+- **Markdoc** for parsing and rendering markdown content
+- **Tailwind CSS** for styling
+- **FlexSearch** for client-side search functionality
+- **React components** for interactive elements
+
+### Key Architecture Concepts
+
+#### Directory Structure
+The site follows Next.js App Router conventions where URLs map to directory structure:
+- `app/(docs)/` - Main documentation content
+- `app/(blog)/` - Blog posts
+- All documentation pages must be named `page.md`
+- URL structure mirrors folder structure (e.g., `/app/(docs)/dcs/api/page.md` → `/dcs/api`)
+
+#### Content Management
+- **Markdown with Markdoc**: All content is written in Markdown and processed by Markdoc
+- **Custom Components**: Available through Markdoc tags (see `src/markdoc/tags.js`)
+- **Frontmatter**: Each page requires frontmatter with `title`, `docId`, and `metadata`
+- **Internal Linking**: Use `[](docId:your-doc-id-here)` syntax for cross-references
+- **Navigation**: Auto-generated from `_meta.json` files in directories
+
+#### Styling System
+- **Tailwind CSS** with custom Storj brand colors
+- **Dark Mode**: Implemented with `next-themes` and class-based dark mode
+- **Custom Grid**: Uses CSS Grid with custom templates for sidebar/content/TOC layouts
+- **Typography**: Inter font with custom font sizing scale
+
+#### Search Implementation
+- **Client-side search** powered by FlexSearch
+- **Auto-indexing** of all documentation content
+- **Keyboard shortcut** (⌘K) for quick access
+- Configuration in `src/markdoc/search.mjs`
+
+### Important File Locations
+
+#### Configuration Files
+- `next.config.mjs` - Next.js configuration with Markdoc integration
+- `markdoc.config.json` - Markdoc schema configuration
+- `src/markdoc/config.mjs` - Markdoc tags and nodes
+- `tailwind.config.js` - Tailwind configuration with Storj branding
+
+#### Core Components
+- `src/components/Navigation.jsx` - Main site navigation
+- `src/components/Hero.jsx` - Homepage hero section
+- `src/components/MarkdownLayout.jsx` - Layout for documentation pages
+- `src/components/Search.jsx` - Search functionality
+- `src/markdoc/tags.js` - Custom Markdoc components
+
+#### Content Structure
+- `app/(docs)/` - Documentation content organized by product/feature
+- `src/markdoc/partials/` - Reusable content snippets
+- `public/` - Static assets and installation scripts
+
+## Content Guidelines
+
+### Creating New Pages
+1. Create `page.md` file in appropriate directory
+2. Include required frontmatter:
+ ```markdown
+ ---
+ title: "Page Title"
+ docId: "unique-16-char-id"
+ metadata:
+ title: "Browser Title"
+ description: "Page description for SEO"
+ ---
+ ```
+3. Generate unique docId with `pwgen -1 16` or similar
+
+### Internal Linking
+- Use docId syntax: `[](docId:abc123)`
+- Override link text: `[Custom Text](docId:abc123)`
+- Add fragments: `[](docId:abc123#section)`
+
+### Images
+- Store in Storj's internal "Website Assets" project
+- Use prefix: `https://link.us1.storjshare.io/raw/jua7rls6hkx5556qfcmhrqed2tfa/docs/images`
+
+## Documentation Style Guide (Diataxis Framework)
+
+This site follows the [Diataxis framework](https://diataxis.fr/) for systematic technical documentation. Use this guide to determine the appropriate documentation type and writing approach.
+
+### The Four Documentation Types
+
+Documentation should serve one of four distinct purposes. Use the **Diataxis Compass** to decide:
+
+| **Purpose** | **Skill Acquisition (Learning)** | **Skill Application (Working)** |
+|-------------|-----------------------------------|----------------------------------|
+| **Action-oriented (Doing)** | **Tutorials** | **How-to Guides** |
+| **Knowledge-oriented (Understanding)** | **Explanation** | **Reference** |
+
+### 1. Tutorials (Learning by Doing)
+
+**Purpose**: Help newcomers learn by completing meaningful tasks
+**Example**: "Getting Started with Storj DCS", "Your First Upload"
+
+#### Writing Guidelines:
+- Use first-person plural ("We will create...")
+- Provide step-by-step instructions
+- Show expected results at each step
+- Minimize explanation - focus on actions
+- Ensure instructions work reliably
+- Build confidence through early wins
+
+#### Structure:
+```markdown
+# Tutorial Title
+Brief introduction of what they'll achieve
+
+## What you'll build
+Show the end goal
+
+## Step 1: Setup
+Clear, specific instructions
+
+## Step 2: First action
+Expected output/result
+
+## What's next
+Link to related how-to guides
+```
+
+### 2. How-to Guides (Goal-oriented Solutions)
+
+**Purpose**: Guide competent users through specific tasks or problems
+**Example**: "Configure CORS for Buckets", "Set up Presigned URLs"
+
+#### Writing Guidelines:
+- Use conditional imperatives ("If you want X, do Y")
+- Assume basic competence
+- Focus on the specific goal
+- Address real-world complexity
+- Avoid unnecessary background
+
+#### Structure:
+```markdown
+# How to [achieve specific goal]
+Brief problem/goal statement
+
+## Prerequisites
+What they need to know/have
+
+## Steps
+1. Clear action
+2. Next action
+3. Final step
+
+## Verification
+How to confirm success
+```
+
+### 3. Reference (Information Lookup)
+
+**Purpose**: Provide authoritative technical information
+**Example**: "API Endpoints", "CLI Command Reference", "Error Codes"
+
+#### Writing Guidelines:
+- Be strictly descriptive and neutral
+- Use austere, factual language
+- Organize by system structure
+- Provide complete, accurate information
+- Avoid opinions or explanations
+
+#### Structure:
+```markdown
+# API Reference
+
+## Endpoints
+
+### GET /api/buckets
+**Description**: Retrieves list of buckets
+**Parameters**:
+- `limit` (optional): Number of results
+**Response**: JSON array of bucket objects
+```
+
+### 4. Explanation (Conceptual Understanding)
+
+**Purpose**: Provide context, background, and deeper understanding
+**Example**: "Why Decentralized Storage", "Understanding Access Controls"
+
+#### Writing Guidelines:
+- Discuss the bigger picture
+- Explain design decisions and trade-offs
+- Provide historical context
+- Make connections between concepts
+- Admit opinions and perspectives
+
+#### Structure:
+```markdown
+# Understanding [Concept]
+High-level overview
+
+## Background
+Why this matters
+
+## How it works
+Conceptual explanation
+
+## Design decisions
+Why things are this way
+
+## Related concepts
+Links to other explanations
+```
+
+### Content Organization Guidelines
+
+#### Choose the Right Type
+Ask yourself:
+1. **Action or Knowledge?** (What vs. Why/How)
+2. **Learning or Working?** (Study vs. Apply)
+
+#### Common Storj Examples:
+- **Tutorial**: "Build Your First App with Storj"
+- **How-to**: "Migrate from AWS S3 to Storj DCS"
+- **Reference**: "S3 API Compatibility Matrix"
+- **Explanation**: "Understanding Storj's Decentralized Architecture"
+
+#### Writing Best Practices:
+- Keep content types pure - don't mix tutorial steps with reference information
+- Use consistent language patterns within each type
+- Cross-link between related content of different types
+- Update `_meta.json` files to reflect content organization
+
+## Development Notes
+
+### Static Export
+The site is configured for static export (`output: 'export'`) and builds to `/dist` directory for deployment.
+
+### Image Optimization
+Images are unoptimized (`images: { unoptimized: true }`) due to static export requirements.
+
+### Environment Variables
+- `SITE_URL` and `NEXT_PUBLIC_SITE_URL` set to `https://storj.dev`
+- Analytics via Plausible in production
+
+### Custom Webpack Configuration
+The Next.js config includes custom webpack rules for processing Markdown files and extracting metadata for canonical URLs.
+
+# IMPORTANT: Do not use emojis.
diff --git a/app/(docs)/dcs/getting-started/page.md b/app/(docs)/dcs/getting-started/page.md
index c3f49aefb..7559be57c 100644
--- a/app/(docs)/dcs/getting-started/page.md
+++ b/app/(docs)/dcs/getting-started/page.md
@@ -15,13 +15,27 @@ redirects:
weight: 1
---
-Storj is the leading provider of enterprise-grade, globally distributed cloud object storage.
+Storj is the leading provider of enterprise-grade, globally distributed cloud object storage that delivers default multi-region CDN-like performance with zero-trust security at a [cost that's 80%](docId:59T_2l7c1rvZVhI8p91VX) lower than AWS S3.
-It is a drop-in replacement for any S3-compatible object storage that is just as durable but with 99.95% availability and better global performance from a single upload.
+## What you'll build
-Storj delivers default multi-region CDN-like performance with zero-trust security at a [cost that’s 80%](docId:59T_2l7c1rvZVhI8p91VX) lower than AWS S3.
+In this tutorial, you'll work through your first complete storage workflow with Storj. By the end, you'll have:
-## Before you begin
+- Generated S3-compatible credentials for secure access
+- Installed and configured command-line tools (rclone or AWS CLI)
+- Created your first bucket for file storage
+- Uploaded, downloaded, listed, and deleted files
+- Managed bucket operations including cleanup
+
+**Expected time to complete**: 15-20 minutes
+
+## Prerequisites
+
+Before you begin, you'll need:
+
+- A computer with internet access and terminal/command line access
+- Administrative privileges to install software on your system
+- Basic familiarity with command-line operations
To get started, create an account with Storj. You'll automatically begin a free trial that gives you access to try our storage with your [third-party tool](docId:REPde_t8MJMDaE2BU8RfQ) or project.
@@ -38,11 +52,13 @@ If you already have a Storj account, log in to get started
{% /quick-link %}
{% /quick-links %}
-## Generate S3 compatible credentials
+## Step 1: Generate S3 compatible credentials
{% partial file="s3-credentials.md" /%}
-## Install command-line tools
+**Expected outcome**: You should now have an Access Key ID and Secret Access Key that will allow your applications to authenticate with Storj.
+
+## Step 2: Install command-line tools
Storj works with a variety of command-line tools. Rclone is recommended for its compatibility with various cloud providers and ease of use.
@@ -121,7 +137,9 @@ However, some may already be familiar with AWS CLI which is also a suitable opti
{% /tab %}
{% /tabs %}
-## Create a bucket
+**Expected outcome**: Your command-line tool should now be configured to authenticate with Storj using your credentials.
+
+## Step 3: Create a bucket
Now that the command-line tool is configured, let's make a bucket to store our files.
@@ -140,7 +158,9 @@ aws s3 --endpoint-url=https://gateway.storjshare.io mb s3://my-bucket
{% /code-group %}
-## List buckets
+**Expected outcome**: You've successfully created a bucket named "my-bucket" that's ready to store files.
+
+## Step 4: List buckets
The bucket will show up in our bucket list (not to be confused with a life's to-do list).
@@ -164,7 +184,9 @@ aws s3 --endpoint-url=https://gateway.storjshare.io ls s3://
{% /code-group %}
-## Upload file
+**Expected outcome**: You should see "my-bucket/" listed, confirming your bucket was created successfully.
+
+## Step 5: Upload file
Next we'll upload a file. Here is an example image of a tree growing hard drives (while Storj doesn't grow hard drives on trees, it does emphasize [sustainability](https://www.storj.io/benefits/sustainability)). Right-click on it and save as `storj-tree.png` to your Downloads.
@@ -197,7 +219,9 @@ upload: Downloads/storj-tree.png to s3://my-bucket/storj-tree.png
{% /tabs %}
-## Download file
+**Expected outcome**: The file has been successfully uploaded to your bucket. You should see output confirming the upload completed.
+
+## Step 6: Download file
To retrieve the file, use the same command as upload but reverse the order of the arguments
@@ -223,7 +247,9 @@ aws s3 --endpoint-url=https://gateway.storjshare.io cp s3://my-bucket/ ~/Downloa
{% /tabs %}
-## List files
+**Expected outcome**: The file should be downloaded to your local machine. Check your Downloads folder to confirm the file was retrieved successfully.
+
+## Step 7: List files
Let's see what files we have in the bucket.
@@ -247,9 +273,9 @@ aws s3 --endpoint-url=https://gateway.storjshare.io ls s3://my-bucket
{% /code-group %}
-Yep there's the Storj tree!
+**Expected outcome**: You should see your uploaded file listed with its size, confirming it's stored in your bucket.
-## Delete file
+## Step 8: Delete file
Okay, time to remove the file.
@@ -272,7 +298,9 @@ delete: s3://my-bucket/storj-tree.png
{% /code-group %}
-## Delete buckets
+**Expected outcome**: The file should be removed from your bucket. You can verify this by listing files again - the bucket should now be empty.
+
+## Step 9: Delete buckets
Last but not least, we'll delete the bucket.
@@ -317,6 +345,36 @@ remove_bucket: my-bucket
{% /code-group %}
-## Next Steps
+**Expected outcome**: Your bucket should be completely removed from your account. Running the list buckets command again should show no buckets.
+
+## What you've accomplished
+
+Congratulations! You've successfully completed your first Storj workflow. You now know how to:
+
+- Generate secure S3-compatible credentials
+- Set up command-line tools for Storj access
+- Create and manage buckets
+- Upload, download, and organize files
+- Clean up resources when finished
+
+## What's next
+
+Now that you understand the basics, you can explore more advanced Storj capabilities:
+
+### Integrate with Applications
+- [Use Storj with third-party tools](docId:REPde_t8MJMDaE2BU8RfQ) - Connect existing tools like Duplicati, Nextcloud, and more
+- Build applications using SDKs for your preferred programming language
+
+### Advanced Storage Operations
+- Set up bucket versioning for file history
+- Configure CORS for web applications
+- Use presigned URLs for secure direct uploads
+- Optimize upload performance for large files
+
+### Production Deployment
+- Learn about Storj's multi-region architecture
+- Understand pricing and billing
+- Set up monitoring and logging
+- Plan your migration from other storage providers
-Congratulations on getting started with Storj!
+Ready to dive deeper? Check out our [third-party integrations](docId:REPde_t8MJMDaE2BU8RfQ) to see Storj working with popular tools and applications.
diff --git a/app/(docs)/dcs/how-to/_meta.json b/app/(docs)/dcs/how-to/_meta.json
new file mode 100644
index 000000000..1a7157edd
--- /dev/null
+++ b/app/(docs)/dcs/how-to/_meta.json
@@ -0,0 +1,17 @@
+{
+ "title": "How-to Guides",
+ "nav": [
+ {
+ "title": "Create buckets",
+ "id": "create-buckets"
+ },
+ {
+ "title": "Use Rclone",
+ "id": "use-rclone"
+ },
+ {
+ "title": "Configure CORS",
+ "id": "configure-cors"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/app/(docs)/dcs/how-to/configure-cors.md b/app/(docs)/dcs/how-to/configure-cors.md
new file mode 100644
index 000000000..788d6efe8
--- /dev/null
+++ b/app/(docs)/dcs/how-to/configure-cors.md
@@ -0,0 +1,183 @@
+---
+title: How to configure CORS for web applications
+docId: configure-cors-how-to
+metadata:
+ title: How to Configure CORS for Storj Web Applications
+ description: Step-by-step guide to understand and work with Storj's CORS policy for secure web application development.
+---
+
+This guide explains how to work with Cross-Origin Resource Sharing (CORS) when building web applications that access Storj storage.
+
+## Prerequisites
+
+Before configuring CORS for your application, ensure you have:
+
+- A web application that needs to access Storj storage from a browser
+- Basic understanding of CORS and web security concepts
+- Storj S3-compatible credentials configured
+
+## Understanding Storj's CORS policy
+
+Storj's S3-compatible API includes a permissive CORS policy by default:
+
+- **Access-Control-Allow-Origin**: `*` (allows access from any domain)
+- **Automatic configuration**: No manual CORS setup required
+- **Immediate access**: Your web applications can access Storj resources directly
+
+This eliminates the need for proxy servers or backend-only access patterns common with other storage providers.
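+
+One quick way to observe this policy from a terminal is to send a browser-style preflight and inspect the response headers (the exact headers returned may vary):
+
+```shell
+# Ask the gateway how it answers a cross-origin preflight
+curl -sI -X OPTIONS -H "Origin: https://example.com" \
+  https://gateway.storjshare.io | grep -i access-control
+```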
+
+## Secure your application access
+
+While Storj's permissive CORS policy simplifies development, follow these security best practices:
+
+### 1. Use restricted access keys
+
+Create access keys with minimal required permissions:
+
+```shell
+# Create restricted access key for web app
+uplink access restrict \
+ --readonly \
+ --bucket=my-web-app-bucket \
+ --path-prefix=public/ \
+ my-main-access
+```
+
+### 2. Implement client-side validation
+
+Add validation in your web application:
+
+```javascript
+// Example: Validate file types before upload
+function validateFile(file) {
+ const allowedTypes = ['image/jpeg', 'image/png', 'image/gif'];
+ if (!allowedTypes.includes(file.type)) {
+ throw new Error('File type not allowed');
+ }
+}
+```
+
+### 3. Use presigned URLs for sensitive operations
+
+Generate time-limited URLs for uploads:
+
+```javascript
+// Request presigned URL from your backend
+const response = await fetch('/api/presigned-url', {
+ method: 'POST',
+ body: JSON.stringify({ filename: 'user-upload.jpg' })
+});
+const { uploadUrl } = await response.json();
+
+// Use presigned URL for direct upload
+await fetch(uploadUrl, {
+ method: 'PUT',
+ body: file
+});
+```
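+
+On the backend, one simple way to mint such a URL is the AWS CLI's `presign` command (it issues download URLs; upload URLs require an SDK call). The profile name is a placeholder:
+
+```shell
+# Presigned GET URL valid for one hour
+aws --profile storj --endpoint-url=https://gateway.storjshare.io \
+  s3 presign s3://my-web-app-bucket/user-upload.jpg --expires-in 3600
+```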
+
+## Test CORS access
+
+Verify your web application can access Storj storage:
+
+### 1. Create a test HTML page
+
+A minimal page along these lines issues a cross-origin request from the browser (the bucket and object names are placeholders; point them at something your credentials can read):
+
+```html
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>Storj CORS Test</title>
+  </head>
+  <body>
+    <script>
+      // Placeholder path; use an object that is actually readable
+      fetch('https://gateway.storjshare.io/my-bucket/test-object')
+        .then((response) => console.log('CORS request status:', response.status))
+        .catch((error) => console.error('CORS request failed:', error));
+    </script>
+  </body>
+</html>
+```
+
+### 2. Check browser developer tools
+
+1. Open the page in your browser
+2. Open Developer Tools (F12)
+3. Check the Console tab for any CORS errors
+4. Verify the Network tab shows successful requests
+
+## Handle CORS in different frameworks
+
+### React/Next.js
+```javascript
+// pages/api/upload.js
+export default async function handler(req, res) {
+ // Set CORS headers if needed for your API routes
+ res.setHeader('Access-Control-Allow-Origin', 'https://yourdomain.com');
+
+ // Your Storj integration code
+}
+```
+
+### Vue.js
+```javascript
+// In your component: upload through a presigned URL issued by your backend.
+// The S3 gateway expects AWS-style request signing, not bearer tokens, so the
+// browser should not talk to it with raw credentials. The /api/presigned-url
+// route is the hypothetical backend endpoint from the presigned URL section.
+async uploadFile(file) {
+  const response = await fetch('/api/presigned-url', {
+    method: 'POST',
+    body: JSON.stringify({ filename: file.name }),
+  });
+  const { uploadUrl } = await response.json();
+
+  try {
+    // PUT the raw file body to the time-limited URL
+    await fetch(uploadUrl, { method: 'PUT', body: file });
+    console.log('Upload successful');
+  } catch (error) {
+    console.error('Upload failed:', error);
+  }
+}
+```
+
+## Troubleshooting CORS issues
+
+**"Access blocked by CORS policy"**: This typically indicates an issue with your authorization headers or request format, not Storj's CORS policy.
+
+**Preflight request failures**: Ensure your access tokens are valid and have appropriate permissions.
+
+**Mixed content warnings**: Use HTTPS for your web application when accessing Storj's HTTPS endpoints.
+
+**Network errors in development**: Consider using a local development server (like `http-server` or your framework's dev server) instead of opening HTML files directly.
+
+## Security considerations
+
+**Risk assessment**: The permissive CORS policy means any website can attempt to access your Storj resources if they have credentials.
+
+**Mitigation strategies**:
+- Use read-only access keys for public content
+- Implement server-side validation for sensitive operations
+- Monitor access logs for unusual activity
+- Rotate access keys regularly
+
+**Best practices**:
+- Store sensitive credentials on your backend, not in client-side code
+- Use environment variables for configuration
+- Implement proper authentication and authorization in your application
+
+## Next steps
+
+Once CORS is working correctly:
+
+- [Implement presigned URLs for secure uploads](#)
+- [Set up client-side file validation](#)
+- [Configure bucket policies for web hosting](#)
+- [Optimize web application performance](#)
\ No newline at end of file
diff --git a/app/(docs)/dcs/how-to/create-buckets.md b/app/(docs)/dcs/how-to/create-buckets.md
new file mode 100644
index 000000000..0b6aa12df
--- /dev/null
+++ b/app/(docs)/dcs/how-to/create-buckets.md
@@ -0,0 +1,125 @@
+---
+title: How to create buckets
+docId: create-buckets-how-to
+metadata:
+ title: How to Create Storj Buckets
+ description: Step-by-step guide to create Storj buckets using command-line tools or the Storj Console.
+---
+
+This guide shows you how to create a new bucket in Storj DCS for storing your files and data.
+
+## Prerequisites
+
+Before creating a bucket, ensure you have:
+
+- A Storj account with valid credentials
+- One of the following tools configured:
+ - [Rclone with Storj configuration](docId:AsyYcUJFbO1JI8-Tu8tW3)
+ - [AWS CLI with Storj endpoint](docId:AsyYcUJFbO1JI8-Tu8tW3)
+ - [Uplink CLI installed and configured](docId:hFL-goCWqrQMJPcTN82NB)
+ - Access to the Storj Console web interface
+
+## Create a bucket
+
+Choose your preferred method to create a bucket:
+
+{% tabs %}
+
+{% tab label="rclone" %}
+
+```shell {% title="rclone" %}
+# link[1:6] https://rclone.org/install/
+# link[8:12] https://rclone.org/commands/rclone_mkdir/
+# terminal
+rclone mkdir storj:my-bucket
+```
+
+Replace `my-bucket` with your desired bucket name.
+
+{% /tab %}
+
+{% tab label="aws cli" %}
+
+```shell {% title="aws cli" %}
+# link[1:3] https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
+# terminal
+aws s3 --endpoint-url=https://gateway.storjshare.io mb s3://my-bucket
+```
+
+Replace `my-bucket` with your desired bucket name.
+
+{% /tab %}
+
+{% tab label="uplink" %}
+
+```shell {% title="uplink" %}
+# link[1:6] docId:hFL-goCWqrQMJPcTN82NB
+# link[8:9] docId:F77kaGpjXx7w-JYv2rkhf
+# terminal
+uplink mb sj://my-bucket
+```
+
+Replace `my-bucket` with your desired bucket name.
+
+{% /tab %}
+
+{% tab label="Storj Console" %}
+
+{% partial file="create-bucket.md" /%}
+
+{% /tab %}
+
+{% /tabs %}
+
+## Verify bucket creation
+
+Confirm your bucket was created successfully by listing your buckets:
+
+{% tabs %}
+
+{% tab label="rclone" %}
+
+```shell
+rclone lsf storj:
+```
+
+{% /tab %}
+
+{% tab label="aws cli" %}
+
+```shell
+aws s3 --endpoint-url=https://gateway.storjshare.io ls
+```
+
+{% /tab %}
+
+{% tab label="uplink" %}
+
+```shell
+uplink ls
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+You should see your new bucket listed in the output.
+
+## Bucket naming requirements
+
+When creating buckets, follow these naming conventions:
+
+- Use only lowercase letters, numbers, and hyphens
+- Must be 3-63 characters long
+- Cannot start or end with a hyphen
+- Must be unique within your project
+- Cannot contain periods or underscores
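+
+As a quick illustration of these rules (hypothetical names):
+
+```shell
+uplink mb sj://my-project-data   # valid: lowercase letters, numbers, hyphens
+uplink mb sj://My_Bucket         # invalid: uppercase letter and underscore
+uplink mb sj://ab                # invalid: shorter than 3 characters
+```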
+
+## Next steps
+
+Now that you have a bucket, you can:
+
+- [Upload files to your bucket](#)
+- [Configure bucket settings like CORS](#)
+- [Set up object versioning](#)
+- [Configure bucket lifecycle policies](#)
\ No newline at end of file
diff --git a/app/(docs)/dcs/how-to/use-rclone.md b/app/(docs)/dcs/how-to/use-rclone.md
new file mode 100644
index 000000000..c649548c1
--- /dev/null
+++ b/app/(docs)/dcs/how-to/use-rclone.md
@@ -0,0 +1,134 @@
+---
+title: How to use Rclone with Storj
+docId: use-rclone-how-to
+metadata:
+ title: How to Use Rclone with Storj DCS
+ description: Step-by-step guide to configure and use Rclone with Storj, including choosing between S3-compatible and native integration.
+---
+
+This guide shows you how to set up and use Rclone with Storj DCS, including how to choose the right integration method for your needs.
+
+## Prerequisites
+
+Before using Rclone with Storj, ensure you have:
+
+- A Storj account with S3-compatible credentials
+- Basic familiarity with command-line operations
+- Rclone installed on your system
+
+If you need to set up credentials or install Rclone, follow the [Getting Started guide](docId:AsyYcUJFbO1JI8-Tu8tW3) first.
+
+## Choose your integration method
+
+Storj offers two ways to use Rclone, each with different advantages:
+
+### S3-Compatible Integration
+Best for: Upload-heavy workloads, server applications, bandwidth-limited connections
+
+**Advantages:**
+- Faster upload performance
+- Reduced network bandwidth usage (1GB file = 1GB uploaded)
+- Server-side encryption handled automatically
+- Lower system resource usage
+
+**Trade-offs:**
+- Data passes through Storj gateway servers
+- Relies on Storj's server-side encryption
+
+### Native Integration
+Best for: Download-heavy workloads, maximum security requirements, distributed applications
+
+**Advantages:**
+- End-to-end client-side encryption
+- Faster download performance
+- Direct connection to storage nodes
+- Maximum privacy and security
+
+**Trade-offs:**
+- Higher upload bandwidth usage (1GB file = ~2.7GB uploaded due to erasure coding)
+- More CPU usage for local erasure coding
+
+## Configure S3-compatible integration
+
+If you chose S3-compatible integration, configure Rclone with these settings:
+
+1. Edit your Rclone configuration file:
+
+ ```shell
+ rclone config file
+ ```
+
+2. Add or update your Storj configuration:
+
+ ```ini
+ [storj]
+ type = s3
+ provider = Storj
+ access_key_id = your_access_key
+ secret_access_key = your_secret_key
+ endpoint = gateway.storjshare.io
+ chunk_size = 64Mi
+ disable_checksum = true
+ ```
+
+3. Test your configuration:
+
+ ```shell
+ rclone lsf storj:
+ ```
+
+For complete setup instructions and common commands, see the [S3-compatible Rclone guide](docId:AsyYcUJFbO1JI8-Tu8tW3).
+
+## Configure native integration
+
+If you chose native integration, follow these steps:
+
+1. Set up native Rclone integration with Storj's uplink protocol
+2. Configure client-side encryption settings
+3. Test connectivity to the distributed network
+
+For detailed setup instructions and commands, see the [Native Rclone guide](docId:Mk51zylAE6xmqP7jUYAuX).
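+
+As a sketch, a native remote can also be created non-interactively (the remote name and access grant are placeholders):
+
+```shell
+# Create a native Storj remote backed by an access grant
+rclone config create storj-native storj access_grant your-access-grant
+```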
+
+## Verify your setup
+
+After configuration, verify Rclone works correctly:
+
+1. **List buckets** to confirm connectivity:
+ ```shell
+ rclone lsf storj:
+ ```
+
+2. **Test upload** with a small file:
+ ```shell
+ echo "test content" > test.txt
+ rclone copy test.txt storj:my-test-bucket/
+ ```
+
+3. **Test download** to verify the round trip:
+ ```shell
+ rclone copy storj:my-test-bucket/test.txt ./downloaded-test.txt
+ ```
+
+4. **Clean up** the test file:
+ ```shell
+ rclone delete storj:my-test-bucket/test.txt
+ ```
+
+## Troubleshooting
+
+**Configuration not found**: Run `rclone config file` to locate your configuration file path.
+
+**Access denied errors**: Verify your credentials are correct and have the necessary permissions.
+
+**Slow performance**: For S3-compatible mode, ensure `chunk_size = 64Mi` is set. For native mode, this is expected for uploads due to erasure coding.
+
+**Connection timeouts**: Check your internet connection and firewall settings. Native mode requires access to distributed storage nodes.
+
+## Next steps
+
+Once Rclone is working with Storj:
+
+- [Optimize upload performance for large files](#)
+- [Set up automated sync workflows](#)
+- [Configure Rclone for backup applications](#)
+- [Explore advanced Rclone features](#)
\ No newline at end of file
diff --git a/app/(docs)/dcs/reference/_meta.json b/app/(docs)/dcs/reference/_meta.json
new file mode 100644
index 000000000..4a63982f8
--- /dev/null
+++ b/app/(docs)/dcs/reference/_meta.json
@@ -0,0 +1,6 @@
+{
+ "cli-commands": "CLI Commands",
+ "s3-api": "S3 API",
+ "error-codes": "Error Codes",
+ "limits": "Limits"
+}
\ No newline at end of file
diff --git a/app/(docs)/dcs/reference/cli-commands.md b/app/(docs)/dcs/reference/cli-commands.md
new file mode 100644
index 000000000..686ffb289
--- /dev/null
+++ b/app/(docs)/dcs/reference/cli-commands.md
@@ -0,0 +1,276 @@
+---
+title: "CLI Commands Reference"
+docId: "cli-reference-001"
+metadata:
+ title: "Uplink CLI Commands Reference"
+ description: "Complete reference for all Uplink CLI commands, flags, and usage patterns for managing Storj DCS storage."
+---
+
+Complete reference for the Uplink CLI tool commands and options.
+
+{% callout type="info" %}
+For installation instructions, see [Uplink CLI Installation](docId:TbMdOGCAXNWyPpQmH6EOq).
+{% /callout %}
+
+## Global Flags
+
+| Flag | Description |
+| :-------------------- | :---------------------------------------------- |
+| `--advanced` | if used with `-h`, print advanced flags help |
+| `--config-dir string` | main directory for uplink configuration |
+
+## Core Commands
+
+### uplink access
+
+Manage access grants for secure access to buckets and objects.
+
+#### Subcommands
+
+| Command | Description |
+|---------|-------------|
+| `access create` | Create a new access grant |
+| `access export` | Export an access grant to a string |
+| `access import` | Import an access grant from a string |
+| `access inspect` | Inspect an access grant |
+| `access list` | List stored access grants |
+| `access register` | Register an access grant with a satellite |
+| `access remove` | Remove an access grant |
+| `access restrict` | Create a restricted access grant |
+| `access revoke` | Revoke an access grant |
+| `access use` | Set default access grant |
+
+**Usage Examples:**
+```bash
+uplink access create --name my-access
+uplink access export my-access
+uplink access restrict my-access --readonly
+```
+
+### uplink cp
+
+Copy files between local filesystem and Storj buckets.
+
+**Syntax:**
+```
+uplink cp [source] [destination] [flags]
+```
+
+**Common Flags:**
+- `--recursive, -r` - Copy directories recursively
+- `--parallelism int` - Number of parallel transfers (default 1)
+- `--parallelism-chunk-size memory` - Size of chunks for parallel transfers
+
+**Usage Examples:**
+```bash
+# Upload file
+uplink cp local-file.txt sj://mybucket/remote-file.txt
+
+# Download file
+uplink cp sj://mybucket/remote-file.txt local-file.txt
+
+# Upload directory recursively
+uplink cp local-dir/ sj://mybucket/remote-dir/ --recursive
+```
+
+### uplink ls
+
+List objects and prefixes in buckets.
+
+**Syntax:**
+```
+uplink ls [path] [flags]
+```
+
+**Common Flags:**
+- `--recursive, -r` - List recursively
+- `--encrypted` - Show encrypted object names
+- `--pending` - Show pending multipart uploads
+
+**Usage Examples:**
+```bash
+# List all buckets
+uplink ls
+
+# List objects in bucket
+uplink ls sj://mybucket/
+
+# List recursively
+uplink ls sj://mybucket/ --recursive
+```
+
+### uplink mb
+
+Create a new bucket.
+
+**Syntax:**
+```
+uplink mb sj://bucket-name [flags]
+```
+
+**Usage Examples:**
+```bash
+uplink mb sj://my-new-bucket
+```
+
+### uplink rb
+
+Remove an empty bucket.
+
+**Syntax:**
+```
+uplink rb sj://bucket-name [flags]
+```
+
+**Common Flags:**
+- `--force` - Remove bucket and all objects
+
+**Usage Examples:**
+```bash
+uplink rb sj://my-bucket
+uplink rb sj://my-bucket --force
+```
+
+### uplink rm
+
+Remove objects from buckets.
+
+**Syntax:**
+```
+uplink rm sj://bucket/path [flags]
+```
+
+**Common Flags:**
+- `--recursive, -r` - Remove recursively
+- `--pending` - Remove pending multipart uploads
+
+**Usage Examples:**
+```bash
+# Remove single object
+uplink rm sj://mybucket/file.txt
+
+# Remove directory recursively
+uplink rm sj://mybucket/folder/ --recursive
+```
+
+### uplink mv
+
+Move or rename objects within Storj.
+
+**Syntax:**
+```
+uplink mv sj://source sj://destination
+```
+
+**Usage Examples:**
+```bash
+uplink mv sj://mybucket/oldname.txt sj://mybucket/newname.txt
+uplink mv sj://mybucket/file.txt sj://anotherbucket/file.txt
+```
+
+### uplink share
+
+Create shareable URLs for objects with restricted access.
+
+**Syntax:**
+```
+uplink share [flags] sj://bucket/path
+```
+
+**Common Flags:**
+- `--readonly` - Create read-only access
+- `--writeonly` - Create write-only access
+- `--not-after time` - Access expires after this time
+- `--not-before time` - Access not valid before this time
+
+**Usage Examples:**
+```bash
+uplink share sj://mybucket/file.txt --readonly --not-after +24h
+uplink share sj://mybucket/ --writeonly
+```
+
+## Metadata Commands
+
+### uplink meta
+
+Manage object metadata.
+
+#### Subcommands
+
+| Command | Description |
+|---------|-------------|
+| `meta get` | Get object metadata |
+
+**Usage Examples:**
+```bash
+uplink meta get sj://mybucket/file.txt
+```
+
+## Configuration Commands
+
+### uplink setup
+
+Create initial uplink configuration.
+
+**Syntax:**
+```
+uplink setup [flags]
+```
+
+This command walks you through the initial configuration process.
+
+### uplink import
+
+Import serialized access grant into configuration.
+
+**Syntax:**
+```
+uplink import [name] [serialized access] [flags]
+```
+
+**Usage Examples:**
+```bash
+uplink import my-access 13GRuHAiA...
+```
+
+## Advanced Usage
+
+### Environment Variables
+
+- `UPLINK_CONFIG_DIR` - Override configuration directory
+- `UPLINK_ACCESS` - Set default access grant
+- `UPLINK_DEBUG` - Enable debug output
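+
+For example, to point a single invocation at an alternate configuration directory (the path is illustrative):
+
+```bash
+UPLINK_CONFIG_DIR=/tmp/uplink-test uplink ls
+```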
+
+### Configuration File
+
+The uplink configuration is stored at:
+- Linux/macOS: `$HOME/.config/storj/uplink/config.yaml`
+- Windows: `%AppData%\storj\uplink\config.yaml`
+
+### Exit Codes
+
+- `0` - Success
+- `1` - General error
+- `2` - Access denied
+- `3` - Network error
+
+## Performance Tuning
+
+### Parallelism Settings
+
+For large files or directories, adjust parallelism:
+
+```bash
+uplink cp large-file.bin sj://bucket/ --parallelism 10
+uplink cp dir/ sj://bucket/ --recursive --parallelism 8
+```
+
+### Chunk Size Optimization
+
+For very large files, adjust chunk size:
+
+```bash
+uplink cp huge-file.bin sj://bucket/ --parallelism-chunk-size 64MiB
+```
+
+This reference covers all major Uplink CLI commands and common usage patterns. For specific flag details, use `uplink [command] --help`.
\ No newline at end of file
diff --git a/app/(docs)/dcs/reference/error-codes.md b/app/(docs)/dcs/reference/error-codes.md
new file mode 100644
index 000000000..a8f3b65bc
--- /dev/null
+++ b/app/(docs)/dcs/reference/error-codes.md
@@ -0,0 +1,146 @@
+---
+title: "Error Codes Reference"
+docId: "error-codes-ref-001"
+metadata:
+ title: "Error Codes and Troubleshooting Reference"
+ description: "Reference for common error codes, HTTP status codes, and troubleshooting information for Storj DCS."
+---
+
+Reference for error codes and common issues when working with Storj DCS.
+
+## CLI Exit Codes
+
+| Code | Description | Resolution |
+|------|-------------|------------|
+| `0` | Success | Operation completed successfully |
+| `1` | General error | Check command syntax and parameters |
+| `2` | Access denied | Verify access grant permissions |
+| `3` | Network error | Check internet connectivity and satellite endpoints |
+
+## HTTP Status Codes
+
+### 2xx Success
+| Code | Status | Description |
+|------|--------|-------------|
+| `200` | OK | Request successful |
+| `201` | Created | Resource created successfully |
+| `204` | No Content | Request successful, no content returned |
+
+### 4xx Client Errors
+| Code | Status | Description | Common Causes |
+|------|--------|-------------|---------------|
+| `400` | Bad Request | Invalid request format | Malformed JSON, invalid parameters |
+| `401` | Unauthorized | Authentication failed | Invalid access key, expired token |
+| `403` | Forbidden | Access denied | Insufficient permissions, restricted access |
+| `404` | Not Found | Resource not found | Bucket/object doesn't exist, wrong path |
+| `409` | Conflict | Resource conflict | Bucket already exists, object locked |
+
+### 5xx Server Errors
+| Code | Status | Description | Resolution |
+|------|--------|-------------|------------|
+| `500` | Internal Server Error | Server error | Retry request, contact support if persistent |
+| `502` | Bad Gateway | Gateway error | Check satellite status, retry request |
+| `503` | Service Unavailable | Service temporarily unavailable | Wait and retry with backoff |
+
+## Common Error Messages
+
+### Access Grant Errors
+
+**"Access grant invalid"**
+- **Cause**: Malformed or expired access grant
+- **Resolution**: Generate new access grant, verify serialization
+
+**"Insufficient permissions"**
+- **Cause**: Access grant lacks required permissions
+- **Resolution**: Create access grant with appropriate permissions
+
+### Network Errors
+
+**"Dial timeout"**
+- **Cause**: Network connectivity issues
+- **Resolution**: Check internet connection, firewall settings
+
+**"Connection refused"**
+- **Cause**: Satellite unreachable
+- **Resolution**: Verify satellite address, check network access
+
+### Storage Errors
+
+**"Bucket already exists"**
+- **Cause**: Bucket name already taken
+- **Resolution**: Choose different bucket name
+
+**"Object not found"**
+- **Cause**: Object path incorrect or object deleted
+- **Resolution**: Verify object path, check bucket listing
+
+**"Upload failed"**
+- **Cause**: Network interruption or insufficient space
+- **Resolution**: Retry upload, check available storage
+
+### S3 API Errors
+
+**"SignatureDoesNotMatch"**
+- **Cause**: Incorrect access credentials or clock skew
+- **Resolution**: Verify access keys, sync system clock
+
+**"NoSuchBucket"**
+- **Cause**: Bucket name incorrect or doesn't exist
+- **Resolution**: Create bucket or verify bucket name
+
+**"InvalidAccessKeyId"**
+- **Cause**: Access key not recognized
+- **Resolution**: Verify access key, regenerate if necessary
+
+## Troubleshooting Steps
+
+### General Troubleshooting
+
+1. **Verify credentials**: Ensure access grant or S3 keys are correct
+2. **Check permissions**: Confirm access grant has required permissions
+3. **Test connectivity**: Verify network access to satellites
+4. **Review syntax**: Double-check command syntax and parameters
+5. **Check limits**: Ensure request doesn't exceed service limits
+
+### Debug Mode
+
+Enable debug output for detailed error information:
+
+```bash
+# CLI debug mode
+export UPLINK_DEBUG=true
+uplink ls sj://mybucket/
+
+# Or use debug flag
+uplink --debug ls sj://mybucket/
+```
+
+### Log Analysis
+
+**CLI logs**: Look for specific error messages and stack traces
+**S3 client logs**: Enable verbose logging in S3 client configuration
+**Network logs**: Use tools like `curl` or `wget` to test endpoints
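+
+A quick reachability check against the gateway (any HTTP status line, even an error, shows the endpoint is reachable):
+
+```bash
+curl -sI https://gateway.storjshare.io | head -n 1
+```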
+
+### Performance Issues
+
+**Slow uploads/downloads**:
+- Adjust parallelism settings
+- Check network bandwidth
+- Consider chunked upload for large files
+
+**Timeouts**:
+- Increase client timeout settings
+- Use smaller chunk sizes
+- Check for network stability
+
+## Getting Help
+
+When reporting issues, include:
+
+1. **Error message**: Complete error text and codes
+2. **Command used**: Full command with parameters (sanitize credentials)
+3. **Environment**: OS, CLI version, client library version
+4. **Network**: Connection type and any proxies/firewalls
+5. **Timing**: When error occurs and frequency
+
+For persistent issues, contact support through the [support portal](https://supportdcs.storj.io/).
\ No newline at end of file
diff --git a/app/(docs)/dcs/reference/limits.md b/app/(docs)/dcs/reference/limits.md
new file mode 100644
index 000000000..27467bd61
--- /dev/null
+++ b/app/(docs)/dcs/reference/limits.md
@@ -0,0 +1,165 @@
+---
+title: "Service Limits Reference"
+docId: "service-limits-ref-001"
+metadata:
+ title: "Storj DCS Service Limits and Specifications"
+ description: "Complete reference for service limits, quotas, and technical specifications for Storj DCS object storage."
+---
+
+Complete reference for service limits and technical specifications.
+
+## Storage Limits
+
+| Resource | Limit | Notes |
+|----------|-------|-------|
+| **Buckets per account** | 100 | Contact support for increases |
+| **Objects per bucket** | No limit | |
+| **Object size** | No limit | Unlike AWS S3's 5 TiB limit |
+| **Minimum object size** | 0 B | Empty objects supported |
+| **Maximum PUT operation size** | No limit | Use multipart for large objects |
+| **Object name length (encrypted)** | 1,280 characters | Path encryption adds overhead |
+| **Object metadata size** | 2 KiB | Custom metadata storage |
+
+## Bucket Limits
+
+| Resource | Limit | Notes |
+|----------|-------|-------|
+| **Bucket name minimum length** | 3 characters | |
+| **Bucket name maximum length** | 63 characters | |
+| **Bucket name format** | DNS-compliant | Lowercase letters, numbers, hyphens |
+
+## Multipart Upload Limits
+
+| Resource | Limit | Notes |
+|----------|-------|-------|
+| **Maximum parts per upload** | 10,000 | Standard S3 limit |
+| **Minimum part size** | 5 MiB | Last part can be smaller |
+| **Maximum part size** | 5 GiB | Standard S3 limit |
+| **Parts returned per list request** | 10,000 | Pagination available |
+
+## API Request Limits
+
+| Operation | Limit | Notes |
+|-----------|-------|-------|
+| **Objects per ListObjects request** | 1,000 | Use pagination for more |
+| **Multipart uploads per list request** | 1,000 | Pagination available |
+| **Parts per ListParts request** | 10,000 | |
+
+## Network and Performance
+
+| Resource | Specification | Notes |
+|----------|---------------|-------|
+| **Upload bandwidth** | No artificial limits | Limited by network and node capacity |
+| **Download bandwidth** | No artificial limits | Limited by network and node capacity |
+| **Concurrent connections** | No specific limit | Best practice: 10-100 concurrent |
+| **Request rate** | No specific limit | Use exponential backoff for retries |
+
+## Access and Security
+
+| Resource | Limit | Notes |
+|----------|-------|-------|
+| **Access grants per account** | No limit | Store securely |
+| **Access grant size** | ~1-2 KB typical | Varies based on restrictions |
+| **Encryption key size** | 32 bytes | AES-256 encryption |
+| **Macaroon restrictions** | 64 KB serialized | Access grant restrictions |
+
+## Geographic Distribution
+
+| Resource | Specification | Notes |
+|----------|---------------|-------|
+| **Default redundancy** | 80 pieces (29 required) | Erasure coding parameters |
+| **Storage nodes** | Thousands globally | Decentralized network |
+| **Satellite regions** | Multiple | US1, EU1, AP1 available |
+
+## Large Object Considerations
+
+### Objects Larger Than 5 TiB
+
+Unlike AWS S3, Storj supports objects larger than 5 TiB. Configure S3 clients appropriately:
+
+**Required multipart configuration for 6 TiB file:**
+```bash
+aws configure set s3.multipart_chunksize 630MiB
+```
+
+**Formula for chunk size:**
+```
+chunk_size = object_size / 10000 (rounded up to nearest MiB)
+```
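+
+For the 6 TiB example above: 6 TiB = 6,291,456 MiB, and 6,291,456 / 10,000 parts ≈ 629.2 MiB, which rounds up to the 630 MiB setting shown.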
+
+## Rate Limiting and Backoff
+
+### Best Practices
+
+**Recommended retry strategy** (sketched in shell below):
+- Initial delay: 100ms
+- Maximum delay: 30 seconds
+- Exponential backoff with jitter
+- Maximum retry attempts: 5
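+
+A minimal shell sketch of this strategy, assuming an uplink copy as the retried operation (values mirror the list above):
+
+```bash
+delay=0.1
+for attempt in 1 2 3 4 5; do
+  uplink cp file.bin sj://my-bucket/ && break                                 # stop on success
+  sleep "$(awk -v d="$delay" 'BEGIN { srand(); print d + rand() * d }')"      # backoff with jitter
+  delay=$(awk -v d="$delay" 'BEGIN { m = d * 2; print (m > 30 ? 30 : m) }')   # cap at 30 seconds
+done
+```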
+
+**Connection pooling:**
+- Reuse HTTP connections
+- Limit concurrent connections per endpoint
+- Use appropriate timeout values
+
+## Monitoring and Quotas
+
+### Account Usage Monitoring
+
+Monitor usage through:
+- Satellite web console
+- CLI commands: `uplink ls --recursive`
+- S3 API: ListBuckets, ListObjects
+
+### Cost Optimization
+
+**Storage efficiency:**
+- Delete unnecessary objects regularly
+- Use object expiration for temporary data
+- Monitor duplicate objects
+
+**Bandwidth optimization:**
+- Use appropriate parallelism settings
+- Implement client-side caching where appropriate
+- Consider CDN for frequently accessed public data
+
+## Regional Specifications
+
+### Placement Options
+
+| Region | Description | Compliance |
+|--------|-------------|------------|
+| **Global** | Worldwide distributed | Standard |
+| **US-Select-1** | Continental US only | SOC 2 Type 2 |
+
+### Performance Characteristics
+
+**Global placement:**
+- Lowest cost
+- Best global performance
+- Highest durability
+
+**US-Select-1 placement:**
+- Compliance focused
+- US-based infrastructure
+- Premium pricing
+
+## Support and Escalation
+
+### Limit Increase Requests
+
+For limit increases, contact support with:
+- Current usage patterns
+- Projected growth requirements
+- Business justification
+- Timeline requirements
+
+### Enterprise Features
+
+Additional limits and features available for enterprise customers:
+- Custom redundancy parameters
+- Private satellite deployment
+- Dedicated support channels
+- SLA guarantees
+
+This reference covers all standard service limits. For enterprise requirements or limit increases, contact [Storj support](https://supportdcs.storj.io/).
\ No newline at end of file
diff --git a/app/(docs)/dcs/reference/s3-api.md b/app/(docs)/dcs/reference/s3-api.md
new file mode 100644
index 000000000..ff9c5c473
--- /dev/null
+++ b/app/(docs)/dcs/reference/s3-api.md
@@ -0,0 +1,208 @@
+---
+title: "S3 API Reference"
+docId: "s3-api-reference-001"
+metadata:
+ title: "S3 API Compatibility Reference"
+ description: "Complete reference for S3 API compatibility with Storj DCS, including supported operations, limits, and Storj-specific extensions."
+---
+
+Complete reference for S3 API compatibility with Storj DCS.
+
+## API Compatibility Overview
+
+The Storj S3-compatible Gateway supports a RESTful API that is compatible with the basic data access model of the Amazon S3 API.
+
+### Support Definitions
+
+- **Full** - Complete support for all features except those requiring unsupported dependencies
+- **Partial** - Limited support (see specific caveats)
+- **No** - Not supported
+
+## Supported Operations
+
+### Bucket Operations
+
+| Operation | Support | Notes |
+|-----------|---------|-------|
+| CreateBucket | Full | |
+| DeleteBucket | Full | |
+| HeadBucket | Full | |
+| ListBuckets | Full | |
+| GetBucketLocation | Full | Gateway-MT only |
+| GetBucketTagging | Full | |
+| PutBucketTagging | Full | |
+| DeleteBucketTagging | Full | |
+| GetBucketVersioning | Yes | See [Object Versioning](docId:oogh5vaiGei6atohm5thi) |
+| PutBucketVersioning | Yes | See [Object Versioning](docId:oogh5vaiGei6atohm5thi) |
+
+### Object Operations
+
+| Operation | Support | Notes |
+|-----------|---------|-------|
+| PutObject | Full | |
+| GetObject | Partial | Need to add partNumber parameter support |
+| DeleteObject | Full | |
+| DeleteObjects | Full | |
+| HeadObject | Full | |
+| CopyObject | Full | Supports objects up to ~671 GB (vs AWS 5 GB limit) |
+| GetObjectAttributes | Partial | ETag, StorageClass, and ObjectSize only |
+| GetObjectTagging | Full | Tags can be modified outside tagging endpoints |
+| PutObjectTagging | Full | Tags can be modified outside tagging endpoints |
+| DeleteObjectTagging | Full | Tags can be modified outside tagging endpoints |
+
+### Object Lock Operations
+
+| Operation | Support | Notes |
+|-----------|---------|-------|
+| GetObjectLockConfiguration | Yes | See [Object Lock](docId:gjrGzPNnhpYrAGTTAUaj) |
+| PutObjectLockConfiguration | Yes | See [Object Lock](docId:gjrGzPNnhpYrAGTTAUaj) |
+| GetObjectLegalHold | Yes | See [Object Lock](docId:gjrGzPNnhpYrAGTTAUaj) |
+| PutObjectLegalHold | Yes | See [Object Lock](docId:gjrGzPNnhpYrAGTTAUaj) |
+| GetObjectRetention | Yes | See [Object Lock](docId:gjrGzPNnhpYrAGTTAUaj) |
+| PutObjectRetention | Yes | See [Object Lock](docId:gjrGzPNnhpYrAGTTAUaj) |
+
+### Multipart Upload Operations
+
+| Operation | Support | Notes |
+|-----------|---------|-------|
+| CreateMultipartUpload | Full | |
+| UploadPart | Full | |
+| UploadPartCopy | Partial | Available on request |
+| CompleteMultipartUpload | Full | |
+| AbortMultipartUpload | Full | |
+| ListMultipartUploads | Partial | See ListMultipartUploads section |
+| ListParts | Full | |
+
+### Listing Operations
+
+| Operation | Support | Notes |
+|-----------|---------|-------|
+| ListObjects | Partial | See ListObjects section |
+| ListObjectsV2 | Partial | See ListObjects section |
+| ListObjectVersions | Yes | See [Object Versioning](docId:oogh5vaiGei6atohm5thi) |
+
+## Service Limits
+
+| Limit | Value |
+|-------|--------|
+| Maximum buckets per account | 100 |
+| Maximum objects per bucket | No limit |
+| Maximum object size | No limit |
+| Minimum object size | 0 B |
+| Maximum object size per PUT | No limit |
+| Maximum parts per multipart upload | 10,000 |
+| Minimum part size | 5 MiB (last part can be 0 B) |
+| Maximum parts returned per list request | 10,000 |
+| Maximum objects per list request | 1,000 |
+| Maximum multipart uploads per list request | 1,000 |
+| Maximum bucket name length | 63 characters |
+| Minimum bucket name length | 3 characters |
+| Maximum encrypted object name length | 1,280 characters |
+| Maximum metadata size | 2 KiB |
+
+## API Behavior Notes
+
+### ListObjects Behavior
+
+#### Encrypted Object Keys
+Object paths are end-to-end encrypted. Since we don't use ordering-preserving encryption, lexicographical ordering may not match expectations:
+
+- **Forward-slash terminated prefix/delimiter**: Fast listing in encrypted path order
+- **Non-forward-slash terminated prefix/delimiter**: Exhaustive listing in correct lexicographical order
+
+#### Unencrypted Object Keys
+Always lists in lexicographical order per S3 specification.
+
+### ListMultipartUploads Behavior
+
+- Same ordering characteristics as ListObjects
+- Only supports forward-slash terminated prefixes and delimiters
+- `UploadIdMarker` and `NextUploadIdMarker` not supported
+
+### GetBucketLocation Response
+
+Returns placement regions for bucket data:
+
+| Value | Description |
+|-------|-------------|
+| `global` | Stored on global public network |
+| `us-select-1` | SOC 2 Type 2 certified US facilities |
+
+## Storj-Specific Extensions
+
+### Object TTL (Time To Live)
+
+Set object expiration using the `X-Amz-Meta-Object-Expires` header:
+
+**Supported Formats:**
+- Duration: `+300ms`, `+1.5h`, `+2h45m`
+- RFC3339 timestamp: `2024-05-19T00:10:55Z`
+- `none` for no expiration
+
+**Example:**
+```bash
+aws s3 --endpoint-url https://gateway.storjshare.io cp file s3://bucket/object \
+ --metadata Object-Expires=+2h
+```
+
+### ListBucketsWithAttribution (Gateway-MT only)
+
+Returns bucket listing with attribution information.
+
+**Request:**
+```http
+GET /?attribution HTTP/1.1
+Host: gateway.storjshare.io
+```
+
+**Response includes additional Attribution element:**
+```xml
+<!-- Tag names reconstructed from the description above; confirm against the
+     Gateway-MT documentation for the exact schema. -->
+<Bucket>
+  <Name>string</Name>
+  <CreationDate>timestamp</CreationDate>
+  <Attribution>string</Attribution>
+</Bucket>
+```
+
+## Large Object Handling
+
+### Objects Larger Than 5 TiB
+
+For objects exceeding AWS S3's 5 TiB limit, configure multipart chunk size:
+
+```bash
+# For 6 TiB files, set chunk size to ~630 MiB
+aws --profile storj configure set s3.multipart_chunksize 630MiB
+aws --profile storj --endpoint-url https://gateway.storjshare.io s3 cp 6TiB_file s3://bucket/
+```
+
+## Client Compatibility
+
+### Python boto3 / AWS CLI
+
+**Supported versions:** boto3 up to 1.35.99
+
+**Issue:** Newer versions enable default integrity protections not yet supported by Storj.
+
+**Recommendation:** Downgrade rather than using the `WHEN_REQUIRED` workaround.
+
+## Unsupported Features
+
+### Security Features
+- ACL operations (GetObjectAcl, PutObjectAcl, etc.)
+- Bucket policies (except Gateway-ST with --website)
+- Public access blocks
+
+### Advanced Features
+- Lifecycle management
+- Cross-region replication
+- Analytics configurations
+- Metrics configurations
+- Inventory configurations
+- Notification configurations
+- Intelligent tiering
+- Acceleration
+- Website hosting
+- Logging (available on request)
+
+This reference provides complete S3 API compatibility information for integration planning and troubleshooting.
\ No newline at end of file
diff --git a/app/(docs)/dcs/third-party-tools/page.md b/app/(docs)/dcs/third-party-tools/page.md
index 308c3f3ca..baa60067f 100644
--- a/app/(docs)/dcs/third-party-tools/page.md
+++ b/app/(docs)/dcs/third-party-tools/page.md
@@ -7,14 +7,36 @@ redirects:
- /dcs/file-transfer
- /dcs/multimedia-storage-and-streaming
metadata:
- title: Guides to Using Third-Party Tools
+ title: How to Use Third-Party Tools with Storj
description:
- Step-by-step guides on leveraging third-party tools, including backups,
+ Practical how-to guides for integrating Storj with popular third-party tools for backups,
large file handling, file management, content delivery, scientific applications,
and cloud operations.
---
-Practical step-by-step guides to help you achieve a specific goal. Most useful when you're trying to get something done.
+This section contains practical how-to guides for integrating Storj DCS with popular third-party tools and applications. These guides help you achieve specific goals with step-by-step instructions.
+
+## Prerequisites
+
+Before using most third-party tools with Storj, ensure you have:
+
+- A Storj account with valid S3-compatible credentials
+- The third-party tool installed and accessible
+- Basic familiarity with the tool's interface or command-line usage
+- Network connectivity and appropriate firewall configurations
+
+For credential setup, see the [Getting Started guide](docId:AsyYcUJFbO1JI8-Tu8tW3).
+
+## How to choose the right tool
+
+Each tool category serves different use cases:
+
+- **Backups**: Automated, scheduled data protection with versioning and retention
+- **Large Files**: Optimized handling of multi-gigabyte files and datasets
+- **File Management**: User-friendly interfaces for everyday file operations
+- **Content Delivery**: Web hosting, media streaming, and public file sharing
+- **Scientific**: Research data management, analysis pipelines, and collaboration
+- **Cloud Ops**: Infrastructure automation, monitoring, and DevOps workflows
## Backups
@@ -45,3 +67,44 @@ Practical step-by-step guides to help you achieve a specific goal. Most useful w
{% tag-links tag="cloud-ops" directory="./app/(docs)/dcs/third-party-tools" %}
{% /tag-links %}
+
+## Verification steps
+
+After configuring any third-party tool with Storj:
+
+1. **Test connectivity**: Verify the tool can list your buckets or existing files
+2. **Test upload**: Upload a small test file to confirm write access
+3. **Test download**: Download the test file to verify read access
+4. **Check permissions**: Ensure the tool has appropriate access for your use case
+5. **Validate settings**: Confirm endpoint URLs, regions, and other configuration
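+
+Assuming the AWS CLI is configured with a Storj profile (profile and bucket names illustrative), a quick smoke test covering these steps might look like:
+
+```bash
+# 1-2. Connectivity and upload
+aws --profile storj --endpoint-url https://gateway.storjshare.io s3 ls
+echo "storj test" > test.txt
+aws --profile storj --endpoint-url https://gateway.storjshare.io s3 cp test.txt s3://my-bucket/
+
+# 3. Download the file back and compare
+aws --profile storj --endpoint-url https://gateway.storjshare.io s3 cp s3://my-bucket/test.txt test-check.txt
+diff test.txt test-check.txt && echo "round trip OK"
+```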
+
+## Common troubleshooting
+
+**"Access Denied" errors**:
+- Verify your S3 credentials are correct and active
+- Check that your access key has the required permissions
+- Ensure you're using the correct endpoint: `gateway.storjshare.io`
+
+**Connection timeouts**:
+- Check your internet connection and firewall settings
+- Verify the tool supports custom S3 endpoints
+- Try reducing concurrent connection limits in tool settings
+
+**Upload/download failures**:
+- For large files, ensure the tool supports multipart uploads
+- Check available disk space and network stability
+- Verify file paths and naming conventions are correct
+
+**Performance issues**:
+- Use the recommended chunk/part size of 64MB for uploads
+- Enable multipart uploads for files larger than 64MB
+- Consider network latency and bandwidth limitations
+
+## Getting help
+
+If you encounter issues not covered in individual tool guides:
+
+1. Check the tool's official documentation for S3 compatibility
+2. Review Storj's [S3 API compatibility reference](docId:eZ4caegh9queuQuaazoo)
+3. Search the [Storj community forum](https://forum.storj.io) for similar issues
+4. Contact Storj support with specific error messages and configuration details
diff --git a/app/(docs)/node/how-to/_meta.json b/app/(docs)/node/how-to/_meta.json
new file mode 100644
index 000000000..f868d38e6
--- /dev/null
+++ b/app/(docs)/node/how-to/_meta.json
@@ -0,0 +1,17 @@
+{
+ "title": "How-to Guides",
+ "nav": [
+ {
+ "title": "Change payout address",
+ "id": "change-payout-address"
+ },
+ {
+ "title": "Migrate node",
+ "id": "migrate-node"
+ },
+ {
+ "title": "Troubleshoot offline node",
+ "id": "troubleshoot-offline-node"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/app/(docs)/node/how-to/change-payout-address.md b/app/(docs)/node/how-to/change-payout-address.md
new file mode 100644
index 000000000..80868e32e
--- /dev/null
+++ b/app/(docs)/node/how-to/change-payout-address.md
@@ -0,0 +1,230 @@
+---
+title: How to change your payout address
+docId: change-payout-address-how-to
+metadata:
+ title: How to Change Your Storage Node Payout Address
+ description: Step-by-step guide to update the wallet address where you receive payments for your storage node operations.
+---
+
+This guide shows you how to change the wallet address where you receive payments for operating your storage node.
+
+## Prerequisites
+
+Before changing your payout address, ensure you have:
+
+- A running Storj storage node (CLI or Windows GUI installation)
+- Administrative access to the system running your node
+- A valid wallet address that supports the payment tokens you'll receive
+- Backup of your current configuration (recommended)
+
+## Important considerations
+
+**Timing**: You can change your payout address at any time, but changes only affect future payments. Any pending payments will still be sent to your previous address.
+
+**Wallet compatibility**: Ensure your new wallet address supports the token types used for payouts (currently STORJ tokens and other cryptocurrencies).
+
+**Verification**: Double-check your new wallet address is correct - incorrect addresses may result in lost payments.
+
+## Change payout address
+
+Choose the method that matches your storage node installation:
+
+{% tabs %}
+
+{% tab label="CLI Install (Docker)" %}
+
+### Step 1: Stop the storage node
+
+Stop your running storage node container safely:
+
+```bash
+docker stop -t 300 storagenode
+docker rm storagenode
+```
+
+The `-t 300` flag allows the node 5 minutes to gracefully shut down and complete any ongoing operations.
+
+### Step 2: Update configuration
+
+Edit your configuration file to add or update the wallet address. The location depends on how you set up your node:
+
+**If using config.yaml file:**
+
+```bash
+# Edit the config file (adjust path as needed)
+nano /path/to/your/storagenode/config.yaml
+```
+
+Find the `operator.wallet` section and update it:
+
+```yaml
+operator:
+ wallet: "0xYourNewWalletAddressHere"
+```
+
+**If using environment variables or command-line parameters:**
+
+Update your docker run command to include the new wallet address:
+
+```bash
+# Example docker run with new wallet address
+docker run -d --restart unless-stopped \
+ --name storagenode \
+ -p 28967:28967/tcp \
+ -p 28967:28967/udp \
+ -p 14002:14002 \
+ -e WALLET="0xYourNewWalletAddressHere" \
+ -e EMAIL="your@email.com" \
+ -e ADDRESS="your.external.address:28967" \
+ -e STORAGE="1TB" \
+ -v /path/to/identity:/app/identity \
+ -v /path/to/storage:/app/config \
+ storjlabs/storagenode:latest
+```
+
+### Step 3: Restart the storage node
+
+Start your storage node with the updated configuration:
+
+```bash
+# If using config.yaml, use your standard docker run command
+# If using the command above with updated parameters, run it now
+```
+
+{% /tab %}
+
+{% tab label="Windows GUI Install" %}
+
+### Step 1: Stop the storage node service
+
+Open an elevated PowerShell window (Run as Administrator) and stop the service:
+
+```powershell
+Stop-Service storagenode
+```
+
+Alternatively, you can use the Windows Services applet:
+1. Press `Win + R`, type `services.msc`, and press Enter
+2. Find "Storj V3 Storage Node" in the list
+3. Right-click and select "Stop"
+
+### Step 2: Edit configuration file
+
+Open the configuration file with a text editor. **Important**: Use Notepad++ or another advanced text editor - the regular Windows Notepad may not work properly with the file format.
+
+```powershell
+# Open the config file in Notepad++ (default install path shown; adjust if needed)
+& "C:\Program Files\Notepad++\notepad++.exe" "C:\Program Files\Storj\Storage Node\config.yaml"
+```
+
+Find the `operator.wallet` line and update it with your new wallet address:
+
+```yaml
+operator:
+ wallet: "0xYourNewWalletAddressHere"
+```
+
+Save the file.
+
+### Step 3: Restart the storage node service
+
+Restart the service to apply the changes:
+
+```powershell
+Start-Service storagenode
+```
+
+Or using the Windows Services applet:
+1. Right-click "Storj V3 Storage Node"
+2. Select "Start"
+
+{% /tab %}
+
+{% /tabs %}
+
+## Verify the change
+
+After restarting your storage node, verify the new payout address is configured correctly:
+
+### Check the dashboard
+
+1. Access your node's web dashboard (usually at `http://localhost:14002`; the local dashboard requires no login)
+2. Look for the wallet address displayed in the node information section
+3. Confirm it matches your new address
+
+### Check the logs
+
+Review your storage node logs to confirm successful startup with the new configuration:
+
+{% tabs %}
+
+{% tab label="CLI Install" %}
+
+```bash
+# View recent logs
+docker logs storagenode --tail 50
+
+# Look for lines confirming the wallet address
+# Should not show any errors about invalid wallet format
+```
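+
+To pull out just the wallet line (exact log format may vary by node version):
+
+```bash
+docker logs storagenode 2>&1 | grep -i wallet
+```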
+
+{% /tab %}
+
+{% tab label="Windows GUI Install" %}
+
+Check the logs in the installation directory:
+
+```powershell
+# View recent log entries (default GUI-install log location)
+Get-Content "C:\Program Files\Storj\Storage Node\storagenode.log" -Tail 50
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+Look for log entries that confirm your node started successfully without wallet-related errors.
+
+## Troubleshooting
+
+**Service won't start after change**:
+- Verify the wallet address format is correct (typically starts with "0x" for Ethereum addresses)
+- Check that you saved the configuration file properly
+- Review logs for specific error messages
+
+**Dashboard shows old address**:
+- Clear your browser cache and reload the dashboard
+- Wait a few minutes for the dashboard to update
+- Verify you restarted the service completely
+
+**Invalid wallet address format errors**:
+- Confirm your wallet address is valid for the payment system
+- Check for extra spaces or characters in the configuration
+- Ensure you're using the correct address format (e.g., Ethereum format for STORJ tokens)
+
+**Configuration file changes not taking effect**:
+- Verify you have write permissions to the configuration file
+- Confirm you're editing the correct configuration file path
+- Make sure the service completely stopped before making changes
+
+## Important notes
+
+**Payment timing**: The address change takes effect immediately for new payments, but any payments already processed will still go to your previous address.
+
+**Multiple nodes**: If you operate multiple storage nodes, you'll need to update each one individually following these steps.
+
+**Backup configuration**: Always keep a backup of your working configuration before making changes.
+
+**Address validation**: Some storage node software versions may validate wallet addresses. If you receive validation errors, double-check your address format.
+
+## Next steps
+
+After successfully changing your payout address:
+
+- Monitor your node's operation to ensure it continues running normally
+- [Set up monitoring for your node performance](#)
+- [Learn about payment schedules and amounts](#)
+- [Configure additional node settings](#)
+
+For other storage node configuration changes, see the [complete configuration guide](#).
\ No newline at end of file
diff --git a/app/(docs)/node/how-to/migrate-node.md b/app/(docs)/node/how-to/migrate-node.md
new file mode 100644
index 000000000..153a7e9ce
--- /dev/null
+++ b/app/(docs)/node/how-to/migrate-node.md
@@ -0,0 +1,344 @@
+---
+title: How to migrate your node to a new device
+docId: migrate-node-how-to
+metadata:
+ title: How to Migrate Your Storage Node to a New Device
+ description: Complete step-by-step guide to safely migrate your Storj storage node to new hardware or storage location while preserving data and reputation.
+---
+
+This guide shows you how to migrate your storage node to a new device, drive, or location while preserving your node's reputation and stored data.
+
+## Prerequisites
+
+Before migrating your storage node, ensure you have:
+
+- A running Storj storage node that you want to migrate
+- Access to both the source and destination systems
+- Sufficient storage space on the destination (at least equal to your current data)
+- Network access between source and destination (if different machines)
+- Administrative privileges on both systems
+- Time to complete the migration (can take several hours for large datasets)
+
+## Important considerations
+
+**Downtime**: Plan for some downtime during the final migration steps. Minimize this by pre-copying data while your node is running.
+
+**Reputation preservation**: Your node's identity must be preserved exactly to maintain your reputation and avoid disqualification.
+
+**Platform compatibility**: If migrating across different architectures (x86 to ARM, etc.), additional steps are required.
+
+**Network storage warning**: Network-attached storage is not supported and may cause performance issues or disqualification.
+
+## Locate your current data
+
+First, identify where your storage node data is currently located:
+
+{% tabs %}
+
+{% tab label="Windows GUI Install" %}
+
+**Identity folder**: `%APPDATA%\Storj\Identity\storagenode`
+**Orders folder**: `%ProgramFiles%\Storj\Storage Node\orders`
+**Storage data**: The path specified in your configuration
+
+**Find exact paths**:
+```powershell
+# Check configuration for data paths
+Get-Content "C:\Program Files\Storj\Storage Node\config.yaml" | Select-String "storage-dir|identity-dir"
+```
+
+{% /tab %}
+
+{% tab label="Linux/macOS CLI Install" %}
+
+**Linux identity**: `~/.local/share/storj/identity/storagenode`
+**macOS identity**: `~/Library/Application Support/Storj/identity/storagenode`
+**Data location**: Specified in your Docker run command or configuration
+
+**Find exact paths**:
+```bash
+# Check your Docker run command or configuration
+docker inspect storagenode | grep -E "Source|Destination"
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+## Migration methods
+
+Choose the migration method that matches your setup:
+
+## Method 1: Same-platform migration (recommended)
+
+This method works for migrating between systems with the same architecture (e.g., x86-64 to x86-64).
+
+### Step 1: Prepare the destination
+
+Set up the destination paths on your new system:
+
+```bash
+# Create destination directories (adjust paths as needed)
+mkdir -p /mnt/storj-new/identity
+mkdir -p /mnt/storj-new/storage
+mkdir -p /mnt/storj-new/orders
+```
+
+### Step 2: Copy identity files (critical first step)
+
+**Important**: Copy identity files first while your node is running:
+
+```bash
+# Copy identity (must be exact - any corruption causes disqualification)
+rsync -aP /source/identity/storagenode/ /mnt/storj-new/identity/
+```
+
+**Verify identity copy**:
+```bash
+# Compare file counts and sizes
+find /source/identity/storagenode -type f | wc -l
+find /mnt/storj-new/identity -type f | wc -l
+
+# Files should match exactly
+```
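+
+For byte-level confidence, checksums can be compared as well (paths as in the copy step):
+
+```bash
+# Hash every identity file at the source, then verify the destination copy against the list
+(cd /source/identity/storagenode && sha256sum *) | (cd /mnt/storj-new/identity && sha256sum -c -)
+```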
+
+### Step 3: Pre-copy orders and data (while node running)
+
+Start copying data while your node is still operational to minimize downtime:
+
+```bash
+# Copy orders folder
+rsync -aP /source/orders/ /mnt/storj-new/orders/
+
+# Copy storage data (this may take hours for large datasets)
+rsync -aP /source/storage/ /mnt/storj-new/storage/
+```
+
+### Step 4: Repeat sync to minimize differences
+
+Run the copy commands multiple times to reduce the amount of data to transfer during downtime:
+
+```bash
+# Repeat these commands until differences are minimal
+rsync -aP /source/orders/ /mnt/storj-new/orders/
+rsync -aP /source/storage/ /mnt/storj-new/storage/
+```
+
+### Step 5: Final migration (downtime required)
+
+When you're ready for the final migration:
+
+**Stop your storage node**:
+
+{% tabs %}
+
+{% tab label="CLI Install" %}
+
+```bash
+# Stop the container gracefully (allows up to 5 minutes for cleanup)
+docker stop -t 300 storagenode
+docker rm storagenode
+```
+
+{% /tab %}
+
+{% tab label="Windows GUI Install" %}
+
+```powershell
+# Stop the Windows service
+Stop-Service storagenode
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+**Complete the final sync**:
+
+```bash
+# Final sync with --delete to ensure exact copy
+rsync -aP --delete /source/orders/ /mnt/storj-new/orders/
+rsync -aP --delete /source/storage/ /mnt/storj-new/storage/
+```
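+
+A quick sanity check that source and destination now match (totals should be identical or nearly so):
+
+```bash
+du -sh /source/storage /mnt/storj-new/storage
+```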
+
+**Copy configuration files**:
+
+```bash
+# Copy configuration and other important files
+cp /source/config.yaml /mnt/storj-new/
+cp /source/revocations.db /mnt/storj-new/
+
+# Preserve any other important files in your config directory
+```
+
+### Step 6: Update your configuration
+
+Update your node configuration to use the new paths:
+
+{% tabs %}
+
+{% tab label="CLI Install" %}
+
+Update your Docker run command to use the new mount points:
+
+```bash
+# Example updated docker run command
+docker run -d --restart unless-stopped \
+ --name storagenode \
+ -p 28967:28967/tcp \
+ -p 28967:28967/udp \
+ -p 14002:14002 \
+ -e WALLET="0xYourWalletAddress" \
+ -e EMAIL="your@email.com" \
+ -e ADDRESS="your.external.address:28967" \
+ -e STORAGE="2TB" \
+ --mount type=bind,source=/mnt/storj-new/identity,destination=/app/identity \
+ --mount type=bind,source=/mnt/storj-new,destination=/app/config \
+ storjlabs/storagenode:latest
+```
+
+**Important mount point notes**:
+- Use `/mnt/storj-new` as the config mount source (not `/mnt/storj-new/storage`)
+- The container automatically creates a `storage` subdirectory
+- Ensure your data is in `/mnt/storj-new/storage/` on the host
+
+{% /tab %}
+
+{% tab label="Windows GUI Install" %}
+
+Update the configuration file:
+
+```powershell
+# Edit the config file with new paths (default Notepad++ install path shown; adjust if needed)
+& "C:\Program Files\Notepad++\notepad++.exe" "C:\Program Files\Storj\Storage Node\config.yaml"
+```
+
+Update paths in the configuration file:
+```yaml
+storage-dir: 'C:\NewStorjLocation\storage'
+identity-dir: 'C:\NewStorjLocation\identity'
+```
+
+Start the service:
+```powershell
+Start-Service storagenode
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+## Method 2: Cross-platform migration
+
+If migrating between different architectures (x86-64 to ARM, Windows to Linux, etc.):
+
+### Additional step: Remove platform-specific binaries
+
+Before starting your migrated node, remove old binaries:
+
+```bash
+# Remove binaries from the storage location
+rm -rf /mnt/storj-new/storage/bin/
+
+# The container will download appropriate binaries for the new platform
+```
+
+### Follow same process as Method 1
+
+Complete all other steps from Method 1, but include the binary removal step before starting your node on the new platform.
+
+## Verification
+
+After migration, verify your node is working correctly:
+
+### Check node startup
+
+Monitor logs during startup:
+
+{% tabs %}
+
+{% tab label="CLI Install" %}
+
+```bash
+# Follow logs in real-time
+docker logs storagenode -f
+
+# Look for successful startup messages and no error about missing data
+```
+
+{% /tab %}
+
+{% tab label="Windows GUI Install" %}
+
+```powershell
+# Check recent log entries (default GUI-install log location)
+Get-Content "C:\Program Files\Storj\Storage Node\storagenode.log" -Tail 50
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+### Verify data integrity
+
+Confirm your data migrated correctly:
+
+1. **Check dashboard**: Access your node dashboard (usually `http://localhost:14002`)
+2. **Verify storage usage**: Should match your previous usage amounts
+3. **Monitor for errors**: Watch for any data corruption or missing file errors
+4. **Check reputation**: Your reputation scores should remain unchanged
+
+### Monitor for issues
+
+Watch your node for the first 24-48 hours after migration:
+
+- No disqualification warnings
+- Normal audit success rates
+- Proper connectivity to all satellites
+- Expected payout calculations
+
+## Troubleshooting
+
+**Node starts but shows empty storage**:
+- Verify the mount paths in your Docker run command
+- Ensure data is located in the correct subdirectories
+- Check file permissions on the new location
+
+**Identity-related errors**:
+- Verify identity files copied completely and without corruption
+- Check that identity directory permissions allow reading
+- Ensure no extra or missing files in identity directory
+
+**Performance issues after migration**:
+- Verify the new storage location has adequate I/O performance
+- Check network connectivity between node and satellites
+- Monitor system resource usage (CPU, memory, disk I/O)
+
+**Database errors**:
+- Ensure all database files copied completely
+- Verify database files are not corrupted (compare file sizes)
+- Check that storage location has adequate free space
+
+## Important warnings
+
+**Critical identity preservation**: Any corruption or modification of identity files will result in immediate disqualification. Always verify identity files copied perfectly.
+
+**Avoid network storage**: Network-attached storage can cause performance issues and potential disqualification due to latency and reliability concerns.
+
+**Don't rush the process**: Take time to verify each step. A failed migration can result in permanent disqualification and loss of earnings.
+
+**Test with a backup**: If possible, test the migration process with a copy of your data before migrating your production node.
+
+## Next steps
+
+After successful migration:
+
+- [Set up monitoring for your node](#) to track performance
+- [Optimize node configuration](#) for your new environment
+- [Plan for future backups](#) of your node data
+- [Consider disaster recovery](#) planning for your infrastructure
+
+For additional migration scenarios, see:
+- [Migrate between Windows installations](#)
+- [Migrate from CLI to GUI installation](#)
+- [Set up redundant storage configurations](#)
\ No newline at end of file
diff --git a/app/(docs)/node/how-to/troubleshoot-offline-node.md b/app/(docs)/node/how-to/troubleshoot-offline-node.md
new file mode 100644
index 000000000..6614384e9
--- /dev/null
+++ b/app/(docs)/node/how-to/troubleshoot-offline-node.md
@@ -0,0 +1,364 @@
+---
+title: How to troubleshoot an offline node
+docId: troubleshoot-offline-node-how-to
+metadata:
+ title: How to Troubleshoot Storage Node Offline Issues
+ description: Step-by-step guide to diagnose and fix storage node connectivity issues when your node appears offline or unreachable.
+---
+
+This guide helps you diagnose and resolve issues when your storage node appears offline or unreachable to the Storj network.
+
+## Prerequisites
+
+Before troubleshooting, ensure you have:
+
+- Access to your storage node system and configuration
+- Administrative privileges on your router/firewall
+- Basic understanding of port forwarding concepts
+- Your node's external address and port information
+
+## Identify the problem
+
+**Signs your node is offline**:
+- Email notifications about node being offline
+- Dashboard warnings about connectivity issues
+- Low audit success rates or failed audits
+- Reduced earnings or payout warnings
+
+**Common causes**:
+- Port forwarding issues
+- Firewall blocking connections
+- Dynamic IP address changes
+- Node configuration errors
+- Internet connectivity problems
+
+## Step-by-step troubleshooting
+
+Follow these steps in order to diagnose and fix offline issues:
+
+### Step 1: Verify node identity
+
+Ensure your node identity is intact and valid:
+
+```bash
+# For CLI installations - check identity files exist
+ls -la /path/to/identity/storagenode/
+
+# Should show files like: ca.cert, identity.cert, ca.key, identity.key
+# If any files are missing or corrupted, your node will be offline
+```
+
+**For Windows GUI installations**:
+```powershell
+# Check identity folder contents
+Get-ChildItem "$env:APPDATA\Storj\Identity\storagenode"
+```
+
+**If identity files are missing**: You cannot recover - this results in permanent disqualification. You'll need to create a new node with a new identity.
+
+### Step 2: Check port forwarding configuration
+
+Verify your router forwards the correct port to your node:
+
+**Required port forwarding**:
+- **Port**: 28967
+- **Protocol**: Both TCP and UDP
+- **Destination**: Internal IP of your node system
+- **External IP**: Should match your public IP
+
+**Test port forwarding**:
+
+1. **Find your public IP**:
+ ```bash
+ curl ifconfig.me
+ ```
+
+2. **Test port accessibility**:
+ - Visit [https://www.yougetsignal.com/tools/open-ports/](https://www.yougetsignal.com/tools/open-ports/)
+ - Enter your public IP and port 28967
+ - Click "Check" - should show "Open" if working correctly
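+
+3. **Test from the command line** (optional; `nc` is available on most Linux/macOS systems):
+   ```bash
+   # -v: verbose, -z: scan without sending data; best run from outside your own LAN
+   nc -vz your.public.ip 28967
+   ```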
+
+### Step 3: Verify external address configuration
+
+Check that your node is configured with the correct external address:
+
+{% tabs %}
+
+{% tab label="CLI Install (Docker)" %}
+
+**Check your Docker run command**:
+```bash
+# View your container configuration
+docker inspect storagenode | grep -A5 -B5 ADDRESS
+
+# Should show something like:
+# "ADDRESS=your.external.address:28967"
+```
+
+**If ADDRESS is incorrect, update it**:
+```bash
+# Stop and remove container
+docker stop -t 300 storagenode
+docker rm storagenode
+
+# Restart with correct ADDRESS
+docker run -d --restart unless-stopped \
+ --name storagenode \
+ -p 28967:28967/tcp \
+ -p 28967:28967/udp \
+ -p 14002:14002 \
+ -e ADDRESS="your.correct.external.address:28967" \
+ # ... other parameters
+```
+
+{% /tab %}
+
+{% tab label="Windows GUI Install" %}
+
+**Check configuration file**:
+```powershell
+# View current external address setting
+Get-Content "C:\Program Files\Storj\Storage Node\config.yaml" | Select-String "external-address"
+```
+
+**Update if incorrect**:
+1. Stop the service: `Stop-Service storagenode`
+2. Edit config with Notepad++: `& "C:\Program Files\Notepad++\notepad++.exe" "C:\Program Files\Storj\Storage Node\config.yaml"`
+3. Update the line:
+ ```yaml
+ contact.external-address: your.correct.external.address:28967
+ ```
+4. Save and restart: `Start-Service storagenode`
+
+{% /tab %}
+
+{% /tabs %}
+
+### Step 4: Handle dynamic IP addresses
+
+If your internet connection has a dynamic IP that changes:
+
+**Set up Dynamic DNS (DDNS)**:
+
+1. **Register with a DDNS provider** (e.g., [NoIP](https://www.noip.com/), DynDNS)
+2. **Create a domain** (e.g., `mynode.ddns.net`)
+3. **Configure automatic updates**:
+
+ **Option A: Router configuration**:
+ - Access router admin panel
+ - Find DDNS section
+ - Enter your DDNS provider credentials
+ - Enable automatic IP updates
+
+ **Option B: Client software**:
+ - Download provider's update client (e.g., NoIP DUC)
+ - Configure with your credentials
+ - Install and run on your node system
+
+4. **Update node configuration** to use your DDNS domain instead of IP address
+
+**Important**: Only use ONE update method (router OR client software), not both.
+
+### Step 5: Configure firewall rules
+
+Ensure your firewall allows storage node traffic:
+
+{% tabs %}
+
+{% tab label="Windows Firewall" %}
+
+**Add inbound rule**:
+```powershell
+# Allow inbound traffic on port 28967
+New-NetFirewallRule -DisplayName "Storj Node Inbound" -Direction Inbound -Protocol TCP -LocalPort 28967 -Action Allow
+New-NetFirewallRule -DisplayName "Storj Node Inbound UDP" -Direction Inbound -Protocol UDP -LocalPort 28967 -Action Allow
+```
+
+**Add outbound rule** (if you have restrictive outbound rules):
+```powershell
+# Allow all outbound traffic (broad rule; scope it with -Program to the storagenode binary if preferred)
+New-NetFirewallRule -DisplayName "Storj Node Outbound" -Direction Outbound -Action Allow
+```
+
+{% /tab %}
+
+{% tab label="Linux Firewall (UFW)" %}
+
+**Allow required ports**:
+```bash
+# Allow inbound traffic on port 28967
+sudo ufw allow 28967/tcp
+sudo ufw allow 28967/udp
+
+# Reload firewall
+sudo ufw reload
+```
+
+{% /tab %}
+
+{% tab label="Linux Firewall (iptables)" %}
+
+**Add rules**:
+```bash
+# Allow inbound traffic
+sudo iptables -A INPUT -p tcp --dport 28967 -j ACCEPT
+sudo iptables -A INPUT -p udp --dport 28967 -j ACCEPT
+
+# Allow outbound traffic (if you have restrictive rules)
+sudo iptables -A OUTPUT -j ACCEPT
+
+# Save rules (method varies by distribution; the redirect needs root, hence sh -c)
+sudo sh -c 'iptables-save > /etc/iptables/rules.v4'
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+### Step 6: Test connectivity
+
+After making changes, test your node's connectivity:
+
+**Check dashboard**:
+1. Access your node dashboard (usually `http://localhost:14002`)
+2. Look for connectivity status indicators
+3. Check for error messages or warnings
+
+**Monitor logs**:
+
+{% tabs %}
+
+{% tab label="CLI Install" %}
+
+```bash
+# Follow logs in real-time
+docker logs storagenode -f
+
+# Look for connection success/failure messages
+# Should see successful communication with satellites
+```
+
+{% /tab %}
+
+{% tab label="Windows GUI Install" %}
+
+```powershell
+# Check recent log entries (default GUI-install log location)
+Get-Content "C:\Program Files\Storj\Storage Node\storagenode.log" -Tail 100 | Select-String "error|offline|connection"
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+**Use external tools**:
+```bash
+# Test from external system (if available)
+telnet your.external.address 28967
+
+# Should connect successfully
+```
+
+## Verification checklist
+
+After troubleshooting, verify these items are correct:
+
+- [ ] **Identity files**: Present and intact
+- [ ] **Port forwarding**: 28967 TCP+UDP forwarded to correct internal IP
+- [ ] **External address**: Correct IP/domain and port in node configuration
+- [ ] **DDNS**: Configured and updating if using dynamic IP
+- [ ] **Firewall**: Allows inbound traffic on port 28967
+- [ ] **Router firewall**: Not blocking the storage node traffic
+- [ ] **Network connectivity**: Node can reach the internet
+- [ ] **Dashboard**: Shows node as online and connected
+
+## Common issues and solutions
+
+**Port still shows closed after forwarding**:
+- Verify internal IP hasn't changed (DHCP reassignment)
+- Check router has correct port forwarding syntax
+- Some routers require reboot after port forwarding changes
+- Verify no double-NAT situation (router behind another router)
+
+**Node works intermittently**:
+- Usually indicates dynamic IP issues
+- Set up DDNS as described above
+- Consider static IP from ISP if available
+
+**Firewall software blocks despite rules**:
+- Some antivirus software includes firewalls that override system settings
+- Check antivirus software firewall settings
+- Consider temporarily disabling to test (remember to re-enable)
+
+**ISP blocks or throttles traffic**:
+- Some ISPs block or limit certain ports
+- Contact ISP to verify no restrictions on port 28967
+- Consider using a VPN as a workaround (though this may impact performance)
+
+**Double-NAT situation**:
+- Occurs when your router is behind another router/modem
+- Both devices need port forwarding configuration
+- Consider setting upstream device to bridge mode if possible
+
+## Advanced troubleshooting
+
+If basic steps don't resolve the issue:
+
+**Check for IP conflicts**:
+```bash
+# Verify no other device uses the same internal IP (-sn: ping scan)
+nmap -sn your.network.range.0/24
+```
+
+**Test from different networks**:
+- Use mobile hotspot to test external connectivity
+- Helps identify ISP-specific issues
+
+**Check satellite connectivity**:
+```bash
+# Test connectivity to known Storj satellites (example)
+ping satellite.address.storj.io
+```
+
+**Review detailed logs**:
+```bash
+# Filter node logs for error-level entries (Docker example; adjust for your install)
+docker logs storagenode 2>&1 | grep -iE "error|fatal" | tail -50
+```
+
+## When to seek help
+
+Contact support if:
+
+- You've followed all steps but node remains offline
+- Your ISP confirms no restrictions but connectivity fails
+- Hardware appears to be failing
+- You suspect account or identity issues
+
+**Provide this information when seeking help**:
+- Your node ID
+- External IP address and port
+- Router make/model
+- Operating system details
+- Relevant log entries showing errors
+- Results of port forwarding tests
+
+## Prevention tips
+
+To avoid future offline issues:
+
+- Set up monitoring alerts for your node status
+- Use DDNS from the start if you have dynamic IP
+- Document your port forwarding configuration
+- Regularly backup your identity and configuration files
+- Monitor router firmware updates that might reset configurations
+- Consider uninterruptible power supply (UPS) for stability
+
+## Next steps
+
+Once your node is back online:
+
+- [Monitor node performance](#) to ensure stable operation
+- [Set up automated monitoring](#) to detect future issues quickly
+- [Optimize node configuration](#) for better reliability
+- [Plan backup strategies](#) to prevent data loss
\ No newline at end of file
diff --git a/app/(docs)/node/reference/_meta.json b/app/(docs)/node/reference/_meta.json
new file mode 100644
index 000000000..873d257d0
--- /dev/null
+++ b/app/(docs)/node/reference/_meta.json
@@ -0,0 +1,5 @@
+{
+ "configuration": "Configuration",
+ "dashboard-metrics": "Dashboard Metrics",
+ "system-requirements": "System Requirements"
+}
\ No newline at end of file
diff --git a/app/(docs)/node/reference/configuration.md b/app/(docs)/node/reference/configuration.md
new file mode 100644
index 000000000..e04206f2d
--- /dev/null
+++ b/app/(docs)/node/reference/configuration.md
@@ -0,0 +1,310 @@
+---
+title: "Storage Node Configuration Reference"
+docId: "node-config-ref-001"
+metadata:
+ title: "Storage Node Configuration Reference"
+ description: "Complete reference for Storage Node configuration parameters, config.yaml options, and environment variables."
+---
+
+Complete reference for Storage Node configuration options and parameters.
+
+## Configuration File Location
+
+### Docker Installation
+- **Path**: `$HOME/storj/storagenode/config.yaml`
+- **Mount**: Bound to `/app/config/config.yaml` in container
+
+### Native Installation
+
+#### Linux
+- **Path**: `~/.local/share/storj/storagenode/config.yaml`
+- **System**: `/etc/storj/storagenode/config.yaml`
+
+#### Windows
+- **Path**: `C:\Program Files\Storj\Storage Node\config.yaml`
+- **User**: `%APPDATA%\Storj\Storage Node\config.yaml`
+
+#### macOS
+- **Path**: `~/Library/Application Support/storj/storagenode/config.yaml`
+
+## Core Configuration Parameters
+
+### Identity and Network
+
+| Parameter | Type | Description | Example |
+|-----------|------|-------------|---------|
+| `identity.cert-path` | string | Identity certificate path | `/app/identity/identity.cert` |
+| `identity.key-path` | string | Identity private key path | `/app/identity/identity.key` |
+| `server.address` | string | Public address the node listens on | `:28967` |
+| `server.private-address` | string | Private (localhost) API address | `127.0.0.1:7778` |
+
+### Storage Configuration
+
+| Parameter | Type | Description | Default |
+|-----------|------|-------------|---------|
+| `storage.allocated-disk-space` | string | Total allocated space | `1TB` |
+| `storage2.allocated-disk-space` | string | Storage v2 allocated space | `1TB` |
+| `storage.path` | string | Data storage directory path | `/app/config` |
+| `storage2.path` | string | Storage v2 data path | `/app/config/storage2` |
+
+### Bandwidth Allocation
+
+| Parameter | Type | Description | Default |
+|-----------|------|-------------|---------|
+| `storage.allocated-bandwidth` | string | Monthly bandwidth limit | `2TB` |
+
+### Satellite Configuration
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `contact.external-address` | string | Node's external contact address |
+| `storage2.trust.sources` | array | Trusted satellite URLs |
+
+### Database Settings
+
+| Parameter | Type | Description | Default |
+|-----------|------|-------------|---------|
+| `pieces.database-url` | string | Pieces database connection | `bolt://path/to/piecestore.db` |
+| `filestore.write-buffer-size` | string | Write buffer size | `128KB` |
+| `storage2.database-url` | string | Storage v2 database URL | `bolt://path/to/storage2.db` |
+| `server.revocation-dburl` | string | Revocation database path | `bolt://path/to/revocations.db` |
+
+### Network and Performance
+
+| Parameter | Type | Description | Default |
+|-----------|------|-------------|---------|
+| `server.use-peer-ca-whitelist` | boolean | Use peer CA whitelist | `true` |
+| `console.address` | string | Web dashboard address | `127.0.0.1:14002` |
+| `console.static-dir` | string | Web assets directory | `/app/static` |
+
+## Docker Configuration Examples
+
+### Basic Docker Command
+```bash
+docker run -d --restart unless-stopped \
+ --stop-timeout 300 \
+ -p 28967:28967/tcp \
+ -p 28967:28967/udp \
+ -p 14002:14002 \
+ --name storagenode \
+ --user $(id -u):$(id -g) \
+ --mount type=bind,source=$HOME/storj/identity/storagenode,destination=/app/identity \
+ --mount type=bind,source=$HOME/storj/storagenode,destination=/app/config \
+ -e WALLET="your-wallet-address" \
+ -e EMAIL="your-email@example.com" \
+ -e ADDRESS="your-ddns-hostname:28967" \
+ -e STORAGE="2TB" \
+ storjlabs/storagenode:latest
+```
+
+### Docker Compose Configuration
+```yaml
+version: '3.8'
+services:
+ storagenode:
+ image: storjlabs/storagenode:latest
+ container_name: storagenode
+ restart: unless-stopped
+ stop_grace_period: 300s
+ ports:
+ - "28967:28967/tcp"
+ - "28967:28967/udp"
+ - "14002:14002"
+ volumes:
+ - /home/user/storj/identity/storagenode:/app/identity
+ - /home/user/storj/storagenode:/app/config
+ environment:
+ - WALLET=your-wallet-address
+ - EMAIL=your-email@example.com
+ - ADDRESS=your-ddns-hostname:28967
+ - STORAGE=2TB
+ - BANDWIDTH=2TB
+ user: "${UID}:${GID}"
+```
+
+## Environment Variables
+
+### Required Variables
+
+| Variable | Description | Example |
+|----------|-------------|---------|
+| `WALLET` | Ethereum wallet address for payments | `0x1234567890abcdef...` |
+| `EMAIL` | Contact email address | `operator@example.com` |
+| `ADDRESS` | External node address | `node.example.com:28967` |
+| `STORAGE` | Allocated disk space | `2TB`, `500GB` |
+
+### Optional Variables
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `BANDWIDTH` | Monthly bandwidth allocation | `2TB` |
+| `LOG_LEVEL` | Logging verbosity | `info` |
+| `STORAGE2_TRUST_SOURCES` | Comma-separated satellite URLs | Default satellites |
+
+## Advanced Configuration
+
+### Custom Satellite Configuration
+```yaml
+storage2:
+ trust:
+ sources:
+ - "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFHpkmw2GT1RtLUod@satellite.example.com:7777"
+ exclusions:
+ sources: []
+ cache-url: "trust://path/to/trust-cache.json"
+```
+
+### Database Tuning
+```yaml
+pieces:
+ database-url: "postgres://user:pass@localhost/storagenode?sslmode=disable"
+
+storage2:
+ database-url: "postgres://user:pass@localhost/storage2?sslmode=disable"
+
+# OR for SQLite with custom settings
+pieces:
+ database-url: "sqlite3://path/to/piecestore.db?cache=shared&mode=rwc&_journal_mode=WAL"
+```
+
+### Performance Tuning
+```yaml
+filestore:
+ write-buffer-size: 256KB
+ force-sync: true
+
+storage2:
+ monitor:
+ minimum-disk-space: 500MB
+ minimum-bandwidth: 1MB
+```
+
+## Network Configuration
+
+### Port Configuration
+
+| Port | Protocol | Purpose | Required |
+|------|----------|---------|----------|
+| `28967` | TCP/UDP | Storage node communication | Yes |
+| `14002` | TCP | Web dashboard (local only) | Optional |
+
+### Firewall Rules
+
+#### Linux (iptables)
+```bash
+# Allow incoming storage node traffic
+iptables -A INPUT -p tcp --dport 28967 -j ACCEPT
+iptables -A INPUT -p udp --dport 28967 -j ACCEPT
+
+# Allow outgoing node-to-node traffic (satellites use other ports; the default OUTPUT policy usually suffices)
+iptables -A OUTPUT -p tcp --dport 28967 -j ACCEPT
+iptables -A OUTPUT -p udp --dport 28967 -j ACCEPT
+```
+
+#### Router/Firewall Configuration
+- **External Port**: 28967 (TCP/UDP)
+- **Internal Port**: 28967 (TCP/UDP)
+- **Protocol**: Both TCP and UDP required
+- **Direction**: Bidirectional
+
+## Logging Configuration
+
+### Log Levels
+
+| Level | Description | Use Case |
+|-------|-------------|----------|
+| `debug` | Very verbose output | Development/troubleshooting |
+| `info` | General information | Normal operation |
+| `warn` | Warning messages | Monitoring issues |
+| `error` | Error messages only | Production minimal |
+
+### Log Configuration
+```yaml
+log:
+ level: info
+ output: stdout
+ caller: false
+ stack: false
+ encoding: console
+```
+
+### Docker Logging
+```bash
+# View logs
+docker logs storagenode
+
+# Follow logs
+docker logs -f storagenode
+
+# View specific number of lines
+docker logs --tail 100 storagenode
+```
+
+## Health Check Configuration
+
+### Built-in Health Checks
+```yaml
+console:
+ address: 127.0.0.1:14002
+
+healthcheck:
+ enabled: true
+ interval: 30s
+ timeout: 10s
+```
+
+### External Monitoring
+```bash
+# Health check endpoint
+curl http://localhost:14002/api/sno
+
+# Satellite status
+curl http://localhost:14002/api/sno/satellites
+```
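+
+Piping the response through `jq` gives readable JSON (jq installed separately; field names vary by version):
+
+```bash
+curl -s http://localhost:14002/api/sno | jq .
+```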
+
+## Configuration Validation
+
+### Syntax Check
+```bash
+# Docker validation
+docker run --rm -v $HOME/storj/storagenode:/app/config \
+ storjlabs/storagenode:latest --config-dir /app/config --help
+
+# Native installation
+storagenode --config-dir ~/.local/share/storj/storagenode --help
+```
+
+### Common Configuration Errors
+
+| Error | Cause | Solution |
+|-------|-------|----------|
+| Identity verification failed | Wrong identity path | Check identity.cert-path and identity.key-path |
+| Address not reachable | Firewall/NAT issues | Configure port forwarding |
+| Disk space unavailable | Insufficient storage | Increase allocated-disk-space or free up space |
+| Database corruption | Improper shutdown | Restore from backup or rebuild |
+
+## Migration and Backup
+
+### Configuration Backup
+```bash
+# Backup entire config directory
+tar -czf storagenode-config-backup-$(date +%Y%m%d).tar.gz \
+ -C $HOME/storj storagenode/
+
+# Backup just configuration file
+cp $HOME/storj/storagenode/config.yaml \
+ $HOME/storj/storagenode/config.yaml.backup
+```
+
+### Configuration Migration
+```bash
+# Copy to new location
+rsync -av $HOME/storj/storagenode/ /new/path/storagenode/
+
+# Update docker mount points
+docker run ... \
+ --mount type=bind,source=/new/path/storagenode,destination=/app/config \
+ ...
+```
+
+This reference covers all major Storage Node configuration options. For specific deployment scenarios, refer to the installation guides for your platform.
\ No newline at end of file
diff --git a/app/(docs)/node/reference/dashboard-metrics.md b/app/(docs)/node/reference/dashboard-metrics.md
new file mode 100644
index 000000000..3b9899df7
--- /dev/null
+++ b/app/(docs)/node/reference/dashboard-metrics.md
@@ -0,0 +1,275 @@
+---
+title: "Dashboard Metrics Reference"
+docId: "node-dashboard-ref-001"
+metadata:
+ title: "Storage Node Dashboard Metrics Reference"
+ description: "Complete reference for all Storage Node dashboard metrics, monitoring data, and performance indicators."
+---
+
+Complete reference for Storage Node dashboard metrics and monitoring information.
+
+## Accessing the Dashboard
+
+### Local Dashboard
+- **URL**: `http://localhost:14002` (default)
+- **Access**: Local machine only (for security)
+- **Port**: Configurable in `config.yaml` (`console.address`)
+
+### External Dashboard Access
+
+For remote monitoring, use SSH tunneling:
+```bash
+# SSH tunnel to access remote node dashboard
+ssh -L 14002:localhost:14002 user@your-node-server
+# Then access http://localhost:14002 locally
+```
+
+## Overview Metrics
+
+### Node Status Indicators
+
+| Metric | Description | Values |
+|--------|-------------|--------|
+| **Node Status** | Overall node health | Online, Offline, Disqualified |
+| **Uptime** | Time since node started | Hours, days |
+| **Last Ping** | Last successful satellite ping | Timestamp |
+| **Node Version** | Storage node software version | e.g., `v1.95.1` |
+
+### Suspension and Disqualification
+
+| Status | Description | Impact |
+|--------|-------------|--------|
+| **Good Standing** | Node operating normally | Full participation |
+| **Suspended** | Temporary suspension from satellite | No new data, existing data served |
+| **Disqualified** | Permanent removal from satellite | Data deleted, no participation |
+
+## Storage Metrics
+
+### Disk Usage
+
+| Metric | Description | Calculation |
+|--------|-------------|-------------|
+| **Used Space** | Currently stored data | Sum of all piece sizes |
+| **Available Space** | Remaining allocated space | Allocated - Used |
+| **Allocated Space** | Total space allocated to node | From configuration |
+| **Trash** | Data marked for deletion | Pending garbage collection |
+
+### Storage Breakdown by Satellite
+
+| Field | Description |
+|-------|-------------|
+| **Satellite ID** | Unique satellite identifier |
+| **Data Stored** | Amount of data from this satellite |
+| **Percentage** | Portion of total storage from satellite |
+
+## Bandwidth Metrics
+
+### Current Period (Monthly)
+
+| Metric | Description | Reset Period |
+|--------|-------------|--------------|
+| **Ingress** | Data uploaded to node | Monthly (satellite billing cycle) |
+| **Egress** | Data downloaded from node | Monthly |
+| **Total Bandwidth** | Ingress + Egress | Monthly |
+| **Remaining Bandwidth** | Allocated - Used | Monthly |
+
+### Bandwidth by Satellite
+
+| Field | Description |
+|-------|-------------|
+| **Satellite** | Satellite name/ID |
+| **Ingress** | Upload traffic from satellite |
+| **Egress** | Download traffic to satellite |
+| **Total** | Combined satellite bandwidth |
+
+## Earnings Metrics
+
+### Current Month
+
+| Metric | Description | Currency |
+|--------|-------------|----------|
+| **Estimated Earnings** | Projected month earnings | STORJ tokens |
+| **Disk Space Compensation** | Payment for storage | STORJ tokens |
+| **Bandwidth Compensation** | Payment for traffic | STORJ tokens |
+| **Payout Address** | Wallet receiving payments | Ethereum address |
+
+### Earnings History
+
+| Field | Description |
+|-------|-------------|
+| **Month** | Billing period |
+| **Disk Average** | Average monthly disk usage |
+| **Bandwidth** | Total monthly bandwidth |
+| **Payout** | Amount paid |
+| **Receipt** | Payment transaction ID |
+
+## Reputation Metrics
+
+### Audit Scores
+
+| Metric | Range | Description |
+|--------|-------|-------------|
+| **Audit Score** | 0-100% | Success rate for audit requests |
+| **Suspension Score** | 0-100% | Threshold: <60% triggers suspension |
+| **Disqualification Score** | 0-100% | Threshold: <60% triggers disqualification |
+
+### Online Score
+
+| Metric | Range | Description |
+|--------|-------|-------------|
+| **Online Score** | 0-100% | Node availability percentage |
+| **Downtime Events** | Count | Number of offline periods |
+| **Last Offline** | Timestamp | Most recent offline event |
+
+### Uptime Tracking
+
+| Field | Description |
+|-------|-------------|
+| **Current Uptime** | Continuous online time |
+| **Today** | Uptime percentage for current day |
+| **This Month** | Uptime percentage for current month |
+| **All Time** | Historical uptime average |
+
+## Satellite-Specific Metrics
+
+### Per-Satellite Data
+
+| Satellite | Description |
+|-----------|-------------|
+| **12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFHpkmw2GT1RtLUod** | US Central |
+| **12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs** | Europe North |
+| **1wFTAgs9DP5RSnCqKV1eLf6N9wtk4EAtmN5DpSxcs8EjT69tGE** | Asia Pacific |
+
+### Satellite Metrics
+
+| Metric | Description |
+|--------|-------------|
+| **Node Age** | Time since first contact with satellite |
+| **Vetted Status** | Whether node is vetted (trusted) |
+| **Joined Date** | When node first connected |
+| **Data Stored** | Current data volume |
+| **Audit Success Rate** | Historical audit performance |
+
+## System Performance
+
+### Resource Utilization
+
+| Metric | Description | Units |
+|--------|-------------|-------|
+| **CPU Usage** | Processor utilization | Percentage |
+| **Memory Usage** | RAM consumption | MB/GB |
+| **Disk I/O** | Read/write operations | IOPS |
+| **Network I/O** | Network throughput | Mbps |
+
+### Database Metrics
+
+| Metric | Description |
+|--------|-------------|
+| **Pieces Database Size** | Piece metadata database size |
+| **Info Database Size** | Node information database size |
+| **Database Queries** | Query performance metrics |
+
+## Notifications and Alerts
+
+### Dashboard Notifications
+
+| Type | Description | Action Required |
+|------|-------------|----------------|
+| **Version Update** | New software version available | Update recommended |
+| **Low Disk Space** | Storage nearly full | Free up space or increase allocation |
+| **Suspension Warning** | Reputation score declining | Investigate connectivity/performance |
+| **Payment Info** | Payout information | Check wallet address |
+
+### Health Indicators
+
+| Indicator | Status | Meaning |
+|-----------|--------|---------|
+| 🟢 Green | Healthy | All systems normal |
+| 🟡 Yellow | Warning | Attention needed |
+| 🔴 Red | Critical | Immediate action required |
+
+## API Endpoints for Monitoring
+
+### Dashboard API
+
+| Endpoint | Description | Response |
+|----------|-------------|----------|
+| `/api/sno` | Node overview data | JSON summary |
+| `/api/sno/satellites` | Satellite-specific data | JSON per satellite |
+| `/api/sno/estimated-payouts` | Earnings estimates | JSON payout data |
+
+### Monitoring Script Example
+
+```bash
+#!/bin/bash
+# Basic node health check
+response=$(curl -s http://localhost:14002/api/sno)
+# NOTE: field names depend on the node API version; adjust the jq filter as needed
+status=$(echo "$response" | jq -r '.status')
+
+if [ "$status" = "online" ]; then
+ echo "Node is healthy"
+else
+ echo "Node issue detected: $status"
+fi
+```
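+
+To run this check on a schedule, a cron entry such as `*/5 * * * * /path/to/node-check.sh` (path illustrative) provides basic automated monitoring.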
+
+## Historical Data Tracking
+
+### Data Retention
+
+| Metric | Retention Period | Purpose |
+|--------|------------------|---------|
+| **Bandwidth** | 12 months | Payout calculation |
+| **Storage** | 12 months | Trend analysis |
+| **Audit Results** | Permanent | Reputation tracking |
+| **Uptime** | 12 months | Performance monitoring |
+
+### Export Options
+
+Dashboard data can be extracted via:
+- **API endpoints** - Real-time data
+- **Log files** - Historical events
+- **Database queries** - Direct data access
+
+## Performance Optimization
+
+### Key Metrics to Monitor
+
+1. **Audit Success Rate** - Should stay >95%
+2. **Online Score** - Should stay >98%
+3. **Bandwidth Utilization** - Higher is better
+4. **Storage Growth** - Indicates network demand
+
+### Warning Thresholds
+
+| Metric | Warning | Critical |
+|--------|---------|----------|
+| **Audit Score** | <85% | <60% |
+| **Online Score** | <95% | <90% |
+| **Disk Free** | <10% | <5% |
+| **Version Behind** | >1 version | >3 versions |
+
+## Troubleshooting Dashboard Issues
+
+### Dashboard Not Accessible
+
+1. **Check port binding**:
+ ```bash
+ netstat -tulnp | grep 14002
+ ```
+
+2. **Verify configuration**:
+ ```yaml
+ console:
+ address: 127.0.0.1:14002
+ ```
+
+3. **Check firewall rules** (if accessing remotely)
+
+### Missing Data
+
+1. **Restart node** if metrics not updating
+2. **Check database integrity**
+3. **Verify satellite connectivity**
+
+This reference covers all dashboard metrics for effective Storage Node monitoring and management. Use these metrics to ensure optimal node performance and maximize earnings.
\ No newline at end of file
diff --git a/app/(docs)/node/reference/system-requirements.md b/app/(docs)/node/reference/system-requirements.md
new file mode 100644
index 000000000..401efb2bd
--- /dev/null
+++ b/app/(docs)/node/reference/system-requirements.md
@@ -0,0 +1,288 @@
+---
+title: "System Requirements Reference"
+docId: "node-system-req-ref-001"
+metadata:
+ title: "Storage Node System Requirements Reference"
+ description: "Complete reference for Storage Node hardware, software, and network requirements for optimal performance."
+---
+
+Complete reference for Storage Node system requirements and specifications.
+
+## Hardware Requirements
+
+### Minimum Requirements
+
+| Component | Requirement | Notes |
+|-----------|-------------|-------|
+| **CPU** | 1 core, 1 GHz | ARM or x86_64 |
+| **RAM** | 1 GB | Minimum for basic operation |
+| **Storage** | 500 GB available | Dedicated to Storj (not OS) |
+| **Network** | 1 Mbps up/down | Sustained bandwidth |
+
+### Recommended Requirements
+
+| Component | Requirement | Benefit |
+|-----------|-------------|---------|
+| **CPU** | 2+ cores, 2+ GHz | Better concurrent processing |
+| **RAM** | 4+ GB | Improved caching and performance |
+| **Storage** | 2+ TB available | Higher earning potential |
+| **Network** | 10+ Mbps up/down | Faster data transfers |
+
+### Optimal Performance Configuration
+
+| Component | Specification | Purpose |
+|-----------|---------------|---------|
+| **CPU** | 4+ cores, modern architecture | Handle multiple satellite operations |
+| **RAM** | 8+ GB | Large cache for frequently accessed data |
+| **Storage** | 8+ TB, SSD or NVMe | Maximum storage capacity and speed |
+| **Network** | 50+ Mbps symmetric | High-throughput data transfers |
+
+## Storage Requirements
+
+### Storage Types
+
+| Type | Performance | Reliability | Cost | Recommendation |
+|------|-------------|-------------|------|----------------|
+| **HDD (7200 RPM)** | Good | Good | Low | ✅ Recommended for most setups |
+| **SSD (SATA)** | Excellent | Excellent | Medium | ⭐ Optimal for performance |
+| **NVMe SSD** | Outstanding | Excellent | High | 🚀 Best performance |
+| **USB/External** | Poor | Variable | Low | ❌ Not recommended |
+
+### Storage Considerations
+
+| Factor | Requirement | Rationale |
+|--------|-------------|-----------|
+| **Dedicated Drive** | Highly recommended | Prevents OS disk space conflicts |
+| **File System** | NTFS, ext4, XFS | Reliable journaling file systems |
+| **Available Space** | 90% of drive or less | Leave space for metadata and growth |
+| **SMART Monitoring** | Essential | Early failure detection |
+
+### Storage Allocation Guidelines
+
+```
+Example for 2TB drive:
+├── OS/System: 100 GB (separate drive preferred)
+├── Storj Data: 1,800 GB (allocated to node)
+└── Free Space: 100 GB (buffer for operations)
+```
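+
+In a Docker deployment this allocation maps to the `STORAGE` environment variable (values illustrative):
+
+```bash
+# Allocate 1.8 TB of a 2 TB drive, keeping a buffer for metadata and operations
+docker run ... -e STORAGE="1.8TB" ... storjlabs/storagenode:latest
+```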
+
+## Network Requirements
+
+### Internet Connection
+
+| Requirement | Specification | Purpose |
+|-------------|---------------|---------|
+| **Connection Type** | Residential/Business | Stable, always-on connection |
+| **Bandwidth** | 1+ Mbps sustained | Handle storage/retrieval requests |
+| **Data Cap** | Unlimited preferred | Monthly bandwidth usage varies |
+| **Latency** | <100ms to satellites | Responsive to network requests |
+
+### Network Specifications
+
+| Metric | Minimum | Recommended | Optimal |
+|--------|---------|-------------|---------|
+| **Upload Speed** | 1 Mbps | 5 Mbps | 25+ Mbps |
+| **Download Speed** | 1 Mbps | 5 Mbps | 25+ Mbps |
+| **Monthly Data** | 2+ TB | 5+ TB | Unlimited |
+| **Uptime** | 95% | 98% | 99.5%+ |
+
+### Port and Protocol Requirements
+
+| Protocol | Port | Direction | Purpose |
+|----------|------|-----------|---------|
+| **TCP** | 28967 | Inbound/Outbound | Primary communication |
+| **UDP** | 28967 | Inbound/Outbound | QUIC protocol |
+| **HTTP** | 14002 | Localhost only | Dashboard access |
+
+## Operating System Support
+
+### Linux Distributions
+
+| Distribution | Version | Support Level | Installation Method |
+|--------------|---------|---------------|-------------------|
+| **Ubuntu** | 18.04+ | Full | Docker, Native |
+| **Debian** | 10+ | Full | Docker, Native |
+| **CentOS/RHEL** | 7+ | Full | Docker, Native |
+| **Fedora** | 30+ | Full | Docker |
+| **openSUSE** | 15+ | Full | Docker |
+| **Arch Linux** | Latest | Community | Docker |
+
+### Other Operating Systems
+
+| OS | Support | Method | Notes |
+|----|---------|--------|-------|
+| **Windows** | Full | Native installer, Docker | Windows 10/11, Server 2016+ |
+| **macOS** | Limited | Docker | Intel and Apple Silicon |
+| **FreeBSD** | Community | Docker/Ports | Limited testing |
+| **Synology DSM** | Community | Docker | Package available |
+| **QNAP** | Community | Docker | Container station |
+
+## Virtualization and Containers
+
+### Docker Requirements
+
+| Component | Requirement | Notes |
+|-----------|-------------|-------|
+| **Docker Version** | 19.03+ | Supports required features |
+| **Docker Compose** | 1.25+ | For compose deployments |
+| **Host OS** | Linux, Windows, macOS | Docker Desktop or native |
+| **Container Runtime** | Docker or compatible | Podman, containerd support |
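+
+Verify that installed versions meet these minimums:
+
+```bash
+docker --version           # needs 19.03+
+docker compose version     # Compose v2; use `docker-compose --version` for v1 (1.25+)
+```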
+
+### Virtual Machine Specifications
+
+| Resource | Minimum | Recommended |
+|----------|---------|-------------|
+| **vCPU** | 1 core | 2+ cores |
+| **vRAM** | 1 GB | 4+ GB |
+| **vDisk** | 500 GB | 2+ TB |
+| **Network** | Bridged mode | Direct external access |
+
+### Hardware Pass-through
+
+| Component | Benefit | Requirement |
+|-----------|---------|-------------|
+| **Disk Pass-through** | Better performance | Direct disk access |
+| **Network Pass-through** | Lower latency | Dedicated network interface |
+| **CPU Pinning** | Consistent performance | Multi-core host system |
+
+## Network Architecture
+
+### Home Network Setup
+
+```
+Internet -> Router -> Storage Node
+ ↓
+ Port Forward
+ 28967 TCP/UDP
+```
+
+### Advanced Network Configuration
+
+```
+Internet -> Firewall -> DMZ -> Storage Node
+ ↓
+ Dedicated VLAN
+ QoS Priority
+```
+
+### Dynamic DNS Requirements
+
+| Scenario | Solution | Purpose |
+|----------|----------|---------|
+| **Dynamic IP** | DDNS service | Maintain consistent address |
+| **Multiple Nodes** | Subdomain per node | Unique addressing |
+| **IPv6** | DDNS with AAAA records | Future-proofing |
+
+## Platform-Specific Requirements
+
+### Raspberry Pi
+
+| Model | Minimum | Recommended | Notes |
+|-------|---------|-------------|-------|
+| **Pi 3B+** | Marginal | Not recommended | Limited performance |
+| **Pi 4 (4GB)** | Workable | Entry level | USB 3.0 for storage |
+| **Pi 4 (8GB)** | Good | Recommended | Better caching |
+
+**Pi-Specific Considerations:**
+- Use USB 3.0 SSD for storage
+- Ensure adequate power supply (3A+)
+- Monitor CPU temperature (see the quick check below)
+- Use quality SD card for OS
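+
+To spot-check temperature and power health, Raspberry Pi OS ships the `vcgencmd` utility (a quick sketch; output formats can differ between firmware versions):
+
+```bash
+# Current SoC temperature
+vcgencmd measure_temp
+
+# Throttle flags: 0x0 means no under-voltage or thermal throttling detected
+vcgencmd get_throttled
+```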
+
+### Synology NAS
+
+| Series | Support | Method |
+|--------|---------|--------|
+| **Plus Series** | Full | Docker package |
+| **Value Series** | Limited | Manual Docker setup |
+| **J Series** | Not recommended | Insufficient resources |
+
+**NAS-Specific Requirements:**
+- DSM 6.0+ with Docker support
+- Dedicated volume for Storj data
+- SSH access for advanced configuration
+
+### QNAP NAS
+
+| Architecture | Support | Notes |
+|--------------|---------|-------|
+| **x86_64** | Full | Container Station |
+| **ARM** | Limited | Performance considerations |
+
+## Power and Environmental
+
+### Power Requirements
+
+| Component | Consumption | Annual Cost* |
+|-----------|-------------|-------------|
+| **Raspberry Pi 4** | 15W | $13 |
+| **Mini PC** | 30W | $26 |
+| **Desktop PC** | 100W | $88 |
+| **Server** | 300W | $263 |
+
+*Based on $0.10/kWh electricity rate
+
+### Environmental Considerations
+
+| Factor | Requirement | Rationale |
+|--------|-------------|-----------|
+| **Temperature** | 10-35°C (50-95°F) | Component longevity |
+| **Humidity** | 20-80% RH | Prevent corrosion |
+| **Ventilation** | Adequate airflow | Heat dissipation |
+| **Power Stability** | UPS recommended | Prevent data corruption |
+
+## Security Requirements
+
+### Network Security
+
+| Component | Requirement | Purpose |
+|-----------|-------------|---------|
+| **Firewall** | Port 28967 only | Limit attack surface |
+| **Router Security** | WPA3, strong passwords | Secure network access |
+| **VPN** | For remote management | Secure administrative access |
+
+### System Security
+
+| Component | Requirement | Purpose |
+|-----------|-------------|---------|
+| **OS Updates** | Regular patching | Security vulnerabilities |
+| **User Accounts** | Non-root operation | Principle of least privilege |
+| **File Permissions** | Proper ownership | Data protection |
+
+## Performance Monitoring
+
+### Key Metrics to Monitor
+
+| Metric | Tool | Threshold |
+|--------|------|-----------|
+| **CPU Usage** | htop, Task Manager | <80% sustained |
+| **RAM Usage** | free, Task Manager | <90% |
+| **Disk I/O** | iostat, Performance Monitor | <80% utilization |
+| **Network Usage** | iftop, Resource Monitor | Within bandwidth limits |
+
+### Monitoring Tools
+
+| Platform | Tools | Purpose |
+|----------|-------|---------|
+| **Linux** | htop, iotop, iftop | Real-time monitoring |
+| **Windows** | Task Manager, perfmon | System monitoring |
+| **Cross-platform** | Grafana, Prometheus | Advanced monitoring |
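+
+As a quick sketch on Linux, the thresholds above can be spot-checked with standard tools (`iostat` comes from the `sysstat` package, and the interface name `eth0` is a placeholder for your own):
+
+```bash
+# RAM usage at a glance
+free -h
+
+# Disk utilization: 3 samples at 5-second intervals
+iostat -x 5 3
+
+# Live per-connection bandwidth on eth0 (requires root)
+sudo iftop -i eth0
+```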
+
+## Upgrade Considerations
+
+### Hardware Upgrade Path
+
+1. **RAM** - Easy upgrade, immediate benefit
+2. **Storage** - Add drives or upgrade to SSD
+3. **Network** - Faster internet connection
+4. **CPU** - Usually requires new system
+
+### Capacity Planning
+
+| Growth Rate | Hardware Planning |
+|-------------|------------------|
+| **Monthly** | Monitor storage usage trends |
+| **Quarterly** | Evaluate performance metrics |
+| **Annually** | Plan major upgrades |
+
+This reference ensures your Storage Node meets all requirements for reliable operation and optimal earnings potential. Regularly review system performance and upgrade components as needed.
\ No newline at end of file
diff --git a/app/(docs)/node/tutorials/_meta.json b/app/(docs)/node/tutorials/_meta.json
new file mode 100644
index 000000000..689149d1f
--- /dev/null
+++ b/app/(docs)/node/tutorials/_meta.json
@@ -0,0 +1,9 @@
+{
+ "title": "Tutorials",
+ "nav": [
+ {
+ "title": "Setup your first node",
+ "id": "setup-first-node"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/app/(docs)/node/tutorials/setup-first-node.md b/app/(docs)/node/tutorials/setup-first-node.md
new file mode 100644
index 000000000..6b77c6865
--- /dev/null
+++ b/app/(docs)/node/tutorials/setup-first-node.md
@@ -0,0 +1,649 @@
+---
+title: Setup your first node
+docId: setup-first-storage-node
+metadata:
+ title: Setup Your First Storage Node Tutorial
+  description: Step-by-step tutorial to set up your first Storj storage node from start to finish.
+---
+
+This comprehensive tutorial walks you through setting up your first Storj storage node from start to finish. By the end, you'll have a running node that earns STORJ tokens for providing storage and bandwidth to the network.
+
+## What you'll build
+
+In this hands-on tutorial, you'll:
+
+- Set up the necessary hardware and network infrastructure
+- Create a unique node identity for network participation
+- Configure port forwarding and firewall settings
+- Install and configure the storage node software
+- Connect to the Storj network and begin earning rewards
+- Set up monitoring and maintenance procedures
+
+**Expected time to complete**: 60-90 minutes of active setup, plus a one-time identity generation that can run for several hours in the background
+
+## Prerequisites
+
+Before starting your storage node, ensure you have:
+
+### Hardware requirements (minimum)
+- **CPU**: 1 processor core dedicated to your node
+- **Storage**: 500 GB available disk space (non-SMR hard drive recommended)
+- **RAM**: 2 GB available (4 GB recommended)
+- **Network**: Stable internet connection with minimum 1 Mbps upload, 3 Mbps download per TB capacity
+
+### Hardware requirements (recommended)
+- **CPU**: 1 processor core per TB of storage
+- **Storage**: 2 TB+ available space on dedicated drive
+- **RAM**: 8 GB+ for optimal performance
+- **Network**: 3 Mbps upload, 5 Mbps download per TB capacity
+- **Uptime**: 99.5%+ monthly (maximum 3.6 hours downtime/month)
+
+### System requirements
+- **Operating System**: Linux (Ubuntu 18.04+, Debian 9+), Windows 10+, or macOS 10.15+
+- **Administrative privileges**: Ability to install software and configure network settings
+- **Router access**: Administrative access to configure port forwarding
+- **Static IP or DDNS**: Consistent external address (dynamic DNS acceptable)
+
+### Important considerations
+
+**Network setup**: Your node must sit behind a router/firewall; never connect it directly to the internet.
+
+**Power stability**: Consider an uninterruptible power supply (UPS) if you experience frequent power outages.
+
+**Drive selection**: Avoid SMR drives, RAID 0, or network-attached storage for best performance.
+
+## Step 1: Assess your setup
+
+Before proceeding, verify your environment meets the requirements:
+
+### Check your internet connection
+
+Test your connection speed and stability:
+
+```bash
+# Test connection speed
+speedtest-cli
+
+# Test connection stability (run for several minutes)
+ping -c 100 8.8.8.8
+
+# Check your public IP
+curl ifconfig.me
+```
+
+**Expected outcome**: Your connection should meet the minimum bandwidth requirements with stable ping times.
+
+### Verify hardware compatibility
+
+**Check available disk space**:
+
+{% tabs %}
+
+{% tab label="Linux" %}
+
+```bash
+# Check disk space and filesystem
+df -h
+lsblk -f
+
+# Verify filesystem type (ext4 recommended for Linux)
+mount | grep "your-storage-drive"
+```
+
+{% /tab %}
+
+{% tab label="Windows" %}
+
+```powershell
+# Check disk space
+Get-WmiObject -Class Win32_LogicalDisk | Select-Object DeviceID,Size,FreeSpace
+
+# Check filesystem (NTFS recommended for Windows)
+Get-Volume
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+**Expected outcome**: You should have adequate free space on a native filesystem (ext4 for Linux, NTFS for Windows).
+
+## Step 2: Configure network access
+
+Set up network infrastructure to make your node accessible from the internet:
+
+### Set up port forwarding
+
+Configure your router to forward port 28967 to your node system:
+
+1. **Find your internal IP address**:
+
+ {% tabs %}
+
+ {% tab label="Linux" %}
+ ```bash
+ ip addr show
+ # Look for your primary network interface IP
+ ```
+ {% /tab %}
+
+ {% tab label="Windows" %}
+ ```powershell
+ ipconfig /all
+ # Look for your primary network adapter IP
+ ```
+ {% /tab %}
+
+ {% /tabs %}
+
+2. **Access router admin panel**:
+ - Open web browser to your router's IP (usually 192.168.1.1 or 192.168.0.1)
+ - Log in with admin credentials
+
+3. **Configure port forwarding**:
+ - Navigate to Port Forwarding or Virtual Servers section
+ - Add new rule:
+ - **Service Name**: Storj Node
+ - **Port Range**: 28967-28967
+ - **Local IP**: Your computer's internal IP
+ - **Local Port**: 28967
+ - **Protocol**: Both TCP and UDP
+ - Save and apply settings
+
+### Configure dynamic DNS (if needed)
+
+If your ISP assigns dynamic IP addresses:
+
+1. **Sign up for DDNS service** (e.g., NoIP.com)
+2. **Create a domain** (e.g., mynode.ddns.net)
+3. **Configure auto-update**:
+ - **Option A**: Configure in router DDNS settings
+   - **Option B**: Install DDNS client software on your system (see the sketch below)
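+
+As a sketch for Option B, the widely used `ddclient` package can keep a No-IP hostname updated; all values below are placeholders for your own account:
+
+```bash
+# Install the client, write a minimal No-IP config, and restart the service
+sudo apt install ddclient
+sudo tee /etc/ddclient.conf > /dev/null << 'EOF'
+protocol=noip
+use=web
+login=your-noip-username
+password='your-noip-password'
+mynode.ddns.net
+EOF
+sudo systemctl restart ddclient
+```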
+
+### Test port accessibility
+
+Verify your port forwarding works:
+
+1. **Get your public IP**: `curl ifconfig.me`
+2. **Test port**: Visit an open-port checker such as [yougetsignal.com](https://www.yougetsignal.com/tools/open-ports/), or use netcat as shown below
+3. **Enter your public IP and port 28967**
+4. **Result should show "Open"**
+
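+From a shell on another network, a similar check is possible with netcat (the address is a placeholder):
+
+```bash
+# TCP reachability check; UDP/QUIC can't be confirmed by a simple probe,
+# so use the node logs or dashboard to verify UDP later
+nc -vz -w 5 your.public.address 28967
+```
+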
+**Expected outcome**: Port 28967 should be accessible from the internet on both TCP and UDP.
+
+## Step 3: Configure firewall settings
+
+Ensure your system firewall allows storage node traffic:
+
+{% tabs %}
+
+{% tab label="Linux (UFW)" %}
+
+```bash
+# Allow storage node port
+sudo ufw allow 28967/tcp
+sudo ufw allow 28967/udp
+
+# Allow dashboard port (optional, for local access only)
+sudo ufw allow from 192.168.0.0/16 to any port 14002
+
+# Reload firewall
+sudo ufw reload
+
+# Check status
+sudo ufw status
+```
+
+{% /tab %}
+
+{% tab label="Windows Defender" %}
+
+```powershell
+# Allow inbound traffic on storage node port
+New-NetFirewallRule -DisplayName "Storj Node TCP" -Direction Inbound -Protocol TCP -LocalPort 28967 -Action Allow
+New-NetFirewallRule -DisplayName "Storj Node UDP" -Direction Inbound -Protocol UDP -LocalPort 28967 -Action Allow
+
+# Allow dashboard port for local access
+New-NetFirewallRule -DisplayName "Storj Dashboard" -Direction Inbound -Protocol TCP -LocalPort 14002 -Action Allow
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+**Expected outcome**: Firewall should allow traffic on port 28967 and optionally 14002 for the dashboard.
+
+## Step 4: Create your node identity
+
+Generate a unique cryptographic identity for your storage node:
+
+{% tabs %}
+
+{% tab label="Linux" %}
+
+### Install identity creation tools
+
+```bash
+# Download identity creation binary
+curl -L https://github.com/storj/storj/releases/latest/download/identity_linux_amd64.zip -o identity_linux_amd64.zip
+unzip identity_linux_amd64.zip
+sudo mv identity /usr/local/bin/
+sudo chmod +x /usr/local/bin/identity
+```
+
+### Create identity
+
+```bash
+# Create identity (this may take several hours)
+identity create storagenode
+
+# Check progress (in another terminal)
+identity status storagenode
+```
+
+**Identity creation time varies**:
+- Fast CPU: 2-8 hours
+- Slower CPU: 8-24+ hours
+- Raspberry Pi: 1-3+ days
+
+{% /tab %}
+
+{% tab label="Windows" %}
+
+### Download and install identity tools
+
+1. **Download**: Go to [Storj releases page](https://github.com/storj/storj/releases)
+2. **Download**: `identity_windows_amd64.zip`
+3. **Extract**: To a folder like `C:\storj-identity\`
+4. **Add to PATH**: Or use full path in commands
+
+### Create identity
+
+```powershell
+# Open PowerShell as Administrator
+# Navigate to identity tool location
+cd C:\storj-identity\
+
+# Create identity (this may take several hours)
+.\identity.exe create storagenode
+
+# Check progress (in another PowerShell window)
+.\identity.exe status storagenode
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+**Expected outcome**: After completion, you should have identity files in your identity directory. The process generates cryptographic keys that uniquely identify your node.
+
+**Important**: Never share or modify your identity files. Losing them means losing your node reputation permanently.
+
+## Step 5: Install storage node software
+
+Choose the installation method that works best for your system:
+
+{% tabs %}
+
+{% tab label="Linux CLI (Docker)" %}
+
+### Install Docker
+
+```bash
+# Update package index
+sudo apt update
+
+# Install Docker
+curl -fsSL https://get.docker.com -o get-docker.sh
+sudo sh get-docker.sh
+
+# Add user to docker group
+sudo usermod -aG docker $USER
+
+# Log out and back in, then verify
+docker --version
+```
+
+### Create storage directories
+
+```bash
+# Create directories for node data
+mkdir -p $HOME/storj/storagenode
+mkdir -p $HOME/storj/identity/storagenode
+
+# Copy identity files
+cp -r ~/.local/share/storj/identity/storagenode/* $HOME/storj/identity/storagenode/
+```
+
+### Run storage node
+
+```bash
+# Replace values with your actual information
+docker run -d --restart unless-stopped \
+ --name storagenode \
+ -p 28967:28967/tcp \
+ -p 28967:28967/udp \
+ -p 14002:14002 \
+ -e WALLET="0xYOUR_WALLET_ADDRESS_HERE" \
+ -e EMAIL="your-email@example.com" \
+ -e ADDRESS="your.ddns.domain:28967" \
+ -e STORAGE="2TB" \
+ --mount type=bind,source=$HOME/storj/identity/storagenode,destination=/app/identity \
+ --mount type=bind,source=$HOME/storj/storagenode,destination=/app/config \
+ storjlabs/storagenode:latest
+```
+
+{% /tab %}
+
+{% tab label="Windows GUI" %}
+
+### Download Windows installer
+
+1. **Download**: [Storage Node Windows Installer](https://github.com/storj/storj/releases)
+2. **Run installer**: `storagenode_windows_amd64.msi`
+3. **Follow wizard**: Accept defaults or customize installation path
+
+### Configure the node
+
+1. **Copy identity files**:
+ ```powershell
+ # Copy identity to program directory (adjust paths as needed)
+   Copy-Item -Recurse "$env:APPDATA\Storj\Identity\storagenode\*" "C:\Program Files\Storj\Storage Node\identity\"
+ ```
+
+2. **Edit configuration**:
+ ```powershell
+   # Edit config file (use Notepad or any text editor)
+   notepad "C:\Program Files\Storj\Storage Node\config.yaml"
+ ```
+
+ **Update these values**:
+ ```yaml
+ operator:
+ email: "your-email@example.com"
+ wallet: "0xYOUR_WALLET_ADDRESS_HERE"
+ contact:
+ external-address: "your.ddns.domain:28967"
+ storage:
+ allocated-bandwidth: "2TB"
+ allocated-disk-space: "2TB"
+ ```
+
+3. **Start the service**:
+ ```powershell
+ # Start service
+ Start-Service storagenode
+
+ # Verify it's running
+ Get-Service storagenode
+ ```
+
+{% /tab %}
+
+{% /tabs %}
+
+**Replace these placeholders**:
+- `0xYOUR_WALLET_ADDRESS_HERE`: Your Ethereum wallet address for payments
+- `your-email@example.com`: Your contact email
+- `your.ddns.domain:28967`: Your external address (IP or domain + port)
+- `2TB`: Your desired storage allocation
+
+**Expected outcome**: Your storage node should start successfully and begin connecting to the Storj network.
+
+## Step 6: Verify node operation
+
+Confirm your storage node is working correctly:
+
+### Check the dashboard
+
+1. **Open browser**: Navigate to `http://localhost:14002`
+2. **Review status**: The dashboard should show your node as online, along with basic storage and bandwidth stats
+3. **Check connectivity**: All satellites should show green status
+
+### Monitor logs
+
+{% tabs %}
+
+{% tab label="Linux CLI" %}
+
+```bash
+# View real-time logs
+docker logs storagenode -f
+
+# Check for errors
+docker logs storagenode 2>&1 | grep -i error
+
+# Look for successful startup messages
+docker logs storagenode 2>&1 | grep -i "started"
+```
+
+{% /tab %}
+
+{% tab label="Windows GUI" %}
+
+```powershell
+# View recent logs
+Get-Content "C:\Program Files\Storj\Storage Node\logs\*" -Tail 50
+
+# Follow logs in real-time (PowerShell 7+)
+Get-Content "C:\Program Files\Storj\Storage Node\logs\*" -Wait
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+**Good log messages to look for**:
+- "Server started" or similar startup confirmation
+- Successful connections to satellites
+- No persistent error messages
+- Initial storage and bandwidth allocations
+
+### Test external connectivity
+
+Verify your node is reachable from outside your network:
+
+```bash
+# From another computer/network, test connectivity
+telnet your.external.address 28967
+
+# Should connect successfully
+```
+
+**Expected outcome**: Your node should be accessible externally, showing successful connections in logs and dashboard.
+
+## Step 7: Monitor initial operation
+
+During your node's first days of operation:
+
+### Understand the vetting process
+
+**New node behavior**:
+- Initial uploads will be limited (vetting process)
+- Storage usage grows slowly over first few months
+- Earnings start small and increase over time
+- Node reputation builds gradually through successful audits
+
+**Typical timeline**:
+- **Days 1-7**: Very limited activity, system testing
+- **Weeks 2-8**: Gradual increase in storage uploads
+- **Months 2-12**: Continued growth, reputation building
+- **After 12 months**: Full earning potential unlocked
+
+### Monitor key metrics
+
+**Daily checks** (first week):
+- Node uptime and connectivity
+- Log files for errors or warnings
+- Dashboard showing satellite connections
+- Gradual increase in storage usage
+
+**Weekly checks** (ongoing):
+- Storage utilization trends
+- Bandwidth usage patterns
+- Audit success rates (should stay >95%)
+- Payout predictions and actual earnings
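+
+A quick scripted version of these checks is possible against the node's local dashboard API (a sketch; assumes the default port 14002 and the `/api/sno` endpoint used by community monitoring scripts, plus `curl` and `jq` installed; field names can vary between versions):
+
+```bash
+# Summarize node status from the local dashboard API
+curl -s http://localhost:14002/api/sno | jq '{nodeID, upToDate, diskSpace}'
+```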
+
+### Common first-week issues
+
+**Node appears offline**:
+- Verify port forwarding configuration
+- Check firewall settings
+- Confirm external address is correct
+- Test connectivity from external network
+
+**Low activity/earnings**:
+- Normal for new nodes during vetting period
+- Ensure node has consistent uptime
+- Verify sufficient available storage space
+- Be patient - growth takes time
+
+**Database or storage errors**:
+- Check disk space and filesystem health
+- Verify permissions on storage directories
+- Monitor system resources (CPU, RAM, disk I/O)
+
+**Expected outcome**: Your node should show stable operation with gradually increasing activity over the first weeks.
+
+## Step 8: Set up ongoing maintenance
+
+Establish procedures to keep your node healthy long-term:
+
+### Automated monitoring
+
+Set up basic monitoring:
+
+{% tabs %}
+
+{% tab label="Linux" %}
+
+```bash
+# Create monitoring script
+cat > $HOME/check-storj-node.sh << 'EOF'
+#!/bin/bash
+# Check if container is running
+if ! docker ps | grep -q storagenode; then
+ echo "ERROR: Storage node container not running"
+ # Add notification/restart logic here
+fi
+
+# Check disk space
+USAGE=$(df $HOME/storj/storagenode | tail -1 | awk '{print $5}' | sed 's/%//')
+if [ $USAGE -gt 90 ]; then
+ echo "WARNING: Storage directory $USAGE% full"
+fi
+EOF
+
+# Make executable
+chmod +x $HOME/check-storj-node.sh
+
+# Add to crontab (run every 5 minutes)
+(crontab -l 2>/dev/null; echo "*/5 * * * * $HOME/check-storj-node.sh") | crontab -
+```
+
+{% /tab %}
+
+{% tab label="Windows" %}
+
+```powershell
+# Create monitoring script
+@'
+# Check if service is running
+$service = Get-Service storagenode -ErrorAction SilentlyContinue
+if ($service.Status -ne "Running") {
+ Write-Host "ERROR: Storage node service not running"
+ # Add notification/restart logic here
+}
+
+# Check disk space
+$disk = Get-WmiObject -Class Win32_LogicalDisk | Where-Object { $_.DeviceID -eq "C:" }
+$usage = [math]::Round(($disk.Size - $disk.FreeSpace) / $disk.Size * 100, 2)
+if ($usage -gt 90) {
+ Write-Host "WARNING: Disk ${usage}% full"
+}
+'@ | Out-File -FilePath "$env:USERPROFILE\check-storj-node.ps1"
+
+# Set up scheduled task (run every 5 minutes)
+schtasks /create /tn "Storj Node Monitor" /tr "powershell.exe -File $env:USERPROFILE\check-storj-node.ps1" /sc minute /mo 5
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+### Update procedures
+
+**Software updates**:
+- Storage node software updates automatically (see the watchtower sketch below for Docker nodes)
+- Monitor for update announcements in Storj community
+- Plan maintenance windows for major updates
+
+**System maintenance**:
+- Regular system updates and security patches
+- Periodic filesystem checks and optimization
+- Monitor and clean up log files
+- Backup identity files securely
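+
+For Docker-based nodes, the automatic updates mentioned above are typically handled by Storj's watchtower image. A sketch (verify the currently recommended flags in the official docs before relying on it):
+
+```bash
+# Auto-update the storagenode container when new images are published
+docker run -d --restart unless-stopped --name watchtower \
+  -v /var/run/docker.sock:/var/run/docker.sock \
+  storjlabs/watchtower storagenode watchtower --stop-timeout 300s
+```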
+
+### Performance optimization
+
+As your node matures:
+
+**Storage optimization**:
+- Monitor disk I/O performance
+- Consider SSD caching for better performance
+- Ensure adequate free space (10%+ recommended)
+
+**Network optimization**:
+- Monitor bandwidth utilization
+- Optimize router QoS settings if needed
+- Consider dedicated internet connection for large nodes
+
+**Expected outcome**: You should have monitoring and maintenance procedures in place to ensure long-term reliable operation.
+
+## What you've accomplished
+
+Congratulations! You've successfully set up your first Storj storage node. You now have:
+
+- A fully configured storage node connected to the Storj network
+- Proper network infrastructure with port forwarding and firewall rules
+- Monitoring systems to track node health and performance
+- Understanding of the vetting process and earnings timeline
+- Maintenance procedures for long-term operation
+
+## Understanding your node's journey
+
+**The vetting period**: New nodes go through a vetting phase on each satellite, during which traffic is limited until the node passes enough successful audits (typically around a month per satellite). Allowed storage and bandwidth then grow with demonstrated reliability.
+
+**Reputation building**: Your node builds reputation through successful audits, uptime, and reliable service. Better reputation leads to higher earnings.
+
+**Earnings timeline**:
+- Months 1-9: A portion of earnings is withheld as insurance against node failure, starting at 75% and stepping down to 25%
+- Months 10-15: No withholding; you receive full earnings for ongoing service
+- Month 16: Half of the total withheld amount is returned; the remainder is paid out if you later perform a graceful exit
+- Full earning potential: Typically achieved after 12+ months of operation
+
+## What's next
+
+Now that your storage node is running:
+
+### Optimize your setup
+- [Monitor and optimize node performance](#)
+- [Set up advanced monitoring and alerting](#)
+- [Plan for scaling to multiple nodes](#)
+- [Implement backup and disaster recovery](#)
+
+### Join the community
+- [Join the Storj community forum](https://forum.storj.io) for support and updates
+- [Follow best practices](#) from experienced node operators
+- [Stay updated](#) on network changes and opportunities
+
+### Advanced topics
+- [Understanding storage node economics](#) - detailed earnings analysis
+- [Multi-node deployment strategies](#) - scaling your operation
+- [Hardware optimization](#) - improving performance and efficiency
+
+### Troubleshooting resources
+- [Troubleshoot offline node issues](../how-to/troubleshoot-offline-node)
+- [Migrate node to new hardware](../how-to/migrate-node)
+- [Change payout address](../how-to/change-payout-address)
+
+Your storage node is now contributing to the decentralized cloud and you're earning STORJ tokens for providing valuable storage and bandwidth resources to the network. Welcome to the Storj community!
\ No newline at end of file
diff --git a/app/(docs)/object-mount/concepts/object-mount-vs-filesystems.md b/app/(docs)/object-mount/concepts/object-mount-vs-filesystems.md
new file mode 100644
index 000000000..ea69d6bbe
--- /dev/null
+++ b/app/(docs)/object-mount/concepts/object-mount-vs-filesystems.md
@@ -0,0 +1,175 @@
+---
+title: Object Mount vs traditional filesystems
+docId: object-mount-vs-filesystems
+metadata:
+ title: Understanding Object Mount vs Traditional Filesystems
+ description: Conceptual explanation of how Object Mount bridges object storage and POSIX filesystems, with architecture and performance considerations.
+---
+
+Object Mount represents a fundamental shift in how applications access cloud storage by bridging the gap between POSIX filesystem expectations and object storage characteristics.
+
+## The traditional filesystem model
+
+Traditional applications expect filesystems to provide:
+
+- **Hierarchical structure**: Files organized in directories and subdirectories
+- **POSIX compliance**: Standard operations like open, read, write, close, and seek
+- **Metadata support**: Permissions, timestamps, ownership, and symbolic links
+- **Consistency guarantees**: Immediate visibility of changes across all processes
+- **Random access**: Ability to read or write any part of a file efficiently
+
+These expectations work well with local storage (HDDs, SSDs) and network filesystems (NFS, CIFS) but conflict with object storage design principles.
+
+## Object storage characteristics
+
+Object storage systems like Storj, Amazon S3, and Azure Blob Storage are designed for:
+
+- **Write-once, read-many patterns**: Optimized for immutable data
+- **High throughput**: Excellent for large file transfers and streaming
+- **Eventual consistency**: Changes may not be immediately visible everywhere
+- **Flat namespace**: Objects stored with keys, not hierarchical paths
+- **HTTP-based access**: REST APIs rather than POSIX system calls
+
+This fundamental mismatch means traditional applications cannot directly use object storage as if it were a local filesystem.
+
+## How Object Mount solves the problem
+
+Object Mount acts as a translation layer that:
+
+### Intercepts system calls
+- Uses `LD_PRELOAD` to intercept filesystem operations from applications
+- Translates POSIX operations into object storage API calls
+- Works with both dynamically and statically linked applications
+- Requires no application modifications
+
+### Maps filesystem concepts to objects
+- **Files** → Individual objects in the bucket
+- **Directories** → Object key prefixes (simulated hierarchy)
+- **Metadata** → Object metadata and special tracking objects
+- **Permissions** → Cached and synchronized metadata
+
+### Provides performance optimization
+- **Intelligent caching**: Predicts access patterns and caches data locally
+- **Write-behind caching**: Buffers writes for optimal object storage interaction
+- **Partial reads**: Downloads only needed portions of large files
+- **Concurrent operations**: Parallelizes uploads and downloads
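+
+In practice this translation is invisible to the application. As a minimal sketch (assuming the `cuno` CLI is installed and credentials are configured), an unmodified POSIX tool can read a cloud object directly:
+
+```bash
+# grep has no S3 support of its own; interception translates its
+# read() calls into object storage requests
+cuno run grep "ERROR" s3://mybucket/logs/app.log
+```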
+
+## Architecture comparison
+
+### Traditional application + local storage
+```
+Application → POSIX calls → Kernel VFS → Filesystem → Storage device
+```
+
+### Traditional application + Object Mount + object storage
+```
+Application → POSIX calls → Object Mount interception → Object storage API → Cloud storage
+```
+
+### Object Mount modes
+
+**Direct Interception (CLI mode)**:
+- Highest performance
+- Full POSIX compatibility
+- Works in containers and restricted environments
+- Requires compatible applications
+
+**FUSE mode**:
+- Broader application compatibility
+- Standard mount interface
+- Slightly higher overhead
+- Requires FUSE kernel module
+
+**FlexMount (hybrid)**:
+- Automatic fallback between modes
+- Best of both approaches
+- Optimal compatibility and performance
+
+## Performance characteristics
+
+### Object storage optimizations
+
+**Read performance**:
+- First access: Download time from object storage
+- Subsequent access: Cache speed (near-local performance)
+- Large files: Streaming and partial download optimization
+
+**Write performance**:
+- Small writes: Buffered and batched for efficiency
+- Large writes: Direct streaming to object storage
+- Metadata updates: Cached and synchronized
+
+**Memory usage**:
+- Configurable cache size
+- Intelligent eviction policies
+- Minimal overhead for inactive files
+
+### When Object Mount excels
+
+- **Read-heavy workloads**: Excellent caching makes repeated reads very fast
+- **Large file processing**: Streaming and partial access optimization
+- **Development workflows**: Seamless access to cloud data
+- **Container environments**: No privileged access requirements
+
+### When to consider alternatives
+
+- **Write-intensive workloads**: Consider [Object Mount Fusion](./object-mount-fusion) for hybrid storage
+- **Real-time applications**: Network latency may impact performance
+- **Small random I/O**: Traditional block storage may be more efficient
+
+## Consistency model
+
+Object Mount provides **NFS-equivalent consistency** guarantees:
+
+- **Single client**: All operations are immediately consistent
+- **Multiple clients**: Eventually consistent with configurable sync intervals
+- **Metadata operations**: Cached with refresh policies
+- **File locking**: Supported through object metadata
+
+## Provider compatibility
+
+Object Mount works with any S3-compatible storage, but performance varies:
+
+**Fully tested providers**:
+- Amazon S3, Azure Blob Storage, Google Cloud Storage
+- Storj, Wasabi, MinIO, Oracle OCI
+- NetApp StorageGRID, Dell ECS
+
+**Community-validated providers**:
+- IBM Cloud Object Storage, Backblaze B2
+- DigitalOcean Spaces, Cloudflare R2
+
+**Performance considerations by provider**:
+- **Latency**: Geographic proximity affects response times
+- **API compatibility**: Some providers have S3 API variations
+- **Throughput limits**: Provider-specific bandwidth constraints
+- **Cost structure**: Different pricing for operations and bandwidth
+
+## Security model
+
+Object Mount maintains security through:
+
+- **Credential isolation**: Applications never see object storage credentials
+- **Access control**: Standard POSIX permissions enforced by Object Mount
+- **Encryption**: Supports provider-side and client-side encryption
+- **Audit trails**: Comprehensive logging of all operations
+
+## Use case suitability
+
+**Excellent fit**:
+- Media processing and editing workflows
+- Data analysis and machine learning pipelines
+- Development and testing environments
+- Backup and archival applications
+
+**Good fit with considerations**:
+- Database workloads (consider Fusion for write-heavy scenarios)
+- Web serving (cache configuration important)
+- Collaborative editing (understand consistency implications)
+
+**May not be suitable**:
+- Hard real-time applications requiring guaranteed latency
+- Applications requiring strict POSIX lock semantics
+- Workloads with extremely high small-write frequencies
+
+Understanding these fundamental concepts helps you make informed decisions about when and how to deploy Object Mount in your infrastructure.
\ No newline at end of file
diff --git a/app/(docs)/object-mount/linux/user-guides/page.md b/app/(docs)/object-mount/linux/user-guides/page.md
index 07c8ade35..a80c38f2c 100644
--- a/app/(docs)/object-mount/linux/user-guides/page.md
+++ b/app/(docs)/object-mount/linux/user-guides/page.md
@@ -1,51 +1,133 @@
---
-title: User Guides
+title: How-to Guides for Linux
docId: ohs0ailohSh0Vie3
-
metadata:
- title: User Guides
+ title: Object Mount Linux How-to Guides
description:
- User Guides Overview
+ Practical guides for installing, configuring, and using Object Mount on Linux systems with step-by-step instructions.
weight: 4
---
-Object Mount is a scalable, high-performance POSIX compatibility layer that lets you interact with files stored on object storage such as Amazon S3, Azure Blob Storage, Google Cloud Storage, or any S3-compatible object store hosted in the cloud or locally.
+This section provides step-by-step guides for common Object Mount tasks on Linux. These practical guides help you achieve specific goals with clear instructions and troubleshooting tips.
+
+## Prerequisites
+
+Before following these guides, ensure you have:
+
+- A Linux system (Ubuntu 18.04+, Debian 9+, RHEL 7+, CentOS 7+, or compatible)
+- Administrative privileges for installation tasks
+- Basic command-line familiarity
+- Object storage credentials (S3-compatible) for the provider you plan to use
+
+## Getting started
+
+If this is your first time using Object Mount:
+
+1. **Start here**: [Your first mount tutorial](../../../tutorials/your-first-mount) - Complete hands-on introduction
+2. **Understand the concepts**: [Object Mount vs traditional filesystems](../../../concepts/object-mount-vs-filesystems)
+3. **Choose your approach**: Review the guides below for your specific needs
+
+## Installation guides
+
+Choose the installation method that matches your environment:
+
+- [Install on Ubuntu/Debian](../installation/debian) - Package installation for APT-based systems
+- [Install on RHEL/CentOS](../installation/redhat) - Package installation for RPM-based systems
+- [Install generic Linux binary](../installation/glibc) - Universal installation method
+- [Install in Alpine Linux](../installation/alpine) - Lightweight container-focused installation
+- [Install on macOS](../../macos/installation) - Package installation for macOS systems
+- [Install on Windows](../../windows/installation) - Package installation for Windows systems
+
+## Configuration guides
+
+Set up Object Mount for your specific object storage provider:
-## The package
+- [Configure credentials](./credentials) - Set up authentication for your object storage
+- [Configure performance settings](./configuration) - Optimize for your workload
+- [Set up advanced options](./extraopts) - Additional configuration parameters
+- [Configure logging](./appendix) - Set up monitoring and debugging
-Object Mount is Linux software: there's a Object Mount Command Line Interface (Object Mount CLI), `cuno`, providing the highest performance and most straightforward way to interact with object storage. This works through a user-mode library, `cuno.so`, which intercepts (both dynamic and static) applications using [LD_PRELOAD] functionality and fast dynamic binary instrumentation.
+## Usage guides
-Object Mount can also be used with our modified [FUSE] mount solution, [Object Mount on FUSE](./user-guides/basic#object-mount-on-fuse), providing wider support for applications where the [Object Mount CLI cannot be used](./user-guides/limitations#direct-interception).
+Learn how to use Object Mount effectively:
-To match the best performance with the widest support, consider the hybrid solution: [Object Mount FlexMount](./user-guides/basic#object-mount-flex-mount).
+- [Basic operations](./basic) - Mount, unmount, and basic file operations
+- [Advanced usage](./advanced) - Complex scenarios and optimization
+- [Access patterns](./access) - Understanding performance characteristics
+- [Troubleshooting common issues](./limitations) - Solve problems and understand limitations
-Access credentials can also be optionally managed by Object Mount.
+## Deployment guides
-## Wide support for object storage providers
+Deploy Object Mount in different environments:
-Object Mount has native support for:
+- [Container deployment](./k8s) - Use Object Mount in Docker and Kubernetes
+- [Multi-user setup](./tips) - Configure for shared environments
+- [Uninstall Object Mount](./uninstall) - Clean removal procedures
-- [Amazon Web Services S3](https://aws.amazon.com/s3/)
-- [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-gb/services/storage/blobs/)
-- [Google Cloud Storage](https://cloud.google.com/storage/) (however for best performance we currently recommend using S3 with their [S3 accesspoint](https://cloud.google.com/storage/docs/interoperability))
+## Provider compatibility
-In theory, Object Mount supports any S3-compatible object storage provider. In practice, the "S3 API" implementation can have differences in behaviour between providers, so some additional configuration is sometimes necessary. Object Mount has been tested on:
+Object Mount works with any S3-compatible storage provider. Performance and compatibility vary:
+
+### Fully supported providers
+
+These providers are regularly tested and validated:
+
+- [Amazon Web Services S3](https://aws.amazon.com/s3/) - Full compatibility and optimal performance
+- [Microsoft Azure Blob Storage](https://azure.microsoft.com/services/storage/blobs/) - Complete S3 API support
+- [Google Cloud Storage](https://cloud.google.com/storage/) - Use with S3 compatibility layer
+- [Storj DCS](https://storj.io/) - Decentralized object storage with full compatibility
- [Oracle Cloud Infrastructure Object Storage](https://www.oracle.com/cloud/storage/object-storage.html)
-- [Storj](https://storj.io/)
-- [Wasabi](https://wasabi.com/)
-- [MinIO](https://min.io/)
+- [Wasabi Hot Cloud Storage](https://wasabi.com/) - High-performance S3-compatible storage
+- [MinIO](https://min.io/) - Self-hosted S3-compatible storage
- [NetApp StorageGRID](https://www.netapp.com/data-storage/storagegrid/)
-- [Dell ECS Object Storage](https://www.delltechnologies.com/en-gb/storage/ecs/index.htm)
+- [Dell ECS Object Storage](https://www.delltechnologies.com/storage/ecs/)
-The following providers have not yet been validated; however, users have reported success with:
+### Community-validated providers
+
+These providers work with Object Mount based on user reports:
+
- [IBM Cloud Object Storage](https://www.ibm.com/cloud/object-storage)
- [Backblaze B2 Cloud Storage](https://www.backblaze.com/cloud-storage)
- [DigitalOcean Spaces](https://www.digitalocean.com/products/spaces/)
-- [Cloudflare R2](https://www.cloudflare.com/en-gb/developer-platform/r2/)
+- [Cloudflare R2](https://www.cloudflare.com/developer-platform/r2/)
- [Scality](https://www.scality.com/)
- [DataDirect Networks (DDN) Storage](https://www.ddn.com)
-[fuse]: https://www.kernel.org/doc/html/next/filesystems/fuse.html
-[ld_preload]: https://man7.org/linux/man-pages/man8/ld.so.8.html
+### Configuration considerations by provider
+
+**For best performance**:
+- Configure appropriate endpoint URLs for your provider
+- Set optimal chunk sizes and concurrent connection limits
+- Use provider-specific regions or availability zones
+- Consider provider bandwidth and operation limits
+
+**Provider-specific tips**:
+- **Google Cloud Storage**: Use S3 interoperability mode for best performance
+- **Azure Blob Storage**: Configure hot/cool tier access appropriately
+- **Storj**: Benefits from higher concurrency settings due to distributed architecture
+- **MinIO**: Optimal for on-premises and edge deployment scenarios
+
+## Verification and troubleshooting
+
+After setup, verify your configuration:
+
+1. **Test connectivity**: Ensure Object Mount can access your storage (see the sketch below)
+2. **Performance validation**: Run benchmarks for your workload
+3. **Monitor resources**: Check memory and CPU usage patterns
+4. **Review logs**: Examine Object Mount operation logs
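+
+For the first check, a minimal smoke test with the Object Mount CLI works well (the bucket name is a placeholder; see the CLI reference for details):
+
+```bash
+# Pair credentials with a bucket, then list it through interception
+cuno creds pair s3://mybucket
+cuno run ls s3://mybucket/
+```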
+
+Common issues and solutions:
+
+- **Mount failures**: Check credentials, endpoints, and network connectivity
+- **Performance issues**: Review cache settings and provider-specific optimizations
+- **Application compatibility**: Understand [limitations](./limitations) and workarounds
+- **Resource usage**: Optimize [configuration](./configuration) for your environment
+
+## Getting help
+
+If you need assistance:
+
+1. Check the specific guide for your use case above
+2. Review [troubleshooting guides](./limitations) for common issues
+3. Search the [community forum](https://forum.storj.io) for similar problems
+4. Contact support with detailed configuration and error information
+
+For conceptual understanding, see [Object Mount vs traditional filesystems](../../../concepts/object-mount-vs-filesystems) to understand how Object Mount bridges object storage and POSIX filesystems.
diff --git a/app/(docs)/object-mount/reference/_meta.json b/app/(docs)/object-mount/reference/_meta.json
new file mode 100644
index 000000000..32eb94390
--- /dev/null
+++ b/app/(docs)/object-mount/reference/_meta.json
@@ -0,0 +1,5 @@
+{
+ "cli-reference": "CLI Reference",
+ "configuration": "Configuration",
+ "compatibility": "Compatibility"
+}
\ No newline at end of file
diff --git a/app/(docs)/object-mount/reference/cli-reference.md b/app/(docs)/object-mount/reference/cli-reference.md
new file mode 100644
index 000000000..12370a923
--- /dev/null
+++ b/app/(docs)/object-mount/reference/cli-reference.md
@@ -0,0 +1,266 @@
+---
+title: "Object Mount CLI Reference"
+docId: "object-mount-cli-ref-001"
+metadata:
+ title: "Object Mount CLI Commands Reference"
+ description: "Complete reference for Object Mount CLI commands, options, and usage patterns."
+---
+
+Complete reference for Object Mount CLI commands and options.
+
+## Core Commands
+
+### cuno
+
+Main command for Object Mount CLI operations.
+
+**Basic Usage:**
+```bash
+# Launch interactive Object Mount shell
+cuno
+
+# Run a single command with Object Mount
+cuno run <command>
+
+# Mount an Object Mount FUSE filesystem
+cuno mount <mountpoint> [options]
+```
+
+### Command Modes
+
+#### Interactive Shell Mode
+```bash
+cuno
+```
+Launches an interactive shell with Object Mount interception enabled. The shell prompt will show `(cuno)` to indicate Object Mount is active.
+
+**Supported shells:** `bash` and `zsh` (full tab completion and wildcard support)
+
+#### Single Command Execution
+```bash
+cuno run bash -c "ls s3://mybucket/"
+cuno run python script.py
+```
+Runs a single command with Object Mount interception enabled.
+
+#### FUSE Mount Mode
+```bash
+cuno mount ~/my-mount-point
+cuno mount ~/my-mount-point --root s3://mybucket/
+```
+Creates a FUSE filesystem mount at the specified location.
+
+## Command Options
+
+### Global Options
+
+| Option | Description | Example |
+|--------|-------------|---------|
+| `-o <options>` | Specify configuration options | `cuno -o "uid=1000 gid=1000"` |
+| `--help` | Show help information | `cuno --help` |
+
+### Mount-Specific Options
+
+| Option | Description | Example |
+|--------|-------------|---------|
+| `--root <path>` | Set root directory for mount | `--root s3://mybucket/folder/` |
+| `--foreground` | Run mount in foreground | `cuno mount ~/mnt --foreground` |
+
+## Configuration Options (`CUNO_OPTIONS`)
+
+Set via environment variable or `-o` flag:
+
+```bash
+export CUNO_OPTIONS="uid=1000 gid=1000 filemode=0644"
+# OR
+cuno -o "uid=1000 gid=1000 filemode=0644"
+```
+
+### Core Options
+
+| Option | Type | Description | Default |
+|--------|------|-------------|---------|
+| `uid=` | integer | Set file owner UID | Current user |
+| `gid=` | integer | Set file owner GID | Current group |
+| `filemode=` | octal | Default file permissions | `0666` |
+| `dirmode=` | octal | Default directory permissions | `0777` |
+
+### Advanced Options
+
+| Option | Type | Description |
+|--------|------|-------------|
+| `+static` | flag | Enable static binary interception (default) |
+| `-static` | flag | Disable static binary interception |
+| `+uricompat[=apps]` | string | Enable URI compatibility for apps |
+| `cloudroot=` | string | Custom cloud root path (default: `/cuno`) |
+| `cloudrootover` | flag | Override cloud root for FlexMount |
+
+### URI Compatibility (`+uricompat`)
+
+Enable URI handling override for specific applications:
+
+```bash
+# Default supported apps (automatic)
++uricompat # rsync, ffmpeg, tar, samtools, igv, fastQC
+
+# Custom application list
++uricompat=myapp:otherapp
+
+# Conditional matching (app/arg_index/match_value)
++uricompat=java/2/app.jar:python/*/script.py
+```
+
+### Cloud Root Customization
+
+```bash
+# Custom cloud root
+export CUNO_OPTIONS="cloudroot=/my-storage"
+ls /my-storage/s3/mybucket/ # Instead of /cuno/s3/mybucket/
+```
+
+## Path Formats
+
+Object Mount supports multiple path formats for accessing cloud storage:
+
+### URI Format
+```bash
+s3://bucket/path/file.txt
+az://container/path/file.txt
+gs://bucket/path/file.txt
+```
+
+### Filesystem Format
+```bash
+/cuno/s3/bucket/path/file.txt
+/cuno/az/container/path/file.txt
+/cuno/gs/bucket/path/file.txt
+```
+
+### Custom Cloud Root
+```bash
+# With cloudroot=/storage
+/storage/s3/bucket/path/file.txt
+```
+
+## Credential Management
+
+### cuno creds
+
+Manage cloud storage credentials.
+
+```bash
+# Pair bucket with credentials
+cuno creds pair s3://mybucket
+
+# List paired credentials
+cuno creds list
+
+# Remove credential pairing
+cuno creds unpair s3://mybucket
+```
+
+## Access Modes
+
+### Direct Interception
+- **Default mode** when using `cuno` or `cuno run`
+- **Highest performance** access method
+- **Works with:** Dynamically linked binaries
+- **Limitations:** No support for SUID, Snap, AppImage, Flatpak applications
+
+### FUSE Mount
+- **Compatibility mode** using `cuno mount`
+- **Works with:** All application types including static binaries
+- **Trade-off:** Lower performance than direct interception
+
+### FlexMount
+- **Hybrid approach** combining both modes
+- **Fallback strategy:** Direct interception with FUSE fallback
+- **Setup:** Mount FUSE filesystem, then use with Object Mount CLI
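+
+A minimal FlexMount sketch, combining the two modes above (paths are placeholders; see the Configuration Reference for `cloudrootover`):
+
+```bash
+# 1. Create the FUSE mount that acts as the compatibility fallback
+cuno mount /home/user/mount
+
+# 2. Point CLI interception at the same tree, so compatible applications
+#    bypass FUSE for direct (faster) access
+export CUNO_OPTIONS="cloudrootover cloudroot=/home/user/mount"
+cuno run ls /home/user/mount/s3/mybucket/
+```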
+
+## Environment Variables
+
+| Variable | Description | Example |
+|----------|-------------|---------|
+| `CUNO_OPTIONS` | Configuration options | `"uid=1000 filemode=0644"` |
+| `LD_PRELOAD` | Manual library preload | Set automatically by `cuno` |
+
+## Usage Examples
+
+### Basic File Operations
+```bash
+cuno
+(cuno) $ ls s3://mybucket/
+(cuno) $ cp local-file.txt s3://mybucket/remote-file.txt
+(cuno) $ cat s3://mybucket/data.csv | head -10
+```
+
+### Application Integration
+```bash
+# Python script with cloud paths
+cuno run python -c "import pandas as pd; df = pd.read_csv('s3://bucket/data.csv')"
+
+# rsync with cloud storage
+cuno run rsync -av s3://source-bucket/ s3://dest-bucket/
+
+# Media processing
+cuno run ffmpeg -i s3://bucket/input.mp4 s3://bucket/output.mp4
+```
+
+### FUSE Mount Usage
+```bash
+# Mount entire cloud storage
+cuno mount ~/cloud-storage
+
+# Mount specific bucket
+cuno mount ~/my-bucket --root s3://mybucket/
+
+# Use mounted storage
+ls ~/cloud-storage/s3/mybucket/
+cp ~/cloud-storage/s3/mybucket/file.txt .
+```
+
+## Troubleshooting Commands
+
+### Check Object Mount Status
+```bash
+# Verify Object Mount is active (should show (cuno) in prompt)
+cuno
+(cuno) $ echo $LD_PRELOAD # Should show cuno.so path
+```
+
+### Debug Mode
+```bash
+# Enable verbose logging
+export CUNO_DEBUG=1
+cuno run ls s3://mybucket/
+```
+
+### Mount Debugging
+```bash
+# Check mount status
+mount | grep cuno
+
+# Unmount if needed
+fusermount -u ~/mount-point
+
+# Foreground mode for debugging
+cuno mount ~/test-mount --foreground
+```
+
+## Performance Tuning
+
+### Parallelism
+Object Mount automatically optimizes for parallel operations. For custom tuning:
+
+```bash
+# Parallel-friendly tools such as rsync benefit automatically
+cuno run rsync -av --progress s3://source/ s3://dest/
+```
+
+### Memory Usage
+```bash
+# Adjust for large file operations
+export CUNO_OPTIONS="cache_size=1GB"
+```
+
+This reference covers all major Object Mount CLI commands and configuration options. Use `cuno --help` for additional details on specific commands.
\ No newline at end of file
diff --git a/app/(docs)/object-mount/reference/compatibility.md b/app/(docs)/object-mount/reference/compatibility.md
new file mode 100644
index 000000000..436b01c7f
--- /dev/null
+++ b/app/(docs)/object-mount/reference/compatibility.md
@@ -0,0 +1,318 @@
+---
+title: "Compatibility Reference"
+docId: "object-mount-compat-ref-001"
+metadata:
+ title: "Object Mount Compatibility Reference"
+ description: "Complete reference for Object Mount compatibility with operating systems, applications, and cloud storage providers."
+---
+
+Complete reference for Object Mount compatibility across platforms and applications.
+
+## Operating System Compatibility
+
+### Linux Distributions
+
+| Distribution | Versions | Support Level | Installation Method |
+|--------------|----------|---------------|-------------------|
+| **Ubuntu** | 18.04+ | Full | DEB package, binary |
+| **Debian** | 10+ | Full | DEB package, binary |
+| **CentOS/RHEL** | 7+ | Full | RPM package, binary |
+| **Fedora** | 30+ | Full | RPM package, binary |
+| **openSUSE** | 15+ | Full | RPM package, binary |
+| **Alpine Linux** | 3.12+ | Full | Binary (musl) |
+| **Arch Linux** | Latest | Full | Binary, AUR |
+
+### Architecture Support
+
+| Architecture | Support Level | Notes |
+|--------------|---------------|-------|
+| **x86_64** | Full | Primary support |
+| **ARM64** | Full | Native ARM64 builds |
+| **ARMv7** | Limited | Select distributions only |
+
+### Kernel Requirements
+
+| Component | Minimum Version | Recommended |
+|-----------|----------------|-------------|
+| **Linux Kernel** | 3.10+ | 4.0+ |
+| **FUSE** | 2.6+ | 3.0+ |
+| **glibc** | 2.17+ | 2.27+ |
+| **musl** | 1.1.24+ | 1.2.0+ |
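+
+These can be checked on the host with standard commands:
+
+```bash
+uname -r                 # Linux kernel version
+fusermount --version     # FUSE 2.x (use fusermount3 for FUSE 3)
+ldd --version | head -1  # glibc version (reports musl on musl-based systems)
+```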
+
+## Application Compatibility
+
+### Fully Compatible Applications
+
+Applications that work seamlessly with Object Mount:
+
+#### Development Tools
+- **Python** - All versions, pip, conda
+- **Node.js** - npm, yarn, all frameworks
+- **Java** - JVM applications, Maven, Gradle
+- **Go** - go build, go mod
+- **Rust** - cargo, rustc
+- **Docker** - Container builds and runtime
+
+#### Data Processing
+- **pandas** - DataFrame operations
+- **NumPy** - Array operations
+- **Apache Spark** - Distributed processing
+- **Dask** - Parallel computing
+- **Jupyter** - Notebook operations
+
+#### Media Processing
+- **FFmpeg** - Video/audio transcoding
+- **ImageMagick** - Image manipulation
+- **Handbrake** - Video encoding
+- **Blender** - 3D rendering (with setup)
+
+#### File Management
+- **rsync** - File synchronization
+- **tar** - Archive operations
+- **zip/unzip** - Compression
+- **find/grep** - File search
+
+### Limited Compatibility Applications
+
+Applications with known limitations:
+
+| Application Type | Limitation | Workaround |
+|------------------|------------|------------|
+| **SUID Binaries** | Security restrictions prevent interception | Use FUSE mount mode |
+| **Snap Applications** | Sandboxing prevents LD_PRELOAD | Use FUSE mount, configure permissions |
+| **AppImage** | Self-contained prevents interception | Use FUSE mount mode |
+| **Flatpak** | Sandboxing restrictions | Use FUSE mount with portal access |
+| **Static Binaries** | Limited interception capability | Enable with `+static` flag |
+
+### Database Compatibility
+
+| Database | Direct Support | Recommended Approach |
+|----------|----------------|---------------------|
+| **SQLite** | Yes | Direct file access |
+| **PostgreSQL** | No | Use backup/restore workflows |
+| **MySQL** | No | Use mysqldump to cloud storage |
+| **MongoDB** | Partial | Use mongodump to cloud storage |
+
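+As a sketch of the dump-based approach recommended above (database and bucket names are placeholders):
+
+```bash
+# Stream a MySQL dump straight to object storage through interception
+cuno run bash -c 'mysqldump mydb > s3://mybucket/backups/mydb-$(date +%F).sql'
+```
+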
+## Cloud Storage Provider Support
+
+### Supported Providers
+
+| Provider | Protocol | Authentication | Features |
+|----------|----------|----------------|----------|
+| **Amazon S3** | `s3://` | Access keys, IAM roles | Full S3 API compatibility |
+| **Microsoft Azure** | `az://` | Account keys, SAS tokens | Blob storage support |
+| **Google Cloud** | `gs://` | Service accounts, OAuth | Cloud Storage API |
+| **Storj DCS** | `s3://` | Access grants, S3 gateway | Native decentralized support |
+| **MinIO** | `s3://` | Access keys | Self-hosted S3-compatible |
+| **Wasabi** | `s3://` | Access keys | S3-compatible hot storage |
+
+### Authentication Methods
+
+#### S3-Compatible Providers
+```bash
+# AWS credentials file
+~/.aws/credentials
+
+# Environment variables
+export AWS_ACCESS_KEY_ID="your-key"
+export AWS_SECRET_ACCESS_KEY="your-secret"
+
+# Storj access grants
+cuno creds pair s3://bucket
+```
+
+#### Azure Blob Storage
+```bash
+# Connection string
+export AZURE_STORAGE_CONNECTION_STRING="your-connection-string"
+
+# Account key
+export AZURE_STORAGE_ACCOUNT="account-name"
+export AZURE_STORAGE_KEY="account-key"
+```
+
+#### Google Cloud Storage
+```bash
+# Service account key
+export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json"
+
+# gcloud authentication
+gcloud auth application-default login
+```
+
+## Shell and Terminal Compatibility
+
+### Fully Supported Shells
+
+| Shell | Tab Completion | Wildcard Expansion | Prompt Indication |
+|-------|----------------|-------------------|-------------------|
+| **bash** | ✅ Full | ✅ Full | ✅ `(cuno)` prefix |
+| **zsh** | ✅ Full | ✅ Full | ✅ `(cuno)` prefix |
+
+### Partially Supported Shells
+
+| Shell | Basic Usage | Limitations |
+|-------|-------------|-------------|
+| **fish** | ✅ Yes | No tab completion for cloud paths |
+| **tcsh** | ✅ Yes | Limited wildcard support |
+| **dash** | ✅ Yes | No advanced features |
+
+## Container and Virtualization
+
+### Docker Compatibility
+
+**Supported Scenarios:**
+```bash
+# Host-mounted Object Mount
+docker run -v ~/cloud-storage:/data ubuntu ls /data/s3/bucket/
+
+# Object Mount inside container
+docker run -it --privileged ubuntu
+# Install Object Mount inside container
+```
+
+**Known Limitations:**
+- Requires `--privileged` for full functionality
+- FUSE support needed in container
+
+### Kubernetes Compatibility
+
+**CSI Driver Support:**
+- Object Mount can be integrated as CSI driver
+- Supports persistent volumes backed by cloud storage
+- Requires cluster-level FUSE support
+
+**Pod-level Usage:**
+```yaml
+apiVersion: v1
+kind: Pod
+spec:
+ containers:
+ - name: app
+ securityContext:
+ privileged: true # Required for FUSE
+ volumeMounts:
+ - name: cloud-storage
+ mountPath: /mnt/cloud
+```
+
+## Programming Language Integration
+
+### Python
+```python
+import pandas as pd
+
+# Direct file path usage
+df = pd.read_csv('s3://bucket/data.csv')
+df.to_parquet('s3://bucket/output.parquet')
+```
+
+**Libraries with confirmed compatibility:**
+- pandas, NumPy, SciPy
+- scikit-learn, TensorFlow, PyTorch
+- Pillow, OpenCV, matplotlib
+- boto3 (when using FUSE paths)
+
+### R
+```r
+# Direct file operations
+data <- read.csv('s3://bucket/data.csv')
+write.csv(data, 's3://bucket/output.csv')
+```
+
+### Node.js
+```javascript
+const fs = require('fs');
+
+// Direct file system operations
+const data = fs.readFileSync('s3://bucket/config.json');
+const result = JSON.parse(data);
+fs.writeFileSync('s3://bucket/output.json', JSON.stringify(result));
+```
+
+## Performance Characteristics
+
+### Access Method Performance
+
+| Method | Throughput | Latency | CPU Usage | Memory Usage |
+|--------|------------|---------|-----------|-------------|
+| **Direct Interception** | Highest | Lowest | Low | Low |
+| **FUSE Mount** | Moderate | Moderate | Moderate | Moderate |
+| **FlexMount** | High | Low | Low-Moderate | Low-Moderate |
+
+### File Operation Performance
+
+| Operation | Direct | FUSE | Notes |
+|-----------|--------|------|-------|
+| **Sequential Read** | Excellent | Good | Optimized for streaming |
+| **Random Read** | Good | Fair | Caching helps small reads |
+| **Sequential Write** | Excellent | Good | Buffered writes |
+| **Random Write** | Good | Fair | May trigger uploads |
+| **Metadata Operations** | Excellent | Good | Cached when possible |
+
+## Known Issues and Limitations
+
+### General Limitations
+
+1. **Symbolic Links** - Limited cross-boundary support
+2. **Hard Links** - Not supported across cloud boundaries
+3. **File Locking** - Advisory locking only
+4. **Extended Attributes** - Limited support
+5. **Special Files** - No device files, named pipes, sockets
+
+### Platform-Specific Issues
+
+#### Linux
+- SELinux may require policy adjustments
+- AppArmor profiles may need modification
+- systemd services require special configuration
+
+#### Container Environments
+- Requires privileged mode for full FUSE support
+- Some container runtimes limit LD_PRELOAD usage
+- Networking policies may affect cloud storage access
+
+## Troubleshooting Compatibility
+
+### Application Not Working
+
+1. **Check interception status:**
+ ```bash
+ echo $LD_PRELOAD # Should show cuno.so
+ ```
+
+2. **Try FUSE mode:**
+ ```bash
+ cuno mount ~/cloud-storage
+ # Use ~/cloud-storage/s3/bucket/ instead of s3://bucket/
+ ```
+
+3. **Enable compatibility flags:**
+ ```bash
+ export CUNO_OPTIONS="+uricompat=myapp"
+ ```
+
+### Permission Issues
+
+1. **Set appropriate ownership:**
+ ```bash
+ export CUNO_OPTIONS="uid=$(id -u) gid=$(id -g)"
+ ```
+
+2. **Adjust file permissions:**
+ ```bash
+ export CUNO_OPTIONS="filemode=0664 dirmode=0775"
+ ```
+
+### Container Issues
+
+1. **Enable privileged mode:**
+ ```bash
+ docker run --privileged myapp
+ ```
+
+2. **Mount host Object Mount:**
+ ```bash
+ docker run -v ~/cloud:/mnt/cloud myapp
+ ```
+
+This compatibility reference helps determine the best approach for your specific environment and applications. For unlisted applications, test with FUSE mode first, then try direct interception with appropriate compatibility flags.
\ No newline at end of file
diff --git a/app/(docs)/object-mount/reference/configuration.md b/app/(docs)/object-mount/reference/configuration.md
new file mode 100644
index 000000000..524c24c05
--- /dev/null
+++ b/app/(docs)/object-mount/reference/configuration.md
@@ -0,0 +1,289 @@
+---
+title: "Configuration Reference"
+docId: "object-mount-config-ref-001"
+metadata:
+ title: "Object Mount Configuration Reference"
+ description: "Complete reference for Object Mount configuration parameters, environment variables, and advanced settings."
+---
+
+Complete reference for Object Mount configuration options and parameters.
+
+## Configuration Methods
+
+Object Mount can be configured through:
+
+1. **Environment Variables** - `CUNO_OPTIONS`
+2. **Command Line Options** - `cuno -o "option=value"`
+3. **Configuration Files** - Platform-specific locations
+
+## Environment Variables
+
+### Primary Configuration
+
+| Variable | Description | Example |
+|----------|-------------|---------|
+| `CUNO_OPTIONS` | Main configuration options string | `"uid=1000 gid=1000 filemode=0644"` |
+| `CUNO_DEBUG` | Enable debug output | `1` or `true` |
+| `LD_PRELOAD` | Library preload (set automatically) | `/path/to/cuno.so` |
+
+### Usage Examples
+
+```bash
+# Single option
+export CUNO_OPTIONS="uid=1000"
+
+# Multiple options (space-separated, quoted)
+export CUNO_OPTIONS="uid=1000 gid=1000 filemode=0644"
+
+# Debug mode
+export CUNO_DEBUG=1
+```
+
+## Core Configuration Options
+
+### File System Permissions
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `uid=` | integer | current user | File owner user ID |
+| `gid=` | integer | current group | File owner group ID |
+| `filemode=` | octal | `0666` | Default file permissions |
+| `dirmode=` | octal | `0777` | Default directory permissions |
+
+**Examples:**
+```bash
+# Set all files to be owned by root with read-only permissions
+export CUNO_OPTIONS="uid=0 gid=0 filemode=0444 dirmode=0555"
+
+# Web server configuration
+export CUNO_OPTIONS="uid=33 gid=33 filemode=0664 dirmode=0775"
+```
+
+### Static Binary Handling
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `+static` | flag | enabled | Enable static binary interception |
+| `-static` | flag | disabled | Disable static binary interception |
+
+**Usage:**
+```bash
+# Disable static binary support
+export CUNO_OPTIONS="-static"
+
+# Explicitly enable (default behavior)
+export CUNO_OPTIONS="+static"
+```
+
+### Cloud Root Configuration
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `cloudroot=` | string | `/cuno` | Custom cloud storage root path |
+| `cloudrootover` | flag | disabled | Override cloud root for FlexMount |
+
+**Examples:**
+```bash
+# Custom cloud root
+export CUNO_OPTIONS="cloudroot=/my-cloud-storage"
+# Access: /my-cloud-storage/s3/bucket/
+
+# FlexMount override
+export CUNO_OPTIONS="cloudrootover cloudroot=/home/user/mount"
+```
+
+### URI Compatibility
+
+| Option | Format | Description |
+|--------|--------|-------------|
+| `+uricompat` | flag | Enable default URI overrides |
+| `+uricompat=` | string | Enable for specific applications |
+| `+uricompat=<app>/<argN>/<pattern>` | string | Conditional override: applies when argument `<argN>` of `<app>` matches `<pattern>` (see examples below) |
+
+**Default Supported Applications:**
+- `rsync`
+- `ffmpeg`
+- `tar`
+- `samtools`
+- `igv`
+- `fastQC`
+
+**Custom Application Examples:**
+```bash
+# Enable for custom applications
+export CUNO_OPTIONS="+uricompat=myapp:otherapp"
+
+# Java application with specific JAR
+export CUNO_OPTIONS="+uricompat=java/2/myapp.jar"
+
+# Python script with conditional matching
+export CUNO_OPTIONS="+uricompat=python/*/myscript.py"
+```
+
+## Advanced Configuration
+
+### Memory and Performance
+
+| Option | Type | Description |
+|--------|------|-------------|
+| `cache_size=` | string | Set cache size (e.g., `1GB`, `500MB`) |
+| `max_connections=` | integer | Maximum concurrent connections |
+
+### Network Configuration
+
+| Option | Type | Description |
+|--------|------|-------------|
+| `timeout=` | integer | Network request timeout |
+| `retry_count=` | integer | Number of retry attempts |
+
+### Debug and Logging
+
+| Option | Type | Description |
+|--------|------|-------------|
+| `debug=` | integer | Debug verbosity level (0-5) |
+| `log_file=` | string | Log file path |
+
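+A combined sketch of these advanced options (values are illustrative, and the units for `timeout=` are not specified here, so tune them for your environment):
+
+```bash
+# Hypothetical tuning profile: 1GB cache, 32 connections, verbose logging
+export CUNO_OPTIONS="cache_size=1GB max_connections=32 timeout=60 retry_count=3 debug=2 log_file=/tmp/cuno.log"
+```
+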
+## Access Mode Configuration
+
+### Core File Access Mode
+
+Default mode with basic file operations:
+
+```bash
+# Default configuration (no special options needed)
+cuno
+```
+
+**Characteristics:**
+- Dynamic ownership (current user)
+- Standard permissions (`0666` files, `0777` directories)
+- No persistent metadata
+
+### POSIX File Access Mode
+
+Enable persistent file system metadata:
+
+```bash
+# Enable POSIX mode during mount
+cuno mount ~/mount-point --posix
+```
+
+**Capabilities:**
+- Persistent file ownership
+- Modifiable permissions via `chmod`, `chown` (see the sketch below)
+- File timestamps via `touch`
+- Extended attributes support
+
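+A short sketch of persistent metadata in POSIX mode (bucket and file names are illustrative):
+
+```bash
+# With --posix enabled, these changes persist across remounts
+chmod 640 ~/mount-point/s3/my-bucket/report.csv
+touch -d '2025-01-01 00:00:00' ~/mount-point/s3/my-bucket/report.csv
+
+# Changing ownership typically requires appropriate privileges
+sudo chown root:root ~/mount-point/s3/my-bucket/report.csv
+```
+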
+## Configuration File Locations
+
+### Linux
+- User config: `~/.config/cuno/config.yaml`
+- System config: `/etc/cuno/config.yaml`
+
+### macOS
+- User config: `~/Library/Preferences/cuno/config.yaml`
+- System config: `/etc/cuno/config.yaml`
+
+### Windows
+- User config: `%APPDATA%\cuno\config.yaml`
+- System config: `%PROGRAMDATA%\cuno\config.yaml`
+
+## Configuration Examples
+
+### Development Environment
+```bash
+export CUNO_OPTIONS="uid=1000 gid=1000 filemode=0644 dirmode=0755 +uricompat=python:node:java"
+cuno
+```
+
+### Production Web Server
+```bash
+export CUNO_OPTIONS="uid=33 gid=33 filemode=0644 dirmode=0755 cloudroot=/var/cloud"
+cuno mount /var/www/cloud --root s3://web-assets/
+```
+
+### Media Processing Workflow
+```bash
+export CUNO_OPTIONS="+uricompat=ffmpeg:handbrake:mkvtoolnix cache_size=2GB"
+cuno
+```
+
+### Data Science Environment
+```bash
+export CUNO_OPTIONS="+uricompat=python:R:jupyter filemode=0664 dirmode=0775"
+cuno
+```
+
+## FlexMount Configuration
+
+FlexMount combines direct interception with FUSE fallback:
+
+### Basic FlexMount Setup
+```bash
+# 1. Create FUSE mount
+cuno mount ~/cloud-storage
+
+# 2. Use with Object Mount CLI
+cuno -o "cloudrootover cloudroot=$(realpath ~/cloud-storage)"
+```
+
+### Advanced FlexMount Configuration
+```bash
+# Custom cloud root FlexMount
+cuno -o "cloudroot=/alt-root" mount ~/storage --root /alt-root
+cuno -o "cloudrootover cloudroot=$(realpath ~/storage)"
+```
+
+## Troubleshooting Configuration
+
+### Verify Current Configuration
+```bash
+# Check environment variables
+env | grep CUNO
+
+# Test configuration
+cuno run bash -c 'echo "Config test successful"'
+```
+
+### Common Configuration Issues
+
+**Permission Problems:**
+```bash
+# Fix: Set appropriate uid/gid
+export CUNO_OPTIONS="uid=$(id -u) gid=$(id -g)"
+```
+
+**Path Resolution Issues:**
+```bash
+# Fix: Use absolute paths for cloudroot
+export CUNO_OPTIONS="cloudroot=$(realpath ~/cloud-storage)"
+```
+
+**Application Compatibility:**
+```bash
+# Fix: Add specific application to uricompat
+export CUNO_OPTIONS="+uricompat=myapp"
+```
+
+## Validation and Testing
+
+### Configuration Validation
+```bash
+# Test basic functionality
+cuno run ls /cuno/
+
+# Test specific path format
+cuno run ls s3://test-bucket/
+
+# Test FUSE mount
+ls ~/mount-point/s3/test-bucket/
+```
+
+### Performance Testing
+```bash
+# Benchmark file operations
+time cuno run cp large-file.bin s3://bucket/
+time cuno run rsync -av directory/ s3://bucket/backup/
+```
+
+This reference covers all Object Mount configuration options. For platform-specific configuration details, refer to the installation guides for your operating system.
\ No newline at end of file
diff --git a/app/(docs)/object-mount/tutorials/_meta.json b/app/(docs)/object-mount/tutorials/_meta.json
new file mode 100644
index 000000000..9b5a1208c
--- /dev/null
+++ b/app/(docs)/object-mount/tutorials/_meta.json
@@ -0,0 +1,9 @@
+{
+ "title": "Tutorials",
+ "nav": [
+ {
+ "title": "Your first mount",
+ "id": "your-first-mount"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/app/(docs)/object-mount/tutorials/your-first-mount.md b/app/(docs)/object-mount/tutorials/your-first-mount.md
new file mode 100644
index 000000000..281403f2e
--- /dev/null
+++ b/app/(docs)/object-mount/tutorials/your-first-mount.md
@@ -0,0 +1,299 @@
+---
+title: Your first mount
+docId: your-first-object-mount
+metadata:
+ title: Your First Object Mount Tutorial
+ description: Complete 15-minute hands-on tutorial to mount and access Storj files using Object Mount with step-by-step instructions.
+---
+
+This tutorial walks you through mounting your first Storj bucket as a filesystem using Object Mount. By the end, you'll understand how to seamlessly access cloud storage as if it were local files.
+
+## What you'll build
+
+In this 15-minute hands-on tutorial, you'll:
+
+- Install Object Mount on your Linux system
+- Configure credentials to access your Storj storage
+- Mount a bucket as a local filesystem
+- Create, edit, and manage files directly in cloud storage
+- Understand the performance characteristics and best practices
+
+**Expected time to complete**: 15-20 minutes
+
+## Prerequisites
+
+Before starting, ensure you have:
+
+- A Linux system (Ubuntu 18.04+, Debian 9+, RHEL 7+, or CentOS 7+)
+- Administrative privileges to install packages
+- A Storj account with S3-compatible credentials
+- At least one existing bucket with some test files
+- Basic command-line familiarity
+
+If you need to set up credentials or buckets, complete the [Getting Started guide](docId:AsyYcUJFbO1JI8-Tu8tW3) first.
+
+## Step 1: Install Object Mount
+
+Choose the installation method for your Linux distribution:
+
+{% tabs %}
+
+{% tab label="Ubuntu/Debian" %}
+
+```shell
+# Download and install the package
+curl -L https://github.com/storj/edge/releases/latest/download/object-mount_linux_amd64.deb -o object-mount.deb
+sudo dpkg -i object-mount.deb
+```
+
+{% /tab %}
+
+{% tab label="RHEL/CentOS" %}
+
+```shell
+# Download and install the package
+curl -L https://github.com/storj/edge/releases/latest/download/object-mount_linux_amd64.rpm -o object-mount.rpm
+sudo rpm -i object-mount.rpm
+```
+
+{% /tab %}
+
+{% tab label="Generic Linux" %}
+
+```shell
+# Download and extract the binary
+curl -L https://github.com/storj/edge/releases/latest/download/object-mount_linux_amd64.tar.gz -o object-mount.tar.gz
+tar -xzf object-mount.tar.gz
+sudo mv object-mount /usr/local/bin/
+sudo chmod +x /usr/local/bin/object-mount
+```
+
+{% /tab %}
+
+{% /tabs %}
+
+**Expected outcome**: Object Mount should be installed and available in your PATH. Verify with:
+
+```shell
+object-mount --version
+```
+
+## Step 2: Configure your credentials
+
+Create a configuration file with your Storj credentials:
+
+```shell
+# Create config directory
+mkdir -p ~/.config/object-mount
+
+# Create configuration file
+cat > ~/.config/object-mount/config.yaml << EOF
+credentials:
+ access_key_id: "your_access_key_here"
+ secret_access_key: "your_secret_key_here"
+ endpoint: "https://gateway.storjshare.io"
+
+# Optional performance settings
+cache:
+ directory: "/tmp/object-mount-cache"
+ size: "1GB"
+
+logging:
+ level: "info"
+EOF
+```
+
+Replace `your_access_key_here` and `your_secret_key_here` with your actual Storj S3 credentials.
+
+**Expected outcome**: Your configuration file should be created successfully. Test connectivity:
+
+```shell
+object-mount test-connection
+```
+
+## Step 3: Create a mount point
+
+Prepare a directory where your bucket will appear:
+
+```shell
+# Create mount directory
+mkdir -p ~/storj-mount
+
+# Verify the directory is empty
+ls -la ~/storj-mount
+```
+
+**Expected outcome**: You should see an empty directory that will serve as your mount point.
+
+## Step 4: Mount your bucket
+
+Now mount your Storj bucket as a filesystem:
+
+```shell
+# Mount bucket (replace 'my-bucket' with your actual bucket name)
+object-mount mount my-bucket ~/storj-mount
+
+# Verify mount succeeded
+mount | grep object-mount
+```
+
+You should see output indicating the mount is active.
+
+**Expected outcome**: Your bucket is now mounted and accessible as a local directory. The command should complete without errors.
+
+## Step 5: Explore your mounted storage
+
+Navigate to your mount point and explore:
+
+```shell
+# Change to mount directory
+cd ~/storj-mount
+
+# List files (should show your bucket contents)
+ls -la
+
+# Check filesystem type
+df -h ~/storj-mount
+```
+
+**Expected outcome**: You should see the files and directories from your Storj bucket listed as if they were local files.
+
+## Step 6: Create and edit files
+
+Now let's create and modify files directly in cloud storage:
+
+```shell
+# Create a new file
+echo "Hello from Object Mount!" > test-file.txt
+
+# View the file
+cat test-file.txt
+
+# Edit the file with your preferred editor
+nano test-file.txt # or vim, emacs, etc.
+```
+
+Add some additional content and save the file.
+
+**Expected outcome**: You should be able to create, view, and edit files seamlessly. The changes are automatically synced to your Storj storage.
+
+## Step 7: Test file operations
+
+Perform various file operations to understand Object Mount capabilities:
+
+```shell
+# Create a directory
+mkdir project-files
+
+# Copy files
+cp test-file.txt project-files/copy-of-test.txt
+
+# Move/rename files
+mv test-file.txt renamed-test.txt
+
+# Check file permissions
+ls -la renamed-test.txt
+
+# Create a symbolic link
+ln -s renamed-test.txt link-to-test.txt
+```
+
+**Expected outcome**: All standard filesystem operations should work normally, with changes reflected in your Storj storage.
+
+## Step 8: Monitor performance
+
+Open a second terminal and monitor Object Mount activity:
+
+```shell
+# In second terminal - monitor mount activity
+object-mount status
+
+# Check cache usage
+du -sh /tmp/object-mount-cache
+
+# Monitor real-time activity (if available)
+object-mount logs --follow
+```
+
+Try copying a larger file in your first terminal and watch the activity in the second.
+
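+If you don't have a large file handy, you can generate one first (the 100 MB size is arbitrary):
+
+```shell
+# In first terminal - create and copy a 100 MB test file
+dd if=/dev/urandom of=big-test.bin bs=1M count=100
+cp big-test.bin ~/storj-mount/
+```
+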
+**Expected outcome**: You should see Object Mount efficiently managing data transfer and caching operations.
+
+## Step 9: Understand the object storage integration
+
+Verify that your files are actually stored in Storj:
+
+```shell
+# In a third terminal, check your bucket using CLI tools
+# (if you have rclone or aws cli configured)
+rclone ls storj:my-bucket
+
+# Or use the Storj Console web interface
+# Navigate to your bucket and verify files are there
+```
+
+**Expected outcome**: Files created through Object Mount should be visible in your Storj bucket through other access methods.
+
+## Step 10: Unmount and cleanup
+
+When finished, properly unmount your storage:
+
+```shell
+# Change out of mount directory
+cd ~
+
+# Unmount the filesystem
+object-mount unmount ~/storj-mount
+
+# Verify unmount
+ls ~/storj-mount
+# (should be empty)
+
+# Clean up cache if desired
+rm -rf /tmp/object-mount-cache
+```
+
+**Expected outcome**: The mount should be cleanly removed and the mount directory should be empty.
+
+## What you've accomplished
+
+Congratulations! You've successfully used Object Mount to:
+
+- Install and configure Object Mount for Storj access
+- Mount cloud storage as a local filesystem
+- Perform standard file operations on cloud-stored data
+- Experience the seamless integration between POSIX applications and object storage
+- Monitor performance and understand caching behavior
+
+## Understanding what happened
+
+**Object Mount magic**: Object Mount intercepted your filesystem calls and translated them to object storage operations. When you created `test-file.txt`, it became an object in your Storj bucket. When you edited it, Object Mount used write-behind caching to make the change feel local while syncing it to your bucket.
+
+**Performance characteristics**:
+- **Reads**: Very fast due to intelligent caching
+- **Writes**: Optimized with write-behind caching
+- **Metadata operations**: Cached for performance
+- **Large files**: Handled efficiently with streaming and partial reads
+
+**POSIX compliance**: Object Mount provides NFS-equivalent consistency guarantees while maintaining compatibility with all standard filesystem operations.
+
+## What's next
+
+Now that you understand the basics of Object Mount:
+
+### Explore Advanced Features
+- [Set up Object Mount Fusion](docId:Xaegoh6iedietahf) for enhanced performance with frequent writes
+- [Configure POSIX permissions and metadata](#) for multi-user environments
+- [Optimize performance settings](#) for your specific workload
+
+### Production Deployment
+- [Install Object Mount in containerized environments](#)
+- [Set up monitoring and logging](#)
+- [Configure high-availability mounts](#)
+
+### Application Integration
+- [Use Object Mount with media editing workflows](#)
+- [Integrate with data processing pipelines](#)
+- [Set up development environments with cloud storage](#)
+
+Ready to explore media workflows? Check out [Object Mount for video editing](docId:media-workflows) to see how to edit large media files directly from cloud storage.
\ No newline at end of file
From 58937b07a81098ddc6976f76f81896dd102c29db Mon Sep 17 00:00:00 2001
From: onionjake <113088+onionjake@users.noreply.github.com>
Date: Fri, 29 Aug 2025 13:47:05 -0600
Subject: [PATCH 2/8] Next phase of adopting Diataxis
---
app/(docs)/dcs/how-to/_meta.json | 20 +
app/(docs)/dcs/how-to/migrate-from-s3.md | 273 ++++++
.../dcs/how-to/optimize-upload-performance.md | 175 ++++
app/(docs)/dcs/how-to/setup-bucket-logging.md | 171 ++++
.../dcs/how-to/setup-object-versioning.md | 126 +++
app/(docs)/dcs/how-to/use-presigned-urls.md | 168 ++++
app/(docs)/dcs/tutorials/_meta.json | 13 +
.../dcs/tutorials/build-your-first-app.md | 827 ++++++++++++++++++
.../tutorials/your-first-week-with-storj.md | 577 ++++++++++++
app/(docs)/node/how-to/_meta.json | 12 +
.../node/how-to/fix-database-corruption.md | 267 ++++++
.../node/how-to/monitor-node-performance.md | 405 +++++++++
app/(docs)/node/how-to/setup-remote-access.md | 382 ++++++++
app/(docs)/object-mount/how-to/_meta.json | 25 +
.../how-to/configure-posix-permissions.md | 204 +++++
.../how-to/install-debian-ubuntu.md | 181 ++++
.../how-to/install-rhel-centos.md | 280 ++++++
.../how-to/optimize-large-files.md | 255 ++++++
.../how-to/troubleshoot-mount-issues.md | 286 ++++++
19 files changed, 4647 insertions(+)
create mode 100644 app/(docs)/dcs/how-to/migrate-from-s3.md
create mode 100644 app/(docs)/dcs/how-to/optimize-upload-performance.md
create mode 100644 app/(docs)/dcs/how-to/setup-bucket-logging.md
create mode 100644 app/(docs)/dcs/how-to/setup-object-versioning.md
create mode 100644 app/(docs)/dcs/how-to/use-presigned-urls.md
create mode 100644 app/(docs)/dcs/tutorials/_meta.json
create mode 100644 app/(docs)/dcs/tutorials/build-your-first-app.md
create mode 100644 app/(docs)/dcs/tutorials/your-first-week-with-storj.md
create mode 100644 app/(docs)/node/how-to/fix-database-corruption.md
create mode 100644 app/(docs)/node/how-to/monitor-node-performance.md
create mode 100644 app/(docs)/node/how-to/setup-remote-access.md
create mode 100644 app/(docs)/object-mount/how-to/_meta.json
create mode 100644 app/(docs)/object-mount/how-to/configure-posix-permissions.md
create mode 100644 app/(docs)/object-mount/how-to/install-debian-ubuntu.md
create mode 100644 app/(docs)/object-mount/how-to/install-rhel-centos.md
create mode 100644 app/(docs)/object-mount/how-to/optimize-large-files.md
create mode 100644 app/(docs)/object-mount/how-to/troubleshoot-mount-issues.md
diff --git a/app/(docs)/dcs/how-to/_meta.json b/app/(docs)/dcs/how-to/_meta.json
index 1a7157edd..b87daf07d 100644
--- a/app/(docs)/dcs/how-to/_meta.json
+++ b/app/(docs)/dcs/how-to/_meta.json
@@ -12,6 +12,26 @@
{
"title": "Configure CORS",
"id": "configure-cors"
+ },
+ {
+ "title": "Set up object versioning",
+ "id": "setup-object-versioning"
+ },
+ {
+ "title": "Use presigned URLs",
+ "id": "use-presigned-urls"
+ },
+ {
+ "title": "Optimize upload performance",
+ "id": "optimize-upload-performance"
+ },
+ {
+ "title": "Set up bucket logging",
+ "id": "setup-bucket-logging"
+ },
+ {
+ "title": "Migrate from AWS S3",
+ "id": "migrate-from-s3"
}
]
}
\ No newline at end of file
diff --git a/app/(docs)/dcs/how-to/migrate-from-s3.md b/app/(docs)/dcs/how-to/migrate-from-s3.md
new file mode 100644
index 000000000..618f8121b
--- /dev/null
+++ b/app/(docs)/dcs/how-to/migrate-from-s3.md
@@ -0,0 +1,273 @@
+---
+title: Migrate from AWS S3
+docId: migrate-from-s3-guide
+metadata:
+ title: How to Migrate from AWS S3 to Storj DCS
+ description: Complete guide to migrate your data and applications from Amazon S3 to Storj DCS
+---
+
+Migrate your data and applications from Amazon S3 to Storj DCS with minimal disruption to your workflows.
+
+## Prerequisites
+
+- AWS S3 buckets and data to migrate
+- AWS CLI or S3-compatible tools (Rclone recommended)
+- Storj DCS account with project set up
+- S3-compatible credentials for Storj DCS
+
+## Migration planning
+
+### Assess your current setup
+
+1. **Inventory your S3 buckets**: List all buckets and estimate data volumes (see the commands below)
+2. **Document S3 features in use**: Versioning, lifecycle policies, CORS, etc.
+3. **Review access patterns**: Identify high-traffic vs. archival data
+4. **Check integrations**: Note applications and services using S3
+
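+For the inventory step, the AWS CLI can list buckets and summarize sizes (the bucket name is illustrative):
+
+```bash
+# List all buckets in the source AWS account
+aws s3 ls
+
+# Estimate object count and total size for one bucket
+aws s3 ls s3://source-bucket --recursive --summarize | tail -2
+```
+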
+### Create migration timeline
+
+- **Small datasets (< 1TB)**: Can typically migrate in hours
+- **Medium datasets (1-10TB)**: Plan for 1-3 days
+- **Large datasets (> 10TB)**: May require 1-2 weeks with parallel transfers
+
+## Set up Storj DCS
+
+### Create destination buckets
+
+Match your S3 bucket structure in Storj:
+
+```bash
+# Create buckets using uplink CLI
+uplink mb sj://production-data
+uplink mb sj://staging-assets
+uplink mb sj://backup-files
+```
+
+### Configure equivalent features
+
+Enable features that match your S3 setup:
+- **Object versioning**: [Set up versioning](docId:setup-object-vers1) if used in S3
+- **CORS policies**: [Configure CORS](docId:configure-cors) for web applications
+- **Bucket logging**: [Request logging](docId:setup-bucket-logging) if needed
+
+## Choose migration method
+
+### Method 1: Rclone (recommended)
+
+Best for most migrations due to parallel transfers and resume capability.
+
+#### Configure Rclone for both providers
+
+```bash
+# Configure AWS S3 source
+rclone config create aws-source s3 \
+ provider=AWS \
+ access_key_id=YOUR_AWS_ACCESS_KEY \
+ secret_access_key=YOUR_AWS_SECRET_KEY \
+ region=us-east-1
+
+# Configure Storj destination
+rclone config create storj-dest s3 \
+ provider=Other \
+ access_key_id=YOUR_STORJ_ACCESS_KEY \
+ secret_access_key=YOUR_STORJ_SECRET_KEY \
+ endpoint=https://gateway.storjshare.io
+```
+
+#### Perform the migration
+
+```bash
+# Migrate single bucket with progress tracking
+rclone copy aws-source:source-bucket storj-dest:dest-bucket \
+ --progress --stats 30s \
+ --transfers 4 \
+ --s3-chunk-size 64M \
+ --checksum
+
+# Migrate multiple buckets
+rclone copy aws-source: storj-dest: \
+ --progress --stats 30s \
+ --transfers 2 \
+ --s3-chunk-size 64M \
+ --exclude "*.tmp"
+```
+
+### Method 2: AWS CLI to Storj
+
+Good for scripted migrations and AWS CLI users.
+
+#### Set up dual configuration
+
+```bash
+# Configure AWS CLI with Storj profile
+aws configure set profile.storj.s3.endpoint_url https://gateway.storjshare.io
+aws configure set profile.storj.aws_access_key_id YOUR_STORJ_ACCESS_KEY
+aws configure set profile.storj.aws_secret_access_key YOUR_STORJ_SECRET_KEY
+```
+
+Support for `endpoint_url` in profile configuration arrived in AWS CLI v2.13; if your version ignores the configured endpoint, pass `--endpoint-url https://gateway.storjshare.io` explicitly on each command.
+
+#### Migrate data
+
+The AWS CLI uses one profile, and therefore one endpoint, per command, so it cannot copy directly between AWS and Storj in a single `sync`. Stage the data locally instead:
+
+```bash
+# Download from AWS S3 using your default AWS profile
+aws s3 sync s3://aws-source-bucket ./staging --exclude "*.tmp"
+
+# Upload the staged data to Storj using the storj profile
+aws s3 sync ./staging s3://storj-dest-bucket --profile storj
+```
+
+## Optimize migration performance
+
+### Large file transfers
+
+```bash
+# Use maximum parallelism for large files
+rclone copy aws-source:large-files storj-dest:large-files \
+ --transfers 2 \
+ --s3-upload-concurrency 32 \
+ --s3-chunk-size 64M \
+ --progress
+```
+
+### Many small files
+
+```bash
+# Increase concurrent transfers for small files
+rclone copy aws-source:small-files storj-dest:small-files \
+ --transfers 8 \
+ --s3-chunk-size 64M \
+ --progress
+```
+
+### Resume interrupted transfers
+
+```bash
+# Rclone automatically resumes with same command
+rclone copy aws-source:bucket storj-dest:bucket \
+ --progress \
+ --transfers 4
+```
+
+## Update applications
+
+### Change S3 endpoints
+
+Update your application configuration to use Storj:
+
+```python
+# Before (AWS S3)
+s3_client = boto3.client('s3', region_name='us-east-1')
+
+# After (Storj DCS)
+s3_client = boto3.client(
+ 's3',
+ endpoint_url='https://gateway.storjshare.io',
+ aws_access_key_id='your_storj_access_key',
+ aws_secret_access_key='your_storj_secret_key'
+)
+```
+
+### Update SDK configurations
+
+Most S3-compatible SDKs only need endpoint URL changes:
+
+```javascript
+// Node.js AWS SDK v3
+const s3Client = new S3Client({
+ endpoint: "https://gateway.storjshare.io",
+ credentials: {
+ accessKeyId: "your_storj_access_key",
+ secretAccessKey: "your_storj_secret_key"
+ },
+ region: "us-east-1" // Required but not used by Storj
+});
+```
+
+## Verification
+
+### Validate data integrity
+
+```bash
+# Compare object counts (list-objects-v2 returns at most 1,000 keys per call;
+# paginate or use `aws s3 ls --recursive --summarize` for larger buckets)
+aws s3api list-objects-v2 --bucket source-bucket | jq '.KeyCount'
+aws s3api list-objects-v2 --bucket dest-bucket --profile storj | jq '.KeyCount'
+
+# Verify file checksums with rclone
+rclone check aws-source:source-bucket storj-dest:dest-bucket --one-way
+```
+
+### Test application functionality
+
+1. **Update staging environment**: Test applications against Storj endpoints
+2. **Verify uploads/downloads**: Confirm all operations work correctly (a smoke-test sketch follows this list)
+3. **Check performance**: Monitor transfer speeds and latency
+4. **Test error handling**: Ensure graceful handling of any compatibility issues
+
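+A quick round-trip smoke test, assuming the `storj` profile configured earlier (the bucket name is illustrative):
+
+```bash
+# Upload, download, and compare a small test object
+echo "smoke" > smoke.txt
+aws s3 cp smoke.txt s3://storj-dest-bucket/smoke.txt --profile storj
+aws s3 cp s3://storj-dest-bucket/smoke.txt smoke-back.txt --profile storj
+diff smoke.txt smoke-back.txt && echo "round-trip OK"
+aws s3 rm s3://storj-dest-bucket/smoke.txt --profile storj
+```
+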
+## Production cutover
+
+### Gradual migration approach
+
+1. **Phase 1**: Migrate archival/backup data first
+2. **Phase 2**: Migrate staging environments
+3. **Phase 3**: Switch production traffic during low-usage periods
+
+### DNS and load balancer updates
+
+For applications using custom domains:
+- Update DNS CNAME records to point to Storj endpoints
+- Modify load balancer configurations
+- Update CDN origin settings if applicable
+
+## Post-migration cleanup
+
+### Monitor performance
+
+Track key metrics for the first few weeks:
+- Transfer speeds and latency
+- Error rates and failed requests
+- Storage costs compared to S3
+
+### Decommission AWS resources
+
+After successful migration:
+1. **Backup verification**: Ensure all data migrated correctly
+2. **Stop S3 lifecycle policies**: Prevent unexpected deletions (see the commands below)
+3. **Delete S3 buckets**: Remove old buckets to stop billing
+4. **Clean up IAM roles**: Remove unused S3 access policies
+
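+For steps 2 and 3, the AWS CLI equivalents look like this (the bucket name is illustrative; `rb --force` deletes the bucket and everything in it, so run it only after verification):
+
+```bash
+# Remove lifecycle rules so nothing expires during final verification
+aws s3api delete-bucket-lifecycle --bucket source-bucket
+
+# Once verified, delete the bucket and its remaining contents
+aws s3 rb s3://source-bucket --force
+```
+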
+## Troubleshooting
+
+**Slow migration speeds**:
+- Increase `--transfers` and `--s3-upload-concurrency`
+- Check bandwidth limitations
+- Consider migrating during off-peak hours
+
+**Authentication errors**:
+- Verify Storj S3 credentials are correct
+- Ensure endpoint URL uses `https://gateway.storjshare.io`
+- Check that access grants have proper permissions
+
+**Application compatibility issues**:
+- Review [S3 API compatibility](docId:your-s3-compat-doc) documentation
+- Test specific S3 features your application uses
+- Contact Storj support for compatibility questions
+
+## Cost optimization
+
+### Compare ongoing costs
+
+Monitor your new cost structure:
+- **Storage**: $4/TB/month vs. S3 pricing
+- **Bandwidth**: $7/TB egress vs. S3 data transfer costs
+- **Operations**: No per-request charges vs. S3 request pricing
+
+### Implement cost controls
+
+- Set up billing alerts
+- Monitor usage patterns
+- Optimize data lifecycle policies
+
+## Next steps
+
+- Set up [performance monitoring](docId:your-monitoring-guide) for ongoing optimization
+- Configure [backup strategies](docId:your-backup-guide) for critical data
+- Learn about [advanced Storj features](docId:your-advanced-guide) to maximize benefits
\ No newline at end of file
diff --git a/app/(docs)/dcs/how-to/optimize-upload-performance.md b/app/(docs)/dcs/how-to/optimize-upload-performance.md
new file mode 100644
index 000000000..82b504e5f
--- /dev/null
+++ b/app/(docs)/dcs/how-to/optimize-upload-performance.md
@@ -0,0 +1,175 @@
+---
+title: Optimize upload performance
+docId: optimize-upload-perf
+metadata:
+ title: How to Optimize Upload Performance - Storj DCS
+ description: Improve file upload speeds using parallel transfers with Rclone and Uplink CLI
+---
+
+Optimize your upload performance to Storj DCS using parallel transfers and proper configuration settings.
+
+## Prerequisites
+
+- Storj DCS account with project and bucket set up
+- Rclone configured with Storj (recommended for multiple files)
+- OR Uplink CLI installed (for single large files)
+- Files ready for upload
+- Sufficient RAM for parallel transfers
+
+## Choose the right tool
+
+- **Multiple small-to-medium files (< 1GB each)**: use Rclone with the `--transfers` flag
+- **Single large files (> 1GB)**: use Uplink CLI with the `--parallelism` flag
+- **Mixed workloads**: start with Rclone
+
+## Optimize single large file uploads
+
+### Calculate optimal settings
+
+For a large file, determine concurrency based on file size:
+- File size ÷ 64MB = maximum number of parallel segments
+- Each segment uses ~64MB of RAM during transfer
+
+Example for 1GB file: 1024MB ÷ 64MB = 16 segments maximum
+
+### Upload with Rclone
+
+```bash
+# Upload 1GB file with optimal parallelism
+rclone copy --progress \
+ --s3-upload-concurrency 16 \
+ --s3-chunk-size 64M \
+ large-file.zip remote:bucket
+```
+
+### Upload with Uplink CLI
+
+```bash
+# Upload with native Storj performance
+uplink cp large-file.zip sj://bucket/large-file.zip --parallelism 4
+```
+
+## Optimize multiple file uploads
+
+### Upload multiple files simultaneously
+
+```bash
+# Upload 4 files at once with Rclone
+rclone copy --progress \
+ --transfers 4 \
+ --s3-upload-concurrency 16 \
+ --s3-chunk-size 64M \
+ /local/folder remote:bucket
+```
+
+### Calculate memory usage
+
+Memory usage = transfers × concurrency × chunk size
+- 4 transfers × 16 concurrency × 64MB = 4GB RAM required
+
+## Configuration examples
+
+### Small files (< 100MB)
+```bash
+rclone copy --progress \
+ --transfers 10 \
+ --s3-chunk-size 64M \
+ /local/photos remote:bucket
+```
+
+### Medium files (100MB - 1GB)
+```bash
+rclone copy --progress \
+ --transfers 4 \
+ --s3-upload-concurrency 8 \
+ --s3-chunk-size 64M \
+ /local/videos remote:bucket
+```
+
+### Large files (> 1GB)
+```bash
+rclone copy --progress \
+ --transfers 2 \
+ --s3-upload-concurrency 32 \
+ --s3-chunk-size 64M \
+ /local/archives remote:bucket
+```
+
+## Monitor and adjust performance
+
+### Watch transfer progress
+
+Add monitoring flags to see performance:
+```bash
+rclone copy --progress --stats 30s \
+ --transfers 4 \
+ --s3-upload-concurrency 16 \
+ --s3-chunk-size 64M \
+ /local/folder remote:bucket
+```
+
+### Test different settings
+
+1. Start with conservative settings
+2. Monitor RAM and CPU usage
+3. Gradually increase concurrency/transfers (see the sweep sketch below)
+4. Find optimal balance for your system
+
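+One way to compare settings is a small timing sweep (paths and values are illustrative):
+
+```bash
+# Time the same sample upload at increasing concurrency levels
+for c in 4 8 16; do
+  echo "s3-upload-concurrency=$c"
+  time rclone copy --transfers 4 \
+    --s3-upload-concurrency "$c" \
+    --s3-chunk-size 64M \
+    /local/sample "remote:bucket/bench-$c"
+done
+```
+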
+## Verification
+
+1. **Check upload completion**: Verify all files appear in your bucket
+2. **Monitor system resources**: Ensure RAM/CPU usage stays manageable
+3. **Measure throughput**: Compare upload speeds with different settings
+4. **Test large files**: Confirm segment parallelism works correctly
+
+## Troubleshooting
+
+**Out of memory errors**:
+- Reduce `--transfers` or `--s3-upload-concurrency`
+- Monitor RAM usage during transfers
+- Consider smaller `--s3-chunk-size` for very memory-limited systems
+
+**Slow upload speeds**:
+- Increase concurrency if you have available RAM
+- Check network bandwidth limitations
+- Try Uplink CLI for single large files
+
+**Transfer failures**:
+- Reduce parallelism settings and retry
+- Check network stability
+- Verify bucket permissions and access
+
+## Advanced optimization
+
+### System-specific tuning
+
+Calculate optimal settings for your system:
+```bash
+# Check available RAM
+free -h
+
+# Monitor transfer performance
+htop
+
+# Test network bandwidth
+speedtest-cli
+```
+
+### Batch operations
+
+For regular uploads, create scripts with optimized settings:
+```bash
+#!/bin/bash
+# upload-optimized.sh
+rclone copy --progress \
+ --transfers 6 \
+ --s3-upload-concurrency 12 \
+ --s3-chunk-size 64M \
+ "$1" remote:bucket
+```
+
+## Next steps
+
+- Learn about [download performance optimization](docId:optimize-download-perf)
+- Set up [monitoring and analytics](docId:bucket-logging) for transfer metrics
+- Configure [automated sync](docId:use-rclone) for ongoing file management
\ No newline at end of file
diff --git a/app/(docs)/dcs/how-to/setup-bucket-logging.md b/app/(docs)/dcs/how-to/setup-bucket-logging.md
new file mode 100644
index 000000000..8e07f83fa
--- /dev/null
+++ b/app/(docs)/dcs/how-to/setup-bucket-logging.md
@@ -0,0 +1,171 @@
+---
+title: Set up bucket logging
+docId: setup-bucket-logging
+metadata:
+ title: How to Set Up Bucket Logging - Storj DCS
+ description: Enable server access logging for your Storj DCS buckets to track all requests and operations
+---
+
+Enable bucket logging to track all access requests to your buckets. This feature is available upon request and provides detailed logs in S3 server access log format.
+
+## Prerequisites
+
+- Active Storj DCS project
+- Target bucket to monitor
+- Destination bucket for storing logs
+- Write-only access grant for the destination bucket
+
+## Request bucket logging activation
+
+### Submit support request
+
+1. Go to [Storj DCS Support](https://supportdcs.storj.io/hc/en-us/requests/new?ticket_form_id=360000379291)
+2. Select "Enable Bucket Logging" as the subject
+3. Allow up to two weeks for processing
+
+### Prepare required information
+
+Gather the following details for your request:
+
+**Source bucket information:**
+- Satellite (AP1, EU1, or US1)
+- Project name
+- Bucket name(s) to monitor
+
+**Destination bucket information:**
+- Destination project name
+- Destination bucket name
+- Optional: Key prefix for log files
+- Write-only access grant (see next section)
+
+## Create write-only access grant
+
+### Generate the access grant
+
+1. **Open Storj console**: Log in to your satellite UI
+2. **Create new access**: Click "New Access Key" → "Access Grant"
+3. **Name the grant**: Use descriptive name like "bucket-logging-destination"
+
+### Configure advanced options
+
+4. **Select Advanced Options**: Click "Advanced Options" on the second screen
+5. **Set encryption passphrase**: Enter a secure passphrase
+
+{% callout type="warning" %}
+**Important:** Save this passphrase securely. You'll need it to decrypt log data later.
+{% /callout %}
+
+### Set permissions
+
+6. **Choose Write Only**: Select "Write Only" permissions
+7. **Limit to destination bucket**: Specify the exact bucket for logs
+8. **Set no expiration**: Select "No Expiration" to ensure continuous logging
+
+### Complete access grant creation
+
+9. **Review settings**: Verify all selections are correct
+10. **Create access**: Click "Create Access" to generate the grant
+11. **Save the access grant**: Copy and securely store the generated access grant string
+
+## Submit your logging request
+
+Include this information in your support ticket:
+
+```
+Subject: Enable Bucket Logging
+
+Source Bucket Details:
+- Satellite: US1
+- Project Name: my-production-project
+- Bucket Name: my-monitored-bucket
+
+Destination Details:
+- Destination Project: my-logging-project
+- Destination Bucket: access-logs-bucket
+- Prefix (optional): prod-logs/
+- Write-only Access Grant: [paste your access grant here]
+```
+
+## Verification
+
+After logging is enabled by Storj support:
+
+### Check for log files
+
+1. **Wait for activity**: Perform some operations on your monitored bucket
+2. **Check destination bucket**: Look for log files in your destination bucket
+3. **Verify log format**: Confirm logs follow the expected naming pattern:
+ ```
+ [prefix]YYYY-MM-DD-hh-mm-ss-[UniqueString]
+ ```
+
+### Download and examine logs
+
+```bash
+# Download recent log files
+uplink cp sj://access-logs-bucket/prod-logs/ ./logs/ --recursive
+
+# Examine log content (example format)
+cat 2024-08-29-15-30-45-ABC123.log
+```
+
+## Understanding log format
+
+Log entries follow [Amazon S3 Server Access Log Format](https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html) with these key fields:
+
+- **Timestamp**: When the request occurred
+- **Remote IP**: Client IP address
+- **Requester**: Authenticated user ID
+- **Request ID**: Unique identifier for the request
+- **Operation**: API operation performed (GET, PUT, DELETE, etc.)
+- **Key**: Object key accessed
+- **HTTP Status**: Response code (200, 404, etc.)
+- **Bytes Sent**: Size of response
+- **User Agent**: Client application identifier
+
+### Example log entry
+
+```
+project-id bucket-name [29/Aug/2024:15:30:45 +0000] 192.168.1.100
+user-id ABC123 GetObject myfile.pdf "GET /bucket-name/myfile.pdf HTTP/1.1"
+200 - 1024 - - - "curl/7.68.0" - request-signature SigV4
+```
+
+## Troubleshooting
+
+**No log files appearing**:
+- Verify bucket logging was activated by support
+- Confirm the access grant has write permissions to destination bucket
+- Check that monitored bucket has actual activity
+
+**Cannot decrypt log files**:
+- Ensure you're using the correct encryption passphrase from access grant creation
+- Verify the access grant hasn't expired
+
+**Access denied errors**:
+- Confirm the write-only access grant is valid
+- Check that destination bucket exists and is accessible
+- Verify project permissions
+
+## Monitor and manage logs
+
+### Log rotation and storage
+
+Logs are automatically created with timestamps. Consider:
+- Setting up lifecycle policies for log retention
+- Monitoring storage costs for log accumulation
+- Implementing automated log processing pipelines
+
+### Analyze access patterns
+
+Use logs to:
+- Monitor access frequency and patterns (see the parsing sketch below)
+- Identify unusual access attempts
+- Track bandwidth usage per client
+- Audit compliance requirements
+
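+A minimal parsing sketch, assuming the downloaded files follow the standard S3 access-log field order (the operation is the eighth whitespace-separated field, the client IP the fifth):
+
+```bash
+# Tally requests per operation across downloaded logs
+awk '{print $8}' ./logs/* | sort | uniq -c | sort -rn
+
+# Top client IPs by request count
+awk '{print $5}' ./logs/* | sort | uniq -c | sort -rn | head
+```
+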
+## Next steps
+
+- Set up [automated log processing](docId:your-log-processing-guide)
+- Configure [monitoring alerts](docId:your-monitoring-guide) for unusual access patterns
+- Learn about [security best practices](docId:your-security-guide) for log management
\ No newline at end of file
diff --git a/app/(docs)/dcs/how-to/setup-object-versioning.md b/app/(docs)/dcs/how-to/setup-object-versioning.md
new file mode 100644
index 000000000..edf15cc83
--- /dev/null
+++ b/app/(docs)/dcs/how-to/setup-object-versioning.md
@@ -0,0 +1,126 @@
+---
+title: Set up object versioning
+docId: setup-object-vers1
+metadata:
+ title: How to Set Up Object Versioning - Storj DCS
+ description: Step-by-step guide to enable object versioning on your Storj DCS buckets for data protection and recovery
+---
+
+Set up object versioning to preserve, retrieve, and restore every version of every object in your bucket. This adds data protection against accidental deletions and overwrites.
+
+## Prerequisites
+
+- Storj DCS account with active project
+- S3-compatible credentials (access key and secret key)
+- Bucket where you want to enable versioning
+- S3-compatible tool or SDK (AWS CLI, boto3, etc.)
+
+## Enable versioning on a bucket
+
+### Using AWS CLI
+
+Configure your credentials and enable versioning:
+
+```bash
+# Configure credentials (one time setup)
+aws configure set aws_access_key_id YOUR_ACCESS_KEY
+aws configure set aws_secret_access_key YOUR_SECRET_KEY
+aws configure set default.region us-east-1
+
+# Enable versioning on bucket
+aws s3api put-bucket-versioning \
+ --bucket YOUR_BUCKET_NAME \
+ --versioning-configuration Status=Enabled \
+ --endpoint-url https://gateway.storjshare.io
+```
+
+### Using Python (boto3)
+
+```python
+import boto3
+
+# Create S3 client
+s3 = boto3.client(
+ 's3',
+ aws_access_key_id='YOUR_ACCESS_KEY',
+ aws_secret_access_key='YOUR_SECRET_KEY',
+ endpoint_url='https://gateway.storjshare.io'
+)
+
+# Enable versioning
+s3.put_bucket_versioning(
+ Bucket='YOUR_BUCKET_NAME',
+ VersioningConfiguration={
+ 'Status': 'Enabled'
+ }
+)
+```
+
+## Verify versioning is enabled
+
+Check the versioning status of your bucket:
+
+```bash
+# Using AWS CLI
+aws s3api get-bucket-versioning \
+ --bucket YOUR_BUCKET_NAME \
+ --endpoint-url https://gateway.storjshare.io
+```
+
+Expected output:
+```json
+{
+ "Status": "Enabled"
+}
+```
+
+## Upload versioned objects
+
+Once versioning is enabled, each object upload creates a new version:
+
+```bash
+# Upload the same file multiple times
+echo "Version 1" > test-file.txt
+aws s3 cp test-file.txt s3://YOUR_BUCKET_NAME/ --endpoint-url https://gateway.storjshare.io
+
+echo "Version 2" > test-file.txt
+aws s3 cp test-file.txt s3://YOUR_BUCKET_NAME/ --endpoint-url https://gateway.storjshare.io
+```
+
+List all versions:
+```bash
+aws s3api list-object-versions \
+ --bucket YOUR_BUCKET_NAME \
+ --endpoint-url https://gateway.storjshare.io
+```
+
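+To retrieve an earlier version, pass its `VersionId` from the listing above (the ID below is a placeholder):
+
+```bash
+aws s3api get-object \
+  --bucket YOUR_BUCKET_NAME \
+  --key test-file.txt \
+  --version-id "PASTE_VERSION_ID_HERE" \
+  --endpoint-url https://gateway.storjshare.io \
+  restored-test-file.txt
+```
+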
+## Suspend versioning
+
+To stop creating new versions while keeping existing ones:
+
+```bash
+aws s3api put-bucket-versioning \
+ --bucket YOUR_BUCKET_NAME \
+ --versioning-configuration Status=Suspended \
+ --endpoint-url https://gateway.storjshare.io
+```
+
+## Verification
+
+1. **Check versioning status**: Run `get-bucket-versioning` and confirm `Status: "Enabled"`
+2. **Upload test files**: Upload the same filename twice and verify multiple versions exist
+3. **List versions**: Use `list-object-versions` to see all versions of your objects
+
+## Troubleshooting
+
+**"Versioning cannot be enabled" error**: The bucket was created before versioning support. Create a new bucket to use versioning.
+
+**No versions appearing**: Ensure you're using the S3-compatible gateway endpoint (`https://gateway.storjshare.io`) in your commands.
+
+**Cost concerns**: Each object version is stored separately and incurs storage costs. Monitor your usage and implement lifecycle policies to manage older versions.
+
+## Next steps
+
+- [Configure Object Lock](docId:gjrGzPNnhpYrAGTTAUaj) for additional protection
+- Learn about [object lifecycle management](docId:your-lifecycle-doc)
+- Set up [bucket logging](docId:your-logging-doc) to track version access
\ No newline at end of file
diff --git a/app/(docs)/dcs/how-to/use-presigned-urls.md b/app/(docs)/dcs/how-to/use-presigned-urls.md
new file mode 100644
index 000000000..f6fa25852
--- /dev/null
+++ b/app/(docs)/dcs/how-to/use-presigned-urls.md
@@ -0,0 +1,168 @@
+---
+title: Use presigned URLs
+docId: use-presigned-urls1
+metadata:
+ title: How to Use Presigned URLs - Storj DCS
+ description: Create presigned URLs to allow unauthenticated access to your Storj objects for uploads and downloads
+---
+
+Create presigned URLs to enable unauthenticated users to upload or download objects without exposing your credentials.
+
+## Prerequisites
+
+- Storj DCS account with S3-compatible credentials
+- Python 3.x installed
+- boto3 library (`pip install boto3`)
+- Target bucket already created
+
+## Create presigned URL for uploads
+
+### Set up the upload script
+
+Create `create_upload_url.py`:
+
+```python
+import boto3
+
+# Configure your credentials
+ACCESS_KEY = "your_access_key_here"
+SECRET_KEY = "your_secret_key_here"
+ENDPOINT_URL = "https://gateway.storjshare.io"
+BUCKET_NAME = "your-bucket-name"
+
+# Create S3 client
+session = boto3.session.Session()
+s3 = session.client(
+ service_name="s3",
+ aws_access_key_id=ACCESS_KEY,
+ aws_secret_access_key=SECRET_KEY,
+ endpoint_url=ENDPOINT_URL
+)
+
+# Generate presigned URL for upload (valid for 1 hour)
+upload_url = s3.generate_presigned_url(
+ 'put_object',
+ Params={
+ "Bucket": BUCKET_NAME,
+ "Key": "uploads/my-file.txt" # Path where file will be stored
+ },
+ ExpiresIn=3600 # URL expires in 1 hour
+)
+
+print("Upload URL:", upload_url)
+```
+
+### Run the script
+
+```bash
+python3 create_upload_url.py
+```
+
+### Use the presigned URL
+
+Upload a file using curl:
+
+```bash
+curl -X PUT \
+ --upload-file local-file.txt \
+ "YOUR_GENERATED_PRESIGNED_URL"
+```
+
+## Create presigned URL for downloads
+
+### Set up the download script
+
+Create `create_download_url.py`:
+
+```python
+import boto3
+
+# Configure your credentials
+ACCESS_KEY = "your_access_key_here"
+SECRET_KEY = "your_secret_key_here"
+ENDPOINT_URL = "https://gateway.storjshare.io"
+BUCKET_NAME = "your-bucket-name"
+
+# Create S3 client
+session = boto3.session.Session()
+s3 = session.client(
+ service_name="s3",
+ aws_access_key_id=ACCESS_KEY,
+ aws_secret_access_key=SECRET_KEY,
+ endpoint_url=ENDPOINT_URL
+)
+
+# Generate presigned URL for download (valid for 1 hour)
+download_url = s3.generate_presigned_url(
+ 'get_object',
+ Params={
+ "Bucket": BUCKET_NAME,
+ "Key": "path/to/your/file.txt" # Existing file path
+ },
+ ExpiresIn=3600 # URL expires in 1 hour
+)
+
+print("Download URL:", download_url)
+```
+
+### Use the download URL
+
+Download the file:
+
+```bash
+curl -o downloaded-file.txt "YOUR_GENERATED_PRESIGNED_URL"
+```
+
+Or share the URL directly with users to download in their browser.
+
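+As an alternative to the Python script, the AWS CLI can presign download URLs directly (assuming your credentials are configured for the Storj gateway):
+
+```bash
+# Generate a download URL valid for 1 hour
+aws s3 presign s3://your-bucket-name/path/to/your/file.txt \
+  --expires-in 3600 \
+  --endpoint-url https://gateway.storjshare.io
+```
+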
+## Customize expiration time
+
+Set different expiration periods based on your needs:
+
+```python
+# 15 minutes
+ExpiresIn=900
+
+# 24 hours
+ExpiresIn=86400
+
+# 7 days
+ExpiresIn=604800
+```
+
+## Verification
+
+1. **Generate URL**: Run your script and confirm it outputs a valid URL
+2. **Test upload**: Use curl to upload a file with the presigned upload URL
+3. **Check object**: Verify the object appears in your bucket
+4. **Test download**: Generate a download URL and verify you can retrieve the file
+5. **Test expiration**: Wait for the URL to expire and confirm it no longer works (see the check below)
+
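+A quick way to check expiration behavior (expired URLs are typically rejected with HTTP 403):
+
+```bash
+# Print only the HTTP status code returned for the presigned URL
+curl -s -o /dev/null -w "%{http_code}\n" "YOUR_GENERATED_PRESIGNED_URL"
+```
+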
+## Troubleshooting
+
+**"Access Denied" errors**:
+- Verify your S3 credentials have proper permissions
+- Check that the bucket name is correct
+- Ensure you're using the correct endpoint URL
+
+**"URL expired" errors**:
+- Generate a new presigned URL
+- Increase the `ExpiresIn` value if needed
+
+**Upload fails**:
+- Verify the object key path is valid
+- Ensure the bucket allows uploads
+- Check that the file size is within limits
+
+## Security considerations
+
+- URLs embed your access key ID and a time-limited request signature in query parameters
+- Share URLs only over secure channels (HTTPS)
+- Use appropriate expiration times (shorter is more secure)
+- Monitor bucket access logs for unauthorized usage
+
+## Next steps
+
+- Learn about [Storj Linkshare](docId:sN2GhYgGUtqBVF65GhKEa) as an alternative sharing method
+- Set up [static website hosting](docId:GkgE6Egi02wRZtyryFyPz) for public file sharing
+- Configure [bucket CORS settings](docId:configure-cors) for web applications
\ No newline at end of file
diff --git a/app/(docs)/dcs/tutorials/_meta.json b/app/(docs)/dcs/tutorials/_meta.json
new file mode 100644
index 000000000..173bb3e2d
--- /dev/null
+++ b/app/(docs)/dcs/tutorials/_meta.json
@@ -0,0 +1,13 @@
+{
+ "title": "Tutorials",
+ "nav": [
+ {
+ "title": "Your first week with Storj",
+ "id": "your-first-week-with-storj"
+ },
+ {
+ "title": "Build your first app",
+ "id": "build-your-first-app"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/app/(docs)/dcs/tutorials/build-your-first-app.md b/app/(docs)/dcs/tutorials/build-your-first-app.md
new file mode 100644
index 000000000..a52833db0
--- /dev/null
+++ b/app/(docs)/dcs/tutorials/build-your-first-app.md
@@ -0,0 +1,827 @@
+---
+title: Build your first app
+docId: build-first-app-storj
+metadata:
+ title: Build Your First App with Storj DCS
+ description: 30-minute tutorial to build a simple file sharing web application using Storj DCS
+---
+
+Build a simple file sharing web application using Storj DCS in just 30 minutes. Perfect for developers new to Storj.
+
+## What you'll build
+
+A web application that allows users to:
+- Upload files to Storj DCS
+- View uploaded files
+- Share files with generated links
+- Delete files when needed
+
+- **Time to complete**: 30 minutes
+- **Skill level**: Beginner developer
+- **Prerequisites**: Basic HTML/JavaScript knowledge, Node.js installed
+
+## Prerequisites
+
+- Node.js 16+ installed
+- Storj DCS account with project created
+- Text editor or IDE
+- Web browser
+
+## Step 1: Set up Storj credentials
+
+### Create S3-compatible access
+
+1. **Log in to Storj console**
+2. **Go to Access page**
+3. **Click "Create S3 Credentials"**
+4. **Configure access**:
+ - Name: "file-share-app"
+ - Permissions: All
+ - Buckets: All buckets
+ - Expiration: None (for tutorial)
+5. **Save Access Key and Secret Key**
+
+### Create a bucket
+
+6. **Go to Buckets page**
+7. **Click "Create Bucket"**
+8. **Name it**: "file-share-demo"
+9. **Choose your preferred region**
+10. **Create the bucket**
+
+**Expected result**: You have S3-compatible credentials and a bucket ready.
+
+## Step 2: Set up the project
+
+### Initialize Node.js project
+
+```bash
+# Create project directory
+mkdir storj-file-share
+cd storj-file-share
+
+# Initialize npm project
+npm init -y
+
+# Install dependencies
+npm install express multer aws-sdk cors dotenv
+npm install --save-dev nodemon
+```
+
+### Create project structure
+
+```bash
+# Create directories
+mkdir public uploads
+
+# Create files
+touch server.js .env
+touch public/index.html public/style.css public/app.js
+```
+
+**Expected result**: Project structure is set up with required dependencies.
+
+## Step 3: Configure environment
+
+### Set up environment variables
+
+Create `.env` file:
+```env
+STORJ_ACCESS_KEY=your_access_key_here
+STORJ_SECRET_KEY=your_secret_key_here
+STORJ_BUCKET=file-share-demo
+STORJ_ENDPOINT=https://gateway.storjshare.io
+PORT=3000
+```
+
+**Replace the credentials** with your actual Storj S3 credentials from Step 1.
+
+**Expected result**: Environment variables are configured securely.
+
+## Step 4: Build the server
+
+### Create the Express server
+
+Create `server.js`:
+```javascript
+const express = require('express');
+const multer = require('multer');
+const AWS = require('aws-sdk');
+const cors = require('cors');
+const path = require('path');
+const fs = require('fs');
+require('dotenv').config();
+
+const app = express();
+const PORT = process.env.PORT || 3000;
+
+// Configure AWS SDK for Storj
+const s3 = new AWS.S3({
+ accessKeyId: process.env.STORJ_ACCESS_KEY,
+ secretAccessKey: process.env.STORJ_SECRET_KEY,
+ endpoint: process.env.STORJ_ENDPOINT,
+ s3ForcePathStyle: true,
+ signatureVersion: 'v4',
+ region: 'us-east-1'
+});
+
+// Middleware
+app.use(cors());
+app.use(express.json());
+app.use(express.static('public'));
+app.use('/uploads', express.static('uploads'));
+
+// Configure multer for file uploads
+const upload = multer({
+ dest: 'uploads/',
+ limits: { fileSize: 10 * 1024 * 1024 } // 10MB limit
+});
+
+// Routes
+app.get('/', (req, res) => {
+ res.sendFile(path.join(__dirname, 'public', 'index.html'));
+});
+
+// Upload file to Storj
+app.post('/upload', upload.single('file'), async (req, res) => {
+ if (!req.file) {
+ return res.status(400).json({ error: 'No file uploaded' });
+ }
+
+ try {
+    const fileContent = fs.readFileSync(req.file.path);
+ const fileName = `${Date.now()}-${req.file.originalname}`;
+
+ const params = {
+ Bucket: process.env.STORJ_BUCKET,
+ Key: fileName,
+ Body: fileContent,
+ ContentType: req.file.mimetype
+ };
+
+ const result = await s3.upload(params).promise();
+
+ // Clean up local file
+    fs.unlinkSync(req.file.path);
+
+ res.json({
+ success: true,
+ fileName: fileName,
+ url: result.Location,
+ size: req.file.size
+ });
+ } catch (error) {
+ console.error('Upload error:', error);
+ res.status(500).json({ error: 'Upload failed' });
+ }
+});
+
+// List files
+app.get('/files', async (req, res) => {
+ try {
+ const params = {
+ Bucket: process.env.STORJ_BUCKET,
+ MaxKeys: 50
+ };
+
+ const data = await s3.listObjectsV2(params).promise();
+
+    const files = (data.Contents || []).map(file => ({
+ name: file.Key,
+ size: file.Size,
+ lastModified: file.LastModified
+ }));
+
+ res.json({ files });
+ } catch (error) {
+ console.error('List files error:', error);
+ res.status(500).json({ error: 'Failed to list files' });
+ }
+});
+
+// Generate shareable link
+app.post('/share/:fileName', async (req, res) => {
+ try {
+ const params = {
+ Bucket: process.env.STORJ_BUCKET,
+ Key: req.params.fileName,
+ Expires: 60 * 60 * 24 * 7 // 1 week
+ };
+
+ const url = s3.getSignedUrl('getObject', params);
+
+ res.json({
+ shareUrl: url,
+ expires: '7 days'
+ });
+ } catch (error) {
+ console.error('Share link error:', error);
+ res.status(500).json({ error: 'Failed to generate share link' });
+ }
+});
+
+// Delete file
+app.delete('/files/:fileName', async (req, res) => {
+ try {
+ const params = {
+ Bucket: process.env.STORJ_BUCKET,
+ Key: req.params.fileName
+ };
+
+ await s3.deleteObject(params).promise();
+
+ res.json({ success: true });
+ } catch (error) {
+ console.error('Delete error:', error);
+ res.status(500).json({ error: 'Failed to delete file' });
+ }
+});
+
+app.listen(PORT, () => {
+ console.log(`Server running at http://localhost:${PORT}`);
+});
+```
+
+**Expected result**: Server is configured with all necessary API endpoints.
+
+## Step 5: Build the frontend
+
+### Create the HTML structure
+
+Create `public/index.html`:
+```html
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Storj File Share</title>
+    <link rel="stylesheet" href="style.css">
+</head>
+<body>
+    <div class="container">
+        <header>
+            <h1>🚀 Storj File Share</h1>
+            <p>Upload, share, and manage your files with decentralized storage</p>