
Commit e655745

Author: Hein (committed)
Add supabase connector

1 parent f54ef3f commit e655745

File tree

3 files changed: +274 −1 lines changed


mint.json

Lines changed: 2 additions & 1 deletion

```diff
@@ -386,7 +386,8 @@
       "group": "Code Examples",
       "pages": [
         "tutorials/code-changes/code-changes-overview",
-        "tutorials/code-changes/aws-s3-storage-adapter"
+        "tutorials/code-changes/aws-s3-storage-adapter",
+        "tutorials/code-changes/supabase-connector-performance"
       ]
     }
   ]
```

tutorials/code-changes/code-changes-overview.mdx

Lines changed: 1 addition & 0 deletions

```diff
@@ -5,4 +5,5 @@ description: "A collection of tutorials exploring approaches, optimizations, and
 
 <CardGroup>
   <Card title="Use AWS S3 for attachment storage" icon="server" href="/tutorials/code-changes/aws-s3-storage-adapter" horizontal/>
+  <Card title="Improve Supabase Connector Performance" icon="server" href="/tutorials/code-changes/supabase-connector-performance" horizontal/>
 </CardGroup>
```
tutorials/code-changes/supabase-connector-performance.mdx

Lines changed: 271 additions & 0 deletions (new file)
---
title: "Improve Supabase Connector Performance"
description: "In this tutorial we will show you how to improve the performance of the Supabase Connector for the [React Native To-Do List example app](https://github.com/powersync-ja/powersync-js/tree/main/demos/react-native-supabase-todolist)."
---

<AccordionGroup>
<Accordion title="Sequential Merge Strategy">
<Note>
Shoutout to Christoffer Årstrand for the original implementation of this optimization.
</Note>
```typescript {7-8, 11, 13-15, 17, 19-20, 24-36, 39, 43-56, 59-60, 75}
async uploadData(database: AbstractPowerSyncDatabase): Promise<void> {
  const transaction = await database.getNextCrudTransaction();
  if (!transaction) {
    return;
  }

  const MERGE_BATCH_LIMIT = 100;
  let batchedOps: CrudEntry[] = [];

  try {
    console.log(`Processing transaction with ${transaction.crud.length} operations`);

    for (let i = 0; i < transaction.crud.length; i++) {
      const cruds = transaction.crud;
      const op = cruds[i];
      const table = this.client.from(op.table);
      batchedOps.push(op);

      let result: any;
      let batched = 1;

      switch (op.op) {
        case UpdateType.PUT:
          const records = [{ ...cruds[i].opData, id: cruds[i].id }];
          while (
            i + 1 < cruds.length &&
            cruds[i + 1].op === op.op &&
            cruds[i + 1].table === op.table &&
            batched < MERGE_BATCH_LIMIT
          ) {
            i++;
            records.push({ ...cruds[i].opData, id: cruds[i].id });
            batchedOps.push(cruds[i]);
            batched++;
          }
          result = await table.upsert(records);
          break;
        case UpdateType.PATCH:
          batchedOps = [op];
          result = await table.update(op.opData).eq('id', op.id);
          break;
        case UpdateType.DELETE:
          batchedOps = [op];
          const ids = [op.id];
          while (
            i + 1 < cruds.length &&
            cruds[i + 1].op === op.op &&
            cruds[i + 1].table === op.table &&
            batched < MERGE_BATCH_LIMIT
          ) {
            i++;
            ids.push(cruds[i].id);
            batchedOps.push(cruds[i]);
            batched++;
          }
          result = await table.delete().in('id', ids);
          break;
      }
      if (batched > 1) {
        console.log(`Merged ${batched} ${op.op} operations for table ${op.table}`);
      }
    }
    await transaction.complete();
  } catch (ex: any) {
    console.debug(ex);
    if (typeof ex.code == 'string' && FATAL_RESPONSE_CODES.some((regex) => regex.test(ex.code))) {
      /**
       * Instead of blocking the queue with these errors,
       * discard the (rest of the) transaction.
       *
       * Note that these errors typically indicate a bug in the application.
       * If protecting against data loss is important, save the failing records
       * elsewhere instead of discarding, and/or notify the user.
       */
      console.error('Data upload error - discarding:', ex);
      await transaction.complete();
    } else {
      // Error may be retryable - e.g. network error or temporary server error.
      // Throwing an error here causes this call to be retried after a delay.
      throw ex;
    }
  }
}
```
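
Note that the merge loops advance the shared index `i`, so operations that have been folded into a batch are skipped by the outer `for` loop rather than being uploaded twice. The table check in the `while` conditions ensures that only consecutive operations targeting the same table are merged into a single request.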
</Accordion>
<Accordion title="Pre-sorted Batch Strategy">
```typescript {8-11, 17-20, 23, 26-29, 32-53, 56, 72}
async uploadData(database: AbstractPowerSyncDatabase): Promise<void> {
  const transaction = await database.getNextCrudTransaction();
  if (!transaction) {
    return;
  }

  try {
    // Group operations by type and table
    const putOps: { [table: string]: any[] } = {};
    const deleteOps: { [table: string]: string[] } = {};
    let patchOps: CrudEntry[] = [];

    // Organize operations
    for (const op of transaction.crud) {
      switch (op.op) {
        case UpdateType.PUT:
          if (!putOps[op.table]) {
            putOps[op.table] = [];
          }
          putOps[op.table].push({ ...op.opData, id: op.id });
          break;
        case UpdateType.PATCH:
          patchOps.push(op);
          break;
        case UpdateType.DELETE:
          if (!deleteOps[op.table]) {
            deleteOps[op.table] = [];
          }
          deleteOps[op.table].push(op.id);
          break;
      }
    }

    // Execute bulk operations
    for (const table of Object.keys(putOps)) {
      const result = await this.client.from(table).upsert(putOps[table]);
      if (result.error) {
        console.error(result.error);
        throw new Error(`Could not bulk PUT data to Supabase table ${table}: ${JSON.stringify(result)}`);
      }
    }

    for (const table of Object.keys(deleteOps)) {
      const result = await this.client.from(table).delete().in('id', deleteOps[table]);
      if (result.error) {
        console.error(result.error);
        throw new Error(`Could not bulk DELETE data from Supabase table ${table}: ${JSON.stringify(result)}`);
      }
    }

    // Execute PATCH operations individually since they can't be easily batched
    for (const op of patchOps) {
      const result = await this.client.from(op.table).update(op.opData).eq('id', op.id);
      if (result.error) {
        console.error(result.error);
        throw new Error(`Could not PATCH data in Supabase: ${JSON.stringify(result)}`);
      }
    }

    await transaction.complete();
  } catch (ex: any) {
    console.debug(ex);
    if (typeof ex.code == 'string' && FATAL_RESPONSE_CODES.some((regex) => regex.test(ex.code))) {
      /**
       * Instead of blocking the queue with these errors,
       * discard the (rest of the) transaction.
       *
       * Note that these errors typically indicate a bug in the application.
       * If protecting against data loss is important, save the failing records
       * elsewhere instead of discarding, and/or notify the user.
       */
      console.error('Data upload error - discarding transaction:', ex);
      await transaction.complete();
    } else {
      // Error may be retryable - e.g. network error or temporary server error.
      // Throwing an error here causes this call to be retried after a delay.
      throw ex;
    }
  }
}
```
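
Note that because operations are grouped before execution, all PUTs are sent first, then all DELETEs, then all PATCHes, regardless of the order in which the operations were recorded in the transaction. This is what enables the minimal request count, but it does reorder writes relative to the original CRUD queue.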
</Accordion>
</AccordionGroup>
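
Both strategies reference a `FATAL_RESPONSE_CODES` list when deciding whether to discard or retry a failed upload. For reference, a sketch of that list as it appears in the PowerSync Supabase connector demo (verify against the example app's source):

```typescript
// Postgres response codes that indicate unrecoverable errors (retrying will not help).
const FATAL_RESPONSE_CODES = [
  // Class 22: data exceptions (e.g. invalid input syntax)
  new RegExp('^22...$'),
  // Class 23: integrity constraint violations (e.g. NOT NULL, FOREIGN KEY, UNIQUE)
  new RegExp('^23...$'),
  // 42501: insufficient privilege, typically a row-level security violation
  new RegExp('^42501$')
];
```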

# Differences

<AccordionGroup>
<Accordion title="Operation grouping strategy">
### Sequential merge strategy
```typescript
const MERGE_BATCH_LIMIT = 100;
let batchedOps: CrudEntry[] = [];
```
- Processes operations sequentially
- Merges consecutive operations of the same type up to a batch limit
- More dynamic/streaming approach

### Pre-sorted batch strategy
```typescript
const putOps: { [table: string]: any[] } = {};
const deleteOps: { [table: string]: string[] } = {};
let patchOps: CrudEntry[] = [];
```
- Pre-sorts all operations by type and table
- Processes each type in bulk after grouping
</Accordion>
<Accordion title="Batching methodology">
206+
### Sequential merge strategy
207+
- Uses a sliding window approach with `MERGE_BATCH_LIMIT`
208+
- Merges consecutive operations up to the limit
209+
- More granular control over batch sizes
210+
- Better for mixed operation types
211+
212+
### Pre-sorted batch strategy
213+
- Groups ALL operations of the same type together
214+
- Executes one bulk operation per type per table
215+
- Better for large numbers of similar operations
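
To make the trade-off concrete, consider a hypothetical transaction queue of `PUT todos, DELETE todos, PUT todos, DELETE todos` (illustrative only, not from the tutorial). The sequential merge strategy finds no consecutive operations of the same type, so nothing merges and four requests are sent. The pre-sorted batch strategy groups first and sends only two requests: one upsert with two records and one delete with two ids.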
</Accordion>
</AccordionGroup>

## Key similarities and differences
<CardGroup cols={2}>
<Card title="Key Similarities">
Handling of CRUD operations (PUT, PATCH, DELETE) to sync local changes to Supabase
<br />
Transaction management with `getNextCrudTransaction()`
<br />
Similar error handling for fatal and retryable errors
<br />
Transaction completion after successful processing
</Card>
<Card title="Key Differences">
Operation grouping strategy
<br />
Batching methodology
</Card>
</CardGroup>

# Use cases

<CardGroup cols={2}>
<Card title="Sequential Merge Strategy">
You need more granular control over batch sizes

Memory might be constrained

You want more detailed operation logging

You need to handle mixed operation types more efficiently
<br />
<br />
**Best for**: Mixed operation types
<br />
**Optimizes for**: Memory efficiency
<br />
**Trade-off**: Potentially more network requests
</Card>
<Card title="Pre-sorted Batch Strategy">
You have a large number of similar operations

Memory isn't a constraint

You want to minimize the number of network requests
<br />
<br />
**Best for**: Large volumes of similar operations
<br />
**Optimizes for**: Minimal network requests
<br />
**Trade-off**: Higher memory usage
</Card>
</CardGroup>
