@planetoftheweb
Created February 3, 2026 05:27
Moltbot workspace backup/restore - critical files (2026-02-03)
#!/bin/bash
# Startup script for Moltbot in Cloudflare Sandbox
# This script:
# 1. Restores config, workspace, and skills from the R2 backup if available
# 2. Configures moltbot from environment variables
# 3. Starts the gateway
# (Periodic backup to R2 is handled by the Worker's cron trigger, not by this script.)
set -e
# Check if clawdbot gateway is already running - bail early if so
# Note: CLI is still named "clawdbot" until upstream renames it
if pgrep -f "clawdbot gateway" > /dev/null 2>&1; then
echo "Moltbot gateway is already running, exiting."
exit 0
fi
# Paths (clawdbot paths are used internally - upstream hasn't renamed yet)
CONFIG_DIR="/root/.clawdbot"
CONFIG_FILE="$CONFIG_DIR/clawdbot.json"
TEMPLATE_DIR="/root/.clawdbot-templates"
TEMPLATE_FILE="$TEMPLATE_DIR/moltbot.json.template"
BACKUP_DIR="/data/moltbot"
echo "Config directory: $CONFIG_DIR"
echo "Backup directory: $BACKUP_DIR"
# Log env vars for debugging
echo "=== Environment Variables ==="
echo "GITHUB_TOKEN: ${GITHUB_TOKEN:+[SET]}"
echo "BRAVE_API_KEY: ${BRAVE_API_KEY:+[SET]}"
echo "ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:+[SET]}"
echo "TELEGRAM_BOT_TOKEN: ${TELEGRAM_BOT_TOKEN:+[SET]}"
echo "TWITTER_AUTH_TOKEN: ${TWITTER_AUTH_TOKEN:+[SET]}"
echo "TWITTER_CT0: ${TWITTER_CT0:+[SET]}"
echo "============================="
# Create config directory
mkdir -p "$CONFIG_DIR"
# ============================================================
# RESTORE FROM R2 BACKUP
# ============================================================
# Check if R2 backup exists by looking for clawdbot.json
# The BACKUP_DIR may exist but be empty if R2 was just mounted
# Note: backup structure is $BACKUP_DIR/clawdbot/ and $BACKUP_DIR/skills/
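# Expected layout under $BACKUP_DIR (illustrative, inferred from the restore and
# sync paths used in this gist; legacy backups may instead be flat):
#   /data/moltbot/
#     .last-sync   - ISO-8601 timestamp written after each successful sync
#     clawdbot/    - backup of /root/.clawdbot/ (config)
#     skills/      - optional; restored into /root/clawd/skills/ if present
#     workspace/   - backup of /root/clawd/ (bot memory: IDENTITY.md, USER.md, memory/)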
# Wait for R2 mount to be ready (s3fs can take a moment)
echo "Waiting for R2 mount..."
for i in 1 2 3 4 5; do
  if mount | grep -q "s3fs on $BACKUP_DIR"; then
    echo "R2 mounted successfully"
    ls -la "$BACKUP_DIR/" 2>/dev/null || true
    break
  fi
  echo "R2 not mounted yet, waiting... ($i/5)"
  sleep 2
done
# Debug: show what's in backup dir
echo "=== R2 Backup Contents ==="
ls -la "$BACKUP_DIR/" 2>/dev/null || echo "Cannot list backup dir"
ls -la "$BACKUP_DIR/clawdbot/" 2>/dev/null || echo "Cannot list clawdbot dir"
echo "=========================="
# Helper function to check if R2 backup is newer than local
should_restore_from_r2() {
  local R2_SYNC_FILE="$BACKUP_DIR/.last-sync"
  local LOCAL_SYNC_FILE="$CONFIG_DIR/.last-sync"
  # If no R2 sync timestamp, don't restore
  if [ ! -f "$R2_SYNC_FILE" ]; then
    echo "No R2 sync timestamp found, skipping restore"
    return 1
  fi
  # If no local sync timestamp, restore from R2
  if [ ! -f "$LOCAL_SYNC_FILE" ]; then
    echo "No local sync timestamp, will restore from R2"
    return 0
  fi
  # Compare timestamps
  R2_TIME=$(cat "$R2_SYNC_FILE" 2>/dev/null)
  LOCAL_TIME=$(cat "$LOCAL_SYNC_FILE" 2>/dev/null)
  echo "R2 last sync: $R2_TIME"
  echo "Local last sync: $LOCAL_TIME"
  # Convert to epoch seconds for comparison
  R2_EPOCH=$(date -d "$R2_TIME" +%s 2>/dev/null || echo "0")
  LOCAL_EPOCH=$(date -d "$LOCAL_TIME" +%s 2>/dev/null || echo "0")
  if [ "$R2_EPOCH" -gt "$LOCAL_EPOCH" ]; then
    echo "R2 backup is newer, will restore"
    return 0
  else
    echo "Local data is newer or same, skipping restore"
    return 1
  fi
}
if [ -f "$BACKUP_DIR/clawdbot/clawdbot.json" ]; then
  if should_restore_from_r2; then
    echo "Restoring from R2 backup at $BACKUP_DIR/clawdbot..."
    cp -a "$BACKUP_DIR/clawdbot/." "$CONFIG_DIR/"
    # Copy the sync timestamp to local so we know what version we have
    cp -f "$BACKUP_DIR/.last-sync" "$CONFIG_DIR/.last-sync" 2>/dev/null || true
    echo "Restored config from R2 backup"
  fi
elif [ -f "$BACKUP_DIR/clawdbot.json" ]; then
  # Legacy backup format (flat structure)
  if should_restore_from_r2; then
    echo "Restoring from legacy R2 backup at $BACKUP_DIR..."
    cp -a "$BACKUP_DIR/." "$CONFIG_DIR/"
    cp -f "$BACKUP_DIR/.last-sync" "$CONFIG_DIR/.last-sync" 2>/dev/null || true
    echo "Restored config from legacy R2 backup"
  fi
elif [ -d "$BACKUP_DIR" ]; then
  echo "R2 mounted at $BACKUP_DIR but no backup data found yet"
else
  echo "R2 not mounted, starting fresh"
fi
# Restore skills from R2 backup if available (only if R2 is newer)
SKILLS_DIR="/root/clawd/skills"
if [ -d "$BACKUP_DIR/skills" ] && [ "$(ls -A $BACKUP_DIR/skills 2>/dev/null)" ]; then
if should_restore_from_r2; then
echo "Restoring skills from $BACKUP_DIR/skills..."
mkdir -p "$SKILLS_DIR"
cp -a "$BACKUP_DIR/skills/." "$SKILLS_DIR/"
echo "Restored skills from R2 backup"
fi
fi
# ============================================================
# RESTORE WORKSPACE FROM R2 (CRITICAL: Bot memory lives here!)
# ============================================================
# The workspace (/root/clawd/) contains:
# - IDENTITY.md, USER.md (bot personality)
# - memory/ directory (conversation history)
# - Any files the bot creates during conversations
WORKSPACE_DIR="/root/clawd"
if [ -d "$BACKUP_DIR/workspace" ] && [ "$(ls -A $BACKUP_DIR/workspace 2>/dev/null)" ]; then
echo "=== RESTORING WORKSPACE (BOT MEMORY) ==="
echo "Found workspace backup at $BACKUP_DIR/workspace"
ls -la "$BACKUP_DIR/workspace/" 2>/dev/null || true
mkdir -p "$WORKSPACE_DIR"
# Use rsync to merge, don't delete local files that might be newer
rsync -a --no-times "$BACKUP_DIR/workspace/" "$WORKSPACE_DIR/"
echo "Restored workspace from R2 backup"
echo "Local workspace now contains:"
ls -la "$WORKSPACE_DIR/" 2>/dev/null || true
echo "========================================"
else
echo "No workspace backup found in R2 - bot will start with fresh memory"
fi
# If config file still doesn't exist, create from template
if [ ! -f "$CONFIG_FILE" ]; then
echo "No existing config found, initializing from template..."
if [ -f "$TEMPLATE_FILE" ]; then
cp "$TEMPLATE_FILE" "$CONFIG_FILE"
else
# Create minimal config if template doesn't exist
cat > "$CONFIG_FILE" << 'EOFCONFIG'
{
"agents": {
"defaults": {
"workspace": "/root/clawd"
}
},
"gateway": {
"port": 18789,
"mode": "local"
}
}
EOFCONFIG
fi
else
echo "Using existing config"
fi
# ============================================================
# UPDATE CONFIG FROM ENVIRONMENT VARIABLES
# ============================================================
node << EOFNODE
const fs = require('fs');
const configPath = '/root/.clawdbot/clawdbot.json';
console.log('Updating config at:', configPath);
let config = {};
try {
  config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
} catch (e) {
  console.log('Starting with empty config');
}
// Ensure nested objects exist
config.agents = config.agents || {};
config.agents.defaults = config.agents.defaults || {};
config.agents.defaults.model = config.agents.defaults.model || {};
config.gateway = config.gateway || {};
config.channels = config.channels || {};
// Clean up any broken anthropic provider config from previous runs
// (older versions didn't include required 'name' field)
if (config.models?.providers?.anthropic?.models) {
  const hasInvalidModels = config.models.providers.anthropic.models.some(m => !m.name);
  if (hasInvalidModels) {
    console.log('Removing broken anthropic provider config (missing model names)');
    delete config.models.providers.anthropic;
  }
}
// Clean up invalid telegram 'dm' key from previous runs
// (telegram uses 'dmPolicy' at top level, not nested 'dm' object)
if (config.channels?.telegram?.dm !== undefined) {
  console.log('Removing invalid telegram.dm key (use dmPolicy instead)');
  delete config.channels.telegram.dm;
}
// Clean up broken provider configs from R2 backup (invalid api types caused crashes)
if (config.models?.providers?.openrouter) {
  console.log('Removing broken openrouter provider from R2 backup');
  delete config.models.providers.openrouter;
}
if (config.models?.providers?.google) {
  console.log('Removing broken google provider from R2 backup');
  delete config.models.providers.google;
}
if (config.models?.providers?.openai?.api === 'openai-chat') {
  console.log('Removing openai provider with invalid api type');
  delete config.models.providers.openai;
}
// Clean up model aliases for removed providers
if (config.agents?.defaults?.models) {
  Object.keys(config.agents.defaults.models).forEach(k => {
    if (k.startsWith('openrouter/') || k.startsWith('google/')) {
      console.log('Removing model alias:', k);
      delete config.agents.defaults.models[k];
    }
  });
}
// Gateway configuration
config.gateway.port = 18789;
config.gateway.mode = 'local';
config.gateway.trustedProxies = ['10.1.0.0'];
// Set gateway token if provided
if (process.env.CLAWDBOT_GATEWAY_TOKEN) {
  config.gateway.auth = config.gateway.auth || {};
  config.gateway.auth.token = process.env.CLAWDBOT_GATEWAY_TOKEN;
}
// Allow insecure auth for dev mode
if (process.env.CLAWDBOT_DEV_MODE === 'true') {
  config.gateway.controlUi = config.gateway.controlUi || {};
  config.gateway.controlUi.allowInsecureAuth = true;
}
// Telegram configuration
if (process.env.TELEGRAM_BOT_TOKEN) {
  config.channels.telegram = config.channels.telegram || {};
  config.channels.telegram.botToken = process.env.TELEGRAM_BOT_TOKEN;
  config.channels.telegram.enabled = true;
  config.channels.telegram.dmPolicy = process.env.TELEGRAM_DM_POLICY || 'pairing';
}
// Discord configuration
if (process.env.DISCORD_BOT_TOKEN) {
  config.channels.discord = config.channels.discord || {};
  config.channels.discord.token = process.env.DISCORD_BOT_TOKEN;
  config.channels.discord.enabled = true;
  config.channels.discord.dm = config.channels.discord.dm || {};
  config.channels.discord.dm.policy = process.env.DISCORD_DM_POLICY || 'pairing';
}
// Slack configuration
if (process.env.SLACK_BOT_TOKEN && process.env.SLACK_APP_TOKEN) {
  config.channels.slack = config.channels.slack || {};
  config.channels.slack.botToken = process.env.SLACK_BOT_TOKEN;
  config.channels.slack.appToken = process.env.SLACK_APP_TOKEN;
  config.channels.slack.enabled = true;
}
// Base URL override (e.g., for Cloudflare AI Gateway)
// Usage: Set AI_GATEWAY_BASE_URL or ANTHROPIC_BASE_URL to your endpoint like:
// https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/anthropic
// https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/openai
const baseUrl = (process.env.AI_GATEWAY_BASE_URL || process.env.ANTHROPIC_BASE_URL || '').replace(/\/+$/, '');
const isOpenAI = baseUrl.endsWith('/openai');
if (isOpenAI) {
  // Create custom openai provider config with baseUrl override
  // Omit apiKey so moltbot falls back to OPENAI_API_KEY env var
  console.log('Configuring OpenAI provider with base URL:', baseUrl);
  config.models = config.models || {};
  config.models.providers = config.models.providers || {};
  config.models.providers.openai = {
    baseUrl: baseUrl,
    api: 'openai-responses',
    models: [
      { id: 'gpt-5.2', name: 'GPT-5.2', contextWindow: 200000 },
      { id: 'gpt-5', name: 'GPT-5', contextWindow: 200000 },
      { id: 'gpt-4.5-preview', name: 'GPT-4.5 Preview', contextWindow: 128000 },
    ]
  };
  // Add models to the allowlist so they appear in /models
  config.agents.defaults.models = config.agents.defaults.models || {};
  config.agents.defaults.models['openai/gpt-5.2'] = { alias: 'GPT-5.2' };
  config.agents.defaults.models['openai/gpt-5'] = { alias: 'GPT-5' };
  config.agents.defaults.models['openai/gpt-4.5-preview'] = { alias: 'GPT-4.5' };
  config.agents.defaults.model.primary = 'openai/gpt-5.2';
} else if (baseUrl) {
  console.log('Configuring Anthropic provider with base URL:', baseUrl);
  config.models = config.models || {};
  config.models.providers = config.models.providers || {};
  const providerConfig = {
    baseUrl: baseUrl,
    api: 'anthropic-messages',
    models: [
      { id: 'claude-opus-4-5-20251101', name: 'Claude Opus 4.5', contextWindow: 200000 },
      { id: 'claude-sonnet-4-5-20250929', name: 'Claude Sonnet 4.5', contextWindow: 200000 },
      { id: 'claude-haiku-4-5-20251001', name: 'Claude Haiku 4.5', contextWindow: 200000 },
    ]
  };
  // Include API key in provider config if set (required when using custom baseUrl)
  if (process.env.ANTHROPIC_API_KEY) {
    providerConfig.apiKey = process.env.ANTHROPIC_API_KEY;
  }
  config.models.providers.anthropic = providerConfig;
  // Add models to the allowlist so they appear in /models
  config.agents.defaults.models = config.agents.defaults.models || {};
  config.agents.defaults.models['anthropic/claude-opus-4-5-20251101'] = { alias: 'Opus 4.5' };
  config.agents.defaults.models['anthropic/claude-sonnet-4-5-20250929'] = { alias: 'Sonnet 4.5' };
  config.agents.defaults.models['anthropic/claude-haiku-4-5-20251001'] = { alias: 'Haiku 4.5' };
  config.agents.defaults.model.primary = 'anthropic/claude-opus-4-5-20251101';
} else {
  // Default to Anthropic without custom base URL (uses built-in pi-ai catalog)
  config.agents.defaults.model.primary = 'anthropic/claude-opus-4-5';
}
// Write updated config
fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
console.log('Configuration updated successfully');
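// Note: the next line prints the full merged config, including any tokens/API keys set above, to the container log.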
console.log('Config:', JSON.stringify(config, null, 2));
EOFNODE
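# After the node step above, clawdbot.json ends up shaped roughly like this
# (abridged and illustrative; which keys appear depends on the env vars set):
#   {
#     "agents":   { "defaults": { "model": { "primary": "anthropic/claude-opus-4-5-20251101" }, "models": { ... } } },
#     "gateway":  { "port": 18789, "mode": "local", "trustedProxies": ["10.1.0.0"], "auth": { "token": "..." } },
#     "channels": { "telegram": { "enabled": true, "botToken": "...", "dmPolicy": "pairing" } },
#     "models":   { "providers": { "anthropic": { "baseUrl": "...", "api": "anthropic-messages", "models": [ ... ] } } }
#   }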
# ============================================================
# START GATEWAY
# ============================================================
# Note: R2 backup sync is handled by the Worker's cron trigger
echo "Starting Moltbot Gateway..."
echo "Gateway will be available on port 18789"
# Clean up stale lock files
rm -f /tmp/clawdbot-gateway.lock 2>/dev/null || true
rm -f "$CONFIG_DIR/gateway.lock" 2>/dev/null || true
BIND_MODE="lan"
echo "Dev mode: ${CLAWDBOT_DEV_MODE:-false}, Bind mode: $BIND_MODE"
if [ -n "$CLAWDBOT_GATEWAY_TOKEN" ]; then
echo "Starting gateway with token auth..."
exec clawdbot gateway --port 18789 --verbose --allow-unconfigured --bind "$BIND_MODE" --token "$CLAWDBOT_GATEWAY_TOKEN"
else
echo "Starting gateway with device pairing (no token)..."
exec clawdbot gateway --port 18789 --verbose --allow-unconfigured --bind "$BIND_MODE"
fi
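// Unit tests for syncToR2 (vitest). Run with "npx vitest run", assuming vitest is already configured in the Worker project.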
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { syncToR2 } from './sync';
import {
  createMockEnv,
  createMockEnvWithR2,
  createMockProcess,
  createMockSandbox,
  suppressConsole
} from '../test-utils';
describe('syncToR2', () => {
  beforeEach(() => {
    suppressConsole();
  });
  describe('configuration checks', () => {
    it('returns error when R2 is not configured', async () => {
      const { sandbox } = createMockSandbox();
      const env = createMockEnv();
      const result = await syncToR2(sandbox, env);
      expect(result.success).toBe(false);
      expect(result.error).toBe('R2 storage is not configured');
    });
    it('returns error when mount fails', async () => {
      const { sandbox, startProcessMock, mountBucketMock } = createMockSandbox();
      startProcessMock.mockResolvedValue(createMockProcess(''));
      mountBucketMock.mockRejectedValue(new Error('Mount failed'));
      const env = createMockEnvWithR2();
      const result = await syncToR2(sandbox, env);
      expect(result.success).toBe(false);
      expect(result.error).toBe('Failed to mount R2 storage');
    });
  });
  describe('sanity checks', () => {
    it('returns error when source is missing clawdbot.json', async () => {
      const { sandbox, startProcessMock } = createMockSandbox();
      startProcessMock
        .mockResolvedValueOnce(createMockProcess('s3fs on /data/moltbot type fuse.s3fs\n'))
        .mockResolvedValueOnce(createMockProcess('')); // No "ok" output
      const env = createMockEnvWithR2();
      const result = await syncToR2(sandbox, env);
      // Error message still references clawdbot.json since that's the actual file name
      expect(result.success).toBe(false);
      expect(result.error).toBe('Sync aborted: source missing clawdbot.json');
      expect(result.details).toContain('missing critical files');
    });
  });
  describe('sync execution', () => {
    it('returns success when sync completes', async () => {
      const { sandbox, startProcessMock } = createMockSandbox();
      const timestamp = '2026-01-27T12:00:00+00:00';
      // Calls: mount check, sanity check, rsync, cat timestamp
      startProcessMock
        .mockResolvedValueOnce(createMockProcess('s3fs on /data/moltbot type fuse.s3fs\n'))
        .mockResolvedValueOnce(createMockProcess('ok'))
        .mockResolvedValueOnce(createMockProcess(''))
        .mockResolvedValueOnce(createMockProcess(timestamp));
      const env = createMockEnvWithR2();
      const result = await syncToR2(sandbox, env);
      expect(result.success).toBe(true);
      expect(result.lastSync).toBe(timestamp);
    });
    it('returns error when rsync fails (no timestamp created)', async () => {
      const { sandbox, startProcessMock } = createMockSandbox();
      // Calls: mount check, sanity check, rsync (fails), cat timestamp (empty)
      startProcessMock
        .mockResolvedValueOnce(createMockProcess('s3fs on /data/moltbot type fuse.s3fs\n'))
        .mockResolvedValueOnce(createMockProcess('ok'))
        .mockResolvedValueOnce(createMockProcess('', { exitCode: 1 }))
        .mockResolvedValueOnce(createMockProcess(''));
      const env = createMockEnvWithR2();
      const result = await syncToR2(sandbox, env);
      expect(result.success).toBe(false);
      expect(result.error).toBe('Sync failed');
    });
    it('verifies rsync command is called with correct flags', async () => {
      const { sandbox, startProcessMock } = createMockSandbox();
      const timestamp = '2026-01-27T12:00:00+00:00';
      startProcessMock
        .mockResolvedValueOnce(createMockProcess('s3fs on /data/moltbot type fuse.s3fs\n'))
        .mockResolvedValueOnce(createMockProcess('ok'))
        .mockResolvedValueOnce(createMockProcess(''))
        .mockResolvedValueOnce(createMockProcess(timestamp));
      const env = createMockEnvWithR2();
      await syncToR2(sandbox, env);
      // Third call should be rsync (paths still use clawdbot internally)
      const rsyncCall = startProcessMock.mock.calls[2][0];
      expect(rsyncCall).toContain('rsync');
      expect(rsyncCall).toContain('--no-times');
      expect(rsyncCall).toContain('--delete');
      expect(rsyncCall).toContain('/root/.clawdbot/');
      expect(rsyncCall).toContain('/data/moltbot/');
    });
    // CRITICAL: This test ensures we never accidentally remove workspace backup
    it('CRITICAL: verifies workspace (bot memory) is included in backup', async () => {
      const { sandbox, startProcessMock } = createMockSandbox();
      const timestamp = '2026-01-27T12:00:00+00:00';
      startProcessMock
        .mockResolvedValueOnce(createMockProcess('s3fs on /data/moltbot type fuse.s3fs\n'))
        .mockResolvedValueOnce(createMockProcess('ok'))
        .mockResolvedValueOnce(createMockProcess(''))
        .mockResolvedValueOnce(createMockProcess(timestamp));
      const env = createMockEnvWithR2();
      await syncToR2(sandbox, env);
      const rsyncCall = startProcessMock.mock.calls[2][0];
      // The workspace (/root/clawd/) MUST be backed up to R2
      // This contains IDENTITY.md, USER.md, memory/, and all bot context
      // DO NOT REMOVE THIS - it's the bot's memory!
      expect(rsyncCall).toContain('/root/clawd/');
      expect(rsyncCall).toContain('/data/moltbot/workspace/');
    });
  });
});
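// sync.ts: the syncToR2 implementation that the tests above import from './sync'.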
import type { Sandbox } from '@cloudflare/sandbox';
import type { MoltbotEnv } from '../types';
import { R2_MOUNT_PATH } from '../config';
import { mountR2Storage } from './r2';
import { waitForProcess } from './utils';
export interface SyncResult {
  success: boolean;
  lastSync?: string;
  error?: string;
  details?: string;
}
/**
 * Sync moltbot config, workspace, and skills from the container to R2 for persistence.
 *
 * This function:
 * 1. Mounts R2 if not already mounted
 * 2. Verifies the source has critical files (prevents overwriting a good backup with empty data)
 * 3. Runs rsync to copy config, workspace, and skills to R2
 * 4. Writes a timestamp file for tracking
 *
 * CRITICAL: The workspace directory (/root/clawd/) contains the bot's memory!
 * This includes IDENTITY.md, USER.md, memory/, and all conversation context.
 * Without backing it up, the bot loses all context on container restart.
 * DO NOT REMOVE the workspace backup!
 *
 * @param sandbox - The sandbox instance
 * @param env - Worker environment bindings
 * @returns SyncResult with success status and optional error details
 */
export async function syncToR2(sandbox: Sandbox, env: MoltbotEnv): Promise<SyncResult> {
  // Check if R2 is configured
  if (!env.R2_ACCESS_KEY_ID || !env.R2_SECRET_ACCESS_KEY || !env.CF_ACCOUNT_ID) {
    return { success: false, error: 'R2 storage is not configured' };
  }
  // Mount R2 if not already mounted
  const mounted = await mountR2Storage(sandbox, env);
  if (!mounted) {
    return { success: false, error: 'Failed to mount R2 storage' };
  }
  // Sanity check: verify the source has critical files before syncing.
  // This prevents accidentally overwriting a good backup with empty/corrupted data.
  try {
    const checkProc = await sandbox.startProcess('test -f /root/.clawdbot/clawdbot.json && echo "ok"');
    await waitForProcess(checkProc, 5000);
    const checkLogs = await checkProc.getLogs();
    if (!checkLogs.stdout?.includes('ok')) {
      return {
        success: false,
        error: 'Sync aborted: source missing clawdbot.json',
        details: 'The local config directory is missing critical files. This could indicate corruption or an incomplete setup.',
      };
    }
  } catch (err) {
    return {
      success: false,
      error: 'Failed to verify source files',
      details: err instanceof Error ? err.message : 'Unknown error',
    };
  }
  // Run rsync to back up config, workspace, and skills to R2.
  // Note: use --no-times because s3fs doesn't support setting timestamps.
  // CRITICAL: The workspace (/root/clawd/) contains the bot's memory!
  // This includes IDENTITY.md, USER.md, memory/, and all conversation context.
  // DO NOT REMOVE the workspace backup - it's the bot's persistent memory!
  const syncCmd = `rsync -r --no-times --delete --exclude='*.lock' --exclude='*.log' --exclude='*.tmp' --exclude='node_modules' /root/.clawdbot/ ${R2_MOUNT_PATH}/clawdbot/ && rsync -r --no-times --delete --exclude='*.lock' --exclude='*.log' --exclude='*.tmp' --exclude='node_modules' --exclude='.git' /root/clawd/ ${R2_MOUNT_PATH}/workspace/ && date -Iseconds > ${R2_MOUNT_PATH}/.last-sync`;
  try {
    const proc = await sandbox.startProcess(syncCmd);
    await waitForProcess(proc, 30000); // 30 second timeout for the sync
    // Check for success by reading the timestamp file
    // (process status may not update reliably in the sandbox API)
    const timestampProc = await sandbox.startProcess(`cat ${R2_MOUNT_PATH}/.last-sync`);
    await waitForProcess(timestampProc, 5000);
    const timestampLogs = await timestampProc.getLogs();
    const lastSync = timestampLogs.stdout?.trim();
    if (lastSync && lastSync.match(/^\d{4}-\d{2}-\d{2}/)) {
      return { success: true, lastSync };
    } else {
      const logs = await proc.getLogs();
      return {
        success: false,
        error: 'Sync failed',
        details: logs.stderr || logs.stdout || 'No timestamp file created',
      };
    }
  } catch (err) {
    return {
      success: false,
      error: 'Sync error',
      details: err instanceof Error ? err.message : 'Unknown error',
    };
  }
}
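For reference, here is a minimal sketch of how syncToR2 might be wired to the Worker cron trigger mentioned in the startup script. This is not part of the gist: the Sandbox binding name (env.Sandbox), the sandbox id ('moltbot'), and the import paths are assumptions to adjust to the actual Worker setup.

// worker-cron.ts (illustrative only; assumes a Sandbox Durable Object binding
// named `Sandbox` on MoltbotEnv and the getSandbox() helper from @cloudflare/sandbox)
import { getSandbox } from '@cloudflare/sandbox';
import type { MoltbotEnv } from './types';
import { syncToR2 } from './lib/sync';

export default {
  async scheduled(_controller: ScheduledController, env: MoltbotEnv, ctx: ExecutionContext) {
    // Run the backup in the background so the cron invocation returns promptly.
    ctx.waitUntil(
      (async () => {
        const sandbox = getSandbox(env.Sandbox, 'moltbot'); // sandbox id is an assumption
        const result = await syncToR2(sandbox, env);
        if (result.success) {
          console.log('R2 backup completed, last sync:', result.lastSync);
        } else {
          console.error('R2 backup failed:', result.error, result.details ?? '');
        }
      })()
    );
  },
};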