@validatorru
Created December 20, 2025 21:12
#!/usr/bin/env python3
"""
Nuxt SSR Title Tester - Tests page titles after SSR hydration
"""
import asyncio
import json
import csv
import sys
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
import argparse
from enum import Enum
from playwright.async_api import async_playwright, Browser, Page


class TitleStatus(Enum):
    """Status of title verification"""
    SSR_CORRECT = "SSR_CORRECT"            # Initial title matches expected
    HYDRATED_CORRECT = "HYDRATED_CORRECT"  # Title correct after hydration
    SSR_WRONG = "SSR_WRONG"                # Initial title wrong
    HYDRATED_WRONG = "HYDRATED_WRONG"      # Title wrong after hydration
    TIMEOUT = "TIMEOUT"                    # Page timeout
    ERROR = "ERROR"                        # Other errors

class PageTester:
    def __init__(self, headless: bool = True, slow_mo: int = 0):
        self.headless = headless
        self.slow_mo = slow_mo
        self.playwright = None
        self.browser: Optional[Browser] = None
        self.results = []

    async def setup(self):
        """Initialize browser"""
        self.playwright = await async_playwright().start()
        self.browser = await self.playwright.chromium.launch(
            headless=self.headless,
            slow_mo=self.slow_mo
        )

    async def cleanup(self):
        """Clean up resources"""
        if self.browser:
            await self.browser.close()
        if self.playwright:
            await self.playwright.stop()

    async def test_page(
        self,
        url: str,
        expected_title: str,
        wait_time: int = 10,
        timeout: int = 30000
    ) -> Dict:
        """Test a single page"""
        result = {
            "url": url,
            "expected_title": expected_title,
            "initial_title": "",
            "final_title": "",
            "status": "",
            "error": "",
            "timestamp": datetime.now().isoformat(),
            "load_time_ms": 0,
            "has_flash": False
        }

        if not self.browser:
            result["status"] = TitleStatus.ERROR.value
            result["error"] = "Browser not initialized"
            return result

        context = await self.browser.new_context(
            viewport={"width": 1920, "height": 1080},
            user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
        )
        page = await context.new_page()

        try:
            # Capture console messages for debugging
            # page.on("console", lambda msg: print(f"[Console] {msg.text}"))

            # Set timeout for navigation
            page.set_default_timeout(timeout)

            # Record start time
            start_time = datetime.now()

            # Navigate to page and get initial title (SSR title)
            response = await page.goto(url, wait_until="domcontentloaded")

            # Get initial title immediately after DOMContentLoaded
            initial_title = await page.title()
            result["initial_title"] = initial_title

            # Check if initial title matches expected
            initial_match = initial_title == expected_title

            # Wait for hydration (Vue/Nuxt to mount)
            await asyncio.sleep(wait_time)

            # Get final title after hydration
            final_title = await page.title()
            result["final_title"] = final_title

            # Calculate load time
            end_time = datetime.now()
            result["load_time_ms"] = (end_time - start_time).total_seconds() * 1000

            # Determine status
            final_match = final_title == expected_title
            result["has_flash"] = initial_title != final_title

            if initial_match and final_match:
                result["status"] = TitleStatus.SSR_CORRECT.value
            elif not initial_match and final_match:
                result["status"] = TitleStatus.HYDRATED_CORRECT.value
            elif initial_match and not final_match:
                result["status"] = TitleStatus.HYDRATED_WRONG.value
            else:
                result["status"] = TitleStatus.SSR_WRONG.value

            # Take screenshot for visual verification
            screenshot_dir = Path("screenshots")
            screenshot_dir.mkdir(exist_ok=True)
            safe_url = url.replace("https://", "").replace("http://", "").replace("/", "_")
            screenshot_path = screenshot_dir / f"{safe_url}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
            await page.screenshot(path=str(screenshot_path))
            result["screenshot"] = str(screenshot_path)

        except Exception as e:
            result["status"] = TitleStatus.ERROR.value
            result["error"] = str(e)
        finally:
            await page.close()
            await context.close()

        return result

    async def test_multiple_pages(
        self,
        urls: List[str],
        expected_titles: List[str],
        wait_time: int = 10,
        concurrent: int = 3
    ) -> List[Dict]:
        """Test multiple pages with concurrency"""
        tasks = []
        semaphore = asyncio.Semaphore(concurrent)

        async def test_with_semaphore(url, title):
            async with semaphore:
                return await self.test_page(url, title, wait_time)

        for url, title in zip(urls, expected_titles):
            task = asyncio.create_task(test_with_semaphore(url, title))
            tasks.append(task)

        self.results = await asyncio.gather(*tasks, return_exceptions=True)

        # Handle exceptions
        for i, result in enumerate(self.results):
            if isinstance(result, Exception):
                self.results[i] = {
                    "url": urls[i],
                    "expected_title": expected_titles[i],
                    "status": TitleStatus.ERROR.value,
                    "error": str(result),
                    "timestamp": datetime.now().isoformat()
                }

        return self.results

    def print_results(self, verbose: bool = False):
        """Print results in a readable format"""
        print("\n" + "=" * 80)
        print("NUXT SSR TITLE TEST RESULTS")
        print("=" * 80)

        status_counts = {}
        for result in self.results:
            status = result.get("status", "UNKNOWN")
            status_counts[status] = status_counts.get(status, 0) + 1

        # Print summary
        print(f"\nSUMMARY: {len(self.results)} pages tested")
        for status, count in status_counts.items():
            print(f"  {status}: {count}")

        # Print detailed results
        print("\nDETAILED RESULTS:")
        for i, result in enumerate(self.results):
            print(f"\n  [{i+1}] {result['url']}")
            print(f"    Expected: {result.get('expected_title', 'N/A')}")
            print(f"    Initial (SSR): {result.get('initial_title', 'N/A')}")
            print(f"    Final (Hydrated): {result.get('final_title', 'N/A')}")
            print(f"    Status: {result.get('status', 'N/A')}")
            if result.get('has_flash'):
                print("    Title flash detected!")
            if result.get('error'):
                print(f"    Error: {result.get('error')}")
            if verbose:
                print(f"    Load time: {result.get('load_time_ms', 0):.0f}ms")
                if result.get('screenshot'):
                    print(f"    Screenshot: {result.get('screenshot')}")

    def save_results(self, format: str = "json"):
        """Save results to file"""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        if format == "json":
            filename = f"title_test_results_{timestamp}.json"
            with open(filename, "w") as f:
                json.dump(self.results, f, indent=2)
            print(f"\nResults saved to {filename}")
        elif format == "csv":
            filename = f"title_test_results_{timestamp}.csv"
            with open(filename, "w", newline="") as f:
                writer = csv.DictWriter(
                    f,
                    fieldnames=[
                        "timestamp", "url", "expected_title", "initial_title",
                        "final_title", "status", "has_flash", "load_time_ms", "error"
                    ],
                    extrasaction="ignore"  # results may carry extra keys (e.g. "screenshot")
                )
                writer.writeheader()
                writer.writerows(self.results)
            print(f"\nResults saved to {filename}")


def load_urls_from_file(filename: str) -> tuple[List[str], List[str]]:
    """Load URLs and titles from various file formats"""
    filepath = Path(filename)
    if not filepath.exists():
        raise FileNotFoundError(f"File not found: {filename}")

    if filepath.suffix == ".json":
        with open(filename) as f:
            data = json.load(f)
        if isinstance(data, list):
            urls = [item["url"] for item in data]
            titles = [item.get("title", "") for item in data]
        else:
            urls = list(data.keys())
            titles = list(data.values())
    elif filepath.suffix == ".csv":
        with open(filename) as f:
            reader = csv.DictReader(f)
            data = list(reader)
        urls = [row["url"] for row in data]
        titles = [row.get("title", "") for row in data]
    elif filepath.suffix in [".txt", ""]:
        with open(filename) as f:
            lines = [line.strip() for line in f if line.strip()]
        urls = []
        titles = []
        for line in lines:
            if "," in line:
                url, title = line.split(",", 1)
                urls.append(url.strip())
                titles.append(title.strip())
            else:
                urls.append(line)
                titles.append("")  # Empty title
    else:
        raise ValueError(f"Unsupported file format: {filepath.suffix}")

    return urls, titles
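
# Illustrative input files for load_urls_from_file() (hypothetical filenames and
# titles, shown only to document the accepted formats):
#
#   pages.json (list form):     [{"url": "https://example.com/", "title": "Example Domain"}]
#   pages.json (mapping form):  {"https://example.com/": "Example Domain"}
#
#   pages.csv:
#     url,title
#     https://example.com/,Example Domain
#
#   pages.txt (one "url,title" pair or bare URL per line):
#     https://example.com/,Example Domain
#     https://example.com/about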


async def main():
    parser = argparse.ArgumentParser(description="Test Nuxt SSR page titles")
    parser.add_argument("--urls", type=str, help="Comma-separated list of URLs")
    parser.add_argument("--titles", type=str, help="Comma-separated list of expected titles")
    parser.add_argument("--file", type=str, help="File containing URLs and titles (JSON, CSV, TXT)")
    parser.add_argument("--wait", type=int, default=10, help="Wait time after load in seconds")
    parser.add_argument("--concurrent", type=int, default=3, help="Number of concurrent tests")
    parser.add_argument("--visible", action="store_true", help="Run browser in visible mode")
    parser.add_argument("--verbose", action="store_true", help="Verbose output")
    parser.add_argument("--output", choices=["json", "csv"], default="json", help="Output format")
    parser.add_argument("--save", action="store_true", help="Save results to file")
    args = parser.parse_args()

    # Load URLs and titles
    urls = []
    titles = []
    if args.file:
        urls, titles = load_urls_from_file(args.file)
    elif args.urls:
        urls = [url.strip() for url in args.urls.split(",")]
        if args.titles:
            titles = [title.strip() for title in args.titles.split(",")]
        else:
            titles = [""] * len(urls)
    else:
        print("Error: Provide either --file or --urls argument")
        sys.exit(1)

    if len(urls) != len(titles):
        print("Warning: Number of URLs doesn't match number of titles")
        # Extend titles if needed
        if len(titles) < len(urls):
            titles.extend([""] * (len(urls) - len(titles)))

    print(f"Testing {len(urls)} pages...")

    # Run tests
    tester = PageTester(headless=not args.visible)
    await tester.setup()
    try:
        await tester.test_multiple_pages(
            urls,
            titles,
            wait_time=args.wait,
            concurrent=args.concurrent
        )
        tester.print_results(verbose=args.verbose)
        if args.save:
            tester.save_results(format=args.output)
    finally:
        await tester.cleanup()


if __name__ == "__main__":
    asyncio.run(main())