Created
January 30, 2026 08:53
-
-
Save mitcdh/443c4d40297813b8435a3de9cc82c425 to your computer and use it in GitHub Desktop.
SimklBackup.json to Letterboxd CSV for Import
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import json | |
| import csv | |
| import sys | |
| from datetime import datetime | |
def parse_simkl_to_letterboxd(input_file, output_file):
    """Convert a Simkl backup JSON file into a Letterboxd-importable CSV.

    Strict mode: only the 'movies' and 'anime' categories are examined,
    anime TV series are excluded, and "plan to watch" entries are skipped.
    An entry is exported when the user rated it, completed it, is watching
    it, or has a watch date recorded.

    Args:
        input_file: Path to the Simkl backup JSON (e.g. 'SimklBackup.json').
        output_file: Path of the CSV file to create.

    Returns:
        None. Progress and errors are reported on stdout.
    """
    data = _load_backup(input_file)
    if data is None:
        return

    print(f"Processing entries from: {input_file}...")

    records = []
    # STRICT MODE: only movies and anime; standard 'shows' are excluded.
    for category in ('movies', 'anime'):
        for entry in data.get(category, []):
            record = _entry_to_record(entry, category)
            if record is not None:
                records.append(record)

    _write_csv(records, output_file)


def _load_backup(input_file):
    """Load and parse the backup JSON; return None (with a message) on failure."""
    try:
        with open(input_file, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except FileNotFoundError:
        print(f"Error: The file '{input_file}' was not found.")
        return None
    except json.JSONDecodeError:
        print(f"Error: Failed to decode JSON from '{input_file}'.")
        return None
    # Robustness fix: a backup whose top level is not a JSON object used to
    # crash with AttributeError on data.get(); report it like other failures.
    if not isinstance(data, dict):
        print(f"Error: Unexpected JSON structure in '{input_file}'.")
        return None
    return data


def _parse_watched_date(raw_date):
    """Normalize Simkl's ISO-8601 timestamp to Letterboxd's YYYY-MM-DD."""
    if not raw_date:
        return ''
    try:
        return datetime.strptime(raw_date, "%Y-%m-%dT%H:%M:%SZ").strftime("%Y-%m-%d")
    except ValueError:
        # Unexpected format (e.g. fractional seconds or an offset):
        # keep just the leading date portion.
        return raw_date[:10]


def _entry_to_record(entry, category):
    """Convert one backup entry to a CSV row dict, or None if it is excluded."""
    status = entry.get('status')
    # Skip items that are strictly "Plan to Watch".
    if status == 'plantowatch':
        return None
    # STRICT FILTER: when processing anime, ignore TV series; only 'movie',
    # 'special', 'ona', etc. remain.
    if category == 'anime' and entry.get('anime_type') == 'tv':
        return None

    # Simkl stores movie metadata under 'movie' but anime metadata under 'show'.
    metadata = entry.get('movie') or entry.get('show')
    if not metadata:
        return None
    ids = metadata.get('ids', {})

    user_rating = entry.get('user_rating', '')
    watched_date = _parse_watched_date(entry.get('last_watched_at'))

    # Inclusion logic: the user rated it, dated it, completed it, or is
    # currently watching it.
    if not (user_rating or watched_date or status in ('completed', 'watching')):
        return None

    # Tag every row with its origin; anime rows also get their type
    # (e.g. 'movie', 'ona') for filtering inside Letterboxd.
    tags = ['Simkl Import', category]
    if category == 'anime':
        tags.append(entry.get('anime_type', 'unknown'))

    return {
        'tmdbID': ids.get('tmdb', ''),
        'imdbID': ids.get('imdb', ''),
        'Title': metadata.get('title', ''),
        'Year': metadata.get('year', ''),
        'Rating10': user_rating,
        'WatchedDate': watched_date,
        'Tags': ",".join(tags),
    }


def _write_csv(records, output_file):
    """Write the collected rows to output_file in Letterboxd's column order."""
    if not records:
        print("No eligible entries found to export.")
        return
    # Letterboxd CSV headers.
    headers = [
        'tmdbID',
        'imdbID',
        'Title',
        'Year',
        'Rating10',
        'WatchedDate',
        'Tags',
    ]
    try:
        with open(output_file, 'w', newline='', encoding='utf-8') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=headers)
            writer.writeheader()
            writer.writerows(records)
        print(f"Success! Processed {len(records)} items (Strict Mode: No TV).")
        print(f"Output saved to: {output_file}")
    except IOError as e:
        print(f"Error writing to file: {e}")
if __name__ == "__main__":
    # Default file names used when the script is run directly.
    source_path = 'SimklBackup.json'
    destination_path = 'letterboxd_strict_import.csv'
    parse_simkl_to_letterboxd(source_path, destination_path)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment