-
-
Save jitinnair1/da406224739c90a262f56f76d7e17e4a to your computer and use it in GitHub Desktop.
Download all Suno songs from a profile using Python.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| # Based on : https://gist.github.com/dppancake/f9f43825209050b27900347014459fa4 | |
| # JN: added a function to get a unique filename that add a v2, v3 etc. to the title | |
| import argparse | |
| import os | |
| import random | |
| import re | |
| import sys | |
| import time | |
| import requests | |
| from colorama import Fore | |
| from colorama import init | |
| from mutagen.id3 import ID3, APIC, error, TIT2, TPE1 | |
| from mutagen.mp3 import MP3 | |
| # Initialize colorama | |
| init(autoreset=True) | |
FILENAME_BAD_CHARS = r'[<>:"/\\|?*\x00-\x1F]'
def sanitize_filename(name, maxlen=200):
    """Return *name* made safe for use as a filename.

    Characters invalid on common filesystems (and ASCII control codes)
    become underscores; leading/trailing spaces and dots are stripped
    (a Windows restriction); the result is capped at *maxlen* characters.
    """
    cleaned = re.sub(FILENAME_BAD_CHARS, "_", name).strip(" .")
    # Slicing is a no-op for short names, so no length check is needed.
    return cleaned[:maxlen]
def pick_proxy_dict(proxies_list):
    """Pick one proxy at random and build a requests-style proxy mapping.

    Returns None when *proxies_list* is empty or None, so the result can be
    passed straight to requests' ``proxies=`` argument.
    """
    if proxies_list:
        chosen = random.choice(proxies_list)
        return {"http": chosen, "https": chosen}
    return None
def embed_metadata(mp3_path, image_url=None, title=None, artist=None, proxies_list=None, timeout=15):
    """Embed cover art and basic ID3 tags into an MP3 file.

    Downloads the cover image from *image_url* (optionally through a random
    proxy from *proxies_list*) and writes it, together with the optional
    *title* (TIT2) and *artist* (TPE1) frames, into *mp3_path*.

    Raises:
        requests.HTTPError: if the image download fails.
    """
    # BUG FIX: the signature defaults image_url to None, but the original
    # body unconditionally called requests.get(image_url), which raises
    # MissingSchema for None. With no image there is nothing to embed.
    if not image_url:
        return
    proxy_dict = pick_proxy_dict(proxies_list)
    r = requests.get(image_url, proxies=proxy_dict, timeout=timeout)
    r.raise_for_status()
    image_bytes = r.content
    # Strip any "; charset=..." suffix from the Content-Type header.
    mime = r.headers.get("Content-Type", "image/jpeg").split(";")[0]
    audio = MP3(mp3_path, ID3=ID3)
    try:
        audio.add_tags()
    except error:
        pass  # tags already exist
    # Title
    if title:
        audio.tags["TIT2"] = TIT2(encoding=3, text=title)
    # Artist
    if artist:
        audio.tags["TPE1"] = TPE1(encoding=3, text=artist)
    # Remove old covers if any, then add ours
    for key in list(audio.tags.keys()):
        if key.startswith("APIC"):
            del audio.tags[key]
    audio.tags.add(APIC(
        encoding=3,   # UTF-8
        mime=mime,    # e.g. "image/jpeg" or "image/png"
        type=3,       # 3 = front cover
        desc="Cover",
        data=image_bytes
    ))
    audio.save(v2_version=3)  # ID3v2.3 works well with Windows Explorer
def extract_song_info_from_suno(profile, proxies_list=None):
    """Fetch metadata for every clip on a Suno profile.

    Pages through the public profile API, retrying with a linear backoff
    whenever the server answers HTTP 429 (rate limited). Any other error
    ends the crawl early and returns whatever was collected so far.

    Args:
        profile: Profile handle, with or without a leading "@".
        proxies_list: Optional list of proxy URLs; one is chosen at random
            per request.

    Returns:
        Dict mapping clip UUID -> {"title", "audio_url", "video_url",
        "image_url"}.
    """
    print(f"{Fore.CYAN}Extracting songs from profile: {profile}")
    username = profile.lstrip("@")
    base_url = (
        f"https://studio-api.prod.suno.com/api/profiles/{username}"
        f"?playlists_sort_by=created_at&clips_sort_by=created_at&page="
    )
    song_info = {}
    page = 1
    # Backoff schedule for 429s: start at 10s, add 5s per retry,
    # give up once the next sleep would exceed 60s.
    MAX_SLEEP = 60
    INITIAL_SLEEP = 10
    INCREMENT = 5
    while True:
        api_url = f"{base_url}{page}"
        retry_sleep = INITIAL_SLEEP
        # Inner loop: retry the SAME page until it succeeds or we give up.
        while True:
            try:
                if proxies_list:
                    proxy = random.choice(proxies_list)
                    proxy_dict = {"http": proxy, "https": proxy}
                    response = requests.get(api_url, proxies=proxy_dict, timeout=10)
                else:
                    response = requests.get(api_url, timeout=10)
                # Handle 429 without raising to catch status code cleanly
                if response.status_code == 429:
                    if retry_sleep > MAX_SLEEP:
                        print(f"{Fore.RED}Hit 429 too many times on page {page}. "
                              f"Exceeded {MAX_SLEEP}s backoff. Exiting.")
                        return song_info
                    print(f"{Fore.YELLOW}429 received on page {page}. "
                          f"Sleeping {retry_sleep}s then retrying…")
                    time.sleep(retry_sleep)
                    retry_sleep += INCREMENT
                    continue  # retry same page
                response.raise_for_status()
                data = response.json()
                break  # successful fetch, exit retry loop
            except requests.exceptions.HTTPError as e:
                # Non-429 HTTP errors: log and exit
                status = getattr(e.response, "status_code", "Unknown")
                print(f"{Fore.RED}HTTP error on page {page} (status {status}): {e}")
                return song_info
            except Exception as e:
                # Network/JSON failures: return what we have so far.
                print(f"{Fore.RED}Failed to retrieve data from API (page {page}): {e}")
                return song_info
        # An empty "clips" list marks the last page.
        clips = data.get("clips", [])
        if not clips:
            break
        for clip in clips:
            uuid = clip.get("id")
            title = clip.get("title")
            audio_url = clip.get("audio_url")
            image_url = clip.get("image_url")
            video_url = clip.get("video_url")
            # Keep only complete entries; first occurrence of a UUID wins.
            if (uuid and title and audio_url) and uuid not in song_info:
                song_info[uuid] = {
                    "title": title,
                    "audio_url": audio_url,
                    "video_url": video_url,
                    "image_url": image_url
                }
        page += 1
        # Light delay between pages to reduce chance of 429s
        time.sleep(5)
    return song_info
def refresh_song_info(profile, proxies_list):
    """Re-scrape *profile* and return the current song map (empty dict on failure)."""
    songs = extract_song_info_from_suno(profile, proxies_list)
    return songs if songs else {}
def get_unique_filename(filename):
    """Return *filename*, or a non-clashing variant if it already exists.

    When the path exists on disk, "v2", "v3", ... is appended to the stem
    until a free name is found, e.g. "song.mp3" -> "songv2.mp3".

    Args:
        filename: Desired output path.

    Returns:
        A path that does not currently exist on disk.
    """
    if not os.path.exists(filename):
        return filename
    name, extn = os.path.splitext(filename)
    counter = 2  # first duplicate becomes "...v2"
    while True:
        candidate = f"{name}v{counter}{extn}"
        if not os.path.exists(candidate):
            return candidate
        counter += 1
def download_file(url, filename, proxies_list=None, timeout=30):
    """Stream *url* to disk without overwriting an existing file.

    The target name is de-duplicated via get_unique_filename(), so the
    actual path written may differ from *filename*.

    Returns:
        The path the data was actually written to.

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    proxy_dict = pick_proxy_dict(proxies_list)
    with requests.get(url, stream=True, proxies=proxy_dict, timeout=timeout) as r:
        r.raise_for_status()
        # BUG FIX: the original returned *filename* even when the data was
        # written to a de-duplicated name, so callers (e.g. the thumbnail
        # embedder in main) could operate on the wrong, pre-existing file.
        target = get_unique_filename(filename)
        with open(target, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
    return target
def main():
    """CLI entry point: scrape a Suno profile and download every song as MP3."""
    parser = argparse.ArgumentParser(description="Bulk download suno songs by profile")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--profile", type=str, help="Suno profile name (e.g., '@xxxx')")
    parser.add_argument("--proxy", type=str, required=False,
                        help="Proxy with protocol. You can add multiple proxies by splitting them with a comma.")
    parser.add_argument("--directory", type=str, required=False, default="suno-downloads",
                        help="Local directory for saving the files")
    parser.add_argument(
        "--with-thumbnail",
        action="store_true",
        help="Embed the song's thumbnail into the MP3 file (default: disabled)"
    )
    args = parser.parse_args()

    proxies_list = args.proxy.split(",") if args.proxy else None

    # Normalise the handle to the "@name" form the API helpers expect.
    # (Renamed from `raw_input`, which shadows the Python 2 builtin.)
    profile_name = args.profile.strip().lstrip("@")
    profile = f"@{profile_name}"

    songs = refresh_song_info(profile, proxies_list)
    if not songs:
        print(f"{Fore.RED}No songs found for profile: {profile}")
        sys.exit(1)

    # Create a directory to store the downloaded files.
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(args.directory, exist_ok=True)

    for uuid, obj in songs.items():  # fixed stray trailing comma in tuple unpack
        title = obj["title"] or uuid
        fname = sanitize_filename(title) + ".mp3"
        out_path = os.path.join(args.directory, fname)
        print(f"Downloading: {Fore.GREEN}🎵 {title} — {uuid}")
        try:
            saved = download_file(obj["audio_url"], out_path, proxies_list)
            if args.with_thumbnail and obj.get("image_url"):
                # Hook: derive artist/title (e.g. from an "Artist - Title"
                # pattern) here before embedding; both default to None.
                _artist, _title = None, None
                embed_metadata(saved, image_url=obj["image_url"], proxies_list=proxies_list,
                               artist=_artist, title=_title)
        except Exception as e:
            # Best-effort batch: one failed song should not stop the rest.
            print(f"{Fore.RED}Failed on {title}: {e}")
    print(f"\n{Fore.BLUE}All songs have been downloaded and saved into {args.directory}.")
    sys.exit(0)
| if __name__ == "__main__": | |
| main() |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment