Last updated: December 10, 2025
X Subscriber Exporter is a Chrome extension that helps users export their X (Twitter) subscriber list.
| const CONFIG = { | |
| baseUrl: "https://x.com/", | |
| followingPagePath: "/following", | |
| unfollowButtonSelector: 'button[data-testid$="-unfollow"]', | |
| followIndicatorSelector: 'div[data-testid="userFollowIndicator"]', | |
| confirmUnfollowButtonSelector: | |
| 'button[data-testid="confirmationSheetConfirm"]', | |
| scrollAmount: 0.8, | |
| scrollRetryDelay: 800, | |
| dryRunScrollDelay: 200, |
# Generate a "task finished" voice clip with the AivisSpeech engine
# (query -> synthesis), save it, and play it aloud.
curl -s -X POST "localhost:10101/audio_query?speaker=888753760" \
    --get --data-urlencode text="タスクが完了したの!" \
  | curl -s -H "Content-Type: application/json" \
      -X POST "localhost:10101/synthesis?speaker=888753760" -d @- \
      > aivis_task_done.wav \
  && afplay aivis_task_done.wav
# Generate a "task finished" voice clip with the VOICEVOX engine.
# The accent phrases are replaced with a manually-accented kana reading
# (via jq) before synthesis, then the result is saved and played.
curl -s -X POST "localhost:50021/audio_query?speaker=1" \
    --get --data-urlencode text="タスクガカンリョウシタノダ!" \
  | jq --argjson accents "$(curl -s -X POST "localhost:50021/accent_phrases?speaker=1&is_kana=true" --get --data-urlencode text="タ'スクガ/カンリョウシ'タノダ")" \
      '.accent_phrases = $accents' \
  | curl -s -H "Content-Type: application/json" \
      -X POST "localhost:50021/synthesis?speaker=1" -d @- \
      > voicevox_task_done.wav \
  && afplay voicevox_task_done.wav
-- Re-open every tab of Chrome's frontmost window in Safari.
tell application "Google Chrome"
	set chrome_tabs to every tab in the front window
	repeat with current_tab in chrome_tabs
		set page_url to the URL of current_tab
		tell application "Safari" to open location page_url
	end repeat
end tell
| import torch | |
| import torch.nn as nn | |
| import torch.nn.functional as F | |
| from torchvision import datasets, transforms | |
| class ConvNet(nn.Module): | |
| def __init__(self): | |
| super(ConvNet, self).__init__() | |
| # 畳み込み層 | |
| self.conv1 = nn.Conv2d(1, 32, 3, padding=1) |
| import email | |
| import os | |
| from email import policy | |
| from email.parser import BytesParser | |
| def extract_email_content(eml_file): | |
| """Extract plain text content from email file.""" | |
| with open(eml_file, 'rb') as f: | |
| msg = BytesParser(policy=policy.default).parse(f) | |
| import re | |
| from bs4 import BeautifulSoup | |
| import json | |
| def extract_rss_urls(html_file): | |
| with open(html_file, 'r', encoding='utf-8') as f: | |
| html_content = f.read() | |
| soup = BeautifulSoup(html_content, 'html.parser') |
| import contextlib | |
| import wave | |
| import logging | |
| import asyncio | |
| import os | |
| import sounddevice as sd | |
| import soundfile as sf | |
| from google import genai | |
| # ロガーの設定 |
| from typing_extensions import TypedDict, Literal | |
| import random | |
| from langgraph.graph import StateGraph, START | |
| from langgraph.types import Command | |
| # Define graph state | |
class State(TypedDict):
    """Shared graph state handed from node to node in the LangGraph graph."""

    # Single payload field carried through the graph.
    foo: str
| # Define the nodes |