@Geeknasty
Created June 28, 2025 01:23
Two small scripts for working with safetensors files: the first converts a PEFT-format LoRA to ComfyUI key naming, and the second dumps a file's metadata and tensor layout to JSON for inspection.
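The conversion is essentially a key rename. As an illustrative sketch of the mapping (the module path here is made up for the example, not taken from a real file):

# base_model.model.blocks.0.attn.qkv.lora_A.weight  ->  lora_unet_blocks_0_attn_qkv.lora_down.weight
# base_model.model.blocks.0.attn.qkv.lora_B.weight  ->  lora_unet_blocks_0_attn_qkv.lora_up.weight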
# --- Script 1: convert a PEFT-style LoRA to ComfyUI key naming ---
import safetensors.torch
from safetensors import safe_open
from collections import defaultdict

# Input and output file paths
input_path = "non_comfy_fal_format_lora.safetensors"
output_path = "comfy_format_lora.safetensors"

# Read in the weights
tensors = {}
with safe_open(input_path, framework="pt", device="cpu") as f:
    for k in f.keys():
        tensors[k] = f.get_tensor(k)

converted = defaultdict(dict)
for key, tensor in tensors.items():
    if not key.startswith("base_model.model."):
        continue

    # Strip prefix
    subkey = key.replace("base_model.model.", "")

    # Determine base key (before .lora_A/B.weight)
    if ".lora_A.weight" in subkey:
        base_name = subkey.replace(".lora_A.weight", "")
        suffix = "lora_down.weight"
    elif ".lora_B.weight" in subkey:
        base_name = subkey.replace(".lora_B.weight", "")
        suffix = "lora_up.weight"
    else:
        continue  # Skip keys that don't match LoRA pattern

    # Create comfy-style key
    comfy_base = "lora_unet_" + base_name.replace(".", "_")
    comfy_key = f"{comfy_base}.{suffix}"

    # Store in nested structure
    converted[comfy_base][comfy_key] = tensor

# Flatten into final dict structure for saving
final_sd = {}
for group, weights in converted.items():
    for k, v in weights.items():
        final_sd[k] = v

# Save to safetensors
safetensors.torch.save_file(final_sd, output_path)
print(f"Converted LoRA saved to: {output_path}")
# --- Script 2: dump a safetensors file's metadata and tensor layout to JSON ---
import json
from safetensors import safe_open

def get_safetensors_metadata_and_blocks(file_path):
    # Convert backslashes to forward slashes
    normalized_path = file_path.replace("\\", "/")

    # Initialize the output dictionary
    output_data = {"metadata": {}, "model_weights": {}}

    # Open the safetensors file to extract metadata
    with safe_open(normalized_path, framework="pt") as f:
        # Extract metadata if available
        metadata = f.metadata()
        if metadata:
            output_data["metadata"] = metadata
        else:
            output_data["metadata"] = {"info": "No metadata available"}

        # Organize model weights into sections
        for key in f.keys():
            tensor = f.get_tensor(key)
            tensor_info = {"shape": list(tensor.shape), "dtype": str(tensor.dtype)}

            # Determine hierarchy based on key naming
            parts = key.split('.')
            main_section = parts[0]
            sub_section = ".".join(parts[:2]) if len(parts) > 1 else None

            if main_section not in output_data["model_weights"]:
                output_data["model_weights"][main_section] = {}
            if sub_section:
                if sub_section not in output_data["model_weights"][main_section]:
                    output_data["model_weights"][main_section][sub_section] = {}
                output_data["model_weights"][main_section][sub_section][key] = tensor_info
            else:
                output_data["model_weights"][main_section][key] = tensor_info

    return output_data

# Safetensors to inspect
file_path = "name_of_safetensors_to_inspect.safetensors"
output_data = get_safetensors_metadata_and_blocks(file_path)

# Save to a JSON file
output_file = "name_of_safetensors_to_inspect.json"
with open(output_file, "w") as f:
    json.dump(output_data, f, indent=4)
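To consume the summary, something like the following (a minimal sketch that assumes the JSON written above) counts tensors under each top-level section; leaf entries are recognized by their "shape" field.

# Print a tensor count per top-level section of the JSON summary.
with open(output_file) as f:
    summary = json.load(f)
for section, contents in summary["model_weights"].items():
    # Values are either a tensor-info dict (has "shape") or a sub-section dict.
    count = sum(1 if "shape" in v else len(v) for v in contents.values())
    print(f"{section}: {count} tensor(s)")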