Skip to content

Instantly share code, notes, and snippets.

@9Dave9
Created February 1, 2026 22:50
Show Gist options
  • Select an option

  • Save 9Dave9/efc12b499be29684f035e8771a66711f to your computer and use it in GitHub Desktop.

Select an option

Save 9Dave9/efc12b499be29684f035e8771a66711f to your computer and use it in GitHub Desktop.
deep-emo OpenAPI spec JSON
{
"openapi": "3.1.0",
"info": {
"title": "Deep-Emo API",
"description": "\n## Human Attribute and Emotion Identifier API\n\nThis API provides facial attribute analysis using the DeepFace library.\nIt can detect and analyze:\n\n- **Age**: Estimated age in years\n- **Gender**: Male/Female classification with confidence scores\n- **Race/Ethnicity**: Classification across multiple categories\n- **Emotion**: Detection of facial expressions (happy, sad, angry, etc.)\n\n### Image Input Formats\n\nThe API accepts images in the following formats:\n1. **File Upload**: Direct file upload via multipart form\n2. **Base64**: Base64-encoded image string (with or without data URL prefix)\n\n### Image Optimization\n\nImages are automatically optimized to reduce size while preserving\nessential details for accurate analysis. You can control optimization\nparameters or disable it entirely.\n\n### Token Counting\n\nEach response includes token count information for both input (image)\nand output (results). This is informational and helps estimate\ncomputational cost.\n ",
"contact": {
"name": "Deep-Emo Support",
"email": "support@deep-emo.example.com"
},
"license": {
"name": "MIT",
"url": "https://opensource.org/licenses/MIT"
},
"version": "1.0.0"
},
"paths": {
"/health": {
"get": {
"tags": [
"Utilities"
],
"summary": "Health Check",
"description": "Check if the API service is running and healthy.",
"operationId": "health_check_health_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HealthResponse"
}
}
}
}
}
}
},
"/analyze/upload": {
"post": {
"tags": [
"Analysis"
],
"summary": "Analyze Image via File Upload",
"description": "Upload an image file for facial attribute analysis.\n\nSupported formats: JPEG, PNG, GIF, BMP, WebP\n\nThe image will be automatically optimized unless `optimize=false` is specified.\nUses robust face detection with automatic backend fallback.",
"operationId": "analyze_upload_analyze_upload_post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_analyze_upload_analyze_upload_post"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/AnalysisResponse"
}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
}
},
"/analyze/base64": {
"post": {
"tags": [
"Analysis"
],
"summary": "Analyze Image via Base64",
"description": "Send a base64-encoded image for facial attribute analysis.\n\nThe image can be sent as:\n- Plain base64 string\n- Data URL format: `data:image/jpeg;base64,...`\n\nThis endpoint is useful for web applications that already have\nimages in memory as base64 strings.\nUses robust face detection with automatic backend fallback.",
"operationId": "analyze_base64_analyze_base64_post",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Base64ImageRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/AnalysisResponse"
}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
}
},
"/optimize": {
"post": {
"tags": [
"Utilities"
],
"summary": "Optimize Image",
"description": "Optimize an image without performing analysis.\n\nThis endpoint is useful when you want to prepare an image\nfor analysis or reduce its size for transmission.\n\nReturns the optimized image as base64 along with size information.",
"operationId": "optimize_image_endpoint_optimize_post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_optimize_image_endpoint_optimize_post"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ImageOptimizationResponse"
}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
}
}
},
"components": {
"schemas": {
"AnalysisResponse": {
"properties": {
"success": {
"type": "boolean",
"title": "Success",
"description": "Whether the analysis was successful"
},
"data": {
"anyOf": [
{
"additionalProperties": true,
"type": "object"
},
{
"type": "null"
}
],
"title": "Data",
"description": "Analysis results"
},
"error": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"title": "Error",
"description": "Error message if analysis failed"
},
"token_info": {
"$ref": "#/components/schemas/TokenInfo",
"description": "Token count information"
},
"image_info": {
"$ref": "#/components/schemas/ImageInfo",
"description": "Image processing information"
}
},
"type": "object",
"required": [
"success",
"token_info",
"image_info"
],
"title": "AnalysisResponse",
"description": "Response model for analysis endpoints.",
"examples": [
{
"data": {
"faces": [
{
"age": {
"estimated_age": 28
},
"emotion": {
"confidence_scores": {
"angry": 1,
"disgust": 0.5,
"fear": 0.5,
"happy": 85,
"neutral": 10,
"sad": 2,
"surprise": 1
},
"dominant": "happy"
},
"face_id": 1,
"gender": {
"confidence_scores": {
"Man": 4.5,
"Woman": 95.5
},
"dominant": "Woman"
},
"race": {
"confidence_scores": {
"asian": 10,
"black": 5,
"indian": 1.5,
"latino hispanic": 0.5,
"middle eastern": 3,
"white": 80
},
"dominant": "white"
},
"region": {
"h": 200,
"w": 200,
"x": 100,
"y": 50
}
}
],
"summary": {
"analysis_performed": [
"age",
"gender",
"race",
"emotion"
],
"total_faces": 1
}
},
"image_info": {
"faces_detected": 1,
"optimized_size_bytes": 51200,
"original_size_bytes": 102400
},
"success": true,
"token_info": {
"input_tokens": 1024,
"output_tokens": 256
}
}
]
},
"Base64ImageRequest": {
"properties": {
"image": {
"type": "string",
"title": "Image",
"description": "Base64-encoded image string (with or without data URL prefix)"
},
"actions": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "null"
}
],
"title": "Actions",
"description": "List of analyses to perform",
"default": [
"age",
"gender",
"race",
"emotion"
]
},
"enforce_detection": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"title": "Enforce Detection",
"description": "If True, raise error when no face is detected. Default False for robustness.",
"default": false
},
"detector_backend": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"title": "Detector Backend",
"description": "Face detection backend: retinaface (most accurate), mtcnn, opencv (fastest), ssd, dlib",
"default": "retinaface"
},
"try_all_backends": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"title": "Try All Backends",
"description": "If True, tries multiple face detection backends on failure for better robustness",
"default": true
},
"optimize": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"title": "Optimize",
"description": "Whether to optimize the image before analysis",
"default": true
},
"max_dimension": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"title": "Max Dimension",
"description": "Maximum dimension for image optimization",
"default": 1024
},
"quality": {
"anyOf": [
{
"type": "integer",
"maximum": 100,
"minimum": 1
},
{
"type": "null"
}
],
"title": "Quality",
"description": "JPEG quality for optimization (1-100)",
"default": 85
}
},
"type": "object",
"required": [
"image"
],
"title": "Base64ImageRequest",
"description": "Request model for base64-encoded image analysis.",
"examples": [
{
"actions": [
"age",
"gender",
"race",
"emotion"
],
"detector_backend": "retinaface",
"enforce_detection": false,
"image": "data:image/jpeg;base64,/9j/4AAQSkZJRg...",
"max_dimension": 1024,
"optimize": true,
"quality": 85,
"try_all_backends": true
}
]
},
"Body_analyze_upload_analyze_upload_post": {
"properties": {
"file": {
"type": "string",
"format": "binary",
"title": "File",
"description": "Image file to analyze"
},
"actions": {
"type": "string",
"title": "Actions",
"description": "Comma-separated list of analyses to perform",
"default": "age,gender,race,emotion"
},
"enforce_detection": {
"type": "boolean",
"title": "Enforce Detection",
"description": "If True, raise error when no face is detected. Default False for robustness.",
"default": false
},
"detector_backend": {
"type": "string",
"title": "Detector Backend",
"description": "Primary face detection backend (retinaface, mtcnn, opencv, ssd, dlib)",
"default": "retinaface"
},
"try_all_backends": {
"type": "boolean",
"title": "Try All Backends",
"description": "If True, tries multiple backends on failure for better robustness",
"default": true
},
"optimize": {
"type": "boolean",
"title": "Optimize",
"description": "Whether to optimize the image",
"default": true
},
"max_dimension": {
"type": "integer",
"title": "Max Dimension",
"description": "Maximum dimension for optimization",
"default": 1024
},
"quality": {
"type": "integer",
"maximum": 100,
"minimum": 1,
"title": "Quality",
"description": "JPEG quality for optimization",
"default": 85
}
},
"type": "object",
"required": [
"file"
],
"title": "Body_analyze_upload_analyze_upload_post"
},
"Body_optimize_image_endpoint_optimize_post": {
"properties": {
"file": {
"type": "string",
"format": "binary",
"title": "File",
"description": "Image file to optimize"
},
"max_dimension": {
"type": "integer",
"title": "Max Dimension",
"description": "Maximum dimension for optimization",
"default": 1024
},
"quality": {
"type": "integer",
"maximum": 100,
"minimum": 1,
"title": "Quality",
"description": "JPEG quality for optimization",
"default": 85
}
},
"type": "object",
"required": [
"file"
],
"title": "Body_optimize_image_endpoint_optimize_post"
},
"HTTPValidationError": {
"properties": {
"detail": {
"items": {
"$ref": "#/components/schemas/ValidationError"
},
"type": "array",
"title": "Detail"
}
},
"type": "object",
"title": "HTTPValidationError"
},
"HealthResponse": {
"properties": {
"status": {
"type": "string",
"title": "Status",
"description": "Service status"
},
"message": {
"type": "string",
"title": "Message",
"description": "Status message"
}
},
"type": "object",
"required": [
"status",
"message"
],
"title": "HealthResponse",
"description": "Health check response."
},
"ImageInfo": {
"properties": {
"faces_detected": {
"type": "integer",
"title": "Faces Detected",
"description": "Number of faces detected in the image"
},
"original_size_bytes": {
"type": "integer",
"title": "Original Size Bytes",
"description": "Original image size in bytes"
},
"optimized_size_bytes": {
"type": "integer",
"title": "Optimized Size Bytes",
"description": "Optimized image size in bytes"
}
},
"type": "object",
"required": [
"faces_detected",
"original_size_bytes",
"optimized_size_bytes"
],
"title": "ImageInfo",
"description": "Image information after processing."
},
"ImageOptimizationResponse": {
"properties": {
"success": {
"type": "boolean",
"title": "Success",
"description": "Whether optimization was successful"
},
"original_size_bytes": {
"type": "integer",
"title": "Original Size Bytes",
"description": "Original image size"
},
"optimized_size_bytes": {
"type": "integer",
"title": "Optimized Size Bytes",
"description": "Optimized image size"
},
"compression_ratio": {
"type": "number",
"title": "Compression Ratio",
"description": "Compression ratio achieved"
},
"optimized_image_base64": {
"type": "string",
"title": "Optimized Image Base64",
"description": "Optimized image as base64"
},
"image_info": {
"additionalProperties": true,
"type": "object",
"title": "Image Info",
"description": "Information about the optimized image"
}
},
"type": "object",
"required": [
"success",
"original_size_bytes",
"optimized_size_bytes",
"compression_ratio",
"optimized_image_base64",
"image_info"
],
"title": "ImageOptimizationResponse",
"description": "Response for image optimization endpoint."
},
"TokenInfo": {
"properties": {
"input_tokens": {
"type": "integer",
"title": "Input Tokens",
"description": "Number of input tokens (image representation)"
},
"output_tokens": {
"type": "integer",
"title": "Output Tokens",
"description": "Number of output tokens (result representation)"
}
},
"type": "object",
"required": [
"input_tokens",
"output_tokens"
],
"title": "TokenInfo",
"description": "Token information for the request."
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [
{
"type": "string"
},
{
"type": "integer"
}
]
},
"type": "array",
"title": "Location"
},
"msg": {
"type": "string",
"title": "Message"
},
"type": {
"type": "string",
"title": "Error Type"
}
},
"type": "object",
"required": [
"loc",
"msg",
"type"
],
"title": "ValidationError"
}
}
},
"tags": [
{
"name": "Analysis",
"description": "Facial attribute analysis endpoints"
},
{
"name": "Utilities",
"description": "Utility endpoints for image processing and health checks"
}
]
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment